nvmap: replace page pool array with a list
diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index b2878f0acb46a0e5447f5369f365750971808009..1e869a2fc2576d5f72449b33474445d3386a26b6 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -56,6 +56,21 @@ static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
 #define pp_hit_add(pool, nr)   __pp_dbg_var_add(&(pool)->hits, nr)
 #define pp_miss_add(pool, nr)  __pp_dbg_var_add(&(pool)->misses, nr)
 
+static inline struct page *get_page_list_page(struct nvmap_page_pool *pool)
+{
+       struct page *page;
+
+       if (list_empty(&pool->page_list))
+               return NULL;
+
+       page = list_first_entry(&pool->page_list, struct page, lru);
+       list_del(&page->lru);
+
+       pool->count--;
+
+       return page;
+}
+
 /*
  * Allocate n pages one by one. Not the most efficient allocation scheme ever;
  * however, it will make it easier later on to handle single or small number of
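For context, the list-backed pool that get_page_list_page() drains is defined in
nvmap_priv.h, which is not part of this diff; judging from the fields the patch
touches, it presumably looks roughly like the sketch below (field order and any
remaining members are guesses):

        struct nvmap_page_pool {
                struct mutex lock;
                u32 count;                  /* pages currently cached */
                u32 max;                    /* capacity cap; replaces "length" */
                struct list_head page_list; /* FIFO of pages, linked via page->lru */
        #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
                u64 allocs, fills, hits, misses;
        #endif
        };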
@@ -175,7 +190,7 @@ static inline void nvmap_pp_wake_up_allocator(void)
        if (!zero_memory && pool->count > NVMAP_PP_ZERO_MEM_FILL_MIN)
                return;
 
-       if (pool->length - pool->count < NVMAP_PP_DEF_FILL_THRESH)
+       if (pool->max - pool->count < NVMAP_PP_DEF_FILL_THRESH)
                return;
 
        si_meminfo(&info);
@@ -187,7 +202,7 @@ static inline void nvmap_pp_wake_up_allocator(void)
 
        /* Let the background thread know how much memory to fill. */
        atomic_set(&bg_pages_to_fill,
-                  min(tmp, (int)(pool->length - pool->count)));
+                  min(tmp, (int)(pool->max - pool->count)));
        wake_up_process(background_allocator);
 }
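Concretely, the wake-up decision now keys off the gap between the cap and the
current count; running the numbers with an assumed threshold (value illustrative):

        /* Assume NVMAP_PP_DEF_FILL_THRESH = 16:                            */
        /* max = 1024, count = 1016 -> gap = 8  < 16: no wake-up, not worth */
        /* the thread switch.                                               */
        /* max = 1024, count = 256  -> gap = 768: bg_pages_to_fill is set   */
        /* to min(tmp, 768) and background_allocator is woken.              */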
 
@@ -200,10 +215,10 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
 {
        struct page *page;
 
-       if ((!force_alloc && !enable_pp) || !pool->page_array)
+       if (!force_alloc && !enable_pp)
                return NULL;
 
-       if (pp_empty(pool)) {
+       if (list_empty(&pool->page_list)) {
                pp_miss_add(pool, 1);
                return NULL;
        }
@@ -211,10 +226,9 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
                BUG_ON(pool->count == 0);
 
-       page = pool->page_array[pool->alloc];
-       pool->page_array[pool->alloc] = NULL;
-       nvmap_pp_alloc_inc(pool);
-       pool->count--;
+       page = get_page_list_page(pool);
+       if (!page)
+               return NULL;
 
        /* Sanity check. */
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
@@ -241,26 +255,23 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
        u32 real_nr;
        u32 ind = 0;
 
-       if (!enable_pp || !pool->page_array)
+       if (!enable_pp)
                return 0;
 
        real_nr = min_t(u32, nr, pool->count);
 
        while (real_nr--) {
+               struct page *page;
+               if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG))
+                       BUG_ON(list_empty(&pool->page_list));
+               page = get_page_list_page(pool);
+               pages[ind++] = page;
                if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
-                       BUG_ON(pp_empty(pool));
-                       BUG_ON(!pool->page_array[pool->alloc]);
-               }
-               pages[ind++] = pool->page_array[pool->alloc];
-               pool->page_array[pool->alloc] = NULL;
-               nvmap_pp_alloc_inc(pool);
-               if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
-                       atomic_dec(&pages[ind - 1]->_count);
-                       BUG_ON(atomic_read(&pages[ind - 1]->_count) != 1);
+                       atomic_dec(&page->_count);
+                       BUG_ON(atomic_read(&page->_count) != 1);
                }
        }
 
-       pool->count -= ind;
        pp_alloc_add(pool, ind);
        pp_hit_add(pool, ind);
        pp_miss_add(pool, nr - ind);
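A sketch of how a caller might use the batch alloc path; the real callers live
elsewhere in nvmap, so the name and the fallback policy here are illustrative:

        static int grab_pages(struct nvmap_page_pool *pool,
                              struct page **pages, u32 nr)
        {
                u32 i, from_pool;

                /* Fast path: take whatever the pool can supply in one go. */
                nvmap_page_pool_lock(pool);
                from_pool = __nvmap_page_pool_alloc_lots_locked(pool, pages, nr);
                nvmap_page_pool_unlock(pool);

                /* Slow path: fall back to the page allocator for the rest. */
                for (i = from_pool; i < nr; i++) {
                        pages[i] = alloc_page(GFP_KERNEL);
                        if (!pages[i])
                                return -ENOMEM; /* caller frees pages[0..i) */
                }
                return 0;
        }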
@@ -276,22 +287,20 @@ int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
 static bool nvmap_page_pool_fill_locked(struct nvmap_page_pool *pool,
                                        struct page *page)
 {
-       if (!enable_pp || !pool->page_array)
+       if (!enable_pp)
                return false;
 
-       if (pp_full(pool))
+       if (pool->count >= pool->max)
                return false;
 
        /* Sanity check. */
        if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
                atomic_inc(&page->_count);
                BUG_ON(atomic_read(&page->_count) != 2);
-               BUG_ON(pool->count > pool->length);
-               BUG_ON(pool->page_array[pool->fill] != NULL);
+               BUG_ON(pool->count > pool->max);
        }
 
-       pool->page_array[pool->fill] = page;
-       nvmap_pp_fill_inc(pool);
+       list_add_tail(&page->lru, &pool->page_list);
        pool->count++;
        pp_fill_add(pool, 1);
 
@@ -311,22 +320,19 @@ int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
        u32 real_nr;
        u32 ind = 0;
 
-       if (!enable_pp || !pool->page_array)
+       if (!enable_pp)
                return 0;
 
-       real_nr = min_t(u32, pool->length - pool->count, nr);
+       real_nr = min_t(u32, pool->max - pool->count, nr);
        if (real_nr == 0)
                return 0;
 
        while (real_nr--) {
                if (IS_ENABLED(CONFIG_NVMAP_PAGE_POOL_DEBUG)) {
-                       BUG_ON(pp_full(pool));
-                       BUG_ON(pool->page_array[pool->fill]);
                        atomic_inc(&pages[ind]->_count);
                        BUG_ON(atomic_read(&pages[ind]->_count) != 2);
                }
-               pool->page_array[pool->fill] = pages[ind++];
-               nvmap_pp_fill_inc(pool);
+               list_add_tail(&pages[ind++]->lru, &pool->page_list);
        }
 
        pool->count += ind;
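And the mirror image on the free path; again a hypothetical caller, assuming
__nvmap_page_pool_fill_lots_locked() returns the number of pages it kept:

        static void release_pages(struct nvmap_page_pool *pool,
                                  struct page **pages, u32 nr)
        {
                u32 kept;

                /* Stash what fits under pool->max back onto the list... */
                nvmap_page_pool_lock(pool);
                kept = __nvmap_page_pool_fill_lots_locked(pool, pages, nr);
                nvmap_page_pool_unlock(pool);

                /* ...and give the remainder straight back to the kernel. */
                while (kept < nr)
                        __free_page(pages[kept++]);
        }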
@@ -348,11 +354,6 @@ bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page)
        return ret;
 }
 
-static int nvmap_page_pool_get_available_count(struct nvmap_page_pool *pool)
-{
-       return pool->count;
-}
-
 /*
  * Free the passed number of pages from the page pool. This happens regardless
  * of whether the page pools are enabled. This lets one disable the page pools
@@ -386,7 +387,7 @@ ulong nvmap_page_pool_get_unused_pages(void)
        if (!nvmap_dev)
                return 0;
 
-       total = nvmap_page_pool_get_available_count(&nvmap_dev->pool);
+       total = nvmap_dev->pool.count;
 
        return total;
 }
@@ -400,16 +401,13 @@ int nvmap_page_pool_clear(void)
        struct page *page;
        struct nvmap_page_pool *pool = &nvmap_dev->pool;
 
-       if (!pool->page_array)
-               return 0;
-
        nvmap_page_pool_lock(pool);
 
        while ((page = nvmap_page_pool_alloc_locked(pool, 1)) != NULL)
                __free_page(page);
 
        /* For some reason, if an error occurred... */
-       if (!pp_empty(pool)) {
+       if (!list_empty(&pool->page_list)) {
                nvmap_page_pool_unlock(pool);
                return -ENOMEM;
        }
@@ -427,54 +425,17 @@ int nvmap_page_pool_clear(void)
  */
 static void nvmap_page_pool_resize(struct nvmap_page_pool *pool, int size)
 {
-       int ind;
-       struct page **page_array = NULL;
-
-       if (!enable_pp || size == pool->length || size < 0)
+       if (!enable_pp || size == pool->max || size < 0)
                return;
 
        nvmap_page_pool_lock(pool);
-       if (size == 0) {
-               vfree(pool->page_array);
-               pool->page_array = NULL;
-               pool->alloc = 0;
-               pool->fill = 0;
-               pool->count = 0;
-               pool->length = 0;
-               goto out;
-       }
-
-       page_array = vzalloc(sizeof(struct page *) * size);
-       if (!page_array)
-               goto fail;
 
-       /*
-        * Reuse what pages we can.
-        */
-       ind = __nvmap_page_pool_alloc_lots_locked(pool, page_array, size);
-
-       /*
-        * And free anything that might be left over.
-        */
-       while (pool->page_array && !pp_empty(pool))
+       while (pool->count > size)
                __free_page(nvmap_page_pool_alloc_locked(pool, 0));
 
-       swap(page_array, pool->page_array);
-       pool->alloc = 0;
-       pool->fill = (ind == size ? 0 : ind);
-       pool->count = ind;
-       pool->length = size;
-       pool_size = size;
-       vfree(page_array);
+       pr_debug("page pool resized to %d from %d pages\n", size, pool->max);
+       pool->max = size;
 
-out:
-       pr_debug("page pool resized to %d from %d pages\n", size, pool->length);
-       pool->length = size;
-       goto exit;
-fail:
-       vfree(page_array);
-       pr_err("page pool resize failed\n");
-exit:
+       pr_debug("page pool resized to %d from %d pages\n", size, pool->max);
        nvmap_page_pool_unlock(pool);
 }
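With the array gone there is nothing to reallocate, so resizing reduces to
freeing the overflow when shrinking and merely raising the cap when growing;
for example (values illustrative):

        /* count = 1024: resizing to 256 frees 768 pages inline; resizing  */
        /* back to 1024 frees nothing and leaves count at 256 until the    */
        /* background allocator refills the gap.                           */
        nvmap_page_pool_resize(&nvmap_dev->pool, 256);
        nvmap_page_pool_resize(&nvmap_dev->pool, 1024);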
 
@@ -606,12 +567,6 @@ int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root)
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.count);
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
-       debugfs_create_u32("page_pool_alloc_ind",
-                          S_IRUGO, pp_root,
-                          &nvmap_dev->pool.alloc);
-       debugfs_create_u32("page_pool_fill_ind",
-                          S_IRUGO, pp_root,
-                          &nvmap_dev->pool.fill);
        debugfs_create_u64("page_pool_allocs",
                           S_IRUGO, pp_root,
                           &nvmap_dev->pool.allocs);
@@ -644,27 +599,24 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
 
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
+       INIT_LIST_HEAD(&pool->page_list);
 
        si_meminfo(&info);
        totalram_mb = (info.totalram * info.mem_unit) >> 20;
        pr_info("Total MB RAM: %lu\n", totalram_mb);
 
        if (!CONFIG_NVMAP_PAGE_POOL_SIZE)
-               /* The ratio is KB to MB so this ends up being mem in KB which
-                * when >> 2 -> total pages in the pool. */
-               pool->length = (totalram_mb * NVMAP_PP_POOL_SIZE) >> 2;
+               /* The ratio is pool pages per 1K ram pages. So, the >> 10. */
+               pool->max = (info.totalram * NVMAP_PP_POOL_SIZE) >> 10;
        else
-               pool->length = CONFIG_NVMAP_PAGE_POOL_SIZE;
+               pool->max = CONFIG_NVMAP_PAGE_POOL_SIZE;
 
-       if (pool->length >= info.totalram)
+       if (pool->max >= info.totalram)
                goto fail;
-       pool_size = pool->length;
+       pool_size = pool->max;
 
-       pr_info("nvmap page pool size: %u pages (%u MB)\n", pool->length,
-               pool->length >> 8);
-       pool->page_array = vzalloc(sizeof(struct page *) * pool->length);
-       if (!pool->page_array)
-               goto fail;
+       pr_info("nvmap page pool size: %u pages (%u MB)\n", pool->max,
+               (pool->max * info.mem_unit) >> 20);
 
        if (reg) {
                reg = 0;
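The new default sizing works directly in RAM pages rather than going through
megabytes; a worked example (the NVMAP_PP_POOL_SIZE value is assumed):

        /* 2 GB RAM -> info.totalram = 524288 4 KiB pages.                  */
        /* Assume NVMAP_PP_POOL_SIZE = 128 (pool pages per 1024 RAM pages): */
        /* pool->max = (524288 * 128) >> 10 = 65536 pages                   */
        /*           = (65536 * 4096) >> 20 = 256 MB in the pr_info above.  */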
@@ -679,7 +631,7 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
 #ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
        pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
                        PAGE_SIZE;
-       pages_to_fill = pages_to_fill ? : pool->length;
+       pages_to_fill = pages_to_fill ? : pool->max;
 
        nvmap_page_pool_lock(pool);
        atomic_set(&pp_dirty, 1);
@@ -698,7 +650,7 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
        si_meminfo(&info);
        pr_info("highmem=%d, pool_size=%d,"
                "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
-               highmem_pages, pool->length,
+               highmem_pages, pool->count,
                info.totalram, info.freeram, info.totalhigh, info.freehigh);
 done:
        pp_clean_cache();
@@ -716,8 +668,8 @@ int nvmap_page_pool_fini(struct nvmap_device *dev)
 
        if (!IS_ERR_OR_NULL(background_allocator))
                kthread_stop(background_allocator);
-       pool->length = 0;
-       vfree(pool->page_array);
+
+       WARN_ON(!list_empty(&pool->page_list));
 
        return 0;
 }
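Since fini no longer frees anything itself, the pool is presumably expected to
be drained before teardown; the WARN_ON catches anything left behind:

        /* Hypothetical teardown ordering the WARN_ON above assumes: */
        nvmap_page_pool_clear();         /* frees every cached page  */
        nvmap_page_pool_fini(nvmap_dev); /* page_list is now empty   */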