nvmap: page pool: fix background thread
author     Colin Cross <ccross@android.com>
           Mon, 11 Aug 2014 23:09:51 +0000 (16:09 -0700)
committer  Winnie Hsu <whsu@nvidia.com>
           Tue, 12 May 2015 20:28:18 +0000 (13:28 -0700)
Fix a race condition in the background allocator where
wake_up_process could be called just before set_current_state
changed the state to TASK_INTERRUPTIBLE, causing the thread
not to wake.  Use a waitqueue instead.
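
For reference, this is the classic lost wakeup: the waker runs between the
thread's final condition check and its set_current_state() call,
wake_up_process() hits a task that is still TASK_RUNNING and does nothing,
and the thread then sleeps with work pending.  A minimal sketch of the fixed
pattern (the worker()/have_work()/kick_worker() names are hypothetical; this
is not the nvmap code itself):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);

static bool have_work(void);	/* hypothetical: the condition the thread serves */

static int worker(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... drain the pending work ... */

		/* wait_event_freezable() sets the task state before
		 * re-testing the condition, and wake_up_*() serializes
		 * against that through the waitqueue lock, so a wakeup
		 * can no longer fall into the check-to-sleep window. */
		wait_event_freezable(wq,
				have_work() || kthread_should_stop());
	}
	return 0;
}

static void kick_worker(void)
{
	/* publish the new work first, then wake the queue */
	wake_up_interruptible(&wq);
}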

Also make the background allocator better behaved: mark it freezable so
it is frozen across suspend rather than competing with it, and set it to
SCHED_IDLE so it only runs when no other threads want the CPU.
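
In code terms that reduces to two calls at the top of the thread function
(a sketch of the pattern from the hunk below; the bg_thread() name is
illustrative):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int bg_thread(void *arg)
{
	/* SCHED_IDLE requires a static priority of 0 */
	struct sched_param param = { .sched_priority = 0 };

	set_freezable();	/* opt in to the freezer: the thread is
				   frozen during suspend instead of
				   racing with it */
	sched_setscheduler(current, SCHED_IDLE, &param);

	/* ... main loop as in the hunk below ... */
	return 0;
}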

Change-Id: If95da005bb1fc4c9b5e802d40730803a57057fe1
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/664673
GVS: Gerrit_Virtual_Submit
Reviewed-on: http://git-master/r/736427
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Tested-by: Alex Waterman <alexw@nvidia.com>
drivers/video/tegra/nvmap/nvmap_pp.c
drivers/video/tegra/nvmap/nvmap_priv.h

index 1c2e1f715affb49a6c4cd037559cff25f6d2963c..c00d9f20edd4a958558d324d1d5c1895628d2e43 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/shrinker.h>
 #include <linux/kthread.h>
 #include <linux/debugfs.h>
+#include <linux/freezer.h>
+#include <linux/highmem.h>
 
 #include "nvmap_priv.h"
 
@@ -39,6 +41,7 @@ static bool enable_pp = 1;
 static int pool_size;
 
 static struct task_struct *background_allocator;
+static DECLARE_WAIT_QUEUE_HEAD(nvmap_bg_wait);
 static struct page *pending_pages[PENDING_PAGES_SIZE];
 static atomic_t bg_pages_to_fill;
 
@@ -74,6 +77,17 @@ static inline struct page *get_page_list_page(struct nvmap_page_pool *pool)
        return page;
 }
 
+static inline bool nvmap_bg_should_run(struct nvmap_page_pool *pool)
+{
+       bool ret;
+
+       mutex_lock(&pool->lock);
+       ret = (pool->to_zero > 0 || atomic_read(&bg_pages_to_fill));
+       mutex_unlock(&pool->lock);
+
+       return ret;
+}
+
 /*
  * Allocate n pages one by one. Not the most efficient allocation scheme ever;
  * however, it will make it easier later on to handle single or small number of
@@ -155,17 +169,20 @@ static void nvmap_pp_do_background_fill(struct nvmap_page_pool *pool)
  */
 static int nvmap_background_zero_allocator(void *arg)
 {
+       struct nvmap_page_pool *pool = &nvmap_dev->pool;
+       struct sched_param param = { .sched_priority = 0 };
+
        pr_info("PP alloc thread starting.\n");
 
-       while (1) {
-               if (kthread_should_stop())
-                       break;
+       set_freezable();
+       sched_setscheduler(current, SCHED_IDLE, &param);
 
-               nvmap_pp_do_background_fill(&nvmap_dev->pool);
+       while (!kthread_should_stop()) {
+               nvmap_pp_do_background_fill(pool);
 
-               /* Pending work is done - go to sleep. */
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule();
+               wait_event_freezable(nvmap_bg_wait,
+                               nvmap_bg_should_run(pool) ||
+                               kthread_should_stop());
        }
 
        return 0;
@@ -206,7 +223,7 @@ static inline void nvmap_pp_wake_up_allocator(void)
        /* Let the background thread know how much memory to fill. */
        atomic_set(&bg_pages_to_fill,
                   min(tmp, (int)(pool->max - pool->count)));
-       wake_up_process(background_allocator);
+       wake_up_interruptible(&nvmap_bg_wait);
 }
 
 /*
@@ -617,12 +634,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
        unsigned long totalram_mb;
        struct sysinfo info;
        struct nvmap_page_pool *pool = &dev->pool;
-#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
-       int i;
-       struct page *page;
-       int pages_to_fill;
-       int highmem_pages = 0;
-#endif
 
        memset(pool, 0x0, sizeof(*pool));
        mutex_init(&pool->lock);
@@ -655,30 +666,6 @@ int nvmap_page_pool_init(struct nvmap_device *dev)
        if (IS_ERR_OR_NULL(background_allocator))
                goto fail;
 
-#ifdef CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP
-       pages_to_fill = CONFIG_NVMAP_PAGE_POOLS_INIT_FILLUP_SIZE * SZ_1M /
-                       PAGE_SIZE;
-       pages_to_fill = pages_to_fill ? : pool->count;
-
-       for (i = 0; i < pages_to_fill; i++) {
-               page = alloc_page(GFP_NVMAP);
-               if (!page)
-                       goto done;
-               if (!nvmap_page_pool_fill_locked(pool, page)) {
-                       __free_page(page);
-                       goto done;
-               }
-               if (PageHighMem(page))
-                       highmem_pages++;
-       }
-
-       si_meminfo(&info);
-       pr_info("highmem=%d, pool_size=%d,"
-               "totalram=%lu, freeram=%lu, totalhigh=%lu, freehigh=%lu\n",
-               highmem_pages, pool->count,
-               info.totalram, info.freeram, info.totalhigh, info.freehigh);
-done:
-#endif
        return 0;
 fail:
        nvmap_page_pool_fini(dev);
index 27cb3290a599d215b75a4e3d12e5452d3d13386e..8b715b42faac8100667b13bf6139e932bbdcf214 100644 (file)
@@ -173,7 +173,9 @@ struct nvmap_page_pool {
        struct mutex lock;
        u32 count;  /* Number of pages in the page list. */
        u32 max;    /* Max length of the page list. */
+       int to_zero; /* Number of pages on the zero list */
        struct list_head page_list;
+       struct list_head zero_list;
 
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;