video: tegra: host: Optimize work wakeup
author Arto Merilainen <amerilainen@nvidia.com>
Wed, 15 Apr 2015 17:52:39 +0000 (20:52 +0300)
committer Arto Merilainen <amerilainen@nvidia.com>
Thu, 30 Apr 2015 13:01:49 +0000 (06:01 -0700)
Currently nvhost handles all interrupt events in the same
workqueue without prioritization. However, this can lead to
cases where work queue clean-up slows down waking up the
waiters.

This patch splits the queue clean-up into two stages: in the
first stage all events are removed from the wait list, but
events related to nvhost jobs or external events (gpu jobs) are
pushed onto a global list. The handler runs the high-priority
handlers (ACTION_SIGNAL_SYNC_PT, ACTION_WAKEUP) inline and then
schedules a low-priority worker to take care of job clean-up
(ACTION_SUBMIT_COMPLETE, ACTION_NOTIFY events); a sketch of the
scheme follows below.
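
Below is a minimal, self-contained C sketch of the scheme (a
userspace simulation with hypothetical names; the real driver
keeps per-action wait lists and defers the low-priority stage to
a workqueue). It shows why grouping the high-priority actions at
the start of the enum lets the first low-priority value double
as the high-priority count:

#include <stdio.h>

/* High-priority actions are grouped at the start of the enum, so
 * the first low-priority value doubles as the number of
 * high-priority actions (the trick behind
 * NVHOST_INTR_HIGH_PRIO_COUNT in the header change below). */
enum intr_action {
        ACTION_SIGNAL_SYNC_PT = 0,      /* high priority */
        ACTION_WAKEUP,                  /* high priority */
        ACTION_WAKEUP_INTERRUPTIBLE,    /* high priority */
        ACTION_SUBMIT_COMPLETE,         /* low priority */
        ACTION_NOTIFY,                  /* low priority */
        ACTION_COUNT
};

#define HIGH_PRIO_COUNT ACTION_SUBMIT_COMPLETE
#define LOW_PRIO_COUNT  (ACTION_COUNT - HIGH_PRIO_COUNT)

static const char *action_names[ACTION_COUNT] = {
        "signal_sync_pt", "wakeup", "wakeup_interruptible",
        "submit_complete", "notify",
};

/* Stage 1: run high-priority handlers inline (waiters wake first). */
static void run_high_prio(const int pending[ACTION_COUNT])
{
        int i;

        for (i = 0; i < HIGH_PRIO_COUNT; ++i)
                if (pending[i])
                        printf("inline:   %s\n", action_names[i]);
}

/* Stage 2: what the driver defers to a scheduled work item. */
static void run_low_prio(const int pending[ACTION_COUNT])
{
        int i;

        for (i = HIGH_PRIO_COUNT; i < ACTION_COUNT; ++i)
                if (pending[i])
                        printf("deferred: %s\n", action_names[i]);
}

int main(void)
{
        int pending[ACTION_COUNT] = { 1, 1, 0, 1, 1 };

        run_high_prio(pending);         /* wakeups happen first... */
        run_low_prio(pending);          /* ...job clean-up later */
        return 0;
}

Since the split point is derived from the enum itself, moving an
action between priority classes only requires reordering the
enum, which is exactly what the header change in this patch does.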

Bug 1598036

Change-Id: I744849efce27802067beeaca49f1381395dd7f90
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/733010
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
GVS: Gerrit_Virtual_Submit

drivers/video/tegra/host/nvhost_intr.c
drivers/video/tegra/host/nvhost_intr.h

index 6be0366e484d66c0b169b43fb141a8b01942a172..63e15b626d3a39875a3daa16601a3cd6694ec6c9 100644 (file)
@@ -110,7 +110,7 @@ static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
  */
 static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct timespec isr_recv,
-                       struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+                       struct list_head *completed[NVHOST_INTR_ACTION_COUNT])
 {
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;
@@ -120,7 +120,7 @@ static void remove_completed_waiters(struct list_head *head, u32 sync,
                        break;
 
                waiter->isr_recv = isr_recv;
-               dest = completed + waiter->action;
+               dest = *(completed + waiter->action);
 
                /* consolidate submit cleanups */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
@@ -224,22 +224,25 @@ static void action_signal_sync_pt(struct nvhost_waitlist *waiter)
 typedef void (*action_handler)(struct nvhost_waitlist *waiter);
 
 static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
-       action_submit_complete,
        action_signal_sync_pt,
        action_wakeup,
        action_wakeup_interruptible,
+       action_submit_complete,
        action_notify,
 };
 
-static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
+static void run_handlers(struct list_head *completed[NVHOST_INTR_ACTION_COUNT])
 {
-       struct list_head *head = completed;
        int i;
 
-       for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
+       for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i) {
+               struct list_head *head = completed[i];
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;
 
+               if (!head)
+                       continue;
+
                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
@@ -256,33 +259,90 @@ static int process_wait_list(struct nvhost_intr *intr,
                             struct nvhost_intr_syncpt *syncpt,
                             u32 threshold)
 {
-       struct list_head completed[NVHOST_INTR_ACTION_COUNT];
-       unsigned int i;
+       struct list_head *completed[NVHOST_INTR_ACTION_COUNT] = {NULL};
+       struct list_head high_prio_handlers[NVHOST_INTR_HIGH_PRIO_COUNT];
+       bool run_low_prio_work = false;
+       unsigned int i, j;
        int empty;
 
-       for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
-               INIT_LIST_HEAD(completed + i);
-
        /* take lock on waiter list */
        mutex_lock(&syncpt->lock);
 
+       /* keep high priority workers in local list */
+       for (i = 0; i < NVHOST_INTR_HIGH_PRIO_COUNT; ++i) {
+               INIT_LIST_HEAD(high_prio_handlers + i);
+               completed[i] = high_prio_handlers + i;
+       }
+
+       /* .. and low priority workers in global list */
+       for (j = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++j)
+               completed[i] = syncpt->low_prio_handlers + j;
+
+       /* this function fills in the completed lists */
        remove_completed_waiters(&syncpt->wait_head, threshold,
                syncpt->isr_recv, completed);
 
+       /* check if there are still waiters left */
        empty = list_empty(&syncpt->wait_head);
+
+       /* if not, disable the interrupt; if yes, update the interrupt threshold */
        if (empty)
                intr_op().disable_syncpt_intr(intr, syncpt->id);
        else
                reset_threshold_interrupt(intr, &syncpt->wait_head,
                                          syncpt->id);
 
+       /* remove low priority handlers from this list */
+       for (i = NVHOST_INTR_HIGH_PRIO_COUNT;
+            i < NVHOST_INTR_ACTION_COUNT; ++i) {
+               if (!list_empty(completed[i]))
+                       run_low_prio_work = true;
+               completed[i] = NULL;
+       }
+
+       /* release waiter lock */
        mutex_unlock(&syncpt->lock);
 
        run_handlers(completed);
 
+       /* schedule a separate task to handle low priority handlers */
+       if (run_low_prio_work)
+               schedule_work(&syncpt->low_prio_work);
+
        return empty;
 }
 
+static void nvhost_syncpt_low_prio_work(struct work_struct *work)
+{
+       struct nvhost_intr_syncpt *syncpt = container_of(work,
+                                                    struct nvhost_intr_syncpt,
+                                                    low_prio_work);
+       struct list_head *completed[NVHOST_INTR_ACTION_COUNT] = {NULL};
+       struct list_head low_prio_handlers[NVHOST_INTR_LOW_PRIO_COUNT];
+       unsigned int i, j;
+
+       /* go through low priority handlers.. */
+       mutex_lock(&syncpt->lock);
+       for (i = 0, j = NVHOST_INTR_HIGH_PRIO_COUNT;
+            j < NVHOST_INTR_ACTION_COUNT;
+            i++, j++) {
+               struct list_head *handler = low_prio_handlers + i;
+
+               /* move entries from low priority queue into local queue */
+               INIT_LIST_HEAD(handler);
+               list_cut_position(handler,
+                                 &syncpt->low_prio_handlers[i],
+                                 syncpt->low_prio_handlers[i].prev);
+
+               /* maintain local completed list */
+               completed[j] = handler;
+       }
+       mutex_unlock(&syncpt->lock);
+
+       /* ..and run them */
+       run_handlers(completed);
+}
+
 /*** host syncpt interrupt service functions ***/
 /**
  * Sync point threshold interrupt service thread function
@@ -459,7 +519,7 @@ void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref)
 
 int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
 {
-       unsigned int id;
+       unsigned int id, i;
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_master *host = intr_to_dev(intr);
        u32 nb_pts = nvhost_syncpt_nb_hw_pts(&host->syncpt);
@@ -479,6 +539,10 @@ int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
                snprintf(syncpt->thresh_irq_name,
                        sizeof(syncpt->thresh_irq_name),
                        "host_sp_%02d", id);
+               for (i = 0; i < NVHOST_INTR_LOW_PRIO_COUNT; ++i)
+                       INIT_LIST_HEAD(syncpt->low_prio_handlers + i);
+               INIT_WORK(&syncpt->low_prio_work,
+                         nvhost_syncpt_low_prio_work);
        }
 
        return 0;
index 1e003e9c1fc2944627f5c99064bb1fe7c85577cb..0a9430c98b6afb71e9c920779e571d5b629b2079 100644 (file)
@@ -30,17 +30,11 @@ struct nvhost_channel;
 struct platform_device;
 
 enum nvhost_intr_action {
-       /**
-        * Perform cleanup after a submit has completed.
-        * 'data' points to a channel
-        */
-       NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
-
        /**
         * Signal a nvhost_sync_pt.
         * 'data' points to a nvhost_sync_pt
         */
-       NVHOST_INTR_ACTION_SIGNAL_SYNC_PT,
+       NVHOST_INTR_ACTION_SIGNAL_SYNC_PT = 0,
 
        /**
         * Wake up a  task.
@@ -54,6 +48,12 @@ enum nvhost_intr_action {
         */
        NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 
+       /**
+        * Perform cleanup after a submit has completed.
+        * 'data' points to a channel
+        */
+       NVHOST_INTR_ACTION_SUBMIT_COMPLETE,
+
        /**
         * Notify some external function about completion
         * 'data' holds pointer to an internal structure that holds a
@@ -64,6 +64,10 @@ enum nvhost_intr_action {
        NVHOST_INTR_ACTION_COUNT
 };
 
+#define NVHOST_INTR_HIGH_PRIO_COUNT NVHOST_INTR_ACTION_SUBMIT_COMPLETE
+#define NVHOST_INTR_LOW_PRIO_COUNT \
+       (NVHOST_INTR_ACTION_COUNT - NVHOST_INTR_HIGH_PRIO_COUNT)
+
 struct nvhost_intr;
 
 struct nvhost_intr_syncpt {
@@ -74,6 +78,8 @@ struct nvhost_intr_syncpt {
        char thresh_irq_name[12];
        struct work_struct work;
        struct timespec isr_recv;
+       struct work_struct low_prio_work;
+       struct list_head low_prio_handlers[NVHOST_INTR_LOW_PRIO_COUNT];
 };
 
 struct nvhost_intr {
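
A note on the list handling in nvhost_syncpt_low_prio_work()
above: passing head->prev (the last entry) as the cut point to
list_cut_position() is the standard kernel idiom for draining an
entire list onto a local head while the lock is held, so the
handlers can then run without holding syncpt->lock. A minimal
kernel-style sketch (hypothetical helper, not part of the patch):

#include <linux/list.h>

/*
 * Drain every entry of @global onto the caller's @local list.
 * With the cut point at @global->prev (the last entry),
 * list_cut_position() moves the whole list and leaves @global
 * empty; it is a no-op when @global is already empty.
 */
static void drain_list(struct list_head *local, struct list_head *global)
{
        INIT_LIST_HEAD(local);
        list_cut_position(local, global, global->prev);
}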