rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
gpu: nvgpu: gk20a: Use spin_lock for jobs_lock
authorBharat Nihalani <bnihalani@nvidia.com>
Tue, 9 Aug 2016 13:00:12 +0000 (18:30 +0530)
committerWinnie Hsu <whsu@nvidia.com>
Fri, 12 Aug 2016 04:16:00 +0000 (21:16 -0700)
This is done to boost GPU job-submission performance, which
is critical for compute use cases.

Bug 200215465

Change-Id: Ic4884ee4eac910b92b84a47fdc1b2e9f26b2f1f0
Signed-off-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-on: http://git-master/r/1199860
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1201496
GVS: Gerrit_Virtual_Submit

drivers/gpu/nvgpu/gk20a/cde_gk20a.c
drivers/gpu/nvgpu/gk20a/channel_gk20a.c
drivers/gpu/nvgpu/gk20a/channel_gk20a.h

index ffe6a1edf576534da50623a5c3d2af14fa3c362b..68cabcef630631bbfe52b64db6e90a16ee8c2025 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Color decompression engine support
  *
- * Copyright (c) 2014-2015, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1040,9 +1040,9 @@ __releases(&cde_app->mutex)
        struct gk20a_cde_app *cde_app = &g->cde_app;
        bool channel_idle;
 
-       mutex_lock(&ch->jobs_lock);
+       spin_lock(&ch->jobs_lock);
        channel_idle = list_empty(&ch->jobs);
-       mutex_unlock(&ch->jobs_lock);
+       spin_unlock(&ch->jobs_lock);
 
        if (!channel_idle)
                return;
index 109c7b17c49f5bad97d69ac1d9d997460e5530e1..5e51c62b32a74e8b1e693cfdc0db5f559eef1b01 100644 (file)
@@ -389,14 +389,14 @@ void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt)
 
        /* release all job semaphores (applies only to jobs that use
           semaphore synchronization) */
-       mutex_lock(&ch->jobs_lock);
+       spin_lock(&ch->jobs_lock);
        list_for_each_entry_safe(job, n, &ch->jobs, list) {
                if (job->post_fence->semaphore) {
                        gk20a_semaphore_release(job->post_fence->semaphore);
                        released_job_semaphore = true;
                }
        }
-       mutex_unlock(&ch->jobs_lock);
+       spin_unlock(&ch->jobs_lock);
 
        if (released_job_semaphore)
                wake_up_interruptible_all(&ch->semaphore_wq);
@@ -411,9 +411,9 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
                msecs_to_jiffies(gk20a_get_gr_idle_timeout(ch->g));
 
        do {
-               mutex_lock(&ch->jobs_lock);
+               spin_lock(&ch->jobs_lock);
                channel_idle = list_empty(&ch->jobs);
-               mutex_unlock(&ch->jobs_lock);
+               spin_unlock(&ch->jobs_lock);
                if (channel_idle)
                        break;
 
@@ -1540,9 +1540,9 @@ static int gk20a_channel_add_job(struct channel_gk20a *c,
                job->wait_cmd = wait_cmd;
                job->incr_cmd = incr_cmd;
 
-               mutex_lock(&c->jobs_lock);
+               spin_lock(&c->jobs_lock);
                list_add_tail(&job->list, &c->jobs);
-               mutex_unlock(&c->jobs_lock);
+               spin_unlock(&c->jobs_lock);
        } else {
                return -ETIMEDOUT;
        }
@@ -1577,14 +1577,14 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
        while (1) {
                bool completed;
 
-               mutex_lock(&c->jobs_lock);
+               spin_lock(&c->jobs_lock);
                if (list_empty(&c->jobs)) {
-                       mutex_unlock(&c->jobs_lock);
+                       spin_unlock(&c->jobs_lock);
                        break;
                }
                job = list_first_entry(&c->jobs,
                                       struct channel_gk20a_job, list);
-               mutex_unlock(&c->jobs_lock);
+               spin_unlock(&c->jobs_lock);
 
                completed = gk20a_fence_is_expired(job->post_fence);
                if (!completed)
@@ -1623,9 +1623,9 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
                 * so this wouldn't get freed here. */
                gk20a_channel_put(c);
 
-               mutex_lock(&c->jobs_lock);
+               spin_lock(&c->jobs_lock);
                list_del_init(&job->list);
-               mutex_unlock(&c->jobs_lock);
+               spin_unlock(&c->jobs_lock);
 
                kfree(job);
 
@@ -1986,7 +1986,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
        c->referenceable = false;
        init_waitqueue_head(&c->ref_count_dec_wq);
        mutex_init(&c->ioctl_lock);
-       mutex_init(&c->jobs_lock);
+       spin_lock_init(&c->jobs_lock);
        mutex_init(&c->last_submit.fence_lock);
        INIT_DELAYED_WORK(&c->clean_up.wq, gk20a_channel_clean_up_jobs);
        mutex_init(&c->clean_up.lock);
index 21a4de0d579288150adbc3850df1fb2ce9f9101d..00721c6c17294d9d95d29382d333b9fd73543d53 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * GK20A graphics channel
  *
- * Copyright (c) 2011-2015, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,7 @@ struct channel_gk20a {
        struct list_head ch_entry; /* channel's entry in TSG */
 
        struct list_head jobs;
-       struct mutex jobs_lock;
+       spinlock_t jobs_lock;
 
        struct vm_gk20a *vm;