video: tegra: host: implement gk20a timeout control interface
Author:     Kirill Artamonov <kartamonov@nvidia.com>
AuthorDate: Mon, 16 Dec 2013 12:28:21 +0000 (14:28 +0200)
Commit:     Terje Bergstrom <tbergstrom@nvidia.com>
CommitDate: Mon, 13 Jan 2014 14:32:37 +0000 (06:32 -0800)
Implement per-channel scheduling timeout handling for gk20a.

Reuse nvhost timeout control interface for gk20a channels.
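
For illustration, userspace programs this through the existing nvhost
channel ioctl. A minimal sketch, assuming the usual nvhost uapi header
location and a hypothetical helper name; the struct fields and flag used
below are the ones this change actually consumes:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/nvhost_ioctl.h>  /* NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX */

static int set_channel_timeout(int channel_fd, unsigned int timeout_ms,
                               bool dump_on_timeout)
{
        struct nvhost_set_timeout_ex_args args = {
                .timeout = timeout_ms,
                .flags = dump_on_timeout ?
                        0 : (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP),
        };

        return ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX, &args);
}

The ioctl handler below mirrors the value into hwctx->timeout_ms_max,
which is what the gk20a scheduling-timeout check consults.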

Disable timeout check if /d/gk20a/timeouts_enabled is set to
false.
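
The knob is assumed to be a standard debugfs bool ("/d" being the
conventional shorthand for /sys/kernel/debug), so it can be flipped from
userspace; a minimal sketch, assuming the node accepts "0"/"1" writes:

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

static int gk20a_set_timeouts_enabled(bool enable)
{
        int fd = open("/sys/kernel/debug/gk20a/timeouts_enabled", O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, enable ? "1" : "0", 1);
        close(fd);
        return n == 1 ? 0 : -1;
}

Note that disabling the check only stops software recovery; the engine
timeout interrupt itself stays armed by the fifo setup below.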

bug 1355640
bug 1402282
bug 1415315

Change-Id: Iff953ed89ab1f2096e5f799038d853c034527e9d
Signed-off-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-on: http://git-master/r/345840
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
drivers/video/tegra/host/bus_client.c
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/channel_gk20a.h
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.h
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/hw_fifo_gk20a.h
drivers/video/tegra/host/nvhost_hwctx.h

diff --git a/drivers/video/tegra/host/bus_client.c b/drivers/video/tegra/host/bus_client.c
index 47f46ff5bbbc2234ba1cec2b323a61ac2ac54373..693eab5059b0660e1ae9d39d7b12b6586c397361 100644
@@ -235,7 +235,6 @@ static int nvhost_channelopen(struct inode *inode, struct file *filp)
        priv->timeout_debug_dump = true;
        if (!tegra_platform_is_silicon())
                priv->timeout = 0;
-
        return 0;
 fail:
        nvhost_channelrelease(inode, filp);
@@ -998,12 +997,18 @@ static long nvhost_channelctl(struct file *filp,
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
-               priv->timeout =
+       {
+               u32 timeout =
                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
+
+               priv->timeout = timeout;
                dev_dbg(&priv->ch->dev->dev,
                        "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
                        __func__, priv->timeout, priv);
+               if (priv->hwctx)
+                       priv->hwctx->timeout_ms_max = timeout;
                break;
+       }
        case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
                ((struct nvhost_get_param_args *)buf)->value =
                                priv->hwctx->has_timedout;
@@ -1061,15 +1066,23 @@ static long nvhost_channelctl(struct file *filp,
                err = nvhost_ioctl_channel_submit(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
-               priv->timeout = (u32)
-                       ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
-               priv->timeout_debug_dump = !((u32)
+       {
+               u32 timeout =
+                       (u32)((struct nvhost_set_timeout_ex_args *)buf)->timeout;
+               bool timeout_debug_dump = !((u32)
                        ((struct nvhost_set_timeout_ex_args *)buf)->flags &
                        (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
+               priv->timeout = timeout;
+               priv->timeout_debug_dump = timeout_debug_dump;
                dev_dbg(&priv->ch->dev->dev,
                        "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
                        __func__, priv->timeout, priv);
+               if (priv->hwctx) {
+                       priv->hwctx->timeout_ms_max = timeout;
+                       priv->hwctx->timeout_debug_dump = timeout_debug_dump;
+               }
                break;
+       }
        case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
                err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
                break;
diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.c b/drivers/video/tegra/host/gk20a/channel_gk20a.c
index e5f8a3a0be1dd34ff2f6403c729068558cbea577..679aa22db59460a3c08becf7116ca4a99de0b4e2 100644
@@ -637,7 +637,7 @@ void gk20a_free_channel(struct nvhost_hwctx *ctx, bool finish)
        nvhost_dbg_info("freeing bound channel context, timeout=%ld",
                        timeout);
 
-       gk20a_disable_channel(ch, finish, timeout);
+       gk20a_disable_channel(ch, finish && !ch->hwctx->has_timedout, timeout);
 
        gk20a_free_error_notifiers(ctx);
 
@@ -724,10 +724,15 @@ struct nvhost_hwctx *gk20a_open_channel(struct nvhost_channel *ch,
        channel_gk20a_bind(ch_gk20a);
        ch_gk20a->pid = current->pid;
 
+       /* reset the timeout accumulator and gpfifo get snapshot */
+       ch_gk20a->timeout_accumulated_ms = 0;
+       ch_gk20a->timeout_gpfifo_get = 0;
+       /* set gr host default timeout */
+       ch_gk20a->hwctx->timeout_ms_max = gk20a_get_gr_idle_timeout(g);
+
        /* The channel is *not* runnable at this point. It still needs to have
         * an address space bound and allocate a gpfifo and grctx. */
 
-
        init_waitqueue_head(&ch_gk20a->notifier_wq);
        init_waitqueue_head(&ch_gk20a->semaphore_wq);
        init_waitqueue_head(&ch_gk20a->submit_wq);
@@ -1243,6 +1248,26 @@ static inline u32 gp_free_count(struct channel_gk20a *c)
                c->gpfifo.entry_num;
 }
 
+bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
+               u32 timeout_delta_ms)
+{
+       u32 gpfifo_get = update_gp_get(ch->g, ch);
+       /* count consecutive timeout isrs */
+       if (gpfifo_get == ch->timeout_gpfifo_get) {
+               /* we didn't advance since previous channel timeout check */
+               ch->timeout_accumulated_ms += timeout_delta_ms;
+       } else {
+               /* first timeout isr encountered */
+               ch->timeout_accumulated_ms = timeout_delta_ms;
+       }
+
+       ch->timeout_gpfifo_get = gpfifo_get;
+
+       return ch->g->timeouts_enabled &&
+               ch->timeout_accumulated_ms > ch->hwctx->timeout_ms_max;
+}
+
+
 /* Issue a syncpoint increment *preceded* by a wait-for-idle
  * command.  All commands on the channel will have been
  * consumed at the time the fence syncpoint increment occurs.
@@ -1257,6 +1282,9 @@ int gk20a_channel_submit_wfi_fence(struct gk20a *g,
        u32 free_count;
        int err;
 
+       if (c->hwctx->has_timedout)
+               return -ETIMEDOUT;
+
        cmd_size =  4 + wfi_cmd_size();
 
        update_gp_get(g, c);
@@ -1473,6 +1501,9 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
         * wait and one for syncpoint increment */
        const int extra_entries = 2;
 
+       if (c->hwctx->has_timedout)
+               return -ETIMEDOUT;
+
        if ((flags & (NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT |
                      NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET)) &&
            !fence)
@@ -1515,7 +1546,13 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
         * wait for signals from completed submits */
        if (gp_free_count(c) < num_entries + extra_entries) {
                err = wait_event_interruptible(c->submit_wq,
-                       get_gp_free_count(c) >= num_entries + extra_entries);
+                       get_gp_free_count(c) >= num_entries + extra_entries ||
+                       c->hwctx->has_timedout);
+       }
+
+       if (c->hwctx->has_timedout) {
+               err = -ETIMEDOUT;
+               goto clean_up;
        }
 
        if (err) {
@@ -1779,6 +1816,10 @@ int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout)
        if (!ch->cmds_pending)
                return 0;
 
+       /* Do not wait for a timedout channel */
+       if (ch->hwctx && ch->hwctx->has_timedout)
+               return -ETIMEDOUT;
+
        if (!(ch->last_submit_fence.valid && ch->last_submit_fence.wfi)) {
                nvhost_dbg_fn("issuing wfi, incr to finish the channel");
                fence.syncpt_id = ch->hw_chid + pdata->syncpt_base;
@@ -1794,10 +1835,6 @@ int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout)
                      ch->last_submit_fence.syncpt_id,
                      ch->last_submit_fence.syncpt_value);
 
-       /* Do not wait for a timedout channel. Just check if it's done */
-       if (ch->hwctx && ch->hwctx->has_timedout)
-               timeout = 0;
-
        err = nvhost_syncpt_wait_timeout(sp,
                                         ch->last_submit_fence.syncpt_id,
                                         ch->last_submit_fence.syncpt_value,
@@ -1823,6 +1860,10 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
        int ret = 0;
        long remain;
 
+       /* do not wait if channel has timed out */
+       if (ch->hwctx->has_timedout)
+               return -ETIMEDOUT;
+
        handle_ref = nvhost_memmgr_get(memmgr, id, pdev);
        if (IS_ERR(handle_ref)) {
                nvhost_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
@@ -1841,7 +1882,7 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
 
        remain = wait_event_interruptible_timeout(
                        ch->semaphore_wq,
-                       *semaphore == payload,
+                       *semaphore == payload || ch->hwctx->has_timedout,
                        timeout);
 
        if (remain == 0 && *semaphore != payload)
@@ -1872,6 +1913,9 @@ int gk20a_channel_wait(struct channel_gk20a *ch,
 
        nvhost_dbg_fn("");
 
+       if (ch->hwctx->has_timedout)
+               return -ETIMEDOUT;
+
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
@@ -1901,7 +1945,7 @@ int gk20a_channel_wait(struct channel_gk20a *ch,
                 * calling this ioctl */
                remain = wait_event_interruptible_timeout(
                                ch->notifier_wq,
-                               notif->status == 0,
+                               notif->status == 0 || ch->hwctx->has_timedout,
                                timeout);
 
                if (remain == 0 && notif->status != 0) {
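
The heart of the new check is the accumulate-or-reset step in
gk20a_channel_update_and_check_timeout() above: the per-channel
accumulator only grows while the gpfifo GET pointer stands still. A
self-contained sketch of the same rule, with the hardware read
(update_gp_get) replaced by a plain parameter:

#include <stdbool.h>
#include <stdint.h>

struct timeout_state {
        uint32_t accumulated_ms;  /* mirrors timeout_accumulated_ms */
        uint32_t last_gpfifo_get; /* mirrors timeout_gpfifo_get */
};

static bool update_and_check_timeout(struct timeout_state *s,
                                     uint32_t gpfifo_get, uint32_t delta_ms,
                                     uint32_t max_ms, bool timeouts_enabled)
{
        if (gpfifo_get == s->last_gpfifo_get)
                s->accumulated_ms += delta_ms; /* no progress: keep counting */
        else
                s->accumulated_ms = delta_ms;  /* progress: restart the count */

        s->last_gpfifo_get = gpfifo_get;

        return timeouts_enabled && s->accumulated_ms > max_ms;
}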
diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.h b/drivers/video/tegra/host/gk20a/channel_gk20a.h
index d0f6d0859afdab5a84162a76329d7bdc5dde4281..5b14c2334accd72fad8d6850242743633958ae3d 100644
@@ -114,6 +114,9 @@ struct channel_gk20a {
        wait_queue_head_t semaphore_wq;
        wait_queue_head_t submit_wq;
 
+       u32 timeout_accumulated_ms;
+       u32 timeout_gpfifo_get;
+
        bool cmds_pending;
        struct {
                bool valid;
@@ -171,6 +174,8 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 void gk20a_free_channel(struct nvhost_hwctx *ctx, bool finish);
 int gk20a_init_error_notifier(struct nvhost_hwctx *ctx, u32 memhandle,
                        u64 offset);
+bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
+               u32 timeout_delta_ms);
 void gk20a_free_error_notifiers(struct nvhost_hwctx *ctx);
 void gk20a_disable_channel(struct channel_gk20a *ch,
                           bool wait_for_finish,
diff --git a/drivers/video/tegra/host/gk20a/fifo_gk20a.c b/drivers/video/tegra/host/gk20a/fifo_gk20a.c
index ab54f30342a5e511bbc8fe672bfc9364d60dcd26..44c6dcbae6837f517e9e7c817b74a33282499493 100644
@@ -362,6 +362,8 @@ clean_up:
        return -ENOMEM;
 }
 
+#define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000
+
 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
 {
        u32 intr_stall;
@@ -415,6 +417,10 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
        timeout &= ~fifo_pb_timeout_detection_enabled_f();
        gk20a_writel(g, fifo_pb_timeout_r(), timeout);
 
+       timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US |
+                       fifo_eng_timeout_detection_enabled_f();
+       gk20a_writel(g, fifo_eng_timeout_r(), timeout);
+
        nvhost_dbg_fn("done");
 
        return 0;
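
The macro name says the check period is programmed in microseconds; the
value written above packs that period into bits 30:0 and the detection
enable into bit 31 (see the fifo_eng_timeout accessors added to
hw_fifo_gk20a.h further down), i.e. 100000 | 0x80000000 = 0x800186a0. A
quick sanity-check sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t period_us = 100000;     /* GRFIFO_TIMEOUT_CHECK_PERIOD_US */
        uint32_t detection = 0x80000000; /* fifo_eng_timeout_detection_enabled_f() */
        uint32_t timeout = period_us | detection;

        assert(timeout == 0x800186a0);
        assert((timeout & 0x7fffffff) == period_us); /* period field, bits 30:0 */
        return 0;
}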
@@ -609,6 +615,7 @@ static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
                runlist = &f->runlist_info[runlist_id];
                wake_up(&runlist->runlist_wq);
        }
+
 }
 
 static int gk20a_init_fifo_setup_hw(struct gk20a *g)
@@ -898,35 +905,49 @@ void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
        schedule_work(&g->fifo.fault_restore_thread);
 }
 
-static void gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
-               struct nvhost_hwctx *hwctx) {
-       if (hwctx) {
-               nvhost_err(dev_from_gk20a(g),
-                       "channel with hwctx generated a mmu fault");
-               if (hwctx->error_notifier) {
-                       if (hwctx->error_notifier->info32) {
-                               /* If error code is already set, this mmu fault
-                                * was triggered as part of recovery from other
-                                * error condition.
-                                * Don't overwrite error flag. */
-                       } else {
-                               gk20a_set_error_notifier(hwctx,
-                                       NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
-                       }
+static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
+               struct channel_gk20a *ch) {
+       bool verbose = true;
+       if (!ch || !ch->hwctx)
+               return verbose;
+
+       nvhost_err(dev_from_gk20a(g),
+               "channel %d with hwctx generated a mmu fault",
+               ch->hw_chid);
+       if (ch->hwctx->error_notifier) {
+               u32 err = ch->hwctx->error_notifier->info32;
+               if (err) {
+                       /* If error code is already set, this mmu fault
+                        * was triggered as part of recovery from other
+                        * error condition.
+                        * Don't overwrite error flag. */
+
+                       /* Fifo timeout debug spew is controlled by user */
+                       if (err == NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
+                               verbose = ch->hwctx->timeout_debug_dump;
+               } else {
+                       gk20a_set_error_notifier(ch->hwctx,
+                               NVHOST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
                }
-               /* mark channel as faulted */
-               hwctx->has_timedout = true;
        }
+       /* mark channel as faulted */
+       ch->hwctx->has_timedout = true;
+       wmb();
+       /* unblock pending waits */
+       wake_up(&ch->semaphore_wq);
+       wake_up(&ch->notifier_wq);
+       wake_up(&ch->submit_wq);
+       return verbose;
 }
 
 
-static void gk20a_fifo_handle_mmu_fault(struct gk20a *g)
+static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 {
        bool fake_fault;
        unsigned long fault_id;
        u32 engine_mmu_id;
        int i;
-
+       bool verbose = true;
        nvhost_dbg_fn("");
 
        g->fifo.deferred_reset_pending = false;
@@ -1007,7 +1028,7 @@ static void gk20a_fifo_handle_mmu_fault(struct gk20a *g)
                }
 
                if (ch) {
-                       gk20a_fifo_set_ctx_mmu_error(g, ch->hwctx);
+                       verbose = gk20a_fifo_set_ctx_mmu_error(g, ch);
                        if (ch->in_use) {
                                /* disable the channel from hw and increment
                                 * syncpoints */
@@ -1063,7 +1084,8 @@ static void gk20a_fifo_get_faulty_channel(struct gk20a *g, int engine_id,
                fifo_engine_status_id_v(status);
 }
 
-void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids)
+void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
+               bool verbose)
 {
        unsigned long end_jiffies = jiffies +
                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
@@ -1073,7 +1095,8 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids)
        unsigned long engine_ids = 0;
        int ret;
 
-       nvhost_debug_dump(g->host);
+       if (verbose)
+               nvhost_debug_dump(g->host);
 
        /* store faulted engines in advance */
        g->fifo.mmu_fault_engines = 0;
@@ -1133,6 +1156,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids)
                gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
 }
 
+
 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
 {
        u32 sched_error;
@@ -1173,32 +1197,49 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
                }
        }
 
-       nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
-                  sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
-
        /* could not find the engine - should never happen */
        if (unlikely(engine_id >= g->fifo.max_engines))
-               return true;
+               goto err;
 
        if (fifo_intr_sched_error_code_f(sched_error) ==
-           fifo_intr_sched_error_code_ctxsw_timeout_v()) {
-               if (!non_chid) {
-                       struct fifo_gk20a *f = &g->fifo;
-                       struct nvhost_hwctx *hwctx = f->channel[id].hwctx;
+                       fifo_intr_sched_error_code_ctxsw_timeout_v()) {
+               struct fifo_gk20a *f = &g->fifo;
+               struct channel_gk20a *ch = &f->channel[id];
+               struct nvhost_hwctx *hwctx = ch->hwctx;
+
+               if (non_chid) {
+                       gk20a_fifo_recover(g, BIT(engine_id), true);
+                       goto err;
+               }
+
+               if (gk20a_channel_update_and_check_timeout(ch,
+                       GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
                        gk20a_set_error_notifier(hwctx,
                                NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
-                       hwctx->has_timedout = true;
+                       nvhost_err(dev_from_gk20a(g),
+                               "fifo sched ctxsw timeout error:"
+                               "engine = %u, ch = %d", engine_id, id);
+                       gk20a_fifo_recover(g, BIT(engine_id),
+                               hwctx ? hwctx->timeout_debug_dump : true);
+               } else {
+                       nvhost_warn(dev_from_gk20a(g),
+                               "fifo is waiting for ctx switch for %d ms,"
+                               "ch = %d\n",
+                               ch->timeout_accumulated_ms,
+                               id);
                }
-               gk20a_fifo_recover(g, BIT(engine_id));
-               return false;
+               return hwctx ? hwctx->timeout_debug_dump : true;
        }
+err:
+       nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
+                  sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
 
        return true;
 }
 
 static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 {
-       bool reset_channel = false, reset_engine = false;
+       bool print_channel_reset_log = false, reset_engine = false;
        struct device *dev = dev_from_gk20a(g);
        u32 handled = 0;
 
@@ -1214,12 +1255,12 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
        if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
                u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
                nvhost_err(dev, "fifo bind error: 0x%08x", bind_error);
-               reset_channel = true;
+               print_channel_reset_log = true;
                handled |= fifo_intr_0_bind_error_pending_f();
        }
 
        if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
-               reset_channel = gk20a_fifo_handle_sched_error(g);
+               print_channel_reset_log = gk20a_fifo_handle_sched_error(g);
                handled |= fifo_intr_0_sched_error_pending_f();
        }
 
@@ -1229,8 +1270,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
        }
 
        if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
-               gk20a_fifo_handle_mmu_fault(g);
-               reset_channel = true;
+               print_channel_reset_log = gk20a_fifo_handle_mmu_fault(g);
                reset_engine  = true;
                handled |= fifo_intr_0_mmu_fault_pending_f();
        }
@@ -1240,9 +1280,10 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
                handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
        }
 
-       reset_channel = !g->fifo.deferred_reset_pending && (reset_channel || fifo_intr);
+       print_channel_reset_log = !g->fifo.deferred_reset_pending
+                       && print_channel_reset_log;
 
-       if (reset_channel) {
+       if (print_channel_reset_log) {
                int engine_id;
                nvhost_err(dev_from_gk20a(g),
                           "channel reset initated from %s", __func__);
@@ -1250,7 +1291,7 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
                     engine_id < g->fifo.max_engines;
                     engine_id++) {
                        nvhost_dbg_fn("enum:%d -> engine_id:%d", engine_id,
-                                     g->fifo.engine_info[engine_id].engine_id);
+                               g->fifo.engine_info[engine_id].engine_id);
                        fifo_pbdma_exception_status(g,
                                        &g->fifo.engine_info[engine_id]);
                        fifo_engine_exception_status(g,
@@ -1456,7 +1497,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
                }
                gk20a_set_error_notifier(ch->hwctx,
                                NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
-               gk20a_fifo_recover(g, engines);
+               gk20a_fifo_recover(g, engines, true);
        }
 
        /* re-enable elpg or release pmu mutex */
@@ -1594,7 +1635,7 @@ static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
                    (f->engine_info[i].runlist_id == runlist_id))
                        engines |= BIT(i);
        }
-       gk20a_fifo_recover(g, engines);
+       gk20a_fifo_recover(g, engines, true);
 }
 
 static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
@@ -1604,7 +1645,6 @@ static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
        bool pending;
 
        runlist = &g->fifo.runlist_info[runlist_id];
-
        remain = wait_event_timeout(runlist->runlist_wq,
                ((pending = gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) &
                        fifo_eng_runlist_pending_true_f()) == 0),
@@ -1630,7 +1670,6 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
        u32 old_buf, new_buf;
        u32 chid;
        u32 count = 0;
-
        runlist = &f->runlist_info[runlist_id];
 
        /* valid channel, add/remove it from active list.
diff --git a/drivers/video/tegra/host/gk20a/fifo_gk20a.h b/drivers/video/tegra/host/gk20a/fifo_gk20a.h
index 71f9d5226889063615873d0eafe146302bd39571..051acda23bcbdd6b23799fe3b15b47eecf64e596 100644
@@ -3,7 +3,7 @@
  *
  * GK20A graphics fifo (gr host)
  *
- * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -156,7 +156,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 hw_chid,
 int gk20a_fifo_suspend(struct gk20a *g);
 
 bool gk20a_fifo_mmu_fault_pending(struct gk20a *g);
-void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids);
+void gk20a_fifo_recover(struct gk20a *g, u32 engine_ids, bool verbose);
 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g);
 
 void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
diff --git a/drivers/video/tegra/host/gk20a/gr_gk20a.c b/drivers/video/tegra/host/gk20a/gr_gk20a.c
index 7bfd3bda4bbe350ad46fc6e0b5b1b9e985599726..a22a004bc8343d1b3a5df67a2a6370e6f9585a7d 100644
@@ -2649,8 +2649,9 @@ int gk20a_free_obj_ctx(struct channel_gk20a  *c,
 
        if (c->num_objects == 0) {
                c->first_init = false;
-               gk20a_disable_channel(c, true, /*wait for finish*/
-                                     timeout);
+               gk20a_disable_channel(c,
+                       !c->hwctx->has_timedout,
+                       timeout);
                gr_gk20a_unmap_channel_patch_ctx(c);
        }
 
@@ -5281,7 +5282,7 @@ int gk20a_gr_isr(struct gk20a *g)
        }
 
        if (need_reset)
-               gk20a_fifo_recover(g, BIT(ENGINE_GR_GK20A));
+               gk20a_fifo_recover(g, BIT(ENGINE_GR_GK20A), true);
 
 clean_up:
        gk20a_writel(g, gr_gpfifo_ctl_r(),
diff --git a/drivers/video/tegra/host/gk20a/hw_fifo_gk20a.h b/drivers/video/tegra/host/gk20a/hw_fifo_gk20a.h
index e94ae4b45eceee2d0956a175f57f7637f8945ea3..a39d3c51e1eae3397dce44661d373e62fb2b78a2 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -346,6 +346,30 @@ static inline u32 fifo_pb_timeout_detection_enabled_f(void)
 {
        return 0x80000000;
 }
+static inline u32 fifo_eng_timeout_r(void)
+{
+       return 0x00002a0c;
+}
+static inline u32 fifo_eng_timeout_period_m(void)
+{
+       return 0x7fffffff << 0;
+}
+static inline u32 fifo_eng_timeout_period_max_f(void)
+{
+       return 0x7fffffff;
+}
+static inline u32 fifo_eng_timeout_detection_m(void)
+{
+       return 0x1 << 31;
+}
+static inline u32 fifo_eng_timeout_detection_enabled_f(void)
+{
+       return 0x80000000;
+}
+static inline u32 fifo_eng_timeout_detection_disabled_f(void)
+{
+       return 0x0;
+}
 static inline u32 fifo_error_sched_disable_r(void)
 {
        return 0x0000262c;
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
index b2ab6998fd3b2f3585495bffe64913e4f8e7cb76..0f2975b13e243d5b65316ef39ea4ffa712d19844 100644
@@ -1,7 +1,7 @@
 /*
  * Tegra Graphics Host Hardware Context Interface
  *
- * Copyright (c) 2010-2013, NVIDIA Corporation.  All rights reserved.
+ * Copyright (c) 2010-2014, NVIDIA Corporation.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,9 @@ struct nvhost_hwctx {
        struct nvhost_channel *channel;
        bool valid;
        bool has_timedout;
+       u32 timeout_ms_max;
+       bool timeout_debug_dump;
+
        struct mem_mgr *memmgr;
 
        struct mem_handle *error_notifier_ref;