1 /*
2  * GK20A Graphics FIFO (gr host)
3  *
4  * Copyright (c) 2011-2016, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  */
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <trace/events/gk20a.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/nvhost.h>
25
26 #include "gk20a.h"
27 #include "debug_gk20a.h"
28 #include "semaphore_gk20a.h"
29 #include "hw_fifo_gk20a.h"
30 #include "hw_pbdma_gk20a.h"
31 #include "hw_ccsr_gk20a.h"
32 #include "hw_ram_gk20a.h"
33 #include "hw_proj_gk20a.h"
34 #include "hw_top_gk20a.h"
35 #include "hw_mc_gk20a.h"
36 #include "hw_gr_gk20a.h"
37 #define FECS_METHOD_WFI_RESTORE 0x80000
38
39 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
40                                             u32 hw_chid, bool add,
41                                             bool wait_for_finish);
42
43 /*
44  * Link engine IDs to MMU IDs and vice versa.
45  */
46
47 static inline u32 gk20a_engine_id_to_mmu_id(u32 engine_id)
48 {
49         switch (engine_id) {
50         case ENGINE_GR_GK20A:
51                 return 0x00;
52         case ENGINE_CE2_GK20A:
53                 return 0x1b;
54         default:
55                 return ~0;
56         }
57 }
58
59 static inline u32 gk20a_mmu_id_to_engine_id(u32 engine_id)
60 {
61         switch (engine_id) {
62         case 0x00:
63                 return ENGINE_GR_GK20A;
64         case 0x1b:
65                 return ENGINE_CE2_GK20A;
66         default:
67                 return ~0;
68         }
69 }
70
71
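/*
 * Walk the top_device_info table and record, for each engine entry we
 * recognize (GR in practice), its runlist, engine, pbdma, interrupt and
 * reset IDs in g->fifo.engine_info.
 */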
72 static int init_engine_info(struct fifo_gk20a *f)
73 {
74         struct gk20a *g = f->g;
75         struct device *d = dev_from_gk20a(g);
76         u32 i;
77         u32 max_info_entries = top_device_info__size_1_v();
78
79         gk20a_dbg_fn("");
80
81         /* all we really care about finding is the graphics entry;   */
82         /* especially early in simulation it may report more engines */
83         f->num_engines = 2;
84
85         for (i = 0; i < max_info_entries; i++) {
86                 struct fifo_engine_info_gk20a *info = NULL;
87                 u32 table_entry = gk20a_readl(f->g, top_device_info_r(i));
88                 u32 entry = top_device_info_entry_v(table_entry);
89                 u32 engine_enum;
90                 int pbdma_id;
91                 u32 runlist_bit;
92
93                 if (entry != top_device_info_entry_enum_v())
94                         continue;
95
96                 /* we only care about GR engine here */
97                 engine_enum = top_device_info_engine_enum_v(table_entry);
98                 if (engine_enum >= ENGINE_INVAL_GK20A)
99                         continue;
100
101                 gk20a_dbg_info("info: engine_id %d",
102                                 top_device_info_engine_enum_v(table_entry));
103                 info = &g->fifo.engine_info[engine_enum];
104
105                 info->runlist_id =
106                         top_device_info_runlist_enum_v(table_entry);
107                 gk20a_dbg_info("gr info: runlist_id %d", info->runlist_id);
108
109                 info->engine_id =
110                         top_device_info_engine_enum_v(table_entry);
111                 gk20a_dbg_info("gr info: engine_id %d", info->engine_id);
112
113                 runlist_bit = 1 << info->runlist_id;
114
115                 for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
116                         gk20a_dbg_info("gr info: pbdma_map[%d]=%d",
117                                 pbdma_id, f->pbdma_map[pbdma_id]);
118                         if (f->pbdma_map[pbdma_id] & runlist_bit)
119                                 break;
120                 }
121
122                 if (pbdma_id == f->num_pbdma) {
123                         gk20a_err(d, "busted pbdma map");
124                         return -EINVAL;
125                 }
126                 info->pbdma_id = pbdma_id;
127
128                 info->intr_id =
129                         top_device_info_intr_enum_v(table_entry);
130                 gk20a_dbg_info("gr info: intr_id %d", info->intr_id);
131
132                 info->reset_id =
133                         top_device_info_reset_enum_v(table_entry);
134                 gk20a_dbg_info("gr info: reset_id %d",
135                                 info->reset_id);
136
137         }
138
139         return 0;
140 }
141
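/*
 * Build the mask of engine interrupt IDs the FIFO ISR should care about.
 * CE2 is skipped when no CE2 stall/nonstall handlers are hooked up.
 */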
142 u32 gk20a_fifo_engine_interrupt_mask(struct gk20a *g)
143 {
144         u32 eng_intr_mask = 0;
145         int i = 0;
146
147         for (i = 0; i < g->fifo.max_engines; i++) {
148                 u32 intr_id = g->fifo.engine_info[i].intr_id;
149                 if (i == ENGINE_CE2_GK20A &&
150                         (!g->ops.ce2.isr_stall || !g->ops.ce2.isr_nonstall))
151                         continue;
152
153                 if (intr_id)
154                         eng_intr_mask |= BIT(intr_id);
155         }
156
157         return eng_intr_mask;
158 }
159
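/*
 * Tear down the SW FIFO state set up in gk20a_init_fifo_setup_sw: per-channel
 * support, the userd area, the GR runlist buffers and the bookkeeping arrays.
 */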
160 static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
161 {
162         struct gk20a *g = f->g;
163         struct fifo_engine_info_gk20a *engine_info;
164         struct fifo_runlist_info_gk20a *runlist;
165         u32 runlist_id;
166         u32 i;
167
168         gk20a_dbg_fn("");
169
170         if (f->channel) {
171                 int c;
172                 for (c = 0; c < f->num_channels; c++) {
173                         if (f->channel[c].remove_support)
174                                 f->channel[c].remove_support(f->channel+c);
175                 }
176                 kfree(f->channel);
177         }
178         gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
179
180         engine_info = f->engine_info + ENGINE_GR_GK20A;
181         runlist_id = engine_info->runlist_id;
182         runlist = &f->runlist_info[runlist_id];
183
184         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++)
185                 gk20a_gmmu_free(g, &runlist->mem[i]);
186
187         kfree(runlist->active_channels);
188         kfree(runlist->active_tsgs);
189
190         kfree(f->runlist_info);
191         kfree(f->pbdma_map);
192         kfree(f->engine_info);
193 }
194
195 /* reads info from hardware and fills in pbdma exception info record */
196 static inline void get_exception_pbdma_info(
197         struct gk20a *g,
198         struct fifo_engine_info_gk20a *eng_info)
199 {
200         struct fifo_pbdma_exception_info_gk20a *e =
201                 &eng_info->pbdma_exception_info;
202
203         u32 pbdma_status_r = e->status_r = gk20a_readl(g,
204                    fifo_pbdma_status_r(eng_info->pbdma_id));
205         e->id = fifo_pbdma_status_id_v(pbdma_status_r); /* vs. id_hw_v()? */
206         e->id_is_chid = fifo_pbdma_status_id_type_v(pbdma_status_r) ==
207                 fifo_pbdma_status_id_type_chid_v();
208         e->chan_status_v  = fifo_pbdma_status_chan_status_v(pbdma_status_r);
209         e->next_id_is_chid =
210                 fifo_pbdma_status_next_id_type_v(pbdma_status_r) ==
211                 fifo_pbdma_status_next_id_type_chid_v();
212         e->next_id = fifo_pbdma_status_next_id_v(pbdma_status_r);
213         e->chsw_in_progress =
214                 fifo_pbdma_status_chsw_v(pbdma_status_r) ==
215                 fifo_pbdma_status_chsw_in_progress_v();
216 }
217
218 static void fifo_pbdma_exception_status(struct gk20a *g,
219         struct fifo_engine_info_gk20a *eng_info)
220 {
221         struct fifo_pbdma_exception_info_gk20a *e;
222         get_exception_pbdma_info(g, eng_info);
223         e = &eng_info->pbdma_exception_info;
224
225         gk20a_dbg_fn("pbdma_id %d, "
226                       "id_type %s, id %d, chan_status %d, "
227                       "next_id_type %s, next_id %d, "
228                       "chsw_in_progress %d",
229                       eng_info->pbdma_id,
230                       e->id_is_chid ? "chid" : "tsgid", e->id, e->chan_status_v,
231                       e->next_id_is_chid ? "chid" : "tsgid", e->next_id,
232                       e->chsw_in_progress);
233 }
234
235 /* reads info from hardware and fills in the engine exception info record */
236 static inline void get_exception_engine_info(
237         struct gk20a *g,
238         struct fifo_engine_info_gk20a *eng_info)
239 {
240         struct fifo_engine_exception_info_gk20a *e =
241                 &eng_info->engine_exception_info;
242         u32 engine_status_r = e->status_r =
243                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
244         e->id = fifo_engine_status_id_v(engine_status_r); /* vs. id_hw_v()? */
245         e->id_is_chid = fifo_engine_status_id_type_v(engine_status_r) ==
246                 fifo_engine_status_id_type_chid_v();
247         e->ctx_status_v = fifo_engine_status_ctx_status_v(engine_status_r);
248         e->faulted =
249                 fifo_engine_status_faulted_v(engine_status_r) ==
250                 fifo_engine_status_faulted_true_v();
251         e->idle =
252                 fifo_engine_status_engine_v(engine_status_r) ==
253                 fifo_engine_status_engine_idle_v();
254         e->ctxsw_in_progress =
255                 fifo_engine_status_ctxsw_v(engine_status_r) ==
256                 fifo_engine_status_ctxsw_in_progress_v();
257 }
258
259 static void fifo_engine_exception_status(struct gk20a *g,
260                                struct fifo_engine_info_gk20a *eng_info)
261 {
262         struct fifo_engine_exception_info_gk20a *e;
263         get_exception_engine_info(g, eng_info);
264         e = &eng_info->engine_exception_info;
265
266         gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
267                       "faulted %d, idle %d, ctxsw_in_progress %d, ",
268                       eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
269                       e->id, e->ctx_status_v,
270                       e->faulted, e->idle,  e->ctxsw_in_progress);
271 }
272
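/*
 * Allocate the bookkeeping for the GR runlist: active channel/TSG bitmaps
 * and MAX_RUNLIST_BUFFERS GPU buffers (double-buffered runlists), sized for
 * one RAM runlist entry per channel.
 */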
273 static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
274 {
275         struct fifo_engine_info_gk20a *engine_info;
276         struct fifo_runlist_info_gk20a *runlist;
277         struct device *d = dev_from_gk20a(g);
278         u32 runlist_id;
279         u32 i;
280         u64 runlist_size;
281
282         gk20a_dbg_fn("");
283
284         f->max_runlists = fifo_eng_runlist_base__size_1_v();
285         f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
286                                   f->max_runlists, GFP_KERNEL);
287         if (!f->runlist_info)
288                 goto clean_up;
289
290         engine_info = f->engine_info + ENGINE_GR_GK20A;
291         runlist_id = engine_info->runlist_id;
292         runlist = &f->runlist_info[runlist_id];
293
294         runlist->active_channels =
295                 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
296                         GFP_KERNEL);
297         if (!runlist->active_channels)
298                 goto clean_up_runlist_info;
299
300         runlist->active_tsgs =
301                 kzalloc(DIV_ROUND_UP(f->num_channels, BITS_PER_BYTE),
302                         GFP_KERNEL);
303         if (!runlist->active_tsgs)
304                 goto clean_up_runlist_info;
305
306         runlist_size  = ram_rl_entry_size_v() * f->num_channels;
307         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
308                 int err = gk20a_gmmu_alloc(g, runlist_size, &runlist->mem[i]);
309                 if (err) {
310                         dev_err(d, "memory allocation failed\n");
311                         goto clean_up_runlist;
312                 }
313         }
314         mutex_init(&runlist->mutex);
315
316         /* None of the buffers is pinned if this value doesn't change.
317             Otherwise, one of them (cur_buffer) must have been pinned. */
318         runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
319
320         gk20a_dbg_fn("done");
321         return 0;
322
323 clean_up_runlist:
324         for (i = 0; i < MAX_RUNLIST_BUFFERS; i++)
325                 gk20a_gmmu_free(g, &runlist->mem[i]);
326
327         kfree(runlist->active_channels);
328         runlist->active_channels = NULL;
329
330 clean_up_runlist_info:
331         kfree(f->runlist_info);
332         f->runlist_info = NULL;
333
334 clean_up:
335         gk20a_dbg_fn("fail");
336         return -ENOMEM;
337 }
338
339 #define GRFIFO_TIMEOUT_CHECK_PERIOD_US 100000
340
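/*
 * Bring the host/FIFO hardware out of reset: reset PFIFO and CE2 through
 * PMC, load the clock gating settings, enable all PBDMAs, unmask the PFIFO
 * and PBDMA interrupts and program the FB/PBDMA/engine timeout periods.
 */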
341 int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
342 {
343         u32 intr_stall;
344         u32 mask;
345         u32 timeout;
346         int i;
347
348         gk20a_dbg_fn("");
349         /* enable pmc pfifo */
350         gk20a_reset(g, mc_enable_pfifo_enabled_f()
351                         | mc_enable_ce2_enabled_f());
352
353         if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
354                 g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
355                                 g->slcg_enabled);
356         if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
357                 g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
358                                 g->slcg_enabled);
359         if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
360                 g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
361                                 g->blcg_enabled);
362
363         /* enable pbdma */
364         mask = 0;
365         for (i = 0; i < proj_host_num_pbdma_v(); ++i)
366                 mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i);
367         gk20a_writel(g, mc_enable_pb_r(), mask);
368
369         /* enable pfifo interrupt */
370         gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF);
371         gk20a_writel(g, fifo_intr_en_0_r(), 0x7FFFFFFF);
372         gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000);
373
374         /* enable pbdma interrupt */
375         mask = 0;
376         for (i = 0; i < proj_host_num_pbdma_v(); i++) {
377                 intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i));
378                 intr_stall &= ~pbdma_intr_stall_lbreq_enabled_f();
379                 gk20a_writel(g, pbdma_intr_stall_r(i), intr_stall);
380                 gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF);
381                 gk20a_writel(g, pbdma_intr_en_0_r(i),
382                         ~pbdma_intr_en_0_lbreq_enabled_f());
383                 gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF);
384                 gk20a_writel(g, pbdma_intr_en_1_r(i),
385                         ~pbdma_intr_en_0_lbreq_enabled_f());
386         }
387
388         /* TBD: apply overrides */
389
390         /* TBD: BLCG prod */
391
392         /* reset runlist interrupts */
393         gk20a_writel(g, fifo_intr_runlist_r(), ~0);
394
395         /* TBD: do we need those? */
396         timeout = gk20a_readl(g, fifo_fb_timeout_r());
397         timeout = set_field(timeout, fifo_fb_timeout_period_m(),
398                         fifo_fb_timeout_period_max_f());
399         gk20a_writel(g, fifo_fb_timeout_r(), timeout);
400
401         for (i = 0; i < pbdma_timeout__size_1_v(); i++) {
402                 timeout = gk20a_readl(g, pbdma_timeout_r(i));
403                 timeout = set_field(timeout, pbdma_timeout_period_m(),
404                                     pbdma_timeout_period_max_f());
405                 gk20a_writel(g, pbdma_timeout_r(i), timeout);
406         }
407
408         if (g->ops.fifo.apply_pb_timeout)
409                 g->ops.fifo.apply_pb_timeout(g);
410
411         timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US |
412                         fifo_eng_timeout_detection_enabled_f();
413         gk20a_writel(g, fifo_eng_timeout_r(), timeout);
414
415         gk20a_dbg_fn("done");
416
417         return 0;
418 }
419
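/*
 * Group the PBDMA intr_0 bits into the three classes used by the ISR:
 * device fatal, channel fatal (recovered by resetting the channel) and
 * restartable (sw methods / acquire timeouts).
 */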
420 static void gk20a_init_fifo_pbdma_intr_descs(struct fifo_gk20a *f)
421 {
422         /* These are all errors which indicate something is seriously
423          * wrong with the device. */
424         f->intr.pbdma.device_fatal_0 =
425                 pbdma_intr_0_memreq_pending_f() |
426                 pbdma_intr_0_memack_timeout_pending_f() |
427                 pbdma_intr_0_memack_extra_pending_f() |
428                 pbdma_intr_0_memdat_timeout_pending_f() |
429                 pbdma_intr_0_memdat_extra_pending_f() |
430                 pbdma_intr_0_memflush_pending_f() |
431                 pbdma_intr_0_memop_pending_f() |
432                 pbdma_intr_0_lbconnect_pending_f() |
433                 pbdma_intr_0_lback_timeout_pending_f() |
434                 pbdma_intr_0_lback_extra_pending_f() |
435                 pbdma_intr_0_lbdat_timeout_pending_f() |
436                 pbdma_intr_0_lbdat_extra_pending_f() |
437                 pbdma_intr_0_xbarconnect_pending_f() |
438                 pbdma_intr_0_pri_pending_f();
439
440         /* These are data parsing or framing errors, or others which can
441          * be recovered from with intervention... or by just resetting
442          * the channel. */
443         f->intr.pbdma.channel_fatal_0 =
444                 pbdma_intr_0_gpfifo_pending_f() |
445                 pbdma_intr_0_gpptr_pending_f() |
446                 pbdma_intr_0_gpentry_pending_f() |
447                 pbdma_intr_0_gpcrc_pending_f() |
448                 pbdma_intr_0_pbptr_pending_f() |
449                 pbdma_intr_0_pbentry_pending_f() |
450                 pbdma_intr_0_pbcrc_pending_f() |
451                 pbdma_intr_0_method_pending_f() |
452                 pbdma_intr_0_methodcrc_pending_f() |
453                 pbdma_intr_0_pbseg_pending_f() |
454                 pbdma_intr_0_signature_pending_f();
455
456         /* Can be used for sw-methods, or represents
457          * a recoverable timeout. */
458         f->intr.pbdma.restartable_0 =
459                 pbdma_intr_0_device_pending_f() |
460                 pbdma_intr_0_acquire_pending_f();
461 }
462
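/*
 * One-time SW init of the FIFO: allocate and map the userd area through
 * BAR1, allocate the channel/TSG/pbdma/engine bookkeeping, read the pbdma
 * map, fill in the engine info and runlists, and init per-channel support.
 */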
463 static int gk20a_init_fifo_setup_sw(struct gk20a *g)
464 {
465         struct fifo_gk20a *f = &g->fifo;
466         struct device *d = dev_from_gk20a(g);
467         int chid, i, err = 0;
468
469         gk20a_dbg_fn("");
470
471         if (f->sw_ready) {
472                 gk20a_dbg_fn("skip init");
473                 return 0;
474         }
475
476         f->g = g;
477
478         mutex_init(&f->intr.isr.mutex);
479         mutex_init(&f->gr_reset_mutex);
480         gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */
481
482         f->num_channels = g->ops.fifo.get_num_fifos(g);
483         f->num_pbdma = proj_host_num_pbdma_v();
484         f->max_engines = ENGINE_INVAL_GK20A;
485
486         f->userd_entry_size = 1 << ram_userd_base_shift_v();
487
488         err = gk20a_gmmu_alloc_map(&g->mm.bar1.vm,
489                                    f->userd_entry_size * f->num_channels,
490                                    &f->userd);
491         if (err) {
492                 dev_err(d, "memory allocation failed\n");
493                 goto clean_up;
494         }
495
496         gk20a_dbg(gpu_dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
497
498         f->channel = kzalloc(f->num_channels * sizeof(*f->channel),
499                                 GFP_KERNEL);
500         f->tsg = kzalloc(f->num_channels * sizeof(*f->tsg),
501                                 GFP_KERNEL);
502         f->pbdma_map = kzalloc(f->num_pbdma * sizeof(*f->pbdma_map),
503                                 GFP_KERNEL);
504         f->engine_info = kzalloc(f->max_engines * sizeof(*f->engine_info),
505                                 GFP_KERNEL);
506
507         if (!(f->channel && f->pbdma_map && f->engine_info)) {
508                 err = -ENOMEM;
509                 goto clean_up;
510         }
511
512         /* pbdma map needs to be in place before calling engine info init */
513         for (i = 0; i < f->num_pbdma; ++i)
514                 f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
515
516         init_engine_info(f);
517
518         init_runlist(g, f);
519
520         INIT_LIST_HEAD(&f->free_chs);
521         mutex_init(&f->free_chs_mutex);
522
523         for (chid = 0; chid < f->num_channels; chid++) {
524                 f->channel[chid].userd_cpu_va =
525                         f->userd.cpu_va + chid * f->userd_entry_size;
526                 f->channel[chid].userd_iova =
527                         g->ops.mm.get_iova_addr(g, f->userd.sgt->sgl, 0)
528                                 + chid * f->userd_entry_size;
529                 f->channel[chid].userd_gpu_va =
530                         f->userd.gpu_va + chid * f->userd_entry_size;
531
532                 gk20a_init_channel_support(g, chid);
533                 gk20a_init_tsg_support(g, chid);
534         }
535         mutex_init(&f->tsg_inuse_mutex);
536
537         f->remove_support = gk20a_remove_fifo_support;
538
539         f->deferred_reset_pending = false;
540         mutex_init(&f->deferred_reset_mutex);
541
542         f->sw_ready = true;
543
544         gk20a_dbg_fn("done");
545         return 0;
546
547 clean_up:
548         gk20a_dbg_fn("fail");
549         gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
550
551         kfree(f->channel);
552         f->channel = NULL;
553         kfree(f->pbdma_map);
554         f->pbdma_map = NULL;
555         kfree(f->engine_info);
556         f->engine_info = NULL;
557
558         return err;
559 }
560
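/* Acknowledge a runlist interrupt by writing the pending bits back. */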
561 static void gk20a_fifo_handle_runlist_event(struct gk20a *g)
562 {
563         u32 runlist_event = gk20a_readl(g, fifo_intr_runlist_r());
564
565         gk20a_dbg(gpu_dbg_intr, "runlist event %08x\n",
566                   runlist_event);
567
568         gk20a_writel(g, fifo_intr_runlist_r(), runlist_event);
569 }
570
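/*
 * HW side of FIFO init: sanity check that the userd area is coherent
 * between the CPU mapping and BAR1, then program fifo_bar1_base with the
 * userd GPU VA.
 */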
571 static int gk20a_init_fifo_setup_hw(struct gk20a *g)
572 {
573         struct fifo_gk20a *f = &g->fifo;
574
575         gk20a_dbg_fn("");
576
577         /* test write, read through bar1 @ userd region before
578          * turning on the snooping */
579         {
580                 struct fifo_gk20a *f = &g->fifo;
581                 u32 v, v1 = 0x33, v2 = 0x55;
582
583                 u32 bar1_vaddr = f->userd.gpu_va;
584                 volatile u32 *cpu_vaddr = f->userd.cpu_va;
585
586                 gk20a_dbg_info("test bar1 @ vaddr 0x%x",
587                            bar1_vaddr);
588
589                 v = gk20a_bar1_readl(g, bar1_vaddr);
590
591                 *cpu_vaddr = v1;
592                 smp_mb();
593
594                 if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
595                         gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: CPU wrote 0x%x, \
596                                 GPU read 0x%x", *cpu_vaddr, gk20a_bar1_readl(g, bar1_vaddr));
597                         return -EINVAL;
598                 }
599
600                 gk20a_bar1_writel(g, bar1_vaddr, v2);
601
602                 if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
603                         gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a: GPU wrote 0x%x, \
604                                 CPU read 0x%x", gk20a_bar1_readl(g, bar1_vaddr), *cpu_vaddr);
605                         return -EINVAL;
606                 }
607
608                 /* is it visible to the cpu? */
609                 if (*cpu_vaddr != v2) {
610                         gk20a_err(dev_from_gk20a(g),
611                                 "cpu didn't see bar1 write @ %p!",
612                                 cpu_vaddr);
613                 }
614
615                 /* put it back */
616                 gk20a_bar1_writel(g, bar1_vaddr, v);
617         }
618
619         /*XXX all manner of flushes and caching worries, etc */
620
621         /* set the base for the userd region now */
622         gk20a_writel(g, fifo_bar1_base_r(),
623                         fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
624                         fifo_bar1_base_valid_true_f());
625
626         gk20a_dbg_fn("done");
627
628         return 0;
629 }
630
631 int gk20a_init_fifo_support(struct gk20a *g)
632 {
633         u32 err;
634
635         err = gk20a_init_fifo_setup_sw(g);
636         if (err)
637                 return err;
638
639         err = gk20a_init_fifo_setup_hw(g);
640         if (err)
641                 return err;
642
643         return err;
644 }
645
646 /* return with a reference to the channel, caller must put it back */
647 static struct channel_gk20a *
648 channel_from_inst_ptr(struct fifo_gk20a *f, u64 inst_ptr)
649 {
650         int ci;
651         if (unlikely(!f->channel))
652                 return NULL;
653         for (ci = 0; ci < f->num_channels; ci++) {
654                 struct channel_gk20a *ch = gk20a_channel_get(&f->channel[ci]);
655                 /* only alive channels are searched */
656                 if (!ch)
657                         continue;
658
659                 if (ch->inst_block.cpu_va &&
660                     (inst_ptr == gk20a_mem_phys(&ch->inst_block)))
661                         return ch;
662
663                 gk20a_channel_put(ch);
664         }
665         return NULL;
666 }
667
668 /* fault info/descriptions.
669  * tbd: move to setup
670  *  */
671 static const char * const fault_type_descs[] = {
672          "pde", /*fifo_intr_mmu_fault_info_type_pde_v() == 0 */
673          "pde size",
674          "pte",
675          "va limit viol",
676          "unbound inst",
677          "priv viol",
678          "ro viol",
679          "wo viol",
680          "pitch mask",
681          "work creation",
682          "bad aperture",
683          "compression failure",
684          "bad kind",
685          "region viol",
686          "dual ptes",
687          "poisoned",
688 };
689 /* engine descriptions */
690 static const char * const engine_subid_descs[] = {
691         "gpc",
692         "hub",
693 };
694
695 static const char * const hub_client_descs[] = {
696         "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
697         "host cpu nb", "iso", "mmu", "mspdec", "msppp", "msvld",
698         "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
699         "scc nb", "sec", "ssync", "gr copy", "ce2", "xv", "mmu nb",
700         "msenc", "d falcon", "sked", "a falcon", "n/a",
701 };
702
703 static const char * const gpc_client_descs[] = {
704         "l1 0", "t1 0", "pe 0",
705         "l1 1", "t1 1", "pe 1",
706         "l1 2", "t1 2", "pe 2",
707         "l1 3", "t1 3", "pe 3",
708         "rast", "gcc", "gpccs",
709         "prop 0", "prop 1", "prop 2", "prop 3",
710         "l1 4", "t1 4", "pe 4",
711         "l1 5", "t1 5", "pe 5",
712         "l1 6", "t1 6", "pe 6",
713         "l1 7", "t1 7", "pe 7",
714         "gpm",
715         "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
716         "rgg utlb",
717 };
718
719 /* reads info from hardware and fills in mmu fault info record */
720 static inline void get_exception_mmu_fault_info(
721         struct gk20a *g, u32 engine_id,
722         struct fifo_mmu_fault_info_gk20a *f)
723 {
724         u32 fault_info_v;
725
726         gk20a_dbg_fn("engine_id %d", engine_id);
727
728         memset(f, 0, sizeof(*f));
729
730         f->fault_info_v = fault_info_v = gk20a_readl(g,
731              fifo_intr_mmu_fault_info_r(engine_id));
732         f->fault_type_v =
733                 fifo_intr_mmu_fault_info_type_v(fault_info_v);
734         f->engine_subid_v =
735                 fifo_intr_mmu_fault_info_engine_subid_v(fault_info_v);
736         f->client_v = fifo_intr_mmu_fault_info_client_v(fault_info_v);
737
738         BUG_ON(f->fault_type_v >= ARRAY_SIZE(fault_type_descs));
739         f->fault_type_desc =  fault_type_descs[f->fault_type_v];
740
741         BUG_ON(f->engine_subid_v >= ARRAY_SIZE(engine_subid_descs));
742         f->engine_subid_desc = engine_subid_descs[f->engine_subid_v];
743
744         if (f->engine_subid_v ==
745             fifo_intr_mmu_fault_info_engine_subid_hub_v()) {
746
747                 BUG_ON(f->client_v >= ARRAY_SIZE(hub_client_descs));
748                 f->client_desc = hub_client_descs[f->client_v];
749         } else if (f->engine_subid_v ==
750                    fifo_intr_mmu_fault_info_engine_subid_gpc_v()) {
751                 BUG_ON(f->client_v >= ARRAY_SIZE(gpc_client_descs));
752                 f->client_desc = gpc_client_descs[f->client_v];
753         } else {
754                 BUG_ON(1);
755         }
756
757         f->fault_hi_v = gk20a_readl(g, fifo_intr_mmu_fault_hi_r(engine_id));
758         f->fault_lo_v = gk20a_readl(g, fifo_intr_mmu_fault_lo_r(engine_id));
759         /* note:ignoring aperture on gk20a... */
760         f->inst_ptr = fifo_intr_mmu_fault_inst_ptr_v(
761                  gk20a_readl(g, fifo_intr_mmu_fault_inst_r(engine_id)));
762         /* note: inst_ptr is a 40b phys addr.  */
763         f->inst_ptr <<= fifo_intr_mmu_fault_inst_ptr_align_shift_v();
764 }
765
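/*
 * Fully reset a faulted engine.  For GR this halts the pipe and reruns the
 * full GR init sequence (with ELPG disabled around it when enabled); for
 * CE2 a PMC engine reset is sufficient.
 */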
766 void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
767 {
768         gk20a_dbg_fn("");
769
770         if (engine_id == top_device_info_type_enum_graphics_v()) {
771                 if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
772                         gk20a_pmu_disable_elpg(g);
773                 /* HALT_PIPELINE method, halt GR engine */
774                 if (gr_gk20a_halt_pipe(g))
775                         gk20a_err(dev_from_gk20a(g),
776                                 "failed to HALT gr pipe");
777                 /* resetting engine using mc_enable_r() is not
778                  * enough, we do full init sequence */
779                 gk20a_gr_reset(g);
780                 if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
781                         gk20a_pmu_enable_elpg(g);
782         }
783         if (engine_id == top_device_info_type_enum_copy0_v())
784                 gk20a_reset(g, mc_enable_ce2_m());
785 }
786
787 static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
788 {
789         u32 intr;
790
791         intr = gk20a_readl(g, fifo_intr_chsw_error_r());
792         gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
793         gk20a_fecs_dump_falcon_stats(g);
794         gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
795 }
796
797 static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
798 {
799         struct device *dev = dev_from_gk20a(g);
800         u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
801         gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id);
802 }
803
804 static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
805                 struct fifo_mmu_fault_info_gk20a *f, bool fake_fault)
806 {
807         /* channel recovery is only deferred if an sm debugger
808            is attached and MMU debug mode is enabled */
809         if (!gk20a_gr_sm_debugger_attached(g) ||
810             !g->ops.mm.is_debug_mode_enabled(g))
811                 return false;
812
813         /* if this fault is fake (due to RC recovery), don't defer recovery */
814         if (fake_fault)
815                 return false;
816
817         if (engine_id != ENGINE_GR_GK20A ||
818             f->engine_subid_v != fifo_intr_mmu_fault_info_engine_subid_gpc_v())
819                 return false;
820
821         return true;
822 }
823
824 /* caller must hold a channel reference */
825 static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
826                 struct channel_gk20a *ch)
827 {
828         bool verbose = true;
829         if (!ch)
830                 return verbose;
831
832         if (ch->error_notifier) {
833                 u32 err = ch->error_notifier->info32;
834                 if (ch->error_notifier->status == 0xffff) {
835                         /* If error code is already set, this mmu fault
836                          * was triggered as part of recovery from other
837                          * error condition.
838                          * Don't overwrite error flag. */
839                         /* Fifo timeout debug spew is controlled by user */
840                         if (err == NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT)
841                                 verbose = ch->timeout_debug_dump;
842                 } else {
843                         gk20a_set_error_notifier(ch,
844                                 NVGPU_CHANNEL_FIFO_ERROR_MMU_ERR_FLT);
845                 }
846         }
847         /* mark channel as faulted */
848         ch->has_timedout = true;
849         wmb();
850         /* unblock pending waits */
851         wake_up(&ch->semaphore_wq);
852         wake_up(&ch->notifier_wq);
853         wake_up(&ch->submit_wq);
854         return verbose;
855 }
856
857 static bool gk20a_fifo_set_ctx_mmu_error_ch(struct gk20a *g,
858                 struct channel_gk20a *ch)
859 {
860         gk20a_err(dev_from_gk20a(g),
861                 "channel %d generated a mmu fault", ch->hw_chid);
862
863         return gk20a_fifo_set_ctx_mmu_error(g, ch);
864 }
865
866 static bool gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
867                 struct tsg_gk20a *tsg)
868 {
869         bool ret = true;
870         struct channel_gk20a *ch = NULL;
871
872         gk20a_err(dev_from_gk20a(g),
873                 "TSG %d generated a mmu fault", tsg->tsgid);
874
875         mutex_lock(&tsg->ch_list_lock);
876         list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
877                 if (gk20a_channel_get(ch)) {
878                         if (!gk20a_fifo_set_ctx_mmu_error(g, ch))
879                                 ret = false;
880                         gk20a_channel_put(ch);
881                 }
882         }
883         mutex_unlock(&tsg->ch_list_lock);
884
885         return ret;
886 }
887
888 static void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid)
889 {
890         struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
891         struct channel_gk20a *ch;
892
893         mutex_lock(&tsg->ch_list_lock);
894         list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
895                 if (gk20a_channel_get(ch)) {
896                         gk20a_channel_abort(ch, false);
897                         gk20a_channel_put(ch);
898                 }
899         }
900         mutex_unlock(&tsg->ch_list_lock);
901 }
902
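/*
 * Handle (or simulate, for recovery) MMU faults: with ELPG and engine
 * clock gating disabled and FIFO access blocked, decode each faulted
 * engine, find the owning channel/TSG, reset the engine (or defer the
 * reset to channel free when an SM debugger is attached), abort the
 * channel/TSG, then clear the fault and re-enable the scheduler.
 * Returns true when a verbose debug dump is warranted.
 */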
903 static bool gk20a_fifo_handle_mmu_fault(
904         struct gk20a *g,
905         u32 mmu_fault_engines, /* queried from HW if 0 */
906         u32 hw_id, /* queried from HW if ~(u32)0 OR mmu_fault_engines == 0*/
907         bool id_is_tsg)
908 {
909         bool fake_fault;
910         unsigned long fault_id;
911         unsigned long engine_mmu_id;
912         bool verbose = true;
913         u32 grfifo_ctl;
914
915         gk20a_dbg_fn("");
916
917         g->fifo.deferred_reset_pending = false;
918
919         /* Disable power management */
920         if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
921                 gk20a_pmu_disable_elpg(g);
922         g->ops.clock_gating.slcg_gr_load_gating_prod(g,
923                         false);
924         g->ops.clock_gating.slcg_perf_load_gating_prod(g,
925                         false);
926         g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
927                         false);
928         gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_GR_GK20A);
929         gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_CE2_GK20A);
930
931         /* Disable fifo access */
932         grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r());
933         grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1);
934         grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1);
935
936         gk20a_writel(g, gr_gpfifo_ctl_r(),
937                 grfifo_ctl | gr_gpfifo_ctl_access_f(0) |
938                 gr_gpfifo_ctl_semaphore_access_f(0));
939
940         if (mmu_fault_engines) {
941                 fault_id = mmu_fault_engines;
942                 fake_fault = true;
943         } else {
944                 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
945                 fake_fault = false;
946                 gk20a_debug_dump(g->dev);
947         }
948
949
950         /* go through all faulted engines */
951         for_each_set_bit(engine_mmu_id, &fault_id, 32) {
952                 /* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
953                  * engines. Convert engine_mmu_id to engine_id */
954                 u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
955                 struct fifo_mmu_fault_info_gk20a f;
956                 struct channel_gk20a *ch = NULL;
957                 struct tsg_gk20a *tsg = NULL;
958                 struct channel_gk20a *referenced_channel = NULL;
959                 bool was_reset;
960                 /* read and parse engine status */
961                 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
962                 u32 ctx_status = fifo_engine_status_ctx_status_v(status);
963                 bool ctxsw = (ctx_status ==
964                                 fifo_engine_status_ctx_status_ctxsw_switch_v()
965                                 || ctx_status ==
966                                 fifo_engine_status_ctx_status_ctxsw_save_v()
967                                 || ctx_status ==
968                                 fifo_engine_status_ctx_status_ctxsw_load_v());
969
970                 get_exception_mmu_fault_info(g, engine_mmu_id, &f);
971                 trace_gk20a_mmu_fault(f.fault_hi_v,
972                                       f.fault_lo_v,
973                                       f.fault_info_v,
974                                       f.inst_ptr,
975                                       engine_id,
976                                       f.engine_subid_desc,
977                                       f.client_desc,
978                                       f.fault_type_desc);
979                 gk20a_err(dev_from_gk20a(g), "mmu fault on engine %d, "
980                            "engine subid %d (%s), client %d (%s), "
981                            "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
982                            "inst_ptr 0x%llx\n",
983                            engine_id,
984                            f.engine_subid_v, f.engine_subid_desc,
985                            f.client_v, f.client_desc,
986                            f.fault_hi_v, f.fault_lo_v,
987                            f.fault_type_v, f.fault_type_desc,
988                            f.fault_info_v, f.inst_ptr);
989
990                 if (ctxsw) {
991                         gk20a_fecs_dump_falcon_stats(g);
992                         gk20a_err(dev_from_gk20a(g), "gr_status_r : 0x%x",
993                                         gk20a_readl(g, gr_status_r()));
994                 }
995
996                 /* get the channel/TSG */
997                 if (fake_fault) {
998                         /* use next_id if context load is failing */
999                         u32 id, type;
1000
1001                         if (hw_id == ~(u32)0) {
1002                                 id = (ctx_status ==
1003                                       fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1004                                         fifo_engine_status_next_id_v(status) :
1005                                         fifo_engine_status_id_v(status);
1006                                 type = (ctx_status ==
1007                                         fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1008                                         fifo_engine_status_next_id_type_v(status) :
1009                                         fifo_engine_status_id_type_v(status);
1010                         } else {
1011                                 id = hw_id;
1012                                 type = id_is_tsg ?
1013                                         fifo_engine_status_id_type_tsgid_v() :
1014                                         fifo_engine_status_id_type_chid_v();
1015                         }
1016
1017                         if (type == fifo_engine_status_id_type_tsgid_v())
1018                                 tsg = &g->fifo.tsg[id];
1019                         else if (type == fifo_engine_status_id_type_chid_v()) {
1020                                 ch = &g->fifo.channel[id];
1021                                 referenced_channel = gk20a_channel_get(ch);
1022                         }
1023                 } else {
1024                         /* read channel based on instruction pointer */
1025                         ch = channel_from_inst_ptr(&g->fifo, f.inst_ptr);
1026                         referenced_channel = ch;
1027                 }
1028
1029                 if (ch && gk20a_is_channel_marked_as_tsg(ch))
1030                         tsg = &g->fifo.tsg[ch->tsgid];
1031
1032                 /* check if engine reset should be deferred */
1033                 if ((ch || tsg) && gk20a_fifo_should_defer_engine_reset(g,
1034                                 engine_id, &f, fake_fault)) {
1035                         g->fifo.deferred_fault_engines = fault_id;
1036
1037                         /* handled during channel free */
1038                         g->fifo.deferred_reset_pending = true;
1039                 } else if (engine_id != ~0) {
1040                         was_reset = mutex_is_locked(&g->fifo.gr_reset_mutex);
1041                         mutex_lock(&g->fifo.gr_reset_mutex);
1042                         /* if lock is already taken, a reset is taking place
1043                         so no need to repeat */
1044                         if (!was_reset)
1045                                 gk20a_fifo_reset_engine(g, engine_id);
1046                         mutex_unlock(&g->fifo.gr_reset_mutex);
1047                 }
1048                 /* disable the channel/TSG from hw and increment
1049                  * syncpoints */
1050
1051                 if (tsg) {
1052                         if (!g->fifo.deferred_reset_pending)
1053                                 verbose =
1054                                        gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg);
1055
1056                         gk20a_fifo_abort_tsg(g, tsg->tsgid);
1057
1058                         /* put back the ref taken early above */
1059                         if (referenced_channel)
1060                                 gk20a_channel_put(ch);
1061                 } else if (ch) {
1062                         if (referenced_channel) {
1063                                 if (!g->fifo.deferred_reset_pending)
1064                                         verbose = gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
1065                                 gk20a_channel_abort(ch, false);
1066                                 gk20a_channel_put(ch);
1067                         } else {
1068                                 gk20a_err(dev_from_gk20a(g),
1069                                                 "mmu error in freed channel %d",
1070                                                 ch->hw_chid);
1071                         }
1072                 } else if (f.inst_ptr ==
1073                                 gk20a_mem_phys(&g->mm.bar1.inst_block)) {
1074                         gk20a_err(dev_from_gk20a(g), "mmu fault from bar1");
1075                 } else if (f.inst_ptr ==
1076                                 gk20a_mem_phys(&g->mm.pmu.inst_block)) {
1077                         gk20a_err(dev_from_gk20a(g), "mmu fault from pmu");
1078                 } else
1079                         gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
1080         }
1081
1082         if (g->fifo.deferred_reset_pending) {
1083                 gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached,"
1084                            " deferring channel recovery to channel free");
1085                 /* clear interrupt */
1086                 gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
1087                 goto exit_enable;
1088         }
1089
1090         /* clear interrupt */
1091         gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
1092
1093         /* resume scheduler */
1094         gk20a_writel(g, fifo_error_sched_disable_r(),
1095                      gk20a_readl(g, fifo_error_sched_disable_r()));
1096
1097         /* Re-enable fifo access */
1098         gk20a_writel(g, gr_gpfifo_ctl_r(),
1099                      gr_gpfifo_ctl_access_enabled_f() |
1100                      gr_gpfifo_ctl_semaphore_access_enabled_f());
1101
1102 exit_enable:
1103         /* It is safe to enable ELPG again. */
1104         if (support_gk20a_pmu(g->dev) && g->elpg_enabled)
1105                 gk20a_pmu_enable_elpg(g);
1106         return verbose;
1107 }
1108
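/*
 * Read the engine status and return the channel/TSG id and id type it is
 * bound to; next_id is used while a context load is in progress.
 */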
1109 static void gk20a_fifo_get_faulty_id_type(struct gk20a *g, int engine_id,
1110                                           u32 *id, u32 *type)
1111 {
1112         u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1113         u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1114
1115         /* use next_id if context load is failing */
1116         *id = (ctx_status ==
1117                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1118                 fifo_engine_status_next_id_v(status) :
1119                 fifo_engine_status_id_v(status);
1120
1121         *type = (ctx_status ==
1122                 fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1123                 fifo_engine_status_next_id_type_v(status) :
1124                 fifo_engine_status_id_type_v(status);
1125 }
1126
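/*
 * Force recovery by triggering a fake MMU fault on each engine in
 * engine_ids, wait (with a GR idle timeout) for the fault interrupt to
 * become pending, then release the triggers.
 */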
1127 static void gk20a_fifo_trigger_mmu_fault(struct gk20a *g,
1128                 unsigned long engine_ids)
1129 {
1130         unsigned long end_jiffies = jiffies +
1131                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1132         unsigned long delay = GR_IDLE_CHECK_DEFAULT;
1133         unsigned long engine_id;
1134         int ret;
1135
1136         /* trigger faults for all bad engines */
1137         for_each_set_bit(engine_id, &engine_ids, 32) {
1138                 if (engine_id > g->fifo.max_engines) {
1139                         WARN_ON(true);
1140                         break;
1141                 }
1142
1143                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id),
1144                              fifo_trigger_mmu_fault_id_f(
1145                              gk20a_engine_id_to_mmu_id(engine_id)) |
1146                              fifo_trigger_mmu_fault_enable_f(1));
1147         }
1148
1149         /* Wait for MMU fault to trigger */
1150         ret = -EBUSY;
1151         do {
1152                 if (gk20a_readl(g, fifo_intr_0_r()) &
1153                                 fifo_intr_0_mmu_fault_pending_f()) {
1154                         ret = 0;
1155                         break;
1156                 }
1157
1158                 usleep_range(delay, delay * 2);
1159                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
1160         } while (time_before(jiffies, end_jiffies) ||
1161                         !tegra_platform_is_silicon());
1162
1163         if (ret)
1164                 gk20a_err(dev_from_gk20a(g), "mmu fault timeout");
1165
1166         /* release mmu fault trigger */
1167         for_each_set_bit(engine_id, &engine_ids, 32)
1168                 gk20a_writel(g, fifo_trigger_mmu_fault_r(engine_id), 0);
1169 }
1170
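/*
 * Return a bitmask of the busy engines currently running (or loading) the
 * given channel or TSG id.
 */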
1171 static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg)
1172 {
1173         int i;
1174         u32 engines = 0;
1175
1176         for (i = 0; i < g->fifo.max_engines; i++) {
1177                 u32 status = gk20a_readl(g, fifo_engine_status_r(i));
1178                 u32 ctx_status =
1179                         fifo_engine_status_ctx_status_v(status);
1180                 u32 ctx_id = (ctx_status ==
1181                         fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1182                         fifo_engine_status_next_id_v(status) :
1183                         fifo_engine_status_id_v(status);
1184                 u32 type = (ctx_status ==
1185                         fifo_engine_status_ctx_status_ctxsw_load_v()) ?
1186                         fifo_engine_status_next_id_type_v(status) :
1187                         fifo_engine_status_id_type_v(status);
1188                 bool busy = fifo_engine_status_engine_v(status) ==
1189                         fifo_engine_status_engine_busy_v();
1190                 if (busy && ctx_id == id) {
1191                         if ((is_tsg && type ==
1192                                         fifo_engine_status_id_type_tsgid_v()) ||
1193                                     (!is_tsg && type ==
1194                                         fifo_engine_status_id_type_chid_v()))
1195                                 engines |= BIT(i);
1196                 }
1197         }
1198
1199         return engines;
1200 }
1201
1202 void gk20a_fifo_recover_ch(struct gk20a *g, u32 hw_chid, bool verbose)
1203 {
1204         u32 engines;
1205
1206         /* stop context switching to prevent engine assignments from
1207            changing until channel is recovered */
1208         mutex_lock(&g->dbg_sessions_lock);
1209         gr_gk20a_disable_ctxsw(g);
1210
1211         engines = gk20a_fifo_engines_on_id(g, hw_chid, false);
1212
1213         if (engines)
1214                 gk20a_fifo_recover(g, engines, hw_chid, false, true, verbose);
1215         else {
1216                 struct channel_gk20a *ch = &g->fifo.channel[hw_chid];
1217
1218                 if (gk20a_channel_get(ch)) {
1219                         gk20a_channel_abort(ch, false);
1220
1221                         if (gk20a_fifo_set_ctx_mmu_error_ch(g, ch))
1222                                 gk20a_debug_dump(g->dev);
1223
1224                         gk20a_channel_put(ch);
1225                 }
1226         }
1227
1228         gr_gk20a_enable_ctxsw(g);
1229         mutex_unlock(&g->dbg_sessions_lock);
1230 }
1231
1232 void gk20a_fifo_recover_tsg(struct gk20a *g, u32 tsgid, bool verbose)
1233 {
1234         u32 engines;
1235
1236         /* stop context switching to prevent engine assignments from
1237            changing until TSG is recovered */
1238         mutex_lock(&g->dbg_sessions_lock);
1239         gr_gk20a_disable_ctxsw(g);
1240
1241         engines = gk20a_fifo_engines_on_id(g, tsgid, true);
1242
1243         if (engines)
1244                 gk20a_fifo_recover(g, engines, tsgid, true, true, verbose);
1245         else {
1246                 struct tsg_gk20a *tsg = &g->fifo.tsg[tsgid];
1247
1248                 if (gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg))
1249                         gk20a_debug_dump(g->dev);
1250
1251                 gk20a_fifo_abort_tsg(g, tsgid);
1252         }
1253
1254         gr_gk20a_enable_ctxsw(g);
1255         mutex_unlock(&g->dbg_sessions_lock);
1256 }
1257
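/*
 * Common recovery path: work out which engines and MMU fault ids are
 * involved (either from the known channel/TSG id or from the per-engine
 * status), mask the sched error and MMU fault interrupts, trigger MMU
 * faults on those engines and run the fault handler to tear the work down.
 */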
1258 void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
1259                         u32 hw_id, bool id_is_tsg,
1260                         bool id_is_known, bool verbose)
1261 {
1262         unsigned long engine_id, i;
1263         unsigned long _engine_ids = __engine_ids;
1264         unsigned long engine_ids = 0;
1265         u32 val;
1266         u32 mmu_fault_engines = 0;
1267         u32 ref_type;
1268         u32 ref_id;
1269         u32 ref_id_is_tsg = false;
1270
1271         if (verbose)
1272                 gk20a_debug_dump(g->dev);
1273
1274         if (g->ops.ltc.flush)
1275                 g->ops.ltc.flush(g);
1276
1277         if (id_is_known) {
1278                 engine_ids = gk20a_fifo_engines_on_id(g, hw_id, id_is_tsg);
1279                 ref_id = hw_id;
1280                 ref_type = id_is_tsg ?
1281                         fifo_engine_status_id_type_tsgid_v() :
1282                         fifo_engine_status_id_type_chid_v();
1283                 ref_id_is_tsg = id_is_tsg;
1284                 /* at least one engine will get passed during sched err */
1285                 engine_ids |= __engine_ids;
1286                 for_each_set_bit(engine_id, &engine_ids, 32) {
1287                         mmu_fault_engines |=
1288                                 BIT(gk20a_engine_id_to_mmu_id(engine_id));
1289                 }
1290         } else {
1291                 /* store faulted engines in advance */
1292                 for_each_set_bit(engine_id, &_engine_ids, 32) {
1293                         gk20a_fifo_get_faulty_id_type(g, engine_id, &ref_id,
1294                                                       &ref_type);
1295                         if (ref_type == fifo_engine_status_id_type_tsgid_v())
1296                                 ref_id_is_tsg = true;
1297                         else
1298                                 ref_id_is_tsg = false;
1299                         /* Reset *all* engines that use the
1300                          * same channel as faulty engine */
1301                         for (i = 0; i < g->fifo.max_engines; i++) {
1302                                 u32 type;
1303                                 u32 id;
1304                                 gk20a_fifo_get_faulty_id_type(g, i, &id, &type);
1305                                 if (ref_type == type && ref_id == id) {
1306                                         engine_ids |= BIT(i);
1307                                         mmu_fault_engines |=
1308                                         BIT(gk20a_engine_id_to_mmu_id(i));
1309                                 }
1310                         }
1311                 }
1312         }
1313
1314         if (mmu_fault_engines) {
1315                 /*
1316                  * sched error prevents recovery, and ctxsw error will retrigger
1317                  * every 100ms. Disable the sched error to allow recovery.
1318                  */
1319                 val = gk20a_readl(g, fifo_intr_en_0_r());
1320                 val &= ~(fifo_intr_en_0_sched_error_m() |
1321                         fifo_intr_en_0_mmu_fault_m());
1322                 gk20a_writel(g, fifo_intr_en_0_r(), val);
1323                 gk20a_writel(g, fifo_intr_0_r(),
1324                                 fifo_intr_0_sched_error_reset_f());
1325
1326                 g->ops.fifo.trigger_mmu_fault(g, engine_ids);
1327                 gk20a_fifo_handle_mmu_fault(g, mmu_fault_engines, ref_id,
1328                                 ref_id_is_tsg);
1329
1330                 val = gk20a_readl(g, fifo_intr_en_0_r());
1331                 val |= fifo_intr_en_0_mmu_fault_f(1)
1332                         | fifo_intr_en_0_sched_error_f(1);
1333                 gk20a_writel(g, fifo_intr_en_0_r(), val);
1334         }
1335 }
1336
1337 /* force reset channel and tsg (if it's part of one) */
1338 int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch, bool verbose)
1339 {
1340         struct tsg_gk20a *tsg = NULL;
1341         struct channel_gk20a *ch_tsg = NULL;
1342         struct gk20a *g = ch->g;
1343
1344         if (gk20a_is_channel_marked_as_tsg(ch)) {
1345                 tsg = &g->fifo.tsg[ch->hw_chid];
1346
1347                 mutex_lock(&tsg->ch_list_lock);
1348
1349                 list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
1350                         if (gk20a_channel_get(ch_tsg)) {
1351                                 gk20a_set_error_notifier(ch_tsg,
1352                                        NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1353                                 gk20a_channel_put(ch_tsg);
1354                         }
1355                 }
1356
1357                 mutex_unlock(&tsg->ch_list_lock);
1358                 gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
1359         } else {
1360                 gk20a_set_error_notifier(ch,
1361                         NVGPU_CHANNEL_RESETCHANNEL_VERIF_ERROR);
1362                 gk20a_fifo_recover_ch(g, ch->hw_chid, verbose);
1363         }
1364
1365         return 0;
1366 }
1367
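/*
 * Handle a FIFO scheduler error: find the busy engine stuck in a context
 * switch, work out the channel/TSG id involved and, for ctxsw-timeout
 * errors, kick off recovery or bump the channel's timeout accounting.
 */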
1368 static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
1369 {
1370         u32 sched_error;
1371         u32 engine_id;
1372         int id = -1;
1373         bool non_chid = false;
1374         bool ret = false;
1375         u32 mailbox2;
1376         /* read the scheduler error register */
1377         sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
1378
1379         for (engine_id = 0; engine_id < g->fifo.max_engines; engine_id++) {
1380                 u32 status = gk20a_readl(g, fifo_engine_status_r(engine_id));
1381                 u32 ctx_status = fifo_engine_status_ctx_status_v(status);
1382                 bool failing_engine;
1383
1384                 /* we are interested in busy engines */
1385                 failing_engine = fifo_engine_status_engine_v(status) ==
1386                         fifo_engine_status_engine_busy_v();
1387
1388                 /* ..that are doing context switch */
1389                 failing_engine = failing_engine &&
1390                         (ctx_status ==
1391                                 fifo_engine_status_ctx_status_ctxsw_switch_v()
1392                         || ctx_status ==
1393                                 fifo_engine_status_ctx_status_ctxsw_save_v()
1394                         || ctx_status ==
1395                                 fifo_engine_status_ctx_status_ctxsw_load_v());
1396
1397                 if (!failing_engine)
1398                         continue;
1399                 if (ctx_status ==
1400                 fifo_engine_status_ctx_status_ctxsw_load_v()) {
1401                         id = fifo_engine_status_next_id_v(status);
1402                         non_chid = fifo_pbdma_status_id_type_v(status)
1403                                 != fifo_pbdma_status_id_type_chid_v();
1404                 } else if (ctx_status ==
1405                 fifo_engine_status_ctx_status_ctxsw_switch_v()) {
1406                         mailbox2 = gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(2));
1407                         if (mailbox2 & FECS_METHOD_WFI_RESTORE)
1408                                 id = fifo_engine_status_next_id_v(status);
1409                         else
1410                                 id = fifo_engine_status_id_v(status);
1411                 } else {
1412                         id = fifo_engine_status_id_v(status);
1413                 }
1414                 break;
1415         }
1416
1417         /* could not find the engine - should never happen */
1418         if (unlikely(engine_id >= g->fifo.max_engines)) {
1419                 gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, failed to find engine\n",
1420                         sched_error);
1421                 ret = false;
1422                 goto err;
1423         }
1424
1425         if (fifo_intr_sched_error_code_f(sched_error) ==
1426                         fifo_intr_sched_error_code_ctxsw_timeout_v()) {
1427                 struct fifo_gk20a *f = &g->fifo;
1428                 struct channel_gk20a *ch = &f->channel[id];
1429
1430                 if (non_chid) {
1431                         gk20a_fifo_recover(g, BIT(engine_id), id, true,
1432                                         true, true);
1433                         ret = true;
1434                         goto err;
1435                 }
1436
1437                 if (!gk20a_channel_get(ch))
1438                         goto err;
1439
1440                 if (gk20a_channel_update_and_check_timeout(ch,
1441                         GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
1442                         gk20a_set_error_notifier(ch,
1443                                 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1444                         gk20a_err(dev_from_gk20a(g),
1445                                 "fifo sched ctxsw timeout error: "
1446                                 "engine = %u, ch = %d", engine_id, id);
1447                         gk20a_gr_debug_dump(g->dev);
1448                         gk20a_fifo_recover(g, BIT(engine_id), id, false,
1449                                 true, ch->timeout_debug_dump);
1450                         ret = true;
1451                 } else {
1452                         gk20a_dbg_info(
1453                                 "fifo is waiting for ctx switch for %d ms, "
1454                                 "ch = %d\n",
1455                                 ch->timeout_accumulated_ms,
1456                                 id);
1457                         ret = false;
1458                 }
1459                 gk20a_channel_put(ch);
1460                 return ret;
1461         }
1462
1463         gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
1464                 sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
1465
1466 err:
1467         return ret;
1468 }
1469
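/*
 * Handle the error-class fifo_intr_0 bits: pio, bind, sched, chsw,
 * MMU fault and dropped MMU fault errors.  Returns the mask of bits that
 * were handled; when a channel reset was initiated (and no deferred reset
 * is pending), per-engine and per-PBDMA exception state is dumped.
 */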
1470 static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
1471 {
1472         bool print_channel_reset_log = false;
1473         struct device *dev = dev_from_gk20a(g);
1474         u32 handled = 0;
1475
1476         gk20a_dbg_fn("");
1477
1478         if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
1479                 /* pio mode is unused.  this shouldn't happen, ever. */
1480                 /* should we clear it or just leave it pending? */
1481                 gk20a_err(dev, "fifo pio error!\n");
1482                 BUG_ON(1);
1483         }
1484
1485         if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
1486                 u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
1487                 gk20a_err(dev, "fifo bind error: 0x%08x", bind_error);
1488                 print_channel_reset_log = true;
1489                 handled |= fifo_intr_0_bind_error_pending_f();
1490         }
1491
1492         if (fifo_intr & fifo_intr_0_sched_error_pending_f()) {
1493                 print_channel_reset_log = gk20a_fifo_handle_sched_error(g);
1494                 handled |= fifo_intr_0_sched_error_pending_f();
1495         }
1496
1497         if (fifo_intr & fifo_intr_0_chsw_error_pending_f()) {
1498                 gk20a_fifo_handle_chsw_fault(g);
1499                 handled |= fifo_intr_0_chsw_error_pending_f();
1500         }
1501
1502         if (fifo_intr & fifo_intr_0_mmu_fault_pending_f()) {
1503                 print_channel_reset_log =
1504                         gk20a_fifo_handle_mmu_fault(g, 0, ~(u32)0, false);
1505                 handled |= fifo_intr_0_mmu_fault_pending_f();
1506         }
1507
1508         if (fifo_intr & fifo_intr_0_dropped_mmu_fault_pending_f()) {
1509                 gk20a_fifo_handle_dropped_mmu_fault(g);
1510                 handled |= fifo_intr_0_dropped_mmu_fault_pending_f();
1511         }
1512
1513         print_channel_reset_log = !g->fifo.deferred_reset_pending
1514                         && print_channel_reset_log;
1515
1516         if (print_channel_reset_log) {
1517                 int engine_id;
1518                 gk20a_err(dev_from_gk20a(g),
1519                            "channel reset initiated from %s; intr=0x%08x",
1520                            __func__, fifo_intr);
1521                 for (engine_id = 0;
1522                      engine_id < g->fifo.max_engines;
1523                      engine_id++) {
1524                         gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_id,
1525                                 g->fifo.engine_info[engine_id].engine_id);
1526                         fifo_pbdma_exception_status(g,
1527                                         &g->fifo.engine_info[engine_id]);
1528                         fifo_engine_exception_status(g,
1529                                         &g->fifo.engine_info[engine_id]);
1530                 }
1531         }
1532
1533         return handled;
1534 }
1535
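/*
 * Overwrite the PBDMA PB header with a benign value (a "first",
 * non-incrementing header) so the PBDMA can move past the faulting
 * pushbuffer entry.
 */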
1536 static inline void gk20a_fifo_reset_pbdma_header(struct gk20a *g, int pbdma_id)
1537 {
1538         gk20a_writel(g, pbdma_pb_header_r(pbdma_id),
1539                         pbdma_pb_header_first_true_f() |
1540                         pbdma_pb_header_type_non_inc_f());
1541 }
1542
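/*
 * Replace the PBDMA method slot at pbdma_method_index with a NOP
 * (valid, first, udma_nop address) so the stuck method is discarded.
 */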
1543 static inline void gk20a_fifo_reset_pbdma_method(struct gk20a *g, int pbdma_id,
1544                                                 int pbdma_method_index)
1545 {
1546         u32 pbdma_method_stride;
1547         u32 pbdma_method_reg;
1548
1549         pbdma_method_stride = pbdma_method1_r(pbdma_id) -
1550                                 pbdma_method0_r(pbdma_id);
1551
1552         pbdma_method_reg = pbdma_method0_r(pbdma_id) +
1553                 (pbdma_method_index * pbdma_method_stride);
1554
1555         gk20a_writel(g, pbdma_method_reg,
1556                         pbdma_method0_valid_true_f() |
1557                         pbdma_method0_first_true_f() |
1558                         pbdma_method0_addr_f(
1559                              pbdma_udma_nop_r() >> 2));
1560 }
1561
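/*
 * Return true if the method pending in the given PBDMA method slot
 * targets one of the software-only subchannels (5, 6 or 7).
 */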
1562 static bool gk20a_fifo_is_sw_method_subch(struct gk20a *g, int pbdma_id,
1563                                                 int pbdma_method_index)
1564 {
1565         u32 pbdma_method_stride;
1566         u32 pbdma_method_reg, pbdma_method_subch;
1567
1568         pbdma_method_stride = pbdma_method1_r(pbdma_id) -
1569                                 pbdma_method0_r(pbdma_id);
1570
1571         pbdma_method_reg = pbdma_method0_r(pbdma_id) +
1572                         (pbdma_method_index * pbdma_method_stride);
1573
1574         pbdma_method_subch = pbdma_method0_subch_v(
1575                         gk20a_readl(g, pbdma_method_reg));
1576
1577         if (pbdma_method_subch == 5 || pbdma_method_subch == 6 ||
1578                                        pbdma_method_subch == 7)
1579                 return true;
1580
1581         return false;
1582 }
1583
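/*
 * Handle the pending interrupts of a single PBDMA.  Fatal and restartable
 * pbdma_intr_0 bits are logged together with the PB header/method state,
 * bad pushbuffer entries and methods are NOP-ed out, and pbdma_intr_1
 * ("host copy engine") errors are treated as channel fatal.  When a reset
 * is needed, the channel or TSG currently resident on the PBDMA gets an
 * error notifier and is recovered.  Returns the handled pbdma_intr_0 bits.
 */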
1584 static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
1585                                         struct gk20a *g,
1586                                         struct fifo_gk20a *f,
1587                                         u32 pbdma_id)
1588 {
1589         u32 pbdma_intr_0 = gk20a_readl(g, pbdma_intr_0_r(pbdma_id));
1590         u32 pbdma_intr_1 = gk20a_readl(g, pbdma_intr_1_r(pbdma_id));
1591         u32 handled = 0;
1592         bool reset = false;
1593         int i;
1594
1595         gk20a_dbg_fn("");
1596
1597         gk20a_dbg(gpu_dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
1598                         pbdma_intr_0, pbdma_intr_1);
1599         if (pbdma_intr_0) {
1600                 if ((f->intr.pbdma.device_fatal_0 |
1601                      f->intr.pbdma.channel_fatal_0 |
1602                      f->intr.pbdma.restartable_0) & pbdma_intr_0) {
1603                         gk20a_err(dev_from_gk20a(g),
1604                                 "pbdma_intr_0(%d):0x%08x PBH: %08x SHADOW: %08x M0: %08x %08x %08x %08x",
1605                                 pbdma_id, pbdma_intr_0,
1606                                 gk20a_readl(g, pbdma_pb_header_r(pbdma_id)),
1607                                 gk20a_readl(g, pbdma_hdr_shadow_r(pbdma_id)),
1608                                 gk20a_readl(g, pbdma_method0_r(pbdma_id)),
1609                                 gk20a_readl(g, pbdma_method1_r(pbdma_id)),
1610                                 gk20a_readl(g, pbdma_method2_r(pbdma_id)),
1611                                 gk20a_readl(g, pbdma_method3_r(pbdma_id))
1612                                 );
1613                         reset = true;
1614                         handled |= ((f->intr.pbdma.device_fatal_0 |
1615                                      f->intr.pbdma.channel_fatal_0 |
1616                                      f->intr.pbdma.restartable_0) &
1617                                     pbdma_intr_0);
1618                 }
1619
1620                 if (pbdma_intr_0 & pbdma_intr_0_pbentry_pending_f()) {
1621                         gk20a_fifo_reset_pbdma_header(g, pbdma_id);
1622                         gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
1623                         reset = true;
1624                 }
1625
1626                 if (pbdma_intr_0 & pbdma_intr_0_method_pending_f()) {
1627                         gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0);
1628                         reset = true;
1629                 }
1630
1631                 if (pbdma_intr_0 & pbdma_intr_0_device_pending_f()) {
1632                         gk20a_fifo_reset_pbdma_header(g, pbdma_id);
1633
1634                         for (i = 0; i < 4; i++) {
1635                                 if (gk20a_fifo_is_sw_method_subch(g,
1636                                                 pbdma_id, i))
1637                                         gk20a_fifo_reset_pbdma_method(g,
1638                                                         pbdma_id, i);
1639                         }
1640                         reset = true;
1641                 }
1642
1643                 gk20a_writel(g, pbdma_intr_0_r(pbdma_id), pbdma_intr_0);
1644         }
1645
1646         /* All interrupts in pbdma_intr_1 are "host copy engine" related,
1647          * which gk20a doesn't have. For now just treat them as channel fatal. */
1648         if (pbdma_intr_1) {
1649                 dev_err(dev, "channel hce error: pbdma_intr_1(%d): 0x%08x",
1650                         pbdma_id, pbdma_intr_1);
1651                 reset = true;
1652                 gk20a_writel(g, pbdma_intr_1_r(pbdma_id), pbdma_intr_1);
1653         }
1654
1655         if (reset) {
1656                 /* Remove the channel from runlist */
1657                 u32 status = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id));
1658                 u32 id = fifo_pbdma_status_id_v(status);
1659                 if (fifo_pbdma_status_id_type_v(status)
1660                                 == fifo_pbdma_status_id_type_chid_v()) {
1661                         struct channel_gk20a *ch = &f->channel[id];
1662
1663                         if (gk20a_channel_get(ch)) {
1664                                 gk20a_set_error_notifier(ch,
1665                                                 NVGPU_CHANNEL_PBDMA_ERROR);
1666                                 gk20a_fifo_recover_ch(g, id, true);
1667                                 gk20a_channel_put(ch);
1668                         }
1669                 } else if (fifo_pbdma_status_id_type_v(status)
1670                                 == fifo_pbdma_status_id_type_tsgid_v()) {
1671                         struct tsg_gk20a *tsg = &f->tsg[id];
1672                         struct channel_gk20a *ch = NULL;
1673
1674                         mutex_lock(&tsg->ch_list_lock);
1675                         list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1676                                 if (gk20a_channel_get(ch)) {
1677                                         gk20a_set_error_notifier(ch,
1678                                                 NVGPU_CHANNEL_PBDMA_ERROR);
1679                                         gk20a_channel_put(ch);
1680                                 }
1681                         }
1682                         mutex_unlock(&tsg->ch_list_lock);
1683                         gk20a_fifo_recover_tsg(g, id, true);
1684                 }
1685         }
1686
1687         return handled;
1688 }
1689
1690 static u32 fifo_channel_isr(struct gk20a *g, u32 fifo_intr)
1691 {
1692         gk20a_channel_semaphore_wakeup(g);
1693         return fifo_intr_0_channel_intr_pending_f();
1694 }
1695
1696
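/*
 * Dispatch per-PBDMA interrupts: for every PBDMA flagged in
 * fifo_intr_pbdma_id_r(), run gk20a_fifo_handle_pbdma_intr().  The
 * per-PBDMA interrupt registers are cleared by the handler itself, so
 * only the top-level pbdma_intr_pending bit is returned for clearing.
 */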
1697 static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
1698 {
1699         struct device *dev = dev_from_gk20a(g);
1700         struct fifo_gk20a *f = &g->fifo;
1701         u32 clear_intr = 0, i;
1702         u32 pbdma_pending = gk20a_readl(g, fifo_intr_pbdma_id_r());
1703
1704         for (i = 0; i < fifo_intr_pbdma_id_status__size_1_v(); i++) {
1705                 if (fifo_intr_pbdma_id_status_f(pbdma_pending, i)) {
1706                         gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
1707                         clear_intr |=
1708                                 gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
1709                 }
1710         }
1711         return fifo_intr_0_pbdma_intr_pending_f();
1712 }
1713
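/*
 * Top-level handler for stalling FIFO interrupts (called from a threaded
 * interrupt handler, not a true ISR).  Handles runlist events, per-PBDMA
 * interrupts and the error-class interrupts under the ISR mutex, then
 * writes the handled bits back to fifo_intr_0 to clear them.
 */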
1714 void gk20a_fifo_isr(struct gk20a *g)
1715 {
1716         u32 error_intr_mask =
1717                 fifo_intr_0_bind_error_pending_f() |
1718                 fifo_intr_0_sched_error_pending_f() |
1719                 fifo_intr_0_chsw_error_pending_f() |
1720                 fifo_intr_0_fb_flush_timeout_pending_f() |
1721                 fifo_intr_0_dropped_mmu_fault_pending_f() |
1722                 fifo_intr_0_mmu_fault_pending_f() |
1723                 fifo_intr_0_lb_error_pending_f() |
1724                 fifo_intr_0_pio_error_pending_f();
1725
1726         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1727         u32 clear_intr = 0;
1728
1729         if (g->fifo.sw_ready) {
1730                 /* note we're not actually in an "isr", but rather
1731                  * in a threaded interrupt context... */
1732                 mutex_lock(&g->fifo.intr.isr.mutex);
1733
1734                 gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
1735
1736                 /* handle runlist update */
1737                 if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
1738                         gk20a_fifo_handle_runlist_event(g);
1739                         clear_intr |= fifo_intr_0_runlist_event_pending_f();
1740                 }
1741                 if (fifo_intr & fifo_intr_0_pbdma_intr_pending_f())
1742                         clear_intr |= fifo_pbdma_isr(g, fifo_intr);
1743
1744                 if (unlikely(fifo_intr & error_intr_mask))
1745                         clear_intr = fifo_error_isr(g, fifo_intr);
1746
1747                 mutex_unlock(&g->fifo.intr.isr.mutex);
1748         }
1749         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1750
1751         return;
1752 }
1753
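/*
 * Non-stalling FIFO interrupt: only channel (semaphore) interrupts are
 * expected here, so wake up semaphore waiters and clear the bit.
 */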
1754 void gk20a_fifo_nonstall_isr(struct gk20a *g)
1755 {
1756         u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
1757         u32 clear_intr = 0;
1758
1759         gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
1760
1761         if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
1762                 clear_intr |= fifo_channel_isr(g, fifo_intr);
1763
1764         gk20a_writel(g, fifo_intr_0_r(), clear_intr);
1765
1766         return;
1767 }
1768
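/*
 * Issue a preempt request for a channel or TSG and poll fifo_preempt_r()
 * until the pending bit clears.  On timeout the affected channel(s) get
 * an IDLE_TIMEOUT error notifier and are recovered.  Callers hold the
 * runlist mutexes (see gk20a_fifo_preempt_channel()/_tsg()).
 */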
1769 static int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg)
1770 {
1771         u32 delay = GR_IDLE_CHECK_DEFAULT;
1772         unsigned long end_jiffies = jiffies
1773                 + msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
1774         u32 ret = 0;
1775
1776         gk20a_dbg_fn("%d", id);
1777
1778         /* issue preempt */
1779         if (is_tsg)
1780                 gk20a_writel(g, fifo_preempt_r(),
1781                         fifo_preempt_id_f(id) |
1782                         fifo_preempt_type_tsg_f());
1783         else
1784                 gk20a_writel(g, fifo_preempt_r(),
1785                         fifo_preempt_chid_f(id) |
1786                         fifo_preempt_type_channel_f());
1787
1788         gk20a_dbg_fn("%d", id);
1789         /* wait for preempt */
1790         ret = -EBUSY;
1791         do {
1792                 if (!(gk20a_readl(g, fifo_preempt_r()) &
1793                         fifo_preempt_pending_true_f())) {
1794                         ret = 0;
1795                         break;
1796                 }
1797
1798                 usleep_range(delay, delay * 2);
1799                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
1800         } while (time_before(jiffies, end_jiffies) ||
1801                         !tegra_platform_is_silicon());
1802
1803         gk20a_dbg_fn("%d", id);
1804         if (ret) {
1805                 if (is_tsg) {
1806                         struct tsg_gk20a *tsg = &g->fifo.tsg[id];
1807                         struct channel_gk20a *ch = NULL;
1808
1809                         gk20a_err(dev_from_gk20a(g),
1810                                 "preempt TSG %d timeout\n", id);
1811
1812                         mutex_lock(&tsg->ch_list_lock);
1813                         list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
1814                                 if (!gk20a_channel_get(ch))
1815                                         continue;
1816                                 gk20a_set_error_notifier(ch,
1817                                         NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1818                                 gk20a_channel_put(ch);
1819                         }
1820                         mutex_unlock(&tsg->ch_list_lock);
1821                         gk20a_fifo_recover_tsg(g, id, true);
1822                 } else {
1823                         struct channel_gk20a *ch = &g->fifo.channel[id];
1824
1825                         gk20a_err(dev_from_gk20a(g),
1826                                 "preempt channel %d timeout\n", id);
1827
1828                         if (gk20a_channel_get(ch)) {
1829                                 gk20a_set_error_notifier(ch,
1830                                                 NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
1831                                 gk20a_fifo_recover_ch(g, id, true);
1832                                 gk20a_channel_put(ch);
1833                         }
1834                 }
1835         }
1836
1837         return ret;
1838 }
1839
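/*
 * Preempt a single channel.  The channel's runlist is not known here, so
 * all runlist mutexes are taken; the PMU FIFO mutex is also acquired
 * (when available) so the preempt does not race with PMU FIFO accesses.
 */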
1840 int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
1841 {
1842         struct fifo_gk20a *f = &g->fifo;
1843         u32 ret = 0;
1844         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1845         u32 mutex_ret = 0;
1846         u32 i;
1847
1848         gk20a_dbg_fn("%d", hw_chid);
1849
1850         /* we have no idea which runlist we are using. lock all */
1851         for (i = 0; i < g->fifo.max_runlists; i++)
1852                 mutex_lock(&f->runlist_info[i].mutex);
1853
1854         mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1855
1856         ret = __locked_fifo_preempt(g, hw_chid, false);
1857
1858         if (!mutex_ret)
1859                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1860
1861         for (i = 0; i < g->fifo.max_runlists; i++)
1862                 mutex_unlock(&f->runlist_info[i].mutex);
1863
1864         return ret;
1865 }
1866
1867 int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
1868 {
1869         struct fifo_gk20a *f = &g->fifo;
1870         u32 ret = 0;
1871         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1872         u32 mutex_ret = 0;
1873         u32 i;
1874
1875         gk20a_dbg_fn("%d", tsgid);
1876
1877         /* we have no idea which runlist we are using. lock all */
1878         for (i = 0; i < g->fifo.max_runlists; i++)
1879                 mutex_lock(&f->runlist_info[i].mutex);
1880
1881         mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1882
1883         ret = __locked_fifo_preempt(g, tsgid, true);
1884
1885         if (!mutex_ret)
1886                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1887
1888         for (i = 0; i < g->fifo.max_runlists; i++)
1889                 mutex_unlock(&f->runlist_info[i].mutex);
1890
1891         return ret;
1892 }
1893
1894 int gk20a_fifo_preempt(struct gk20a *g, struct channel_gk20a *ch)
1895 {
1896         int err;
1897
1898         if (gk20a_is_channel_marked_as_tsg(ch))
1899                 err = gk20a_fifo_preempt_tsg(ch->g, ch->tsgid);
1900         else
1901                 err = gk20a_fifo_preempt_channel(ch->g, ch->hw_chid);
1902
1903         return err;
1904 }
1905
1906 int gk20a_fifo_enable_engine_activity(struct gk20a *g,
1907                                 struct fifo_engine_info_gk20a *eng_info)
1908 {
1909         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1910         u32 mutex_ret;
1911         u32 enable;
1912
1913         gk20a_dbg_fn("");
1914
1915         mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1916
1917         enable = gk20a_readl(g, fifo_sched_disable_r());
1918         enable &= ~(fifo_sched_disable_true_v() >> eng_info->runlist_id);
1919         gk20a_writel(g, fifo_sched_disable_r(), enable);
1920
1921         if (!mutex_ret)
1922                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1923
1924         gk20a_dbg_fn("done");
1925         return 0;
1926 }
1927
1928 int gk20a_fifo_enable_all_engine_activity(struct gk20a *g)
1929 {
1930         int i;
1931         int err = 0, ret = 0;
1932
1933         for (i = 0; i < g->fifo.max_engines; i++) {
1934                 err = gk20a_fifo_enable_engine_activity(g,
1935                                 &g->fifo.engine_info[i]);
1936                 if (err) {
1937                         gk20a_err(dev_from_gk20a(g),
1938                                 "failed to enable engine %d activity\n", i);
1939                         ret = err;
1940                 }
1941         }
1942
1943         return ret;
1944 }
1945
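/*
 * Stop scheduling on the engine's runlist.  If the engine is busy and
 * wait_for_idle is not set, return -EBUSY.  Otherwise disable the runlist
 * in fifo_sched_disable_r() and preempt whichever channel is currently
 * resident on the PBDMA and on the engine.  On failure, engine activity
 * is re-enabled before returning.
 */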
1946 int gk20a_fifo_disable_engine_activity(struct gk20a *g,
1947                                 struct fifo_engine_info_gk20a *eng_info,
1948                                 bool wait_for_idle)
1949 {
1950         u32 gr_stat, pbdma_stat, chan_stat, eng_stat, ctx_stat;
1951         u32 pbdma_chid = ~0, engine_chid = ~0, disable;
1952         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
1953         u32 mutex_ret;
1954         u32 err = 0;
1955
1956         gk20a_dbg_fn("");
1957
1958         gr_stat =
1959                 gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1960         if (fifo_engine_status_engine_v(gr_stat) ==
1961             fifo_engine_status_engine_busy_v() && !wait_for_idle)
1962                 return -EBUSY;
1963
1964         mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
1965
1966         disable = gk20a_readl(g, fifo_sched_disable_r());
1967         disable = set_field(disable,
1968                         fifo_sched_disable_runlist_m(eng_info->runlist_id),
1969                         fifo_sched_disable_runlist_f(fifo_sched_disable_true_v(),
1970                                 eng_info->runlist_id));
1971         gk20a_writel(g, fifo_sched_disable_r(), disable);
1972
1973         /* chid from pbdma status */
1974         pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(eng_info->pbdma_id));
1975         chan_stat  = fifo_pbdma_status_chan_status_v(pbdma_stat);
1976         if (chan_stat == fifo_pbdma_status_chan_status_valid_v() ||
1977             chan_stat == fifo_pbdma_status_chan_status_chsw_save_v())
1978                 pbdma_chid = fifo_pbdma_status_id_v(pbdma_stat);
1979         else if (chan_stat == fifo_pbdma_status_chan_status_chsw_load_v() ||
1980                  chan_stat == fifo_pbdma_status_chan_status_chsw_switch_v())
1981                 pbdma_chid = fifo_pbdma_status_next_id_v(pbdma_stat);
1982
1983         if (pbdma_chid != ~0) {
1984                 err = g->ops.fifo.preempt_channel(g, pbdma_chid);
1985                 if (err)
1986                         goto clean_up;
1987         }
1988
1989         /* chid from engine status */
1990         eng_stat = gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
1991         ctx_stat  = fifo_engine_status_ctx_status_v(eng_stat);
1992         if (ctx_stat == fifo_engine_status_ctx_status_valid_v() ||
1993             ctx_stat == fifo_engine_status_ctx_status_ctxsw_save_v())
1994                 engine_chid = fifo_engine_status_id_v(eng_stat);
1995         else if (ctx_stat == fifo_engine_status_ctx_status_ctxsw_load_v() ||
1996                  ctx_stat == fifo_engine_status_ctx_status_ctxsw_switch_v())
1997                 engine_chid = fifo_engine_status_next_id_v(eng_stat);
1998
1999         if (engine_chid != ~0 && engine_chid != pbdma_chid) {
2000                 err = g->ops.fifo.preempt_channel(g, engine_chid);
2001                 if (err)
2002                         goto clean_up;
2003         }
2004
2005 clean_up:
2006         if (!mutex_ret)
2007                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2008
2009         if (err) {
2010                 gk20a_dbg_fn("failed");
2011                 if (gk20a_fifo_enable_engine_activity(g, eng_info))
2012                         gk20a_err(dev_from_gk20a(g),
2013                                 "failed to enable gr engine activity\n");
2014         } else {
2015                 gk20a_dbg_fn("done");
2016         }
2017         return err;
2018 }
2019
2020 int gk20a_fifo_disable_all_engine_activity(struct gk20a *g,
2021                                 bool wait_for_idle)
2022 {
2023         int i;
2024         int err = 0, ret = 0;
2025
2026         for (i = 0; i < g->fifo.max_engines; i++) {
2027                 err = gk20a_fifo_disable_engine_activity(g,
2028                                 &g->fifo.engine_info[i],
2029                                 wait_for_idle);
2030                 if (err) {
2031                         gk20a_err(dev_from_gk20a(g),
2032                                 "failed to disable engine %d activity\n", i);
2033                         ret = err;
2034                         break;
2035                 }
2036         }
2037
2038         if (err) {
2039                 while (--i >= 0) {
2040                         err = gk20a_fifo_enable_engine_activity(g,
2041                                                 &g->fifo.engine_info[i]);
2042                         if (err)
2043                                 gk20a_err(dev_from_gk20a(g),
2044                                  "failed to re-enable engine %d activity\n", i);
2045                 }
2046         }
2047
2048         return ret;
2049 }
2050
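/*
 * After a runlist submission times out, recover every engine that is
 * both busy and served by the given runlist.
 */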
2051 static void gk20a_fifo_runlist_reset_engines(struct gk20a *g, u32 runlist_id)
2052 {
2053         struct fifo_gk20a *f = &g->fifo;
2054         u32 engines = 0;
2055         int i;
2056
2057         for (i = 0; i < f->max_engines; i++) {
2058                 u32 status = gk20a_readl(g, fifo_engine_status_r(i));
2059                 bool engine_busy = fifo_engine_status_engine_v(status) ==
2060                         fifo_engine_status_engine_busy_v();
2061
2062                 if (engine_busy &&
2063                     (f->engine_info[i].runlist_id == runlist_id))
2064                         engines |= BIT(i);
2065         }
2066
2067         if (engines)
2068                 gk20a_fifo_recover(g, engines, ~(u32)0, false, false, true);
2069 }
2070
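/*
 * Poll until the hardware has finished fetching the submitted runlist
 * (the pending bit clears) or the GR idle timeout expires.
 */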
2071 static int gk20a_fifo_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
2072 {
2073         struct fifo_runlist_info_gk20a *runlist;
2074         unsigned long end_jiffies = jiffies +
2075                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
2076         unsigned long delay = GR_IDLE_CHECK_DEFAULT;
2077         int ret = -ETIMEDOUT;
2078
2079         runlist = &g->fifo.runlist_info[runlist_id];
2080         do {
2081                 if ((gk20a_readl(g, fifo_eng_runlist_r(runlist_id)) &
2082                                 fifo_eng_runlist_pending_true_f()) == 0) {
2083                         ret = 0;
2084                         break;
2085                 }
2086
2087                 usleep_range(delay, delay * 2);
2088                 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
2089         } while (time_before(jiffies, end_jiffies) ||
2090                  !tegra_platform_is_silicon());
2091
2092         return ret;
2093 }
2094
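/*
 * Rebuild and submit the runlist; runlist->mutex must be held.  The new
 * image is written into the currently inactive buffer: bare channels
 * first, then one TSG header entry per active TSG followed by its
 * runnable channels.  hw_chid selects the channel to add to or remove
 * from the active bitmaps; ~0 means "all channels" (suspend/resume), in
 * which case the bitmaps are left untouched.  Optionally waits for the
 * hardware to consume the new runlist and recovers the runlist's engines
 * on timeout.
 */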
2095 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
2096                                             u32 hw_chid, bool add,
2097                                             bool wait_for_finish)
2098 {
2099         u32 ret = 0;
2100         struct fifo_gk20a *f = &g->fifo;
2101         struct fifo_runlist_info_gk20a *runlist = NULL;
2102         u32 *runlist_entry_base = NULL;
2103         u32 *runlist_entry = NULL;
2104         phys_addr_t runlist_pa;
2105         u32 old_buf, new_buf;
2106         u32 chid, tsgid;
2107         struct channel_gk20a *ch = NULL;
2108         struct tsg_gk20a *tsg = NULL;
2109         u32 count = 0;
2110         u32 count_channels_in_tsg;
2111         runlist = &f->runlist_info[runlist_id];
2112
2113         /* For a valid channel, add it to or remove it from the active list.
2114            Otherwise keep the active list untouched for suspend/resume. */
2115         if (hw_chid != ~0) {
2116                 ch = &f->channel[hw_chid];
2117                 if (gk20a_is_channel_marked_as_tsg(ch))
2118                         tsg = &f->tsg[ch->tsgid];
2119
2120                 if (add) {
2121                         if (test_and_set_bit(hw_chid,
2122                                 runlist->active_channels) == 1)
2123                                 return 0;
2124                         if (tsg && ++tsg->num_active_channels)
2125                                 set_bit(f->channel[hw_chid].tsgid,
2126                                         runlist->active_tsgs);
2127                 } else {
2128                         if (test_and_clear_bit(hw_chid,
2129                                 runlist->active_channels) == 0)
2130                                 return 0;
2131                         if (tsg && --tsg->num_active_channels == 0)
2132                                 clear_bit(f->channel[hw_chid].tsgid,
2133                                         runlist->active_tsgs);
2134                 }
2135         }
2136
2137         old_buf = runlist->cur_buffer;
2138         new_buf = !runlist->cur_buffer;
2139
2140         gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%016llx",
2141                 runlist_id, (u64)gk20a_mem_phys(&runlist->mem[new_buf]));
2142
2143         runlist_pa = gk20a_mem_phys(&runlist->mem[new_buf]);
2144         if (!runlist_pa) {
2145                 ret = -EINVAL;
2146                 goto clean_up;
2147         }
2148
2149         runlist_entry_base = runlist->mem[new_buf].cpu_va;
2150         if (!runlist_entry_base) {
2151                 ret = -ENOMEM;
2152                 goto clean_up;
2153         }
2154
2155         if (hw_chid != ~0 || /* add/remove a valid channel */
2156             add /* resume to add all channels back */) {
2157                 runlist_entry = runlist_entry_base;
2158
2159                 /* add non-TSG channels first */
2160                 for_each_set_bit(chid,
2161                         runlist->active_channels, f->num_channels) {
2162                         ch = &f->channel[chid];
2163
2164                         if (!gk20a_is_channel_marked_as_tsg(ch)) {
2165                                 gk20a_dbg_info("add channel %d to runlist",
2166                                         chid);
2167                                 runlist_entry[0] = ram_rl_entry_chid_f(chid);
2168                                 runlist_entry[1] = 0;
2169                                 runlist_entry += 2;
2170                                 count++;
2171                         }
2172                 }
2173
2174                 /* now add TSG entries and channels bound to TSG */
2175                 mutex_lock(&f->tsg_inuse_mutex);
2176                 for_each_set_bit(tsgid,
2177                                 runlist->active_tsgs, f->num_channels) {
2178                         tsg = &f->tsg[tsgid];
2179                         /* add TSG entry */
2180                         gk20a_dbg_info("add TSG %d to runlist", tsg->tsgid);
2181                         runlist_entry[0] = ram_rl_entry_id_f(tsg->tsgid) |
2182                                 ram_rl_entry_type_tsg_f() |
2183                                 ram_rl_entry_timeslice_scale_3_f() |
2184                                 ram_rl_entry_timeslice_timeout_128_f() |
2185                                 ram_rl_entry_tsg_length_f(
2186                                         tsg->num_active_channels);
2187                         runlist_entry[1] = 0;
2188                         runlist_entry += 2;
2189                         count++;
2190
2191                         /* add runnable channels bound to this TSG */
2192                         count_channels_in_tsg = 0;
2193                         mutex_lock(&tsg->ch_list_lock);
2194                         list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
2195                                 if (!test_bit(ch->hw_chid,
2196                                                 runlist->active_channels))
2197                                         continue;
2198                                 gk20a_dbg_info("add channel %d to runlist",
2199                                         ch->hw_chid);
2200                                 runlist_entry[0] =
2201                                         ram_rl_entry_chid_f(ch->hw_chid);
2202                                 runlist_entry[1] = 0;
2203                                 runlist_entry += 2;
2204                                 count++;
2205                                 count_channels_in_tsg++;
2206                         }
2207                         mutex_unlock(&tsg->ch_list_lock);
2208
2209                         WARN_ON(tsg->num_active_channels !=
2210                                 count_channels_in_tsg);
2211                 }
2212                 mutex_unlock(&f->tsg_inuse_mutex);
2213         } else  /* suspend to remove all channels */
2214                 count = 0;
2215
2216         if (count != 0) {
2217                 gk20a_writel(g, fifo_runlist_base_r(),
2218                         fifo_runlist_base_ptr_f(u64_lo32(runlist_pa >> 12)) |
2219                         fifo_runlist_base_target_vid_mem_f());
2220         }
2221
2222         gk20a_writel(g, fifo_runlist_r(),
2223                 fifo_runlist_engine_f(runlist_id) |
2224                 fifo_eng_runlist_length_f(count));
2225
2226         if (wait_for_finish) {
2227                 ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
2228
2229                 if (ret == -ETIMEDOUT) {
2230                         gk20a_err(dev_from_gk20a(g),
2231                                    "runlist update timeout");
2232
2233                         gk20a_fifo_runlist_reset_engines(g, runlist_id);
2234
2235                         /* engine reset needs the lock. drop it */
2236                         /* wait until the runlist is active again */
2237                         ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
2238                         /* get the lock back. at this point everything
2239                          * should be fine */
2240
2241                         if (ret)
2242                                 gk20a_err(dev_from_gk20a(g),
2243                                            "runlist update failed: %d", ret);
2244                 } else if (ret == -EINTR)
2245                         gk20a_err(dev_from_gk20a(g),
2246                                    "runlist update interrupted");
2247         }
2248
2249         runlist->cur_buffer = new_buf;
2250
2251 clean_up:
2252         return ret;
2253 }
2254
2255 /* add/remove a channel from runlist
2256    special cases below: runlist->active_channels will NOT be changed.
2257    (hw_chid == ~0 && !add) means remove all active channels from runlist.
2258    (hw_chid == ~0 &&  add) means restore all active channels on runlist. */
2259 int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
2260                               bool add, bool wait_for_finish)
2261 {
2262         struct fifo_runlist_info_gk20a *runlist = NULL;
2263         struct fifo_gk20a *f = &g->fifo;
2264         u32 token = PMU_INVALID_MUTEX_OWNER_ID;
2265         u32 mutex_ret;
2266         u32 ret = 0;
2267
2268         gk20a_dbg_fn("");
2269
2270         runlist = &f->runlist_info[runlist_id];
2271
2272         mutex_lock(&runlist->mutex);
2273
2274         mutex_ret = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2275
2276         ret = gk20a_fifo_update_runlist_locked(g, runlist_id, hw_chid, add,
2277                                                wait_for_finish);
2278
2279         if (!mutex_ret)
2280                 pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
2281
2282         mutex_unlock(&runlist->mutex);
2283         return ret;
2284 }
2285
2286 int gk20a_fifo_suspend(struct gk20a *g)
2287 {
2288         gk20a_dbg_fn("");
2289
2290         /* stop bar1 snooping */
2291         gk20a_writel(g, fifo_bar1_base_r(),
2292                         fifo_bar1_base_valid_false_f());
2293
2294         /* disable fifo intr */
2295         gk20a_writel(g, fifo_intr_en_0_r(), 0);
2296         gk20a_writel(g, fifo_intr_en_1_r(), 0);
2297
2298         gk20a_dbg_fn("done");
2299         return 0;
2300 }
2301
2302 bool gk20a_fifo_mmu_fault_pending(struct gk20a *g)
2303 {
2304         if (gk20a_readl(g, fifo_intr_0_r()) &
2305                         fifo_intr_0_mmu_fault_pending_f())
2306                 return true;
2307         else
2308                 return false;
2309 }
2310
2311 int gk20a_fifo_wait_engine_idle(struct gk20a *g)
2312 {
2313         unsigned long end_jiffies = jiffies +
2314                 msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
2315         unsigned long delay = GR_IDLE_CHECK_DEFAULT;
2316         int ret = -ETIMEDOUT;
2317         u32 i;
2318         struct device *d = dev_from_gk20a(g);
2319
2320         gk20a_dbg_fn("");
2321
2322         for (i = 0; i < fifo_engine_status__size_1_v(); i++) {
2323                 do {
2324                         u32 status = gk20a_readl(g, fifo_engine_status_r(i));
2325                         if (!fifo_engine_status_engine_v(status)) {
2326                                 ret = 0;
2327                                 break;
2328                         }
2329
2330                         usleep_range(delay, delay * 2);
2331                         delay = min_t(unsigned long,
2332                                         delay << 1, GR_IDLE_CHECK_MAX);
2333                 } while (time_before(jiffies, end_jiffies) ||
2334                                 !tegra_platform_is_silicon());
2335                 if (ret) {
2336                         gk20a_err(d, "cannot idle engine %u\n", i);
2337                         break;
2338                 }
2339         }
2340
2341         gk20a_dbg_fn("done");
2342
2343         return ret;
2344 }
2345
2346 static void gk20a_fifo_apply_pb_timeout(struct gk20a *g)
2347 {
2348         u32 timeout;
2349
2350         if (tegra_platform_is_silicon()) {
2351                 timeout = gk20a_readl(g, fifo_pb_timeout_r());
2352                 timeout &= ~fifo_pb_timeout_detection_enabled_f();
2353                 gk20a_writel(g, fifo_pb_timeout_r(), timeout);
2354         }
2355 }
2356
2357 static u32 gk20a_fifo_get_num_fifos(struct gk20a *g)
2358 {
2359         return ccsr_channel__size_1_v();
2360 }
2361
2362 u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g)
2363 {
2364         return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();
2365 }
2366
2367 void gk20a_init_fifo(struct gpu_ops *gops)
2368 {
2369         gk20a_init_channel(gops);
2370         gops->fifo.preempt_channel = gk20a_fifo_preempt_channel;
2371         gops->fifo.update_runlist = gk20a_fifo_update_runlist;
2372         gops->fifo.trigger_mmu_fault = gk20a_fifo_trigger_mmu_fault;
2373         gops->fifo.apply_pb_timeout = gk20a_fifo_apply_pb_timeout;
2374         gops->fifo.wait_engine_idle = gk20a_fifo_wait_engine_idle;
2375         gops->fifo.get_num_fifos = gk20a_fifo_get_num_fifos;
2376         gops->fifo.get_pbdma_signature = gk20a_fifo_get_pbdma_signature;
2377 }