drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c  (sojka/nv-tegra/linux-3.10.git)
gpu: nvgpu: IOCTL to disable timeouts
1 /*
2  * Tegra GK20A GPU Debugger/Profiler Driver
3  *
4  * Copyright (c) 2013-2015, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/file.h>
21 #include <linux/cdev.h>
22 #include <linux/uaccess.h>
23 #include <linux/nvhost.h>
24 #include <uapi/linux/nvgpu.h>
25
26 #include "gk20a.h"
27 #include "gr_gk20a.h"
28 #include "dbg_gpu_gk20a.h"
29 #include "regops_gk20a.h"
30 #include "hw_therm_gk20a.h"
31 #include "hw_gr_gk20a.h"
32 #include "hw_perf_gk20a.h"
33
34 struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a = {
35         .exec_reg_ops = exec_regops_gk20a,
36 };
37
38 /* silly allocator - just increment session id */
39 static atomic_t session_id = ATOMIC_INIT(0);
40 static int generate_session_id(void)
41 {
42         return atomic_add_return(1, &session_id);
43 }
44
45 static int alloc_session(struct dbg_session_gk20a **_dbg_s)
46 {
47         struct dbg_session_gk20a *dbg_s;
48         *_dbg_s = NULL;
49
50         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
51
52         dbg_s = kzalloc(sizeof(*dbg_s), GFP_KERNEL);
53         if (!dbg_s)
54                 return -ENOMEM;
55
56         dbg_s->id = generate_session_id();
57         dbg_s->ops = &dbg_gpu_session_ops_gk20a;
58         *_dbg_s = dbg_s;
59         return 0;
60 }
61
62 static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
63                 struct file *filp, bool is_profiler)
64 {
65         struct dbg_session_gk20a *dbg_session;
66         struct gk20a *g;
67
68         struct platform_device *pdev;
69         struct device *dev;
70
71         int err;
72
73         if (!is_profiler)
74                 g = container_of(inode->i_cdev,
75                                  struct gk20a, dbg.cdev);
76         else
77                 g = container_of(inode->i_cdev,
78                                  struct gk20a, prof.cdev);
79         pdev = g->dev;
80         dev  = &pdev->dev;
81
82         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
83
84         err  = alloc_session(&dbg_session);
85         if (err)
86                 return err;
87
88         filp->private_data = dbg_session;
89         dbg_session->pdev  = pdev;
90         dbg_session->dev   = dev;
91         dbg_session->g     = g;
92         dbg_session->is_profiler = is_profiler;
93         dbg_session->is_pg_disabled = false;
94         dbg_session->is_timeout_disabled = false;
95         /* For vgpu, all power-gating features are currently disabled
96          * in the server. Set is_pg_disabled to true to reflect this
97          * on the client side. */
98         if (gk20a_gpu_is_virtual(pdev))
99                 dbg_session->is_pg_disabled = true;
100
101         INIT_LIST_HEAD(&dbg_session->dbg_s_list_node);
102         init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
103         dbg_session->dbg_events.events_enabled = false;
104         dbg_session->dbg_events.num_pending_events = 0;
105
106         return 0;
107 }
108
109 /* used in scenarios where the debugger session can take just the per-channel
110  * lock for performance, but the profiler session must take the per-gpu lock
111  * since it might not have an associated channel. */
112 static void gk20a_dbg_session_mutex_lock(struct dbg_session_gk20a *dbg_s)
113 {
114         if (dbg_s->is_profiler)
115                 mutex_lock(&dbg_s->g->dbg_sessions_lock);
116         else
117                 mutex_lock(&dbg_s->ch->dbg_s_lock);
118 }
119
120 static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s)
121 {
122         if (dbg_s->is_profiler)
123                 mutex_unlock(&dbg_s->g->dbg_sessions_lock);
124         else
125                 mutex_unlock(&dbg_s->ch->dbg_s_lock);
126 }
127
128 static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
129 {
130         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
131
132         gk20a_dbg_session_mutex_lock(dbg_s);
133
134         dbg_s->dbg_events.events_enabled = true;
135         dbg_s->dbg_events.num_pending_events = 0;
136
137         gk20a_dbg_session_mutex_unlock(dbg_s);
138 }
139
140 static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
141 {
142         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
143
144         gk20a_dbg_session_mutex_lock(dbg_s);
145
146         dbg_s->dbg_events.events_enabled = false;
147         dbg_s->dbg_events.num_pending_events = 0;
148
149         gk20a_dbg_session_mutex_unlock(dbg_s);
150 }
151
152 static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
153 {
154         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
155
156         gk20a_dbg_session_mutex_lock(dbg_s);
157
158         if (dbg_s->dbg_events.events_enabled &&
159                         dbg_s->dbg_events.num_pending_events > 0)
160                 dbg_s->dbg_events.num_pending_events--;
161
162         gk20a_dbg_session_mutex_unlock(dbg_s);
163 }
164
165 static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
166                           struct nvgpu_dbg_gpu_events_ctrl_args *args)
167 {
168         int ret = 0;
169
170         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
171
172         if (!dbg_s->ch) {
173                 gk20a_err(dev_from_gk20a(dbg_s->g),
174                            "no channel bound to dbg session\n");
175                 return -EINVAL;
176         }
177
178         switch (args->cmd) {
179         case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
180                 gk20a_dbg_gpu_events_enable(dbg_s);
181                 break;
182
183         case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
184                 gk20a_dbg_gpu_events_disable(dbg_s);
185                 break;
186
187         case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
188                 gk20a_dbg_gpu_events_clear(dbg_s);
189                 break;
190
191         default:
192                 gk20a_err(dev_from_gk20a(dbg_s->g),
193                            "unrecognized dbg gpu events ctrl cmd: 0x%x",
194                            args->cmd);
195                 ret = -EINVAL;
196                 break;
197         }
198
199         return ret;
200 }
201
202 unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
203 {
204         unsigned int mask = 0;
205         struct dbg_session_gk20a *dbg_s = filep->private_data;
206
207         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
208
209         poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
210
211         gk20a_dbg_session_mutex_lock(dbg_s);
212
213         if (dbg_s->dbg_events.events_enabled &&
214                         dbg_s->dbg_events.num_pending_events > 0) {
215                 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
216                                 dbg_s->id);
217                 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
218                                 dbg_s->dbg_events.num_pending_events);
219                 mask = (POLLPRI | POLLIN);
220         }
221
222         gk20a_dbg_session_mutex_unlock(dbg_s);
223
224         return mask;
225 }
226
227 int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
228 {
229         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
230         return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
231 }
232
233 int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
234 {
235         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
236         return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
237 }
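
/*
 * Usage sketch (userspace, not driver code): which of the two open paths
 * above runs is decided purely by the character device that is opened; the
 * debugger node yields a session that is later bound to a channel, the
 * profiler node one that can work without a bound channel.  The node names
 * below and the installed header name <linux/nvgpu.h> used in the later
 * sketches are assumptions about the target rootfs, not something this
 * file guarantees.
 */
#include <fcntl.h>

static int open_dbg_session(int profiler)
{
        const char *node = profiler ? "/dev/nvhost-prof-gpu"
                                    : "/dev/nvhost-dbg-gpu";

        /* the returned fd is the handle for all NVGPU_DBG_GPU_IOCTL_* calls */
        return open(node, O_RDWR);
}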
238
239 void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
240 {
241         struct dbg_session_gk20a *dbg_s;
242
243         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
244
245         /* guard against the session list being modified */
246         mutex_lock(&ch->dbg_s_lock);
247
248         list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
249                 if (dbg_s->dbg_events.events_enabled) {
250                         gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
251                                         dbg_s->id);
252                         gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
253                                         dbg_s->dbg_events.num_pending_events);
254
255                         dbg_s->dbg_events.num_pending_events++;
256
257                         wake_up_interruptible_all(&dbg_s->dbg_events.wait_queue);
258                 }
259         }
260
261         mutex_unlock(&ch->dbg_s_lock);
262 }
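
/*
 * Usage sketch (userspace, not driver code): consuming the debug events
 * implemented above.  After binding a channel, events are switched on with
 * the EVENTS_CTRL ioctl, poll() blocks until gk20a_dbg_gpu_post_events()
 * raises num_pending_events, and CLEAR consumes one pending event.  The
 * header name <linux/nvgpu.h> is an assumption; the ioctl and command
 * names are taken from this file.
 */
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int wait_for_dbg_event(int dbg_fd)
{
        struct nvgpu_dbg_gpu_events_ctrl_args ctrl = { 0 };
        struct pollfd pfd = { .fd = dbg_fd, .events = POLLPRI | POLLIN };
        int err;

        ctrl.cmd = NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE;
        err = ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL, &ctrl);
        if (err)
                return err;

        /* gk20a_dbg_gpu_dev_poll() reports POLLPRI|POLLIN while events are
         * pending on this session */
        if (poll(&pfd, 1, -1) < 0)
                return -1;

        /* consume one pending event */
        ctrl.cmd = NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR;
        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL, &ctrl);
}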
263
264
265 static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
266                                 __u32  powermode);
267
268 static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
269                           int timeout_mode)
270 {
271         struct gk20a *g = dbg_s->g;
272         int err = 0;
273
274         gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
275                         timeout_mode);
276
277         switch (timeout_mode) {
278         case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
279                 if (dbg_s->is_timeout_disabled &&
280                     --g->dbg_timeout_disabled_refcount == 0) {
281                         g->timeouts_enabled = true;
282                 }
283                 dbg_s->is_timeout_disabled = false;
284                 break;
285
286         case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
287                 if ((dbg_s->is_timeout_disabled == false) &&
288                     (g->dbg_timeout_disabled_refcount++ == 0)) {
289                         g->timeouts_enabled = false;
290                 }
291                 dbg_s->is_timeout_disabled = true;
292                 break;
293
294         default:
295                 gk20a_err(dev_from_gk20a(g),
296                            "unrecognized dbg gpu timeout mode : 0x%x",
297                            timeout_mode);
298                 err = -EINVAL;
299                 break;
300         }
301
302         gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
303                         g->timeouts_enabled ? "Yes" : "No");
304
305         return err;
306 }
307
308 static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
309 {
310         struct channel_gk20a *ch_gk20a = dbg_s->ch;
311         struct gk20a *g = dbg_s->g;
312
313         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
314
315         /* wasn't bound to start with? */
316         if (!ch_gk20a) {
317                 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "not bound already?");
318                 return -ENODEV;
319         }
320
321         mutex_lock(&g->dbg_sessions_lock);
322         mutex_lock(&ch_gk20a->dbg_s_lock);
323
324         --g->dbg_sessions;
325
326         dbg_s->ch = NULL;
327         fput(dbg_s->ch_f);
328         dbg_s->ch_f = NULL;
329
330         list_del_init(&dbg_s->dbg_s_list_node);
331
332         mutex_unlock(&ch_gk20a->dbg_s_lock);
333         mutex_unlock(&g->dbg_sessions_lock);
334
335         return 0;
336 }
337
338 int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
339 {
340         struct dbg_session_gk20a *dbg_s = filp->private_data;
341         struct gk20a *g = dbg_s->g;
342
343         gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", dev_name(dbg_s->dev));
344
345         /* unbind if it was bound */
346         if (dbg_s->ch)
347                 dbg_unbind_channel_gk20a(dbg_s);
348
349         /* Powergate/timeout enable is called here because the dbg_session
350          * that issued the powergate/timeout disable ioctl may be killed
351          * without ever issuing the corresponding enable ioctl.
352          */
353         mutex_lock(&g->dbg_sessions_lock);
354         dbg_set_powergate(dbg_s, NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
355         nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
356         mutex_unlock(&g->dbg_sessions_lock);
357
358         kfree(dbg_s);
359         return 0;
360 }
361
362 static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
363                           struct nvgpu_dbg_gpu_bind_channel_args *args)
364 {
365         struct file *f;
366         struct gk20a *g;
367         struct channel_gk20a *ch;
368
369         gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
370                    dev_name(dbg_s->dev), args->channel_fd);
371
372         if (args->channel_fd == ~0)
373                 return dbg_unbind_channel_gk20a(dbg_s);
374
375         /* gk20a_get_channel_from_file() takes and drops its own reference;
376          * by holding an extra reference here we keep the channel from
377          * disappearing while the debugger session is bound to it */
378         f = fget(args->channel_fd);
379         if (!f)
380                 return -ENODEV;
381
382         ch = gk20a_get_channel_from_file(args->channel_fd);
383         if (!ch) {
384                 gk20a_dbg_fn("no channel found for fd");
385                 fput(f);
386                 return -EINVAL;
387         }
388
389         g = dbg_s->g;
390         gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
391
392         mutex_lock(&g->dbg_sessions_lock);
393         mutex_lock(&ch->dbg_s_lock);
394
395         dbg_s->ch_f = f;
396         dbg_s->ch = ch;
397         list_add(&dbg_s->dbg_s_list_node, &dbg_s->ch->dbg_s_list);
398
399         g->dbg_sessions++;
400
401         mutex_unlock(&ch->dbg_s_lock);
402         mutex_unlock(&g->dbg_sessions_lock);
403         return 0;
404 }
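
/*
 * Usage sketch (userspace, not driver code): binding an existing nvgpu
 * channel fd to the debugger session so channel-relative ioctls (events,
 * regops on a non-profiler session, SM suspend/resume) become usable.
 * How channel_fd itself is obtained is outside this file.
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_bind_channel(int dbg_fd, int channel_fd)
{
        struct nvgpu_dbg_gpu_bind_channel_args args = { 0 };

        args.channel_fd = channel_fd;   /* ~0 requests an unbind instead */
        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &args);
}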
405
406 static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
407                                 struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
408
409 static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
410                                 struct nvgpu_dbg_gpu_powergate_args *args);
411
412 static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
413                               struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
414
415 static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
416                 struct dbg_session_gk20a *dbg_s,
417                 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
418
419 static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
420                 struct nvgpu_dbg_gpu_perfbuf_map_args *args);
421
422 static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
423                 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);
424
425 static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
426                           struct nvgpu_dbg_gpu_pc_sampling_args *args)
427 {
428         struct channel_gk20a *ch = dbg_s->ch;
429         struct gk20a *g = ch->g;
430
431         gk20a_dbg_fn("");
432
433         return g->ops.gr.update_pc_sampling ?
434                 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
435 }
436
437 static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
438                          struct nvgpu_dbg_gpu_timeout_args *args)
439 {
440         int err;
441         struct gk20a *g = get_gk20a(dbg_s->pdev);
442
443         gk20a_dbg_fn("timeout mode = %d", args->enable);
444
445         mutex_lock(&g->dbg_sessions_lock);
446         err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
447         mutex_unlock(&g->dbg_sessions_lock);
448
449         return err;
450 }
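
/*
 * Usage sketch (userspace, not driver code): turning GPU timeouts off while
 * a target is halted under the debugger.  nvgpu_dbg_timeout_enable() above
 * refcounts the requests, so timeouts stay disabled until every session
 * that disabled them re-enables them or is released
 * (see gk20a_dbg_gpu_dev_release()).
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_set_timeouts(int dbg_fd, int enable)
{
        struct nvgpu_dbg_gpu_timeout_args args = { 0 };

        args.enable = enable ? NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE
                             : NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_TIMEOUT, &args);
}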
451
452 long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
453                              unsigned long arg)
454 {
455         struct dbg_session_gk20a *dbg_s = filp->private_data;
456         struct gk20a *g = get_gk20a(dbg_s->pdev);
457         u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
458         int err = 0;
459
460         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
461
462         if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
463             (_IOC_NR(cmd) == 0) ||
464             (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST))
465                 return -EINVAL;
466
467         BUG_ON(_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE);
468
469         memset(buf, 0, sizeof(buf));
470         if (_IOC_DIR(cmd) & _IOC_WRITE) {
471                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
472                         return -EFAULT;
473         }
474
475         if (!g->gr.sw_ready) {
476                 err = gk20a_busy(g->dev);
477                 if (err)
478                         return err;
479
480                 gk20a_idle(g->dev);
481         }
482
483         switch (cmd) {
484         case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
485                 err = dbg_bind_channel_gk20a(dbg_s,
486                              (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
487                 break;
488
489         case NVGPU_DBG_GPU_IOCTL_REG_OPS:
490                 err = nvgpu_ioctl_channel_reg_ops(dbg_s,
491                            (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
492                 break;
493
494         case NVGPU_DBG_GPU_IOCTL_POWERGATE:
495                 err = nvgpu_ioctl_powergate_gk20a(dbg_s,
496                            (struct nvgpu_dbg_gpu_powergate_args *)buf);
497                 break;
498
499         case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
500                 err = gk20a_dbg_gpu_events_ctrl(dbg_s,
501                            (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
502                 break;
503
504         case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
505                 err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
506                            (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
507                 break;
508
509         case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
510                 err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
511                        (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
512                 break;
513
514         case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
515                 err = gk20a_perfbuf_map(dbg_s,
516                        (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
517                 break;
518
519         case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
520                 err = gk20a_perfbuf_unmap(dbg_s,
521                        (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
522                 break;
523
524         case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
525                 err = gk20a_dbg_pc_sampling(dbg_s,
526                            (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
527                 break;
528
529         case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
530                 err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
531                            (struct nvgpu_dbg_gpu_timeout_args *)buf);
532                 break;
533
534         default:
535                 gk20a_err(dev_from_gk20a(g),
536                            "unrecognized dbg gpu ioctl cmd: 0x%x",
537                            cmd);
538                 err = -ENOTTY;
539                 break;
540         }
541
542         gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
543
544         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
545             copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
546                 err = -EFAULT;
547
548         return err;
549 }
550
551 /* In order to perform a context relative op the context has
552  * to be created already... which would imply that the
553  * context switch mechanism has already been put in place.
554  * So by the time we perform such an operation it should always
555  * be possible to query for the appropriate context offsets, etc.
556  *
557  * But note: while the dbg_gpu bind requires a channel fd,
558  * it doesn't require an allocated gr/compute obj at that point...
559  */
560 static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
561                                       struct gr_gk20a *gr)
562 {
563         int err;
564
565         mutex_lock(&gr->ctx_mutex);
566         err = !gr->ctx_vars.golden_image_initialized;
567         mutex_unlock(&gr->ctx_mutex);
568         if (err)
569                 return false;
570         return true;
571
572 }
573
574 static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
575                                 struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
576 {
577         int err = 0, powergate_err = 0;
578         bool is_pg_disabled = false;
579
580         struct device *dev = dbg_s->dev;
581         struct gk20a *g = get_gk20a(dbg_s->pdev);
582         struct nvgpu_dbg_gpu_reg_op *ops;
583         u64 ops_size = sizeof(ops[0]) * args->num_ops;
584
585         gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
586
587         if (!dbg_s->ops) {
588                 gk20a_err(dev, "can't call reg_ops on an unbound debugger session");
589                 return -EINVAL;
590         }
591
592         if (!dbg_s->is_profiler && !dbg_s->ch) {
593                 gk20a_err(dev, "bind a channel before regops for a debugging session");
594                 return -EINVAL;
595         }
596
597         /* be sure that ctx info is in place */
598         if (!gk20a_gpu_is_virtual(dbg_s->pdev) &&
599                 !gr_context_info_available(dbg_s, &g->gr)) {
600                 gk20a_err(dev, "gr context data not available\n");
601                 return -ENODEV;
602         }
603
604         ops = kzalloc(ops_size, GFP_KERNEL);
605         if (!ops) {
606                 gk20a_err(dev, "Allocating memory failed!");
607                 return -ENOMEM;
608         }
609
610         gk20a_dbg_fn("Copying regops from userspace");
611
612         if (copy_from_user(ops, (void __user *)(uintptr_t)args->ops,
613                                                         ops_size)) {
614                 dev_err(dev, "copy_from_user failed!");
615                 err = -EFAULT;
616                 goto clean_up;
617         }
618
619         /* since exec_reg_ops sends methods to the ucode, it must take the
620          * global gpu lock to protect against mixing methods from debug sessions
621          * on other channels */
622         mutex_lock(&g->dbg_sessions_lock);
623
624         if (!dbg_s->is_pg_disabled) {
625                 powergate_err = dbg_set_powergate(dbg_s,
626                                         NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
627                 is_pg_disabled = true;
628         }
629
630         if (!powergate_err) {
631                 err = dbg_s->ops->exec_reg_ops(dbg_s, ops, args->num_ops);
632                 /* enable powergate, if previously disabled */
633                 if (is_pg_disabled) {
634                         powergate_err = dbg_set_powergate(dbg_s,
635                                         NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
636                 }
637         }
638
639         mutex_unlock(&g->dbg_sessions_lock);
640
641         if (!err && powergate_err)
642                 err = powergate_err;
643
644         if (err) {
645                 gk20a_err(dev, "dbg regops failed");
646                 goto clean_up;
647         }
648
649         gk20a_dbg_fn("Copying result to userspace");
650
651         if (copy_to_user((void __user *)(uintptr_t)args->ops, ops, ops_size)) {
652                 dev_err(dev, "copy_to_user failed!");
653                 err = -EFAULT;
654                 goto clean_up;
655         }
656
657  clean_up:
658         kfree(ops);
659         return err;
660 }
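
/*
 * Usage sketch (userspace, not driver code): submitting register operations
 * through the REG_OPS path above.  Only the fields this file actually
 * dereferences (the ops pointer and num_ops) are filled in; the per-op
 * fields (opcode, type, offset, values) are defined in the uapi header and
 * are deliberately not guessed at here.  Results are copied back into the
 * caller's ops[] array by the handler above.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_exec_regops(int dbg_fd, struct nvgpu_dbg_gpu_reg_op *ops,
                           uint32_t num_ops)
{
        struct nvgpu_dbg_gpu_exec_reg_ops_args args;

        memset(&args, 0, sizeof(args));
        args.ops = (uintptr_t)ops;      /* copied in and back out by the kernel */
        args.num_ops = num_ops;

        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_REG_OPS, &args);
}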
661
662 static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
663                                 __u32  powermode)
664 {
665         int err = 0;
666         struct gk20a *g = get_gk20a(dbg_s->pdev);
667
668          /* This function must be called with g->dbg_sessions_lock held */
669
670         gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d",
671                    dev_name(dbg_s->dev), powermode);
672
673         switch (powermode) {
674         case NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE:
675                 /* save off current powergate, clk state.
676                  * set gpu module's can_powergate = 0.
677                  * set gpu module's clk to max.
678                  * while *a* debug session is active there will be no power or
679                  * clocking state changes allowed from mainline code (but they
680                  * should be saved).
681                  */
682                 /* Only perform the hardware-level disable if this session
683                  * hasn't already requested a powergate disable and the global
684                  * powergating_disabled_refcount is still zero
685                  */
686
687                 if ((dbg_s->is_pg_disabled == false) &&
688                     (g->dbg_powergating_disabled_refcount++ == 0)) {
689
690                         gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
691                         err = gk20a_busy(g->dev);
692                         if (err)
693                                 return err;
694
695                         err = gk20a_busy(dbg_s->pdev);
696                         if (err)
697                                 return -EPERM;
698
699                         /* do ELPG disable before clock gating */
700                         if (support_gk20a_pmu(g->dev))
701                                 gk20a_pmu_disable_elpg(g);
702                         g->ops.clock_gating.slcg_gr_load_gating_prod(g,
703                                         false);
704                         g->ops.clock_gating.slcg_perf_load_gating_prod(g,
705                                         false);
706                         g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
707                                         false);
708                         gr_gk20a_init_blcg_mode(g, BLCG_RUN, ENGINE_GR_GK20A);
709
710                         g->elcg_enabled = false;
711                         gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_GR_GK20A);
712                         gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_CE2_GK20A);
713
714                 }
715
716                 dbg_s->is_pg_disabled = true;
717                 break;
718
719         case NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE:
720                 /* restore (can) powergate, clk state */
721                 /* release pending exceptions to fault/be handled as usual */
722                 /* TBD: ordering of these? */
723
724                 /* Re-enable powergating only if this session had requested
725                  * the disable through the ioctl and no other session still
726                  * wants powergating disabled (refcount drops to zero)
727                  */
728                 if (dbg_s->is_pg_disabled &&
729                     --g->dbg_powergating_disabled_refcount == 0) {
730
731                         g->elcg_enabled = true;
732                         gr_gk20a_init_elcg_mode(g, ELCG_AUTO, ENGINE_GR_GK20A);
733                         gr_gk20a_init_elcg_mode(g, ELCG_AUTO, ENGINE_CE2_GK20A);
734                         gr_gk20a_init_blcg_mode(g, BLCG_AUTO, ENGINE_GR_GK20A);
735
736                         g->ops.clock_gating.slcg_gr_load_gating_prod(g,
737                                         g->slcg_enabled);
738                         g->ops.clock_gating.slcg_perf_load_gating_prod(g,
739                                         g->slcg_enabled);
740
741                         if (support_gk20a_pmu(g->dev))
742                                 gk20a_pmu_enable_elpg(g);
743
744                         gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
745                         gk20a_idle(dbg_s->pdev);
746                         gk20a_idle(g->dev);
747                 }
748
749                 dbg_s->is_pg_disabled = false;
750                 break;
751
752         default:
753                 gk20a_err(dev_from_gk20a(g),
754                            "unrecognized dbg gpu powergate mode: 0x%x",
755                            powermode);
756                 err = -ENOTTY;
757                 break;
758         }
759
760         gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d done",
761                    dev_name(dbg_s->dev), powermode);
762         return err;
763 }
764
765 static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
766                                 struct nvgpu_dbg_gpu_powergate_args *args)
767 {
768         int err;
769         struct gk20a *g = get_gk20a(dbg_s->pdev);
770         gk20a_dbg_fn("%s  powergate mode = %d",
771                       dev_name(dbg_s->dev), args->mode);
772
773         mutex_lock(&g->dbg_sessions_lock);
774         err = dbg_set_powergate(dbg_s, args->mode);
775         mutex_unlock(&g->dbg_sessions_lock);
776         return  err;
777 }
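
/*
 * Usage sketch (userspace, not driver code): keeping powergating and clock
 * gating off while a tool inspects GR state.  dbg_set_powergate() above
 * refcounts this across sessions, so gating is restored only when the last
 * requesting session re-enables it or is closed.
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_set_powergating(int dbg_fd, int allow_gating)
{
        struct nvgpu_dbg_gpu_powergate_args args = { 0 };

        args.mode = allow_gating ? NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE
                                 : NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE;
        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_POWERGATE, &args);
}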
778
779 static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
780                                struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
781 {
782         int err;
783         struct gk20a *g = get_gk20a(dbg_s->pdev);
784         struct channel_gk20a *ch_gk20a;
785
786         gk20a_dbg_fn("%s smpc ctxsw mode = %d",
787                      dev_name(dbg_s->dev), args->mode);
788
789         /* Take the global lock, since we'll be doing global regops */
790         mutex_lock(&g->dbg_sessions_lock);
791
792         ch_gk20a = dbg_s->ch;
793
794         if (!ch_gk20a) {
795                 gk20a_err(dev_from_gk20a(dbg_s->g),
796                           "no bound channel for smpc ctxsw mode update\n");
797                 err = -EINVAL;
798                 goto clean_up;
799         }
800
801         err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
802                       args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
803         if (err) {
804                 gk20a_err(dev_from_gk20a(dbg_s->g),
805                           "error (%d) during smpc ctxsw mode update\n", err);
806                 goto clean_up;
807         }
808
809         err = g->ops.regops.apply_smpc_war(dbg_s);
810
811  clean_up:
812         mutex_unlock(&g->dbg_sessions_lock);
813         return  err;
814 }
815
816 static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
817                 struct dbg_session_gk20a *dbg_s,
818                 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
819 {
820         struct gk20a *g = get_gk20a(dbg_s->pdev);
821         struct channel_gk20a *ch = dbg_s->ch;
822         bool ch_is_curr_ctx;
823         int err = 0, action = args->mode;
824
825         gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
826
827         mutex_lock(&g->dbg_sessions_lock);
828
829         /* Suspend GPU context switching:
830          * disable channel switching so that the hardware state can be
831          * inspected to determine whether the context we're interested in
832          * is currently resident.
833          */
834         err = gr_gk20a_disable_ctxsw(g);
835         if (err) {
836                 gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
837                 /* this should probably be ctx-fatal... */
838                 goto clean_up;
839         }
840
841         /* find out whether the current channel is resident */
842         ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);
843
844         if (ch_is_curr_ctx) {
845                 switch (action) {
846                 case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
847                         gk20a_suspend_all_sms(g);
848                         break;
849
850                 case NVGPU_DBG_GPU_RESUME_ALL_SMS:
851                         gk20a_resume_all_sms(g);
852                         break;
853                 }
854         } else {
855                 switch (action) {
856                 case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
857                         /* Disable the channel */
858                         channel_gk20a_disable(ch);
859                         break;
860
861                 case NVGPU_DBG_GPU_RESUME_ALL_SMS:
862                         /* Enable the channel */
863                         channel_gk20a_enable(ch);
864                         break;
865                 }
866         }
867
868         /* Resume GPU context switching */
869         err = gr_gk20a_enable_ctxsw(g);
870         if (err)
871                 gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
872
873  clean_up:
874         mutex_unlock(&g->dbg_sessions_lock);
875         return  err;
876 }
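
/*
 * Usage sketch (userspace, not driver code): pausing and resuming all SMs
 * for the bound channel.  As implemented above, if that channel's context
 * is not resident the driver disables or enables the channel itself instead
 * of touching the SMs.
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_suspend_all_sms(int dbg_fd, int suspend)
{
        struct nvgpu_dbg_gpu_suspend_resume_all_sms_args args = { 0 };

        args.mode = suspend ? NVGPU_DBG_GPU_SUSPEND_ALL_SMS
                            : NVGPU_DBG_GPU_RESUME_ALL_SMS;
        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS, &args);
}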
877
878 static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
879                 struct nvgpu_dbg_gpu_perfbuf_map_args *args)
880 {
881         struct gk20a *g = dbg_s->g;
882         int err;
883         u32 virt_size;
884         u32 virt_addr_lo;
885         u32 virt_addr_hi;
886         u32 inst_pa_page;
887
888         if (!g->allow_all)
889                 return -EACCES;
890
891         err = gk20a_vm_map_buffer(&g->mm.pmu.vm,
892                         args->dmabuf_fd,
893                         &args->offset,
894                         0,
895                         0,
896                         0,
897                         args->mapping_size);
898         if (err)
899                 return err;
900
901         /* the perf output buffer may not cross a 4GB boundary; since it is
902          * mapped into a separate VA space smaller than that, it won't */
903         virt_size = u64_lo32(args->mapping_size);
904         virt_addr_lo = u64_lo32(args->offset);
905         virt_addr_hi = u64_hi32(args->offset);
906         /* but check anyway */
907         if (args->offset + virt_size > SZ_4G) {
908                 gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset);
909                 return -EINVAL;
910         }
911
912         /* address and size are aligned to 32 bytes, the lowest bits read back
913          * as zeros */
914         gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
915         gk20a_writel(g, perf_pmasys_outbaseupper_r(),
916                         perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
917         gk20a_writel(g, perf_pmasys_outsize_r(), virt_size);
918
919         /* this field is aligned to 4K */
920         inst_pa_page = gk20a_mem_phys(&g->mm.hwpm.inst_block) >> 12;
921
922         /* A write to MEM_BLOCK triggers the block bind operation. MEM_BLOCK
923          * should be written last */
924         gk20a_writel(g, perf_pmasys_mem_block_r(),
925                         perf_pmasys_mem_block_base_f(inst_pa_page) |
926                         perf_pmasys_mem_block_valid_true_f() |
927                         perf_pmasys_mem_block_target_lfb_f());
928
929         return 0;
930 }
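
/*
 * Usage sketch (userspace, not driver code): handing a dmabuf to the PMA
 * unit as the perf output buffer.  The call is rejected unless the GPU's
 * allow_all debug flag is set, and the mapping must stay below 4GB of the
 * PMU VM, as checked above.  It is assumed (based on the &args->offset
 * in/out parameter above) that leaving offset at zero lets the kernel pick
 * the GPU VA and return it in the same field; how the dmabuf fd itself is
 * allocated is outside this file.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int dbg_perfbuf_map(int dbg_fd, int dmabuf_fd, uint64_t size,
                           uint64_t *gpu_va)
{
        struct nvgpu_dbg_gpu_perfbuf_map_args args = { 0 };
        int err;

        args.dmabuf_fd = dmabuf_fd;
        args.mapping_size = size;

        err = ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP, &args);
        if (!err)
                *gpu_va = args.offset;  /* later passed to PERFBUF_UNMAP */
        return err;
}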
931
932 static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
933                 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
934 {
935         struct gk20a *g = dbg_s->g;
936
937         if (!g->allow_all)
938                 return -EACCES;
939
940         gk20a_writel(g, perf_pmasys_outbase_r(), 0);
941         gk20a_writel(g, perf_pmasys_outbaseupper_r(),
942                         perf_pmasys_outbaseupper_ptr_f(0));
943         gk20a_writel(g, perf_pmasys_outsize_r(), 0);
944
945         gk20a_writel(g, perf_pmasys_mem_block_r(),
946                         perf_pmasys_mem_block_base_f(0) |
947                         perf_pmasys_mem_block_valid_false_f() |
948                         perf_pmasys_mem_block_target_f(0));
949
950         gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset);
951
952         return 0;
953 }