/*
 * Tegra GK20A GPU Debugger/Profiler Driver
 *
 * Copyright (c) 2013-2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/nvhost.h>
#include <uapi/linux/nvgpu.h>

#include "gk20a.h"
#include "gr_gk20a.h"
#include "dbg_gpu_gk20a.h"
#include "regops_gk20a.h"
#include "hw_therm_gk20a.h"
#include "hw_gr_gk20a.h"
#include "hw_perf_gk20a.h"

struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a = {
	.exec_reg_ops = exec_regops_gk20a,
};

/* silly allocator - just increment session id */
static atomic_t session_id = ATOMIC_INIT(0);
static int generate_session_id(void)
{
	return atomic_add_return(1, &session_id);
}

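/*
 * Note: because the id comes from atomic_add_return(), concurrent open()
 * calls each get a unique, nonzero session id without any additional
 * locking.
 */
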
static int alloc_session(struct dbg_session_gk20a **_dbg_s)
{
	struct dbg_session_gk20a *dbg_s;
	*_dbg_s = NULL;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	dbg_s = kzalloc(sizeof(*dbg_s), GFP_KERNEL);
	if (!dbg_s)
		return -ENOMEM;

	dbg_s->id = generate_session_id();
	dbg_s->ops = &dbg_gpu_session_ops_gk20a;
	*_dbg_s = dbg_s;
	return 0;
}

static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
		struct file *filp, bool is_profiler)
{
	struct dbg_session_gk20a *dbg_session;
	struct gk20a *g;
	struct platform_device *pdev;
	struct device *dev;
	int err;

	if (!is_profiler)
		g = container_of(inode->i_cdev,
				 struct gk20a, dbg.cdev);
	else
		g = container_of(inode->i_cdev,
				 struct gk20a, prof.cdev);
	pdev = g->dev;
	dev = &pdev->dev;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", dev_name(dev));

	err = alloc_session(&dbg_session);
	if (err)
		return err;

	filp->private_data = dbg_session;
	dbg_session->pdev = pdev;
	dbg_session->dev = dev;
	dbg_session->g = g;
	dbg_session->is_profiler = is_profiler;
	dbg_session->is_pg_disabled = false;
	dbg_session->is_timeout_disabled = false;
	/* For vgpu, all power-gating features are currently disabled
	 * in the server. Set is_pg_disabled to true to reflect this
	 * on the client side. */
	if (gk20a_gpu_is_virtual(pdev))
		dbg_session->is_pg_disabled = true;

	INIT_LIST_HEAD(&dbg_session->dbg_s_list_node);
	init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
	dbg_session->dbg_events.events_enabled = false;
	dbg_session->dbg_events.num_pending_events = 0;

	return 0;
}

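/*
 * A freshly opened session has no channel bound to it. Channel-relative
 * operations only become legal after userspace binds one via
 * NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL (profiler sessions are the exception:
 * they may issue regops without a bound channel, see
 * nvgpu_ioctl_channel_reg_ops below).
 */
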
/* used in scenarios where the debugger session can take just the per-channel
 * lock for performance, but the profiler session must take the per-gpu lock
 * since it might not have an associated channel. */
static void gk20a_dbg_session_mutex_lock(struct dbg_session_gk20a *dbg_s)
{
	if (dbg_s->is_profiler)
		mutex_lock(&dbg_s->g->dbg_sessions_lock);
	else
		mutex_lock(&dbg_s->ch->dbg_s_lock);
}

static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s)
{
	if (dbg_s->is_profiler)
		mutex_unlock(&dbg_s->g->dbg_sessions_lock);
	else
		mutex_unlock(&dbg_s->ch->dbg_s_lock);
}

static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_mutex_lock(dbg_s);

	dbg_s->dbg_events.events_enabled = true;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_mutex_unlock(dbg_s);
}

static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_mutex_lock(dbg_s);

	dbg_s->dbg_events.events_enabled = false;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_mutex_unlock(dbg_s);
}

static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_mutex_lock(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
	    dbg_s->dbg_events.num_pending_events > 0)
		dbg_s->dbg_events.num_pending_events--;

	gk20a_dbg_session_mutex_unlock(dbg_s);
}

static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_events_ctrl_args *args)
{
	int ret = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);

	if (!dbg_s->ch) {
		gk20a_err(dev_from_gk20a(dbg_s->g),
			  "no channel bound to dbg session\n");
		return -EINVAL;
	}

	switch (args->cmd) {
	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
		gk20a_dbg_gpu_events_enable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
		gk20a_dbg_gpu_events_disable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
		gk20a_dbg_gpu_events_clear(dbg_s);
		break;

	default:
		gk20a_err(dev_from_gk20a(dbg_s->g),
			  "unrecognized dbg gpu events ctrl cmd: 0x%x",
			  args->cmd);
		ret = -EINVAL;
		break;
	}

	return ret;
}

unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
{
	unsigned int mask = 0;
	struct dbg_session_gk20a *dbg_s = filep->private_data;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);

	gk20a_dbg_session_mutex_lock(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
	    dbg_s->dbg_events.num_pending_events > 0) {
		gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
			  dbg_s->id);
		gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
			  dbg_s->dbg_events.num_pending_events);
		mask = (POLLPRI | POLLIN);
	}

	gk20a_dbg_session_mutex_unlock(dbg_s);

	return mask;
}

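/*
 * Illustrative only (editor's sketch, not part of this driver): a
 * userspace consumer typically pairs poll() with the CLEAR command,
 * retiring one pending event per wakeup:
 *
 *	struct pollfd pfd = { .fd = dbg_fd, .events = POLLPRI | POLLIN };
 *	struct nvgpu_dbg_gpu_events_ctrl_args ctrl = {
 *		.cmd = NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR,
 *	};
 *	while (poll(&pfd, 1, -1) > 0) {
 *		handle_gpu_event();	// hypothetical application hook
 *		ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL, &ctrl);
 *	}
 */
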
int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
}

int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
}

void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
{
	struct dbg_session_gk20a *dbg_s;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	/* guard against the session list being modified */
	mutex_lock(&ch->dbg_s_lock);

	list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
		if (dbg_s->dbg_events.events_enabled) {
			gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
				  dbg_s->id);
			gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
				  dbg_s->dbg_events.num_pending_events);

			dbg_s->dbg_events.num_pending_events++;

			wake_up_interruptible_all(&dbg_s->dbg_events.wait_queue);
		}
	}

	mutex_unlock(&ch->dbg_s_lock);
}

static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
				__u32 powermode);

static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
			  int timeout_mode)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;

	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
		  timeout_mode);

	switch (timeout_mode) {
	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
		if (dbg_s->is_timeout_disabled &&
		    --g->dbg_timeout_disabled_refcount == 0) {
			g->timeouts_enabled = true;
		}
		dbg_s->is_timeout_disabled = false;
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
		if ((dbg_s->is_timeout_disabled == false) &&
		    (g->dbg_timeout_disabled_refcount++ == 0)) {
			g->timeouts_enabled = false;
		}
		dbg_s->is_timeout_disabled = true;
		break;

	default:
		gk20a_err(dev_from_gk20a(g),
			  "unrecognized dbg gpu timeout mode : 0x%x",
			  timeout_mode);
		err = -EINVAL;
		break;
	}

	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
		  g->timeouts_enabled ? "Yes" : "No");

	return err;
}

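/*
 * Refcounting sketch for the timeout controls above (assuming two
 * sessions A and B, refcount initially 0):
 *
 *	A: DISABLE -> refcount 0->1, timeouts turned off
 *	B: DISABLE -> refcount 1->2
 *	A: ENABLE  -> refcount 2->1, timeouts stay off
 *	B: ENABLE  -> refcount 1->0, timeouts turned back on
 *
 * is_timeout_disabled guarantees each session contributes at most one
 * reference, so repeated DISABLEs from one fd don't skew the count.
 */
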
static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
{
	struct channel_gk20a *ch_gk20a = dbg_s->ch;
	struct gk20a *g = dbg_s->g;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	/* wasn't bound to start with ? */
	if (!ch_gk20a) {
		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "not bound already?");
		return -ENODEV;
	}

	mutex_lock(&g->dbg_sessions_lock);
	mutex_lock(&ch_gk20a->dbg_s_lock);

	dbg_s->ch = NULL;
	fput(dbg_s->ch_f);	/* drop the reference taken at bind time */
	dbg_s->ch_f = NULL;

	list_del_init(&dbg_s->dbg_s_list_node);

	mutex_unlock(&ch_gk20a->dbg_s_lock);
	mutex_unlock(&g->dbg_sessions_lock);

	return 0;
}

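/*
 * Lock ordering note: unbind (above) and bind (below) both take the
 * per-GPU dbg_sessions_lock before the per-channel dbg_s_lock; any new
 * code touching both locks must keep that order to avoid deadlock.
 */
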
int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
{
	struct dbg_session_gk20a *dbg_s = filp->private_data;
	struct gk20a *g = dbg_s->g;

	gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", dev_name(dbg_s->dev));

	/* unbind if it was bound */
	if (dbg_s->ch)
		dbg_unbind_channel_gk20a(dbg_s);

	/* Powergate/timeout enable is called here because a session that
	 * issued the powergate/timeout disable ioctl may be killed before
	 * it can issue the corresponding enable ioctl
	 */
	mutex_lock(&g->dbg_sessions_lock);
	dbg_set_powergate(dbg_s, NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
	nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
	mutex_unlock(&g->dbg_sessions_lock);

	kfree(dbg_s);
	return 0;
}

static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_bind_channel_args *args)
{
	struct file *f;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;

	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
		  dev_name(dbg_s->dev), args->channel_fd);

	if (args->channel_fd == ~0)
		return dbg_unbind_channel_gk20a(dbg_s);

	/* even though gk20a_get_channel_from_file() takes a reference of its
	 * own, it drops it again before returning; by holding one here we
	 * keep the channel from disappearing while the debugger is in
	 * session */
	f = fget(args->channel_fd);
	if (!f)
		return -ENODEV;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch) {
		gk20a_dbg_fn("no channel found for fd");
		fput(f);
		return -EINVAL;
	}

	gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);

	mutex_lock(&g->dbg_sessions_lock);
	mutex_lock(&ch->dbg_s_lock);

	dbg_s->ch_f = f;
	dbg_s->ch = ch;
	list_add(&dbg_s->dbg_s_list_node, &dbg_s->ch->dbg_s_list);

	mutex_unlock(&ch->dbg_s_lock);
	mutex_unlock(&g->dbg_sessions_lock);

	return 0;
}

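/*
 * Illustrative only (editor's sketch): userspace binds a channel by
 * passing its fd, and unbinds by passing ~0 (see the check above):
 *
 *	struct nvgpu_dbg_gpu_bind_channel_args args = {
 *		.channel_fd = ch_fd,	// or ~0 to unbind
 *	};
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &args);
 */
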
static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_exec_reg_ops_args *args);

static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_powergate_args *args);

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);

static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_map_args *args);

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);

static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_pc_sampling_args *args)
{
	struct channel_gk20a *ch = dbg_s->ch;
	struct gk20a *g;

	/* don't dereference an unbound channel */
	if (!ch)
		return -EINVAL;
	g = ch->g;

	gk20a_dbg_fn("");

	return g->ops.gr.update_pc_sampling ?
		g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
}

static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_timeout_args *args)
{
	int err;
	struct gk20a *g = get_gk20a(dbg_s->pdev);

	gk20a_dbg_fn("timeout mode = %d", args->enable);

	mutex_lock(&g->dbg_sessions_lock);
	err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
	mutex_unlock(&g->dbg_sessions_lock);

	return err;
}

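/*
 * Illustrative only (editor's sketch): a debugger would typically
 * disable timeouts before stopping SMs, so the stalled channel is not
 * torn down underneath it:
 *
 *	struct nvgpu_dbg_gpu_timeout_args args = {
 *		.enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE,
 *	};
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_TIMEOUT, &args);
 */
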
long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	struct dbg_session_gk20a *dbg_s = filp->private_data;
	struct gk20a *g = get_gk20a(dbg_s->pdev);
	u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST))
		return -EINVAL;

	BUG_ON(_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE);

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g->dev);
		if (err)
			return err;

		gk20a_idle(g->dev);
	}

	switch (cmd) {
	case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
		err = dbg_bind_channel_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_REG_OPS:
		err = nvgpu_ioctl_channel_reg_ops(dbg_s,
			     (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_POWERGATE:
		err = nvgpu_ioctl_powergate_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_powergate_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
		err = gk20a_dbg_gpu_events_ctrl(dbg_s,
			     (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
		err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
			     (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
		err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
		       (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
		err = gk20a_perfbuf_map(dbg_s,
			     (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
		err = gk20a_perfbuf_unmap(dbg_s,
			     (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
		err = gk20a_dbg_pc_sampling(dbg_s,
			     (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
		err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
			     (struct nvgpu_dbg_gpu_timeout_args *)buf);
		break;

	default:
		gk20a_err(dev_from_gk20a(g),
			  "unrecognized dbg gpu ioctl cmd: 0x%x",
			  cmd);
		err = -ENOTTY;
		break;
	}

	gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg,
				   buf, _IOC_SIZE(cmd));

	return err;
}

/* In order to perform a context relative op the context has
 * to be created already... which would imply that the
 * context switch mechanism has already been put in place.
 * So by the time we perform such an operation it should always
 * be possible to query for the appropriate context offsets, etc.
 *
 * But note: while the dbg_gpu bind requires a channel fd,
 * it doesn't require an allocated gr/compute obj at that point...
 */
static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
				      struct gr_gk20a *gr)
{
	int err;

	mutex_lock(&gr->ctx_mutex);
	err = !gr->ctx_vars.golden_image_initialized;
	mutex_unlock(&gr->ctx_mutex);
	if (err)
		return false;

	return true;
}

static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
{
	int err = 0, powergate_err = 0;
	bool is_pg_disabled = false;

	struct device *dev = dbg_s->dev;
	struct gk20a *g = get_gk20a(dbg_s->pdev);
	struct nvgpu_dbg_gpu_reg_op *ops;
	u64 ops_size = sizeof(ops[0]) * args->num_ops;

	gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);

	if (!dbg_s->ops) {
		gk20a_err(dev, "can't call reg_ops on an unbound debugger session");
		return -EINVAL;
	}

	if (!dbg_s->is_profiler && !dbg_s->ch) {
		gk20a_err(dev, "bind a channel before regops for a debugging session");
		return -EINVAL;
	}

	/* be sure that ctx info is in place */
	if (!gk20a_gpu_is_virtual(dbg_s->pdev) &&
	    !gr_context_info_available(dbg_s, &g->gr)) {
		gk20a_err(dev, "gr context data not available\n");
		return -ENODEV;
	}

	ops = kzalloc(ops_size, GFP_KERNEL);
	if (!ops) {
		gk20a_err(dev, "Allocating memory failed!");
		return -ENOMEM;
	}

	gk20a_dbg_fn("Copying regops from userspace");

	if (copy_from_user(ops, (void __user *)(uintptr_t)args->ops,
			   ops_size)) {
		dev_err(dev, "copy_from_user failed!");
		err = -EFAULT;
		goto clean_up;
	}

	/* since exec_reg_ops sends methods to the ucode, it must take the
	 * global gpu lock to protect against mixing methods from debug sessions
	 * on other channels */
	mutex_lock(&g->dbg_sessions_lock);

	if (!dbg_s->is_pg_disabled) {
		powergate_err = dbg_set_powergate(dbg_s,
					NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
		is_pg_disabled = true;
	}

	if (!powergate_err) {
		err = dbg_s->ops->exec_reg_ops(dbg_s, ops, args->num_ops);
		/* enable powergate, if previously disabled */
		if (is_pg_disabled) {
			powergate_err = dbg_set_powergate(dbg_s,
					NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
		}
	}

	mutex_unlock(&g->dbg_sessions_lock);

	if (!err && powergate_err)
		err = powergate_err;

	if (err) {
		gk20a_err(dev, "dbg regops failed");
		goto clean_up;
	}

	gk20a_dbg_fn("Copying result to userspace");

	if (copy_to_user((void __user *)(uintptr_t)args->ops, ops, ops_size)) {
		dev_err(dev, "copy_to_user failed!");
		err = -EFAULT;
	}

clean_up:
	kfree(ops);
	return err;
}

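/*
 * Illustrative only (editor's sketch; field and constant names should be
 * checked against <uapi/linux/nvgpu.h>): a single global 32-bit register
 * read through the regops path might look like:
 *
 *	struct nvgpu_dbg_gpu_reg_op op = {
 *		.op     = NVGPU_DBG_GPU_REG_OP_READ_32,
 *		.type   = NVGPU_DBG_GPU_REG_OP_TYPE_GLOBAL,
 *		.offset = reg_offset,		// caller-chosen offset
 *	};
 *	struct nvgpu_dbg_gpu_exec_reg_ops_args args = {
 *		.ops     = (uintptr_t)&op,
 *		.num_ops = 1,
 *	};
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_REG_OPS, &args);
 *	// on success the value is reported back in op.value_lo
 */
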
static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
				__u32 powermode)
{
	int err = 0;
	struct gk20a *g = get_gk20a(dbg_s->pdev);

	/* This function must be called with g->dbg_sessions_lock held */

	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d",
		  dev_name(dbg_s->dev), powermode);

	switch (powermode) {
	case NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE:
		/* save off current powergate, clk state.
		 * set gpu module's can_powergate = 0.
		 * set gpu module's clk to max.
		 * while *a* debug session is active there will be no power or
		 * clocking state changes allowed from mainline code (but they
		 * should be saved).
		 */
		/* Allow powergate disable if the current dbg_session hasn't
		 * already called a powergate disable ioctl and the global
		 * powergating_disabled_refcount is zero
		 */
		if ((dbg_s->is_pg_disabled == false) &&
		    (g->dbg_powergating_disabled_refcount++ == 0)) {

			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
			err = gk20a_busy(g->dev);
			if (err)
				return err;

			err = gk20a_busy(dbg_s->pdev);
			if (err)
				return -EPERM;

			/* do elpg disable before clock gating */
			if (support_gk20a_pmu(g->dev))
				gk20a_pmu_disable_elpg(g);

			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
					false);
			g->ops.clock_gating.slcg_perf_load_gating_prod(g,
					false);
			g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
					false);
			gr_gk20a_init_blcg_mode(g, BLCG_RUN, ENGINE_GR_GK20A);

			g->elcg_enabled = false;
			gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_GR_GK20A);
			gr_gk20a_init_elcg_mode(g, ELCG_RUN, ENGINE_CE2_GK20A);
		}

		dbg_s->is_pg_disabled = true;
		break;

	case NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE:
		/* restore (can) powergate, clk state */
		/* release pending exceptions to fault/be handled as usual */
		/* TBD: ordering of these? */

		/* Re-enable powergate if no other session wants it disabled
		 * and the current dbg-session had requested the powergate
		 * disable through ioctl
		 */
		if (dbg_s->is_pg_disabled &&
		    --g->dbg_powergating_disabled_refcount == 0) {

			g->elcg_enabled = true;
			gr_gk20a_init_elcg_mode(g, ELCG_AUTO, ENGINE_GR_GK20A);
			gr_gk20a_init_elcg_mode(g, ELCG_AUTO, ENGINE_CE2_GK20A);
			gr_gk20a_init_blcg_mode(g, BLCG_AUTO, ENGINE_GR_GK20A);

			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
					g->slcg_enabled);
			g->ops.clock_gating.slcg_perf_load_gating_prod(g,
					g->slcg_enabled);
			/* restore LTC gating too, mirroring the disable
			 * path above */
			g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
					g->slcg_enabled);

			if (support_gk20a_pmu(g->dev))
				gk20a_pmu_enable_elpg(g);

			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
			gk20a_idle(dbg_s->pdev);
			gk20a_idle(g->dev);
		}

		dbg_s->is_pg_disabled = false;
		break;

	default:
		gk20a_err(dev_from_gk20a(g),
			  "unrecognized dbg gpu powergate mode: 0x%x",
			  powermode);
		err = -EINVAL;
		break;
	}

	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d done",
		  dev_name(dbg_s->dev), powermode);
	return err;
}

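/*
 * dbg_powergating_disabled_refcount follows the same scheme as the
 * timeout refcount above: the first session to disable powergating turns
 * it off GPU-wide, and it stays off until the last such session
 * re-enables it (or is released, see gk20a_dbg_gpu_dev_release).
 */
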
static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_powergate_args *args)
{
	int err;
	struct gk20a *g = get_gk20a(dbg_s->pdev);

	gk20a_dbg_fn("%s powergate mode = %d",
		     dev_name(dbg_s->dev), args->mode);

	mutex_lock(&g->dbg_sessions_lock);
	err = dbg_set_powergate(dbg_s, args->mode);
	mutex_unlock(&g->dbg_sessions_lock);

	return err;
}

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			       struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
{
	int err;
	struct gk20a *g = get_gk20a(dbg_s->pdev);
	struct channel_gk20a *ch_gk20a;

	gk20a_dbg_fn("%s smpc ctxsw mode = %d",
		     dev_name(dbg_s->dev), args->mode);

	/* Take the global lock, since we'll be doing global regops */
	mutex_lock(&g->dbg_sessions_lock);

	ch_gk20a = dbg_s->ch;
	if (!ch_gk20a) {
		gk20a_err(dev_from_gk20a(dbg_s->g),
			  "no bound channel for smpc ctxsw mode update\n");
		err = -EINVAL;
		goto clean_up;
	}

	err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
			args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
	if (err) {
		gk20a_err(dev_from_gk20a(dbg_s->g),
			  "error (%d) during smpc ctxsw mode update\n", err);
		goto clean_up;
	}

	err = g->ops.regops.apply_smpc_war(dbg_s);

clean_up:
	mutex_unlock(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
{
	struct gk20a *g = get_gk20a(dbg_s->pdev);
	struct channel_gk20a *ch = dbg_s->ch;
	bool ch_is_curr_ctx;
	int err = 0, action = args->mode;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);

	/* a bound channel is required; bail out before dereferencing it */
	if (!ch)
		return -EINVAL;

	mutex_lock(&g->dbg_sessions_lock);

	/* Suspend GPU context switching */
	/* Disable channel switching.
	 * at that point the hardware state can be inspected to
	 * determine if the context we're interested in is current.
	 */
	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
		/* this should probably be ctx-fatal... */
		goto clean_up;
	}

	/* find out whether the current channel is resident */
	ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);

	if (ch_is_curr_ctx) {
		switch (action) {
		case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
			gk20a_suspend_all_sms(g);
			break;

		case NVGPU_DBG_GPU_RESUME_ALL_SMS:
			gk20a_resume_all_sms(g);
			break;
		}
	} else {
		switch (action) {
		case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
			/* Disable the channel */
			channel_gk20a_disable(ch);
			break;

		case NVGPU_DBG_GPU_RESUME_ALL_SMS:
			/* Enable the channel */
			channel_gk20a_enable(ch);
			break;
		}
	}

	/* Resume GPU context switching */
	err = gr_gk20a_enable_ctxsw(g);
	if (err)
		gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");

clean_up:
	mutex_unlock(&g->dbg_sessions_lock);

	return err;
}

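/*
 * Note on the resident/non-resident split above: when the bound
 * channel's context is on the engine the SMs are suspended or resumed
 * directly; otherwise only the channel is disabled or enabled, which
 * (presumably) keeps its work off the GPU until the debugger resumes it.
 */
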
static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_map_args *args)
{
	struct gk20a *g = dbg_s->g;
	int err;
	u32 virt_size;
	u32 virt_addr_lo;
	u32 virt_addr_hi;
	u32 inst_pa_page;

	if (!g->allow_all)
		return -EACCES;

	err = gk20a_vm_map_buffer(&g->mm.pmu.vm,
			args->dmabuf_fd,
			&args->offset,
			0, /* flags */
			0, /* kind */
			0, /* buffer_offset */
			args->mapping_size);
	if (err)
		return err;

	/* perf output buffer may not cross a 4GB boundary - with a separate va
	 * smaller than that, it won't */
	virt_size = u64_lo32(args->mapping_size);
	virt_addr_lo = u64_lo32(args->offset);
	virt_addr_hi = u64_hi32(args->offset);
	/* but check anyway */
	if (args->offset + virt_size > SZ_4G) {
		gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset);
		return -EINVAL;
	}

	/* address and size are aligned to 32 bytes, the lowest bits read back
	 * as zeros */
	gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(virt_addr_hi));
	gk20a_writel(g, perf_pmasys_outsize_r(), virt_size);

	/* this field is aligned to 4K */
	inst_pa_page = gk20a_mem_phys(&g->mm.hwpm.inst_block) >> 12;

	/* A write to MEM_BLOCK triggers the block bind operation. MEM_BLOCK
	 * should be written last */
	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(inst_pa_page) |
			perf_pmasys_mem_block_valid_true_f() |
			perf_pmasys_mem_block_target_lfb_f());

	return 0;
}

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
{
	struct gk20a *g = dbg_s->g;

	if (!g->allow_all)
		return -EACCES;

	gk20a_writel(g, perf_pmasys_outbase_r(), 0);
	gk20a_writel(g, perf_pmasys_outbaseupper_r(),
			perf_pmasys_outbaseupper_ptr_f(0));
	gk20a_writel(g, perf_pmasys_outsize_r(), 0);

	gk20a_writel(g, perf_pmasys_mem_block_r(),
			perf_pmasys_mem_block_base_f(0) |
			perf_pmasys_mem_block_valid_false_f() |
			perf_pmasys_mem_block_target_f(0));

	gk20a_vm_unmap_buffer(&g->mm.pmu.vm, args->offset);

	return 0;
}