rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: gk20a: Local debug functions
author     Arto Merilainen <amerilainen@nvidia.com>
           Fri, 7 Mar 2014 11:56:52 +0000 (13:56 +0200)
committer  Terje Bergstrom <tbergstrom@nvidia.com>
           Wed, 12 Mar 2014 11:07:05 +0000 (04:07 -0700)
This patch modifies the code to use the gk20a-internal debug functions
instead of the nvhost debug functions.

Bug 1468086

Change-Id: Ib606ebad6507c912de28a2d6ee391dbc8669b221
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/376652
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
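
Note for readers unfamiliar with the helpers being switched to: the patch replaces the nvhost_dbg_fn/nvhost_dbg_info/nvhost_dbg/nvhost_err calls (and the dbg_* category flags) with gk20a-local equivalents, and the raw mem_wr32 accessor with gk20a_mem_wr32. The concrete definitions are introduced in gk20a.h (listed below but not shown in this excerpt). The following is only a minimal sketch of what such wrappers typically look like; the macro and category names are taken from the diff, while the mask bit values, the gk20a_dbg_mask variable and the printk plumbing are illustrative assumptions, not the actual NVIDIA code.

/* Hypothetical sketch of the gk20a-local debug helpers (illustration only).
 * Category names match the diff; bit assignments and plumbing are assumed. */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/printk.h>
#include <linux/types.h>

extern u32 gk20a_dbg_mask;              /* debug categories currently enabled */

#define gpu_dbg_info     BIT(0)         /* assumed bit assignments */
#define gpu_dbg_fn       BIT(2)
#define gpu_dbg_intr     BIT(5)
#define gpu_dbg_clk      BIT(7)
#define gpu_dbg_gpu_dbg  BIT(8)

#define gk20a_dbg(dbg_mask, fmt, arg...)                                   \
	do {                                                               \
		if (unlikely((dbg_mask) & gk20a_dbg_mask))                 \
			pr_info("gk20a %s: " fmt "\n", __func__, ##arg);   \
	} while (0)

#define gk20a_dbg_fn(fmt, arg...)    gk20a_dbg(gpu_dbg_fn, fmt, ##arg)
#define gk20a_dbg_info(fmt, arg...)  gk20a_dbg(gpu_dbg_info, fmt, ##arg)

/* Errors keep the struct device so messages stay tied to the GPU device
 * rather than the nvhost core. */
#define gk20a_err(d, fmt, arg...)    dev_err(d, fmt, ##arg)

/* 32-bit word write into a CPU-mapped instance block (assumed shape). */
static inline void gk20a_mem_wr32(void *ptr, int w, u32 data)
{
	((u32 *)ptr)[w] = data;
}
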
21 files changed:
drivers/video/tegra/host/gk20a/as_gk20a.c
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/channel_sync_gk20a.c
drivers/video/tegra/host/gk20a/clk_gk20a.c
drivers/video/tegra/host/gk20a/ctrl_gk20a.c
drivers/video/tegra/host/gk20a/dbg_gpu_gk20a.c
drivers/video/tegra/host/gk20a/debug_gk20a.c
drivers/video/tegra/host/gk20a/fb_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gk20a.c
drivers/video/tegra/host/gk20a/gk20a.h
drivers/video/tegra/host/gk20a/gr_ctx_gk20a.c
drivers/video/tegra/host/gk20a/gr_ctx_gk20a_sim.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/hal.c
drivers/video/tegra/host/gk20a/ltc_common.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/pmu_gk20a.c
drivers/video/tegra/host/gk20a/priv_ring_gk20a.c
drivers/video/tegra/host/gk20a/regops_gk20a.c
drivers/video/tegra/host/gk20a/therm_gk20a.c

index e65c38a0887d027586a4a4a86f9b63f97066c16d..65c26938ea80d5c05acc6df4d5a2e0e02b12bdb9 100644 (file)
 /* dumb allocator... */
 static int generate_as_share_id(struct gk20a_as *as)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return ++as->last_share_id;
 }
 /* still dumb */
 static void release_as_share_id(struct gk20a_as *as, int id)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return;
 }
 
@@ -43,7 +43,7 @@ static int gk20a_as_alloc_share(struct gk20a_as *as,
        struct gk20a_as_share *as_share;
        int err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        *out = 0;
        as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
@@ -75,7 +75,7 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 {
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (atomic_dec_return(&as_share->ref_cnt) > 0)
                return 0;
@@ -93,7 +93,7 @@ static int gk20a_as_ioctl_bind_channel(
        int err = 0;
        struct channel_gk20a *ch;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ch = gk20a_get_channel_from_file(args->channel_fd);
        if (!ch || gk20a_channel_as_bound(ch))
@@ -115,7 +115,7 @@ static int gk20a_as_ioctl_alloc_space(
                struct gk20a_as_share *as_share,
                struct nvhost_as_alloc_space_args *args)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return gk20a_vm_alloc_space(as_share, args);
 }
 
@@ -123,7 +123,7 @@ static int gk20a_as_ioctl_free_space(
                struct gk20a_as_share *as_share,
                struct nvhost_as_free_space_args *args)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return gk20a_vm_free_space(as_share, args);
 }
 
@@ -133,7 +133,7 @@ static int gk20a_as_ioctl_map_buffer_ex(
 {
        int i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* ensure that padding is not set. this is required for ensuring that
         * we can safely use these fields later */
@@ -150,7 +150,7 @@ static int gk20a_as_ioctl_map_buffer(
                struct gk20a_as_share *as_share,
                struct nvhost_as_map_buffer_args *args)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
                                   &args->o_a.align,
                                   args->flags, NV_KIND_DEFAULT);
@@ -161,7 +161,7 @@ static int gk20a_as_ioctl_unmap_buffer(
                struct gk20a_as_share *as_share,
                struct nvhost_as_unmap_buffer_args *args)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return gk20a_vm_unmap_buffer(as_share, args->offset);
 }
 
@@ -171,19 +171,19 @@ int gk20a_as_dev_open(struct inode *inode, struct file *filp)
        struct gk20a *g;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        g = container_of(inode->i_cdev, struct gk20a, as.cdev);
 
        err = gk20a_get_client(g);
        if (err) {
-               nvhost_dbg_fn("fail to get channel!");
+               gk20a_dbg_fn("fail to get channel!");
                return err;
        }
 
        err = gk20a_as_alloc_share(&g->as, &as_share);
        if (err) {
-               nvhost_dbg_fn("failed to alloc share");
+               gk20a_dbg_fn("failed to alloc share");
                gk20a_put_client(g);
                return err;
        }
@@ -198,7 +198,7 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
        int ret;
        struct gk20a *g = gk20a_from_as(as_share->as);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ret = gk20a_as_release_share(as_share);
 
index 630ddff29692722f67b078c05405a0f1073f36bc..2a6f0b421db4bfa8d2eea1c4731e2bdc5da40918 100644 (file)
@@ -108,7 +108,7 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
        u32 addr_hi;
        void *inst_ptr;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        inst_ptr = c->inst_block.cpuva;
        if (!inst_ptr)
@@ -118,21 +118,21 @@ int channel_gk20a_commit_va(struct channel_gk20a *c)
        addr_lo = u64_lo32(addr >> 12);
        addr_hi = u64_hi32(addr);
 
-       nvhost_dbg_info("pde pa=0x%llx addr_lo=0x%x addr_hi=0x%x",
+       gk20a_dbg_info("pde pa=0x%llx addr_lo=0x%x addr_hi=0x%x",
                   (u64)addr, addr_lo, addr_hi);
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
                ram_in_page_dir_base_target_vid_mem_f() |
                ram_in_page_dir_base_vol_true_f() |
                ram_in_page_dir_base_lo_f(addr_lo));
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
                ram_in_page_dir_base_hi_f(addr_hi));
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
                 u64_lo32(c->vm->va_limit) | 0xFFF);
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
                ram_in_adr_limit_hi_f(u64_hi32(c->vm->va_limit)));
 
        gk20a_mm_l2_invalidate(c->g);
@@ -146,7 +146,7 @@ static int channel_gk20a_commit_userd(struct channel_gk20a *c)
        u32 addr_hi;
        void *inst_ptr;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        inst_ptr = c->inst_block.cpuva;
        if (!inst_ptr)
@@ -155,14 +155,14 @@ static int channel_gk20a_commit_userd(struct channel_gk20a *c)
        addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
        addr_hi = u64_hi32(c->userd_iova);
 
-       nvhost_dbg_info("channel %d : set ramfc userd 0x%16llx",
+       gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
                c->hw_chid, (u64)c->userd_iova);
 
-       mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
                 pbdma_userd_target_vid_mem_f() |
                 pbdma_userd_addr_f(addr_lo));
 
-       mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_hi_w(),
                 pbdma_userd_target_vid_mem_f() |
                 pbdma_userd_hi_addr_f(addr_hi));
 
@@ -207,7 +207,7 @@ static int channel_gk20a_set_schedule_params(struct channel_gk20a *c,
        }
 
        /* set new timeslice */
-       mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
                value | (shift << 12) |
                fifo_eng_timeslice_enable_true_f());
 
@@ -226,7 +226,7 @@ static int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
 {
        void *inst_ptr;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        inst_ptr = c->inst_block.cpuva;
        if (!inst_ptr)
@@ -234,23 +234,23 @@ static int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
 
        memset(inst_ptr, 0, ram_fc_size_val_v());
 
-       mem_wr32(inst_ptr, ram_fc_gp_base_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_w(),
                pbdma_gp_base_offset_f(
                u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
 
-       mem_wr32(inst_ptr, ram_fc_gp_base_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_hi_w(),
                pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
                pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
 
-       mem_wr32(inst_ptr, ram_fc_signature_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_signature_w(),
                 pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f());
 
-       mem_wr32(inst_ptr, ram_fc_formats_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_formats_w(),
                pbdma_formats_gp_fermi0_f() |
                pbdma_formats_pb_fermi1_f() |
                pbdma_formats_mp_fermi0_f());
 
-       mem_wr32(inst_ptr, ram_fc_pb_header_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_pb_header_w(),
                pbdma_pb_header_priv_user_f() |
                pbdma_pb_header_method_zero_f() |
                pbdma_pb_header_subchannel_zero_f() |
@@ -258,34 +258,34 @@ static int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
                pbdma_pb_header_first_true_f() |
                pbdma_pb_header_type_inc_f());
 
-       mem_wr32(inst_ptr, ram_fc_subdevice_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_subdevice_w(),
                pbdma_subdevice_id_f(1) |
                pbdma_subdevice_status_active_f() |
                pbdma_subdevice_channel_dma_enable_f());
 
-       mem_wr32(inst_ptr, ram_fc_target_w(), pbdma_target_engine_sw_f());
+       gk20a_mem_wr32(inst_ptr, ram_fc_target_w(), pbdma_target_engine_sw_f());
 
-       mem_wr32(inst_ptr, ram_fc_acquire_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_acquire_w(),
                pbdma_acquire_retry_man_2_f() |
                pbdma_acquire_retry_exp_2_f() |
                pbdma_acquire_timeout_exp_max_f() |
                pbdma_acquire_timeout_man_max_f() |
                pbdma_acquire_timeout_en_disable_f());
 
-       mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
                fifo_eng_timeslice_timeout_128_f() |
                fifo_eng_timeslice_timescale_3_f() |
                fifo_eng_timeslice_enable_true_f());
 
-       mem_wr32(inst_ptr, ram_fc_pb_timeslice_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_pb_timeslice_w(),
                fifo_pb_timeslice_timeout_16_f() |
                fifo_pb_timeslice_timescale_0_f() |
                fifo_pb_timeslice_enable_true_f());
 
-       mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+       gk20a_mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
 
        /* TBD: alwasy priv mode? */
-       mem_wr32(inst_ptr, ram_fc_hce_ctrl_w(),
+       gk20a_mem_wr32(inst_ptr, ram_fc_hce_ctrl_w(),
                 pbdma_hce_ctrl_hce_priv_mode_yes_f());
 
        gk20a_mm_l2_invalidate(c->g);
@@ -297,18 +297,18 @@ static int channel_gk20a_setup_userd(struct channel_gk20a *c)
 {
        BUG_ON(!c->userd_cpu_va);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
-       mem_wr32(c->userd_cpu_va, ram_userd_put_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_get_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_ref_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_put_hi_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_ref_threshold_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_hi_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_get_hi_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_gp_get_w(), 0);
-       mem_wr32(c->userd_cpu_va, ram_userd_gp_put_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_put_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_get_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_ref_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_put_hi_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_ref_threshold_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_hi_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_get_hi_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_get_w(), 0);
+       gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_put_w(), 0);
 
        gk20a_mm_l2_invalidate(c->g);
 
@@ -325,7 +325,7 @@ static void channel_gk20a_bind(struct channel_gk20a *ch_gk20a)
        u32 inst_ptr = ch_gk20a->inst_block.cpu_pa
                >> ram_in_base_shift_v();
 
-       nvhost_dbg_info("bind channel %d inst ptr 0x%08x",
+       gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
                ch_gk20a->hw_chid, inst_ptr);
 
        ch_gk20a->bound = true;
@@ -350,7 +350,7 @@ static void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
 {
        struct gk20a *g = ch_gk20a->g;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (ch_gk20a->bound)
                gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
@@ -367,7 +367,7 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
        int err = 0;
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ch->inst_block.size = ram_in_alloc_size_v();
        ch->inst_block.cpuva = dma_alloc_coherent(d,
@@ -375,7 +375,7 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
                                        &iova,
                                        GFP_KERNEL);
        if (!ch->inst_block.cpuva) {
-               nvhost_err(d, "%s: memory allocation failed\n", __func__);
+               gk20a_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -384,19 +384,19 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
        ch->inst_block.cpu_pa = gk20a_get_phys_from_iova(d,
                                                        ch->inst_block.iova);
        if (!ch->inst_block.cpu_pa) {
-               nvhost_err(d, "%s: failed to get physical address\n", __func__);
+               gk20a_err(d, "%s: failed to get physical address\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
 
-       nvhost_dbg_info("channel %d inst block physical addr: 0x%16llx",
+       gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
                ch->hw_chid, (u64)ch->inst_block.cpu_pa);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
 clean_up:
-       nvhost_err(d, "fail");
+       gk20a_err(d, "fail");
        channel_gk20a_free_inst(g, ch);
        return err;
 }
@@ -450,7 +450,7 @@ static int gk20a_wait_channel_idle(struct channel_gk20a *ch)
                        || !tegra_platform_is_silicon());
 
        if (!channel_idle)
-               nvhost_err(dev_from_gk20a(ch->g), "channel jobs not freed");
+               gk20a_err(dev_from_gk20a(ch->g), "channel jobs not freed");
 
        return 0;
 }
@@ -581,7 +581,7 @@ void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
                                (u32)(nsec >> 32);
                ch->error_notifier->info32 = error;
                ch->error_notifier->status = 0xffff;
-               nvhost_err(dev_from_gk20a(ch->g),
+               gk20a_err(dev_from_gk20a(ch->g),
                                "error notifier set to %d\n", error);
        }
 }
@@ -607,12 +607,12 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
        unsigned long timeout = gk20a_get_gr_idle_timeout(g);
        struct dbg_session_gk20a *dbg_s;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* if engine reset was deferred, perform it now */
        mutex_lock(&f->deferred_reset_mutex);
        if (g->fifo.deferred_reset_pending) {
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "engine reset was"
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
                           " deferred, running now");
                fifo_gk20a_finish_mmu_fault_handling(g, g->fifo.mmu_fault_engines);
                g->fifo.mmu_fault_engines = 0;
@@ -626,7 +626,7 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
        if (!gk20a_channel_as_bound(ch))
                goto unbind;
 
-       nvhost_dbg_info("freeing bound channel context, timeout=%ld",
+       gk20a_dbg_info("freeing bound channel context, timeout=%ld",
                        timeout);
 
        gk20a_disable_channel(ch, finish && !ch->has_timedout, timeout);
@@ -714,7 +714,7 @@ static struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
        ch = acquire_unused_channel(f);
        if (ch == NULL) {
                /* TBD: we want to make this virtualizable */
-               nvhost_err(dev_from_gk20a(g), "out of hw chids");
+               gk20a_err(dev_from_gk20a(g), "out of hw chids");
                return 0;
        }
 
@@ -722,7 +722,7 @@ static struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g)
 
        if (channel_gk20a_alloc_inst(g, ch)) {
                ch->in_use = false;
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "failed to open gk20a channel, out of inst mem");
 
                return 0;
@@ -757,7 +757,7 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
 
        err = gk20a_get_client(g);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to get client ref");
                return err;
        }
@@ -765,14 +765,14 @@ static int __gk20a_channel_open(struct gk20a *g, struct file *filp)
        err = gk20a_channel_busy(g->dev);
        if (err) {
                gk20a_put_client(g);
-               nvhost_err(dev_from_gk20a(g), "failed to power on, %d", err);
+               gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
                return err;
        }
        ch = gk20a_open_new_channel(g);
        gk20a_channel_idle(g->dev);
        if (!ch) {
                gk20a_put_client(g);
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to get f");
                return -ENOMEM;
        }
@@ -817,7 +817,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
                                        &iova,
                                        GFP_KERNEL);
        if (!q->mem.base_cpuva) {
-               nvhost_err(d, "%s: memory allocation failed\n", __func__);
+               gk20a_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -828,7 +828,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        err = gk20a_get_sgtable(d, &sgt,
                        q->mem.base_cpuva, q->mem.base_iova, size);
        if (err) {
-               nvhost_err(d, "%s: failed to create sg table\n", __func__);
+               gk20a_err(d, "%s: failed to create sg table\n", __func__);
                goto clean_up;
        }
 
@@ -839,7 +839,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
                                        0, /* flags */
                                        gk20a_mem_flag_none);
        if (!q->base_gpuva) {
-               nvhost_err(d, "ch %d : failed to map gpu va"
+               gk20a_err(d, "ch %d : failed to map gpu va"
                           "for priv cmd buffer", c->hw_chid);
                err = -ENOMEM;
                goto clean_up_sgt;
@@ -854,7 +854,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        for (i = 0; i < q->size / 4; i++) {
                e = kzalloc(sizeof(struct priv_cmd_entry), GFP_KERNEL);
                if (!e) {
-                       nvhost_err(d, "ch %d: fail to pre-alloc cmd entry",
+                       gk20a_err(d, "ch %d: fail to pre-alloc cmd entry",
                                c->hw_chid);
                        err = -ENOMEM;
                        goto clean_up_sgt;
@@ -923,7 +923,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
        u32 size = orig_size;
        bool no_retry = false;
 
-       nvhost_dbg_fn("size %d", orig_size);
+       gk20a_dbg_fn("size %d", orig_size);
 
        *entry = NULL;
 
@@ -932,7 +932,7 @@ int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 orig_size,
        if (q->put + size > q->size)
                size = orig_size + (q->size - q->put);
 
-       nvhost_dbg_info("ch %d: priv cmd queue get:put %d:%d",
+       gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
                        c->hw_chid, q->get, q->put);
 
 TRY_AGAIN:
@@ -949,12 +949,12 @@ TRY_AGAIN:
 
        if (unlikely(list_empty(&q->free))) {
 
-               nvhost_dbg_info("ch %d: run out of pre-alloc entries",
+               gk20a_dbg_info("ch %d: run out of pre-alloc entries",
                        c->hw_chid);
 
                e = kzalloc(sizeof(struct priv_cmd_entry), GFP_KERNEL);
                if (!e) {
-                       nvhost_err(dev_from_gk20a(c->g),
+                       gk20a_err(dev_from_gk20a(c->g),
                                "ch %d: fail to allocate priv cmd entry",
                                c->hw_chid);
                        return -ENOMEM;
@@ -990,7 +990,7 @@ TRY_AGAIN:
 
        *entry = e;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -1024,12 +1024,12 @@ static void recycle_priv_cmdbuf(struct channel_gk20a *c)
        struct list_head *head = &q->head;
        bool wrap_around, found = false;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* Find the most recent free entry. Free it and everything before it */
        list_for_each_entry(e, head, list) {
 
-               nvhost_dbg_info("ch %d: cmd entry get:put:wrap %d:%d:%d "
+               gk20a_dbg_info("ch %d: cmd entry get:put:wrap %d:%d:%d "
                        "curr get:put:wrap %d:%d:%d",
                        c->hw_chid, e->gp_get, e->gp_put, e->gp_wrap,
                        c->gpfifo.get, c->gpfifo.put, c->gpfifo.wrap);
@@ -1055,7 +1055,7 @@ static void recycle_priv_cmdbuf(struct channel_gk20a *c)
        if (found)
                q->get = (e->ptr - q->mem.base_cpuva) + e->size;
        else {
-               nvhost_dbg_info("no free entry recycled");
+               gk20a_dbg_info("no free entry recycled");
                return;
        }
 
@@ -1063,7 +1063,7 @@ static void recycle_priv_cmdbuf(struct channel_gk20a *c)
                free_priv_cmdbuf(c, e);
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 
@@ -1087,7 +1087,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 
        /* an address space needs to have been bound at this point.   */
        if (!gk20a_channel_as_bound(c)) {
-               nvhost_err(d,
+               gk20a_err(d,
                            "not bound to an address space at time of gpfifo"
                            " allocation.  Attempting to create and bind to"
                            " one...");
@@ -1102,7 +1102,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
        c->ramfc.size = ram_in_ramfc_s() / 8;
 
        if (c->gpfifo.cpu_va) {
-               nvhost_err(d, "channel %d :"
+               gk20a_err(d, "channel %d :"
                           "gpfifo already allocated", c->hw_chid);
                return -EEXIST;
        }
@@ -1113,7 +1113,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
                                                &iova,
                                                GFP_KERNEL);
        if (!c->gpfifo.cpu_va) {
-               nvhost_err(d, "%s: memory allocation failed\n", __func__);
+               gk20a_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -1126,7 +1126,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
        err = gk20a_get_sgtable(d, &sgt,
                        c->gpfifo.cpu_va, c->gpfifo.iova, c->gpfifo.size);
        if (err) {
-               nvhost_err(d, "%s: failed to allocate sg table\n", __func__);
+               gk20a_err(d, "%s: failed to allocate sg table\n", __func__);
                goto clean_up;
        }
 
@@ -1136,13 +1136,13 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
                                        0, /* flags */
                                        gk20a_mem_flag_none);
        if (!c->gpfifo.gpu_va) {
-               nvhost_err(d, "channel %d : failed to map"
+               gk20a_err(d, "channel %d : failed to map"
                           " gpu_va for gpfifo", c->hw_chid);
                err = -ENOMEM;
                goto clean_up_sgt;
        }
 
-       nvhost_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
+       gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
                c->hw_chid, c->gpfifo.gpu_va, c->gpfifo.entry_num);
 
        channel_gk20a_setup_ramfc(c, c->gpfifo.gpu_va, c->gpfifo.entry_num);
@@ -1164,7 +1164,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 
        gk20a_free_sgtable(&sgt);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
 clean_up_unmap:
@@ -1178,7 +1178,7 @@ clean_up:
        c->gpfifo.cpu_va = NULL;
        c->gpfifo.iova = 0;
        memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
-       nvhost_err(d, "fail");
+       gk20a_err(d, "fail");
        return err;
 }
 
@@ -1203,7 +1203,7 @@ static inline bool check_gp_put(struct gk20a *g,
               c->userd_gpu_va + 4 * ram_userd_gp_put_w());
        if (c->gpfifo.put != put) {
                /*TBD: BUG_ON/teardown on this*/
-               nvhost_err(dev_from_gk20a(g), "gp_put changed unexpectedly "
+               gk20a_err(dev_from_gk20a(g), "gp_put changed unexpectedly "
                           "since last update");
                c->gpfifo.put = put;
                return false; /* surprise! */
@@ -1272,7 +1272,7 @@ static int gk20a_channel_submit_wfi(struct channel_gk20a *c)
        update_gp_get(g, c);
        free_count = gp_free_count(c);
        if (unlikely(!free_count)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "not enough gpfifo space");
                return -EAGAIN;
        }
@@ -1296,7 +1296,7 @@ static int gk20a_channel_submit_wfi(struct channel_gk20a *c)
                c->userd_gpu_va + 4 * ram_userd_gp_put_w(),
                c->gpfifo.put);
 
-       nvhost_dbg_info("post-submit put %d, get %d, size %d",
+       gk20a_dbg_info("post-submit put %d, get %d, size %d",
                c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
 
        return 0;
@@ -1448,7 +1448,7 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
                g->ops.ltc.sync_debugfs(g);
 #endif
 
-       nvhost_dbg_info("channel %d", c->hw_chid);
+       gk20a_dbg_info("channel %d", c->hw_chid);
 
        /* gk20a_channel_update releases this ref. */
        gk20a_channel_busy(g->dev);
@@ -1461,7 +1461,7 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
        check_gp_put(g, c);
        update_gp_get(g, c);
 
-       nvhost_dbg_info("pre-submit put %d, get %d, size %d",
+       gk20a_dbg_info("pre-submit put %d, get %d, size %d",
                c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
 
        /* Invalidate tlb if it's dirty...                                   */
@@ -1485,7 +1485,7 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
        }
 
        if (err) {
-               nvhost_err(d, "not enough gpfifo space");
+               gk20a_err(d, "not enough gpfifo space");
                err = -EAGAIN;
                goto clean_up;
        }
@@ -1588,14 +1588,14 @@ static int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
                c->userd_gpu_va + 4 * ram_userd_gp_put_w(),
                c->gpfifo.put);
 
-       nvhost_dbg_info("post-submit put %d, get %d, size %d",
+       gk20a_dbg_info("post-submit put %d, get %d, size %d",
                c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return err;
 
 clean_up:
-       nvhost_err(d, "fail");
+       gk20a_err(d, "fail");
        free_priv_cmdbuf(c, wait_cmd);
        free_priv_cmdbuf(c, incr_cmd);
        gk20a_channel_idle(g->dev);
@@ -1638,7 +1638,7 @@ int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout)
                return -ETIMEDOUT;
 
        if (!(ch->last_submit_fence.valid && ch->last_submit_fence.wfi)) {
-               nvhost_dbg_fn("issuing wfi, incr to finish the channel");
+               gk20a_dbg_fn("issuing wfi, incr to finish the channel");
                err = gk20a_channel_submit_wfi(ch);
        }
        if (err)
@@ -1646,7 +1646,7 @@ int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout)
 
        BUG_ON(!(ch->last_submit_fence.valid && ch->last_submit_fence.wfi));
 
-       nvhost_dbg_fn("waiting for channel to finish thresh:%d",
+       gk20a_dbg_fn("waiting for channel to finish thresh:%d",
                      ch->last_submit_fence.thresh);
 
        err = ch->sync->wait_cpu(ch->sync, &ch->last_submit_fence, timeout);
@@ -1676,14 +1676,14 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
 
        dmabuf = dma_buf_get(id);
        if (IS_ERR(dmabuf)) {
-               nvhost_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
+               gk20a_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
                           id);
                return -EINVAL;
        }
 
        data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
        if (!data) {
-               nvhost_err(&pdev->dev, "failed to map notifier memory");
+               gk20a_err(&pdev->dev, "failed to map notifier memory");
                ret = -EINVAL;
                goto cleanup_put;
        }
@@ -1719,7 +1719,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
        unsigned long timeout;
        int remain, ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (ch->has_timedout)
                return -ETIMEDOUT;
@@ -1736,14 +1736,14 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
 
                dmabuf = dma_buf_get(id);
                if (IS_ERR(dmabuf)) {
-                       nvhost_err(d, "invalid notifier nvmap handle 0x%lx",
+                       gk20a_err(d, "invalid notifier nvmap handle 0x%lx",
                                   id);
                        return -EINVAL;
                }
 
                notif = dma_buf_vmap(dmabuf);
                if (!notif) {
-                       nvhost_err(d, "failed to map notifier memory");
+                       gk20a_err(d, "failed to map notifier memory");
                        return -ENOMEM;
                }
 
@@ -1826,7 +1826,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
        struct gk20a *g = ch->g;
        struct gr_gk20a *gr = &g->gr;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        return gr_gk20a_bind_ctxsw_zcull(g, gr, ch,
                                args->gpu_va, args->mode);
@@ -1842,7 +1842,7 @@ int gk20a_channel_suspend(struct gk20a *g)
        struct device *d = dev_from_gk20a(g);
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* idle the engine by submitting WFI on non-KEPLER_C channel */
        for (chid = 0; chid < f->num_channels; chid++) {
@@ -1850,7 +1850,7 @@ int gk20a_channel_suspend(struct gk20a *g)
                if (c->in_use && c->obj_class != KEPLER_C) {
                        err = gk20a_channel_submit_wfi(c);
                        if (err) {
-                               nvhost_err(d, "cannot idle channel %d\n",
+                               gk20a_err(d, "cannot idle channel %d\n",
                                                chid);
                                return err;
                        }
@@ -1864,7 +1864,7 @@ int gk20a_channel_suspend(struct gk20a *g)
        for (chid = 0; chid < f->num_channels; chid++) {
                if (f->channel[chid].in_use) {
 
-                       nvhost_dbg_info("suspend channel %d", chid);
+                       gk20a_dbg_info("suspend channel %d", chid);
                        /* disable channel */
                        gk20a_writel(g, ccsr_channel_r(chid),
                                gk20a_readl(g, ccsr_channel_r(chid)) |
@@ -1885,7 +1885,7 @@ int gk20a_channel_suspend(struct gk20a *g)
                }
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -1897,11 +1897,11 @@ int gk20a_channel_resume(struct gk20a *g)
        u32 chid;
        bool channels_in_use = false;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        for (chid = 0; chid < f->num_channels; chid++) {
                if (f->channel[chid].in_use) {
-                       nvhost_dbg_info("resume channel %d", chid);
+                       gk20a_dbg_info("resume channel %d", chid);
                        channel_gk20a_bind(&f->channel[chid]);
                        channels_in_use = true;
                }
@@ -1910,7 +1910,7 @@ int gk20a_channel_resume(struct gk20a *g)
        if (channels_in_use)
                gk20a_fifo_update_runlist(g, 0, ~0, true, true);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -1919,7 +1919,7 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g)
        struct fifo_gk20a *f = &g->fifo;
        u32 chid;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        for (chid = 0; chid < f->num_channels; chid++) {
                struct channel_gk20a *c = g->fifo.channel+chid;
@@ -1936,7 +1936,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
        u32 size;
        int ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (ch->has_timedout)
                return -ETIMEDOUT;
@@ -2073,7 +2073,7 @@ long gk20a_channel_ioctl(struct file *filp,
        {
                u32 timeout =
                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
-               nvhost_dbg(dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+               gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
                           timeout, ch->hw_chid);
                ch->timeout_ms_max = timeout;
                break;
@@ -2085,7 +2085,7 @@ long gk20a_channel_ioctl(struct file *filp,
                bool timeout_debug_dump = !((u32)
                        ((struct nvhost_set_timeout_ex_args *)buf)->flags &
                        (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
-               nvhost_dbg(dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+               gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
                           timeout, ch->hw_chid);
                ch->timeout_ms_max = timeout;
                ch->timeout_debug_dump = timeout_debug_dump;
index a9a383066df8e0264ef5d0e684a1999c9068a627..ac623315982e4accd4808965853851fe95f58f9a 100644 (file)
@@ -89,7 +89,7 @@ int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
 
        gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
        if (wait_cmd == NULL) {
-               nvhost_err(dev_from_gk20a(sp->c->g),
+               gk20a_err(dev_from_gk20a(sp->c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }
@@ -120,7 +120,7 @@ int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
        num_wait_cmds = nvhost_sync_num_pts(sync_fence);
        gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
        if (wait_cmd == NULL) {
-               nvhost_err(dev_from_gk20a(c->g),
+               gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                sync_fence_put(sync_fence);
                return -EAGAIN;
@@ -186,7 +186,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
        if (incr_cmd == NULL) {
                gk20a_channel_idle(c->g->dev);
                kfree(completed_waiter);
-               nvhost_err(dev_from_gk20a(c->g),
+               gk20a_err(dev_from_gk20a(c->g),
                                "not enough priv cmd buffer space");
                return -EAGAIN;
        }
index 2e45b40b9eb6e6d77cd4a51c97beda4977cb5774..acc7f8f36d03438a360053c92483a7ae1d93afdb 100644 (file)
@@ -31,8 +31,8 @@
 #include "hw_trim_gk20a.h"
 #include "hw_timer_gk20a.h"
 
-#define nvhost_dbg_clk(fmt, arg...) \
-       nvhost_dbg(dbg_clk, fmt, ##arg)
+#define gk20a_dbg_clk(fmt, arg...) \
+       gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
 
 /* from vbios PLL info table */
 struct pll_parms gpc_pll_params = {
@@ -81,7 +81,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
 
        BUG_ON(target_freq == NULL);
 
-       nvhost_dbg_fn("request target freq %d MHz", *target_freq);
+       gk20a_dbg_fn("request target freq %d MHz", *target_freq);
 
        ref_clk_f = pll->clk_in;
        target_clk_f = *target_freq;
@@ -116,7 +116,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
                        break;
                }
        }
-       nvhost_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
+       gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
                        low_PL, pl_to_div[low_PL], high_PL, pl_to_div[high_PL]);
 
        for (pl = low_PL; pl <= high_PL; pl++) {
@@ -161,7 +161,7 @@ static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
                                                        goto found_match;
                                                }
 
-                                               nvhost_dbg_info("delta %d @ M %d, N %d, PL %d",
+                                               gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
                                                        delta, m, n, pl);
                                        }
                                }
@@ -173,7 +173,7 @@ found_match:
        BUG_ON(best_delta == ~0);
 
        if (best_fit && best_delta != 0)
-               nvhost_dbg_clk("no best match for target @ %dMHz on gpc_pll",
+               gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
                        target_clk_f);
 
        pll->M = best_M;
@@ -185,10 +185,10 @@ found_match:
 
        *target_freq = pll->freq;
 
-       nvhost_dbg_clk("actual target freq %d MHz, M %d, N %d, PL %d(div%d)",
+       gk20a_dbg_clk("actual target freq %d MHz, M %d, N %d, PL %d(div%d)",
                *target_freq, pll->M, pll->N, pll->PL, pl_to_div[pll->PL]);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -260,7 +260,7 @@ static int clk_slide_gpc_pll(struct gk20a *g, u32 n)
        gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
 
        if (ramp_timeout <= 0) {
-               nvhost_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
+               gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
                return -ETIMEDOUT;
        }
        return 0;
@@ -273,7 +273,7 @@ static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
        u32 m, n, pl;
        u32 nlo;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!tegra_platform_is_silicon())
                return 0;
@@ -418,7 +418,7 @@ static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
 
 static int gk20a_init_clk_reset_enable_hw(struct gk20a *g)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        return 0;
 }
 
@@ -429,7 +429,7 @@ struct clk *gk20a_clk_get(struct gk20a *g)
 
                clk = clk_get_sys("tegra_gk20a", "gpu");
                if (IS_ERR(clk)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to get tegra gpu clk tegra_gk20a/gpu");
                        return NULL;
                }
@@ -448,10 +448,10 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
        struct clk *ref;
        unsigned long ref_rate;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (clk->sw_ready) {
-               nvhost_dbg_fn("skip init");
+               gk20a_dbg_fn("skip init");
                return 0;
        }
 
@@ -460,7 +460,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
 
        ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
        if (IS_ERR(ref)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to get GPCPLL reference clock");
                return -EINVAL;
        }
@@ -512,7 +512,7 @@ static int gk20a_init_clk_setup_sw(struct gk20a *g)
 
        clk->sw_ready = true;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -520,7 +520,7 @@ static int gk20a_init_clk_setup_hw(struct gk20a *g)
 {
        u32 data;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
        data = set_field(data,
@@ -548,7 +548,7 @@ static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
                /* gpc_pll.freq is changed to new value here */
                if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
                                   &freq, true)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "failed to set pll target for %d", freq);
                        return -EINVAL;
                }
@@ -561,7 +561,7 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq)
        struct clk_gk20a *clk = &g->clk;
        int err = 0;
 
-       nvhost_dbg_fn("curr freq: %dMHz, target freq %dMHz", old_freq, freq);
+       gk20a_dbg_fn("curr freq: %dMHz, target freq %dMHz", old_freq, freq);
 
        if ((freq == old_freq) && clk->gpc_pll.enabled)
                return 0;
@@ -576,7 +576,7 @@ static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq)
        /* Just report error but not restore PLL since dvfs could already change
            voltage even when it returns error. */
        if (err)
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to set pll to %d", freq);
        return err;
 }
@@ -668,7 +668,7 @@ int gk20a_init_clk_support(struct gk20a *g)
        struct clk_gk20a *clk = &g->clk;
        u32 err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        clk->g = g;
 
index 66ff839157cc29d314ba56966cb09664a9c8b05b..1d959b739291e292110353b2b122e6d8b991c489 100644 (file)
@@ -32,7 +32,7 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
        int err;
        struct gk20a *g;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        g = container_of(inode->i_cdev,
                         struct gk20a, ctrl.cdev);
@@ -41,7 +41,7 @@ int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
 
        err = gk20a_get_client(g);
        if (err) {
-               nvhost_dbg_fn("fail to get channel!");
+               gk20a_dbg_fn("fail to get channel!");
                return err;
        }
 
@@ -52,7 +52,7 @@ int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
 {
        struct platform_device *dev = filp->private_data;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_put_client(get_gk20a(dev));
        return 0;
@@ -97,7 +97,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
        struct zbc_query_params *zbc_tbl;
        int i, err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if ((_IOC_TYPE(cmd) != NVHOST_GPU_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
@@ -232,7 +232,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
                break;
 
        default:
-               nvhost_err(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
+               gk20a_err(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
                err = -ENOTTY;
                break;
        }
index 8ccea8e2fae9afab88f10f1a8bc8203cab680b51..4e556ff679379348a0a53318be93425da26b16ca 100644 (file)
@@ -48,7 +48,7 @@ static int alloc_session(struct dbg_session_gk20a **_dbg_s)
        struct dbg_session_gk20a *dbg_s;
        *_dbg_s = NULL;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        dbg_s = kzalloc(sizeof(*dbg_s), GFP_KERNEL);
        if (!dbg_s)
@@ -79,7 +79,7 @@ int gk20a_dbg_gpu_do_dev_open(struct inode *inode, struct file *filp, bool is_pr
        pdev = g->dev;
        dev  = &pdev->dev;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
 
        err  = alloc_session(&dbg_session);
        if (err)
@@ -121,7 +121,7 @@ static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s)
 
 static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        gk20a_dbg_session_mutex_lock(dbg_s);
 
@@ -133,7 +133,7 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
 
 static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        gk20a_dbg_session_mutex_lock(dbg_s);
 
@@ -145,7 +145,7 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
 
 static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        gk20a_dbg_session_mutex_lock(dbg_s);
 
@@ -161,10 +161,10 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 {
        int ret = 0;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
 
        if (!dbg_s->ch) {
-               nvhost_err(dev_from_gk20a(dbg_s->g),
+               gk20a_err(dev_from_gk20a(dbg_s->g),
                           "no channel bound to dbg session\n");
                return -EINVAL;
        }
@@ -183,7 +183,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
                break;
 
        default:
-               nvhost_err(dev_from_gk20a(dbg_s->g),
+               gk20a_err(dev_from_gk20a(dbg_s->g),
                           "unrecognized dbg gpu events ctrl cmd: 0x%x",
                           args->cmd);
                ret = -EINVAL;
@@ -198,7 +198,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
        unsigned int mask = 0;
        struct dbg_session_gk20a *dbg_s = filep->private_data;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
 
@@ -206,9 +206,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 
        if (dbg_s->dbg_events.events_enabled &&
                        dbg_s->dbg_events.num_pending_events > 0) {
-               nvhost_dbg(dbg_gpu_dbg, "found pending event on session id %d",
+               gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
                                dbg_s->id);
-               nvhost_dbg(dbg_gpu_dbg, "%d events pending",
+               gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
                                dbg_s->dbg_events.num_pending_events);
                mask = (POLLPRI | POLLIN);
        }
@@ -220,13 +220,13 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 
 int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
        return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
 }
 
 int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
        return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
 }
 
@@ -234,16 +234,16 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 {
        struct dbg_session_gk20a *dbg_s;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        /* guard against the session list being modified */
        mutex_lock(&ch->dbg_s_lock);
 
        list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
                if (dbg_s->dbg_events.events_enabled) {
-                       nvhost_dbg(dbg_gpu_dbg, "posting event on session id %d",
+                       gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
                                        dbg_s->id);
-                       nvhost_dbg(dbg_gpu_dbg, "%d events pending",
+                       gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
                                        dbg_s->dbg_events.num_pending_events);
 
                        dbg_s->dbg_events.num_pending_events++;
@@ -264,11 +264,11 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
        struct channel_gk20a *ch_gk20a = dbg_s->ch;
        struct gk20a *g = dbg_s->g;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        /* wasn't bound to start with ? */
        if (!ch_gk20a) {
-               nvhost_dbg(dbg_gpu_dbg | dbg_fn, "not bound already?");
+               gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "not bound already?");
                return -ENODEV;
        }
 
@@ -299,7 +299,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 {
        struct dbg_session_gk20a *dbg_s = filp->private_data;
 
-       nvhost_dbg(dbg_gpu_dbg | dbg_fn, "%s", dev_name(dbg_s->dev));
+       gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", dev_name(dbg_s->dev));
 
        /* unbind if it was bound */
        if (!dbg_s->ch)
@@ -317,7 +317,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
        struct gk20a *g;
        struct channel_gk20a *ch;
 
-       nvhost_dbg(dbg_fn|dbg_gpu_dbg, "%s fd=%d",
+       gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
                   dev_name(dbg_s->dev), args->channel_fd);
 
        if (args->channel_fd == ~0)
@@ -332,13 +332,13 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 
        ch = gk20a_get_channel_from_file(args->channel_fd);
        if (!ch) {
-               nvhost_dbg_fn("no channel found for fd");
+               gk20a_dbg_fn("no channel found for fd");
                fput(f);
                return -EINVAL;
        }
 
        g = dbg_s->g;
-       nvhost_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
+       gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
 
        mutex_lock(&g->dbg_sessions_lock);
        mutex_lock(&ch->dbg_s_lock);
@@ -371,7 +371,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
        u8 buf[NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE];
        int err = 0;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        if ((_IOC_TYPE(cmd) != NVHOST_DBG_GPU_IOCTL_MAGIC) ||
            (_IOC_NR(cmd) == 0) ||
@@ -397,19 +397,19 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
        case NVHOST_DBG_GPU_IOCTL_BIND_CHANNEL:
                err = dbg_bind_channel_gk20a(dbg_s,
                             (struct nvhost_dbg_gpu_bind_channel_args *)buf);
-               nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+               gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
                break;
 
        case NVHOST_DBG_GPU_IOCTL_REG_OPS:
                err = nvhost_ioctl_channel_reg_ops(dbg_s,
                           (struct nvhost_dbg_gpu_exec_reg_ops_args *)buf);
-               nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+               gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
                break;
 
        case NVHOST_DBG_GPU_IOCTL_POWERGATE:
                err = nvhost_ioctl_powergate_gk20a(dbg_s,
                           (struct nvhost_dbg_gpu_powergate_args *)buf);
-               nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+               gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
                break;
 
        case NVHOST_DBG_GPU_IOCTL_EVENTS_CTRL:
@@ -423,7 +423,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
                break;
 
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "unrecognized dbg gpu ioctl cmd: 0x%x",
                           cmd);
                err = -ENOTTY;
@@ -469,31 +469,31 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
        struct nvhost_dbg_gpu_reg_op *ops;
        u64 ops_size = sizeof(ops[0]) * args->num_ops;
 
-       nvhost_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
+       gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
 
        if (!dbg_s->ops) {
-               nvhost_err(dev, "can't call reg_ops on an unbound debugger session");
+               gk20a_err(dev, "can't call reg_ops on an unbound debugger session");
                return -EINVAL;
        }
 
        if (!dbg_s->is_profiler && !dbg_s->ch) {
-               nvhost_err(dev, "bind a channel before regops for a debugging session");
+               gk20a_err(dev, "bind a channel before regops for a debugging session");
                return -EINVAL;
        }
 
        /* be sure that ctx info is in place */
        if (!gr_context_info_available(dbg_s, &g->gr)) {
-               nvhost_err(dev, "gr context data not available\n");
+               gk20a_err(dev, "gr context data not available\n");
                return -ENODEV;
        }
 
        ops = kzalloc(ops_size, GFP_KERNEL);
        if (!ops) {
-               nvhost_err(dev, "Allocating memory failed!");
+               gk20a_err(dev, "Allocating memory failed!");
                return -ENOMEM;
        }
 
-       nvhost_dbg_fn("Copying regops from userspace");
+       gk20a_dbg_fn("Copying regops from userspace");
 
        if (copy_from_user(ops, (void *)(uintptr_t)args->ops, ops_size)) {
                dev_err(dev, "copy_from_user failed!");
@@ -511,11 +511,11 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
        mutex_unlock(&g->dbg_sessions_lock);
 
        if (err) {
-               nvhost_err(dev, "dbg regops failed");
+               gk20a_err(dev, "dbg regops failed");
                goto clean_up;
        }
 
-       nvhost_dbg_fn("Copying result to userspace");
+       gk20a_dbg_fn("Copying result to userspace");
 
        if (copy_to_user((void *)(uintptr_t)args->ops, ops, ops_size)) {
                dev_err(dev, "copy_to_user failed!");
@@ -536,7 +536,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 
         /* This function must be called with g->dbg_sessions_lock held */
 
-       nvhost_dbg(dbg_fn|dbg_gpu_dbg, "%s powergate mode = %d",
+       gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d",
                   dev_name(dbg_s->dev), powermode);
 
        switch (powermode) {
@@ -556,7 +556,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
                if ((dbg_s->is_pg_disabled == false) &&
                    (g->dbg_powergating_disabled_refcount++ == 0)) {
 
-                       nvhost_dbg(dbg_gpu_dbg | dbg_fn, "module busy");
+                       gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
                        gk20a_busy(g->dev);
                        gk20a_channel_busy(dbg_s->pdev);
 
@@ -596,7 +596,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 
                        gk20a_pmu_enable_elpg(g);
 
-                       nvhost_dbg(dbg_gpu_dbg | dbg_fn, "module idle");
+                       gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
                        gk20a_channel_idle(dbg_s->pdev);
                        gk20a_idle(g->dev);
                }
@@ -605,7 +605,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
                break;
 
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "unrecognized dbg gpu powergate mode: 0x%x",
                           powermode);
                err = -ENOTTY;
@@ -620,7 +620,7 @@ static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
 {
        int err;
        struct gk20a *g = get_gk20a(dbg_s->pdev);
-       nvhost_dbg_fn("%s  powergate mode = %d",
+       gk20a_dbg_fn("%s  powergate mode = %d",
                      dev_name(dbg_s->dev), args->mode);
 
        mutex_lock(&g->dbg_sessions_lock);
@@ -636,8 +636,8 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
        struct gk20a *g = get_gk20a(dbg_s->pdev);
        struct channel_gk20a *ch_gk20a;
 
-       nvhost_dbg_fn("%s smpc ctxsw mode = %d",
-                     dev_name(dbg_s->dev), args->mode);
+       gk20a_dbg_fn("%s smpc ctxsw mode = %d",
+                    dev_name(dbg_s->dev), args->mode);
 
        /* Take the global lock, since we'll be doing global regops */
        mutex_lock(&g->dbg_sessions_lock);
@@ -645,8 +645,8 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
        ch_gk20a = dbg_s->ch;
 
        if (!ch_gk20a) {
-               nvhost_err(dev_from_gk20a(dbg_s->g),
-                  "no bound channel for smpc ctxsw mode update\n");
+               gk20a_err(dev_from_gk20a(dbg_s->g),
+                         "no bound channel for smpc ctxsw mode update\n");
                err = -EINVAL;
                goto clean_up;
        }
@@ -654,8 +654,8 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
        err = gr_gk20a_update_smpc_ctxsw_mode(g, ch_gk20a,
                      args->mode == NVHOST_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
        if (err) {
-               nvhost_err(dev_from_gk20a(dbg_s->g),
-                          "error (%d) during smpc ctxsw mode update\n", err);
+               gk20a_err(dev_from_gk20a(dbg_s->g),
+                         "error (%d) during smpc ctxsw mode update\n", err);
                goto clean_up;
        }
        /* The following regops are a hack/war to make up for the fact that we
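
The changes in this file are a mechanical rename: nvhost_dbg(dbg_gpu_dbg, ...) becomes gk20a_dbg(gpu_dbg_gpu_dbg, ...), and nvhost_err()/nvhost_dbg_fn() become gk20a_err()/gk20a_dbg_fn(). For reference, a minimal stand-alone sketch of how a mask-gated debug macro family of this kind can be written is shown below; the gpu_dbg_* bit names appear in the patch itself, but the mask variable, the macro bodies and the printf backend are illustrative assumptions, not the driver's actual definitions.

/* Illustrative only; not the gk20a driver's real definitions. */
#include <stdio.h>

enum gpu_dbg_categories {
	gpu_dbg_fn      = 1 << 0,  /* function entry/exit traces     */
	gpu_dbg_info    = 1 << 1,  /* general informational messages */
	gpu_dbg_intr    = 1 << 2,  /* interrupt handling             */
	gpu_dbg_gpu_dbg = 1 << 3,  /* debugger/profiler ioctl path   */
};

/* assumed runtime mask selecting which categories are printed */
static unsigned int gk20a_dbg_mask = gpu_dbg_fn | gpu_dbg_gpu_dbg;

#define gk20a_dbg(mask, fmt, ...) \
	do { \
		if ((mask) & gk20a_dbg_mask) \
			printf("gk20a dbg: " fmt "\n", ##__VA_ARGS__); \
	} while (0)

#define gk20a_dbg_fn(fmt, ...) \
	gk20a_dbg(gpu_dbg_fn, "%s: " fmt, __func__, ##__VA_ARGS__)

int main(void)
{
	int err = 0;

	gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);    /* printed, bit is set in the mask */
	gk20a_dbg(gpu_dbg_intr, "fifo isr %08x", 0u); /* suppressed by the mask          */
	gk20a_dbg_fn("");                             /* entry trace, as in the patch    */
	return 0;
}
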
index 5cd3602595efb4f28ee9a8b64d2fd58296c000b1..81094aa13d4256a5244f439d63ac33230b6374e5 100644 (file)
@@ -118,23 +118,23 @@ static void gk20a_debug_show_channel(struct gk20a *g,
        gk20a_debug_output(o, "TOP: %016llx PUT: %016llx GET: %016llx "
                        "FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
                        "SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n",
-               (u64)mem_rd32(inst_ptr, ram_fc_pb_top_level_get_w()) +
-               ((u64)mem_rd32(inst_ptr,
+               (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_top_level_get_w()) +
+               ((u64)gk20a_mem_rd32(inst_ptr,
                        ram_fc_pb_top_level_get_hi_w()) << 32ULL),
-               (u64)mem_rd32(inst_ptr, ram_fc_pb_put_w()) +
-               ((u64)mem_rd32(inst_ptr, ram_fc_pb_put_hi_w()) << 32ULL),
-               (u64)mem_rd32(inst_ptr, ram_fc_pb_get_w()) +
-               ((u64)mem_rd32(inst_ptr, ram_fc_pb_get_hi_w()) << 32ULL),
-               (u64)mem_rd32(inst_ptr, ram_fc_pb_fetch_w()) +
-               ((u64)mem_rd32(inst_ptr, ram_fc_pb_fetch_hi_w()) << 32ULL),
-               mem_rd32(inst_ptr, ram_fc_pb_header_w()),
-               mem_rd32(inst_ptr, ram_fc_pb_count_w()),
-               mem_rd32(inst_ptr, ram_fc_syncpointa_w()),
-               mem_rd32(inst_ptr, ram_fc_syncpointb_w()),
-               mem_rd32(inst_ptr, ram_fc_semaphorea_w()),
-               mem_rd32(inst_ptr, ram_fc_semaphoreb_w()),
-               mem_rd32(inst_ptr, ram_fc_semaphorec_w()),
-               mem_rd32(inst_ptr, ram_fc_semaphored_w()));
+               (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_put_w()) +
+               ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_put_hi_w()) << 32ULL),
+               (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_get_w()) +
+               ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_get_hi_w()) << 32ULL),
+               (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_fetch_w()) +
+               ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_fetch_hi_w()) << 32ULL),
+               gk20a_mem_rd32(inst_ptr, ram_fc_pb_header_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_pb_count_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_syncpointa_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_syncpointb_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_semaphorea_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_semaphoreb_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_semaphorec_w()),
+               gk20a_mem_rd32(inst_ptr, ram_fc_semaphored_w()));
 
        gk20a_debug_output(o, "\n");
 }
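
The gk20a_debug_show_channel() hunk above rebuilds the 64-bit TOP/PUT/GET/FETCH pointers by combining two 32-bit RAMFC words. Below is a tiny stand-alone sketch of that lo + (hi << 32) composition; the word indices and the mem_rd32() helper are placeholders, only the composition pattern mirrors the code above.

#include <stdint.h>
#include <stdio.h>

/* stand-in for gk20a_mem_rd32(): read the w-th 32-bit word of a buffer */
static uint32_t mem_rd32(const void *ptr, int w)
{
	return ((const uint32_t *)ptr)[w];
}

int main(void)
{
	/* pretend RAMFC: low word of GET at index 0, high word at index 1 */
	uint32_t ramfc[2] = { 0xdeadbeef, 0x00000012 };

	uint64_t get = (uint64_t)mem_rd32(ramfc, 0) +
		       ((uint64_t)mem_rd32(ramfc, 1) << 32ULL);

	printf("GET: %016llx\n", (unsigned long long)get); /* 00000012deadbeef */
	return 0;
}
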
index 6bdfc0f31b07e7ffa696f32cc2eabcf8fb42af86..e82d1b2114c86f4cd91ec3259ef0f0ce1832f2e1 100644 (file)
@@ -21,7 +21,7 @@
 
 static void fb_gk20a_reset(struct gk20a *g)
 {
-       nvhost_dbg_info("reset gk20a fb");
+       gk20a_dbg_info("reset gk20a fb");
 
        gk20a_reset(g, mc_enable_pfb_enabled_f()
                        | mc_enable_l2_enabled_f()
index 0454f1a7643a73d63c0b05c2ca20b6350d84ff9d..00968d36c9563be65b2f92a35eb4b1f2a386d9c9 100644 (file)
@@ -80,7 +80,7 @@ static int init_engine_info(struct fifo_gk20a *f)
        u32 i;
        u32 max_info_entries = top_device_info__size_1_v();
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* all we really care about finding is the graphics entry    */
        /* especially early on in sim it probably thinks it has more */
@@ -122,23 +122,23 @@ static int init_engine_info(struct fifo_gk20a *f)
 
                        gr_info->runlist_id =
                                top_device_info_runlist_enum_v(table_entry);
-                       nvhost_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
+                       gk20a_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
 
                        gr_info->engine_id =
                                top_device_info_engine_enum_v(table_entry);
-                       nvhost_dbg_info("gr info: engine_id %d", gr_info->engine_id);
+                       gk20a_dbg_info("gr info: engine_id %d", gr_info->engine_id);
 
                        runlist_bit = 1 << gr_info->runlist_id;
 
                        for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
-                               nvhost_dbg_info("gr info: pbdma_map[%d]=%d",
+                               gk20a_dbg_info("gr info: pbdma_map[%d]=%d",
                                        pbdma_id, f->pbdma_map[pbdma_id]);
                                if (f->pbdma_map[pbdma_id] & runlist_bit)
                                        break;
                        }
 
                        if (pbdma_id == f->num_pbdma) {
-                               nvhost_err(d, "busted pbmda map");
+                               gk20a_err(d, "busted pbmda map");
                                return -EINVAL;
                        }
                        gr_info->pbdma_id = pbdma_id;
@@ -148,7 +148,7 @@ static int init_engine_info(struct fifo_gk20a *f)
        }
 
        if (gr_info->runlist_id == ~0) {
-               nvhost_err(d, "busted device info");
+               gk20a_err(d, "busted device info");
                return -EINVAL;
        }
 
@@ -164,7 +164,7 @@ void gk20a_remove_fifo_support(struct fifo_gk20a *f)
        u32 runlist_id;
        u32 i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (f->channel) {
                int c;
@@ -242,7 +242,7 @@ static void fifo_pbdma_exception_status(struct gk20a *g,
        get_exception_pbdma_info(g, eng_info);
        e = &eng_info->pbdma_exception_info;
 
-       nvhost_dbg_fn("pbdma_id %d, "
+       gk20a_dbg_fn("pbdma_id %d, "
                      "id_type %s, id %d, chan_status %d, "
                      "next_id_type %s, next_id %d, "
                      "chsw_in_progress %d",
@@ -283,7 +283,7 @@ static void fifo_engine_exception_status(struct gk20a *g,
        get_exception_engine_info(g, eng_info);
        e = &eng_info->engine_exception_info;
 
-       nvhost_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
+       gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
                      "faulted %d, idle %d, ctxsw_in_progress %d, ",
                      eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
                      e->id, e->ctx_status_v,
@@ -299,7 +299,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
        u32 i;
        u64 runlist_size;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        f->max_runlists = fifo_eng_runlist_base__size_1_v();
        f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
@@ -340,7 +340,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
            Otherwise, one of them (cur_buffer) must have been pinned. */
        runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
 clean_up_runlist:
@@ -362,7 +362,7 @@ clean_up_runlist_info:
        f->runlist_info = NULL;
 
 clean_up:
-       nvhost_dbg_fn("fail");
+       gk20a_dbg_fn("fail");
        return -ENOMEM;
 }
 
@@ -375,7 +375,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
        u32 timeout;
        int i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        /* enable pmc pfifo */
        gk20a_reset(g, mc_enable_pfifo_enabled_f()
                        | mc_enable_ce2_enabled_f());
@@ -427,7 +427,7 @@ int gk20a_init_fifo_reset_enable_hw(struct gk20a *g)
                        fifo_eng_timeout_detection_enabled_f();
        gk20a_writel(g, fifo_eng_timeout_r(), timeout);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -483,10 +483,10 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
        int chid, i, err = 0;
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (f->sw_ready) {
-               nvhost_dbg_fn("skip init");
+               gk20a_dbg_fn("skip init");
                return 0;
        }
 
@@ -533,7 +533,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
                goto clean_up;
        }
 
-       nvhost_dbg(dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
+       gk20a_dbg(gpu_dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
 
        f->userd.size = f->userd_total_size;
 
@@ -577,11 +577,11 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
        f->sw_ready = true;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
 clean_up:
-       nvhost_dbg_fn("fail");
+       gk20a_dbg_fn("fail");
        if (f->userd.gpu_va)
                gk20a_gmmu_unmap(&g->mm.bar1.vm,
                                        f->userd.gpu_va,
@@ -630,7 +630,7 @@ static int gk20a_init_fifo_setup_hw(struct gk20a *g)
 {
        struct fifo_gk20a *f = &g->fifo;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* test write, read through bar1 @ userd region before
         * turning on the snooping */
@@ -641,7 +641,7 @@ static int gk20a_init_fifo_setup_hw(struct gk20a *g)
                u32 bar1_vaddr = f->userd.gpu_va;
                volatile u32 *cpu_vaddr = f->userd.cpuva;
 
-               nvhost_dbg_info("test bar1 @ vaddr 0x%x",
+               gk20a_dbg_info("test bar1 @ vaddr 0x%x",
                           bar1_vaddr);
 
                v = gk20a_bar1_readl(g, bar1_vaddr);
@@ -650,20 +650,20 @@ static int gk20a_init_fifo_setup_hw(struct gk20a *g)
                smp_mb();
 
                if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
-                       nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+                       gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
                        return -EINVAL;
                }
 
                gk20a_bar1_writel(g, bar1_vaddr, v2);
 
                if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
-                       nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+                       gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
                        return -EINVAL;
                }
 
                /* is it visible to the cpu? */
                if (*cpu_vaddr != v2) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "cpu didn't see bar1 write @ %p!",
                                cpu_vaddr);
                }
@@ -679,7 +679,7 @@ static int gk20a_init_fifo_setup_hw(struct gk20a *g)
                        fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
                        fifo_bar1_base_valid_true_f());
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -772,7 +772,7 @@ static inline void get_exception_mmu_fault_info(
 {
        u32 fault_info_v;
 
-       nvhost_dbg_fn("engine_id %d", engine_id);
+       gk20a_dbg_fn("engine_id %d", engine_id);
 
        memset(f, 0, sizeof(*f));
 
@@ -814,7 +814,7 @@ static inline void get_exception_mmu_fault_info(
 
 static void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (engine_id == top_device_info_type_enum_graphics_v()) {
                /* resetting engine using mc_enable_r() is not enough,
@@ -853,7 +853,7 @@ static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
        u32 intr;
 
        intr = gk20a_readl(g, fifo_intr_chsw_error_r());
-       nvhost_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
+       gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
        gk20a_fecs_dump_falcon_stats(g);
        gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
 }
@@ -862,7 +862,7 @@ static void gk20a_fifo_handle_dropped_mmu_fault(struct gk20a *g)
 {
        struct device *dev = dev_from_gk20a(g);
        u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
-       nvhost_err(dev, "dropped mmu fault (0x%08x)", fault_id);
+       gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id);
 }
 
 static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
@@ -919,7 +919,7 @@ static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
        if (!ch)
                return verbose;
 
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                "channel %d generated a mmu fault",
                ch->hw_chid);
        if (ch->error_notifier) {
@@ -955,7 +955,7 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
        unsigned long engine_mmu_id;
        int i;
        bool verbose = true;
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        g->fifo.deferred_reset_pending = false;
 
@@ -996,7 +996,7 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
                                      f.engine_subid_desc,
                                      f.client_desc,
                                      f.fault_type_desc);
-               nvhost_err(dev_from_gk20a(g), "mmu fault on engine %d, "
+               gk20a_err(dev_from_gk20a(g), "mmu fault on engine %d, "
                           "engine subid %d (%s), client %d (%s), "
                           "addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
                           "inst_ptr 0x%llx\n",
@@ -1026,7 +1026,7 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
                        if (type_ch) {
                                ch = g->fifo.channel + id;
                        } else {
-                               nvhost_err(dev_from_gk20a(g), "non-chid type not supported");
+                               gk20a_err(dev_from_gk20a(g), "non-chid type not supported");
                                WARN_ON(1);
                        }
                } else {
@@ -1056,16 +1056,16 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 
                } else if (f.inst_ptr ==
                                g->mm.bar1.inst_block.cpu_pa) {
-                       nvhost_err(dev_from_gk20a(g), "mmu fault from bar1");
+                       gk20a_err(dev_from_gk20a(g), "mmu fault from bar1");
                } else if (f.inst_ptr ==
                                g->mm.pmu.inst_block.cpu_pa) {
-                       nvhost_err(dev_from_gk20a(g), "mmu fault from pmu");
+                       gk20a_err(dev_from_gk20a(g), "mmu fault from pmu");
                } else
-                       nvhost_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
+                       gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
        }
 
        if (g->fifo.deferred_reset_pending) {
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "sm debugger attached,"
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached,"
                           " deferring channel recovery to channel free");
                /* clear interrupt */
                gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
@@ -1158,7 +1158,7 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
                        !tegra_platform_is_silicon());
 
        if (ret)
-               nvhost_err(dev_from_gk20a(g), "mmu fault timeout");
+               gk20a_err(dev_from_gk20a(g), "mmu fault timeout");
 
        /* release mmu fault trigger */
        for_each_set_bit(engine_id, &engine_ids, 32)
@@ -1224,13 +1224,13 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
                        GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
                        gk20a_set_error_notifier(ch,
                                NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fifo sched ctxsw timeout error:"
                                "engine = %u, ch = %d", engine_id, id);
                        gk20a_fifo_recover(g, BIT(engine_id),
                                ch->timeout_debug_dump);
                } else {
-                       nvhost_warn(dev_from_gk20a(g),
+                       gk20a_warn(dev_from_gk20a(g),
                                "fifo is waiting for ctx switch for %d ms,"
                                "ch = %d\n",
                                ch->timeout_accumulated_ms,
@@ -1239,7 +1239,7 @@ static bool gk20a_fifo_handle_sched_error(struct gk20a *g)
                return ch->timeout_debug_dump;
        }
 err:
-       nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
+       gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
                   sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
 
        return true;
@@ -1251,18 +1251,18 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
        struct device *dev = dev_from_gk20a(g);
        u32 handled = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
                /* pio mode is unused.  this shouldn't happen, ever. */
                /* should we clear it or just leave it pending? */
-               nvhost_err(dev, "fifo pio error!\n");
+               gk20a_err(dev, "fifo pio error!\n");
                BUG_ON(1);
        }
 
        if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
                u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
-               nvhost_err(dev, "fifo bind error: 0x%08x", bind_error);
+               gk20a_err(dev, "fifo bind error: 0x%08x", bind_error);
                print_channel_reset_log = true;
                handled |= fifo_intr_0_bind_error_pending_f();
        }
@@ -1293,12 +1293,12 @@ static u32 fifo_error_isr(struct gk20a *g, u32 fifo_intr)
 
        if (print_channel_reset_log) {
                int engine_id;
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "channel reset initated from %s", __func__);
                for (engine_id = 0;
                     engine_id < g->fifo.max_engines;
                     engine_id++) {
-                       nvhost_dbg_fn("enum:%d -> engine_id:%d", engine_id,
+                       gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_id,
                                g->fifo.engine_info[engine_id].engine_id);
                        fifo_pbdma_exception_status(g,
                                        &g->fifo.engine_info[engine_id]);
@@ -1322,9 +1322,9 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct device *dev,
        bool reset_device = false;
        bool reset_channel = false;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
-       nvhost_dbg(dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
+       gk20a_dbg(gpu_dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
                        pbdma_intr_0, pbdma_intr_1);
        if (pbdma_intr_0) {
                if (f->intr.pbdma.device_fatal_0 & pbdma_intr_0) {
@@ -1383,7 +1383,7 @@ static u32 fifo_pbdma_isr(struct gk20a *g, u32 fifo_intr)
 
        for (i = 0; i < fifo_intr_pbdma_id_status__size_1_v(); i++) {
                if (fifo_intr_pbdma_id_status_f(pbdma_pending, i)) {
-                       nvhost_dbg(dbg_intr, "pbdma id %d intr pending", i);
+                       gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
                        clear_intr |=
                                gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
                }
@@ -1410,7 +1410,7 @@ void gk20a_fifo_isr(struct gk20a *g)
         * in a threaded interrupt context... */
        mutex_lock(&g->fifo.intr.isr.mutex);
 
-       nvhost_dbg(dbg_intr, "fifo isr %08x\n", fifo_intr);
+       gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
 
        /* handle runlist update */
        if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
@@ -1435,7 +1435,7 @@ void gk20a_fifo_nonstall_isr(struct gk20a *g)
        u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
        u32 clear_intr = 0;
 
-       nvhost_dbg(dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
+       gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
 
        if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
                clear_intr |= fifo_channel_isr(g, fifo_intr);
@@ -1456,7 +1456,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
        u32 elpg_off = 0;
        u32 i;
 
-       nvhost_dbg_fn("%d", hw_chid);
+       gk20a_dbg_fn("%d", hw_chid);
 
        /* we have no idea which runlist we are using. lock all */
        for (i = 0; i < g->fifo.max_runlists; i++)
@@ -1492,7 +1492,7 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 hw_chid)
                struct fifo_gk20a *f = &g->fifo;
                struct channel_gk20a *ch = &f->channel[hw_chid];
 
-               nvhost_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
+               gk20a_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
                            hw_chid);
 
                /* forcefully reset all busy engines using this channel */
@@ -1536,7 +1536,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g,
        u32 elpg_off;
        u32 enable;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* disable elpg if failed to acquire pmu mutex */
        elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
@@ -1553,7 +1553,7 @@ int gk20a_fifo_enable_engine_activity(struct gk20a *g,
        else
                pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -1567,7 +1567,7 @@ int gk20a_fifo_disable_engine_activity(struct gk20a *g,
        u32 elpg_off;
        u32 err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gr_stat =
                gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
@@ -1627,12 +1627,12 @@ clean_up:
                pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
        if (err) {
-               nvhost_dbg_fn("failed");
+               gk20a_dbg_fn("failed");
                if (gk20a_fifo_enable_engine_activity(g, eng_info))
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "failed to enable gr engine activity\n");
        } else {
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
        }
        return err;
 }
@@ -1706,7 +1706,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
        old_buf = runlist->cur_buffer;
        new_buf = !runlist->cur_buffer;
 
-       nvhost_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
+       gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
                runlist_id, runlist->mem[new_buf].iova);
 
        runlist_pa = gk20a_get_phys_from_iova(d, runlist->mem[new_buf].iova);
@@ -1726,7 +1726,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                runlist_entry = runlist_entry_base;
                for_each_set_bit(chid,
                        runlist->active_channels, f->num_channels) {
-                       nvhost_dbg_info("add channel %d to runlist", chid);
+                       gk20a_dbg_info("add channel %d to runlist", chid);
                        runlist_entry[0] = chid;
                        runlist_entry[1] = 0;
                        runlist_entry += 2;
@@ -1749,7 +1749,7 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
 
                if (ret == -ETIMEDOUT) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "runlist update timeout");
 
                        gk20a_fifo_runlist_reset_engines(g, runlist_id);
@@ -1763,10 +1763,10 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
                        mutex_lock(&runlist->mutex);
 
                        if (ret)
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                           "runlist update failed: %d", ret);
                } else if (ret == -EINTR)
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "runlist update interrupted");
        }
 
@@ -1813,7 +1813,7 @@ int gk20a_fifo_update_runlist(struct gk20a *g, u32 runlist_id, u32 hw_chid,
 
 int gk20a_fifo_suspend(struct gk20a *g)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* stop bar1 snooping */
        gk20a_writel(g, fifo_bar1_base_r(),
@@ -1823,7 +1823,7 @@ int gk20a_fifo_suspend(struct gk20a *g)
        gk20a_writel(g, fifo_intr_en_0_r(), 0);
        gk20a_writel(g, fifo_intr_en_1_r(), 0);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
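
gk20a_init_fifo_setup_hw() above sanity-checks the userd mapping by writing through BAR1, reading back through BAR1, and then checking that the CPU mapping of the same memory observes the write before snooping is enabled. The sketch below shows that write/read-back pattern in isolation; a plain array stands in for the BAR1 aperture and for the driver's gk20a_bar1_readl()/gk20a_bar1_writel() accessors.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_bar1[16];

static uint32_t bar1_readl(uint32_t off)              { return fake_bar1[off / 4]; }
static void     bar1_writel(uint32_t off, uint32_t v) { fake_bar1[off / 4] = v; }

int main(void)
{
	volatile uint32_t *cpu_vaddr = &fake_bar1[0]; /* CPU view of the same memory */
	uint32_t off = 0;
	uint32_t v  = bar1_readl(off);   /* remember the original value */
	uint32_t v1 = 0x33333333;
	uint32_t v2 = 0x55555555;

	bar1_writel(off, v1);
	if (v1 != bar1_readl(off)) {
		fprintf(stderr, "bar1 broken!\n");
		return 1;
	}

	bar1_writel(off, v2);
	if (v2 != bar1_readl(off) || *cpu_vaddr != v2) {
		fprintf(stderr, "cpu didn't see bar1 write\n");
		return 1;
	}

	bar1_writel(off, v);             /* restore the original value */
	printf("bar1 loopback ok\n");
	return 0;
}
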
 
index 9d86824fdc0499a7a14ab653a22abd2d04814dd3..d00cc6e70b657382ca7621fbbfa02f6fbafd67a7 100644 (file)
@@ -531,23 +531,23 @@ static void gk20a_pbus_isr(struct gk20a *g)
        if (val & (bus_intr_0_pri_squash_m() |
                        bus_intr_0_pri_fecserr_m() |
                        bus_intr_0_pri_timeout_m())) {
-               nvhost_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
                        gk20a_readl(g, top_fs_status_r()));
-               nvhost_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
                        gk20a_readl(g, mc_enable_r()));
-               nvhost_err(&g->dev->dev,
+               gk20a_err(&g->dev->dev,
                        "NV_PTIMER_PRI_TIMEOUT_SAVE_0: 0x%x\n",
                        gk20a_readl(g, timer_pri_timeout_save_0_r()));
-               nvhost_err(&g->dev->dev,
+               gk20a_err(&g->dev->dev,
                        "NV_PTIMER_PRI_TIMEOUT_SAVE_1: 0x%x\n",
                        gk20a_readl(g, timer_pri_timeout_save_1_r()));
-               nvhost_err(&g->dev->dev,
+               gk20a_err(&g->dev->dev,
                        "NV_PTIMER_PRI_TIMEOUT_FECS_ERRCODE: 0x%x\n",
                        gk20a_readl(g, timer_pri_timeout_fecs_errcode_r()));
        }
 
        if (val)
-               nvhost_err(&g->dev->dev,
+               gk20a_err(&g->dev->dev,
                        "Unhandled pending pbus interrupt\n");
 
        gk20a_writel(g, bus_intr_0_r(), val);
@@ -558,11 +558,11 @@ static irqreturn_t gk20a_intr_thread_stall(int irq, void *dev_id)
        struct gk20a *g = dev_id;
        u32 mc_intr_0;
 
-       nvhost_dbg(dbg_intr, "interrupt thread launched");
+       gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
 
        mc_intr_0 = gk20a_readl(g, mc_intr_0_r());
 
-       nvhost_dbg(dbg_intr, "stall intr %08x\n", mc_intr_0);
+       gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
 
        if (mc_intr_0 & mc_intr_0_pgraph_pending_f())
                gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
@@ -591,11 +591,11 @@ static irqreturn_t gk20a_intr_thread_nonstall(int irq, void *dev_id)
        struct gk20a *g = dev_id;
        u32 mc_intr_1;
 
-       nvhost_dbg(dbg_intr, "interrupt thread launched");
+       gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
 
        mc_intr_1 = gk20a_readl(g, mc_intr_1_r());
 
-       nvhost_dbg(dbg_intr, "non-stall intr %08x\n", mc_intr_1);
+       gk20a_dbg(gpu_dbg_intr, "non-stall intr %08x\n", mc_intr_1);
 
        if (mc_intr_1 & mc_intr_0_pfifo_pending_f())
                gk20a_fifo_nonstall_isr(g);
@@ -703,7 +703,7 @@ static int gk20a_init_client(struct platform_device *dev)
        struct gk20a *g = get_gk20a(dev);
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
 #ifndef CONFIG_PM_RUNTIME
        gk20a_pm_finalize_poweron(&dev->dev);
@@ -720,7 +720,7 @@ static int gk20a_init_client(struct platform_device *dev)
 
 static void gk20a_deinit_client(struct platform_device *dev)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 #ifndef CONFIG_PM_RUNTIME
        gk20a_pm_prepare_poweroff(&dev->dev);
 #endif
@@ -755,7 +755,7 @@ static int gk20a_pm_prepare_poweroff(struct device *_dev)
        struct gk20a *g = get_gk20a(dev);
        int ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!g->power_on)
                return 0;
@@ -797,7 +797,7 @@ static void gk20a_detect_chip(struct gk20a *g)
                (mc_boot_0_major_revision_v(mc_boot_0_value) << 4) |
                mc_boot_0_minor_revision_v(mc_boot_0_value);
 
-       nvhost_dbg_info("arch: %x, impl: %x, rev: %x\n",
+       gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
                        g->gpu_characteristics.arch,
                        g->gpu_characteristics.impl,
                        g->gpu_characteristics.rev);
@@ -809,7 +809,7 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
        struct gk20a *g = get_gk20a(dev);
        int err, nice_value;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (g->power_on)
                return 0;
@@ -880,7 +880,7 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
           saving features (blcg/slcg) are enabled. For now, do it here. */
        err = gk20a_init_clk_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a clk");
+               gk20a_err(&dev->dev, "failed to init gk20a clk");
                goto done;
        }
 
@@ -899,49 +899,49 @@ static int gk20a_pm_finalize_poweron(struct device *_dev)
 
        err = gk20a_init_fifo_reset_enable_hw(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to reset gk20a fifo");
+               gk20a_err(&dev->dev, "failed to reset gk20a fifo");
                goto done;
        }
 
        err = gk20a_init_mm_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a mm");
+               gk20a_err(&dev->dev, "failed to init gk20a mm");
                goto done;
        }
 
        err = gk20a_init_pmu_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a pmu");
+               gk20a_err(&dev->dev, "failed to init gk20a pmu");
                goto done;
        }
 
        err = gk20a_init_fifo_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a fifo");
+               gk20a_err(&dev->dev, "failed to init gk20a fifo");
                goto done;
        }
 
        err = gk20a_init_gr_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a gr");
+               gk20a_err(&dev->dev, "failed to init gk20a gr");
                goto done;
        }
 
        err = gk20a_init_pmu_setup_hw2(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a pmu_hw2");
+               gk20a_err(&dev->dev, "failed to init gk20a pmu_hw2");
                goto done;
        }
 
        err = gk20a_init_therm_support(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a therm");
+               gk20a_err(&dev->dev, "failed to init gk20a therm");
                goto done;
        }
 
        err = gk20a_init_gpu_characteristics(g);
        if (err) {
-               nvhost_err(&dev->dev, "failed to init gk20a gpu characteristics");
+               gk20a_err(&dev->dev, "failed to init gk20a gpu characteristics");
                goto done;
        }
 
@@ -1018,7 +1018,7 @@ static int gk20a_create_device(
        int err;
        struct gk20a *g = get_gk20a(pdev);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        cdev_init(cdev, ops);
        cdev->owner = THIS_MODULE;
@@ -1340,7 +1340,7 @@ static int gk20a_probe(struct platform_device *dev)
                return -ENODATA;
        }
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        platform_set_drvdata(dev, platform);
 
@@ -1450,7 +1450,7 @@ static int gk20a_probe(struct platform_device *dev)
 static int __exit gk20a_remove(struct platform_device *dev)
 {
        struct gk20a *g = get_gk20a(dev);
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
 #ifdef CONFIG_INPUT_CFBOOST
        cfb_remove_device(&dev->dev);
@@ -1564,7 +1564,7 @@ void gk20a_disable(struct gk20a *g, u32 units)
 {
        u32 pmc;
 
-       nvhost_dbg(dbg_info, "pmc disable: %08x\n", units);
+       gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units);
 
        spin_lock(&g->mc_enable_lock);
        pmc = gk20a_readl(g, mc_enable_r());
@@ -1577,7 +1577,7 @@ void gk20a_enable(struct gk20a *g, u32 units)
 {
        u32 pmc;
 
-       nvhost_dbg(dbg_info, "pmc enable: %08x\n", units);
+       gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units);
 
        spin_lock(&g->mc_enable_lock);
        pmc = gk20a_readl(g, mc_enable_r());
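
The gk20a_disable()/gk20a_enable() hunks above set and clear unit bits in mc_enable with a read-modify-write performed under mc_enable_lock. A stand-alone sketch of that locked read-modify-write pattern follows; a pthread mutex and a plain variable stand in for the driver's spinlock and register, and the unit bit is made up for the demo.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mc_enable = 0xffffffffu;
static pthread_mutex_t mc_enable_lock = PTHREAD_MUTEX_INITIALIZER;

static void pmc_disable(uint32_t units)
{
	pthread_mutex_lock(&mc_enable_lock);
	fake_mc_enable &= ~units;        /* read-modify-write: clear unit bits */
	pthread_mutex_unlock(&mc_enable_lock);
}

static void pmc_enable(uint32_t units)
{
	pthread_mutex_lock(&mc_enable_lock);
	fake_mc_enable |= units;         /* read-modify-write: set unit bits   */
	pthread_mutex_unlock(&mc_enable_lock);
}

int main(void)
{
	uint32_t pgraph = 1u << 12;      /* hypothetical unit bit */

	pmc_disable(pgraph);
	printf("after disable: %08x\n", fake_mc_enable);
	pmc_enable(pgraph);
	printf("after enable : %08x\n", fake_mc_enable);
	return 0;
}
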
index bdf66192970a18ffaed0b051994eaa0ddcc6825c..c7e16492c224ebd371272a0651f0f25aa054f662 100644 (file)
@@ -375,26 +375,26 @@ static inline void gk20a_mem_wr32(void *ptr, int w, u32 data)
 /* register accessors */
 static inline void gk20a_writel(struct gk20a *g, u32 r, u32 v)
 {
-       nvhost_dbg(dbg_reg, " r=0x%x v=0x%x", r, v);
+       gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
        writel(v, g->regs + r);
 }
 static inline u32 gk20a_readl(struct gk20a *g, u32 r)
 {
        u32 v = readl(g->regs + r);
-       nvhost_dbg(dbg_reg, " r=0x%x v=0x%x", r, v);
+       gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
        return v;
 }
 
 static inline void gk20a_bar1_writel(struct gk20a *g, u32 b, u32 v)
 {
-       nvhost_dbg(dbg_reg, " b=0x%x v=0x%x", b, v);
+       gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
        writel(v, g->bar1 + b);
 }
 
 static inline u32 gk20a_bar1_readl(struct gk20a *g, u32 b)
 {
        u32 v = readl(g->bar1 + b);
-       nvhost_dbg(dbg_reg, " b=0x%x v=0x%x", b, v);
+       gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
        return v;
 }
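
The accessors above trace every register and BAR1 access under gpu_dbg_reg; the read side captures the value into a local first so the same value can be both logged and returned. A small stand-alone sketch of that traced-accessor shape, with an array in place of the MMIO aperture and printf in place of gk20a_dbg():

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_aperture[64];   /* stands in for g->regs / g->bar1 */

static void writel_traced(uint32_t r, uint32_t v)
{
	printf(" r=0x%x v=0x%x (write)\n", r, v);
	fake_aperture[r / 4] = v;
}

static uint32_t readl_traced(uint32_t r)
{
	uint32_t v = fake_aperture[r / 4];   /* read once, then log and return */
	printf(" r=0x%x v=0x%x (read)\n", r, v);
	return v;
}

int main(void)
{
	writel_traced(0x10, 0xcafef00d);
	return readl_traced(0x10) == 0xcafef00d ? 0 : 1;
}
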
 
index 00d25bfb57a18028183beab5a4a8206c67901a5b..525dc28f21a363f4023d7d872616d52838127e56 100644 (file)
@@ -108,7 +108,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
        u32 i, major_v = ~0, major_v_hw, netlist_num;
        int net, max, err = -ENOENT;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
 #ifdef GK20A_NETLIST_IMAGE_FW_NAME
        net = NETLIST_FINAL;
@@ -125,13 +125,13 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
        for (; net < max; net++) {
 
                if (gr_gk20a_get_netlist_name(net, name) != 0) {
-                       nvhost_warn(d, "invalid netlist index %d", net);
+                       gk20a_warn(d, "invalid netlist index %d", net);
                        continue;
                }
 
                netlist_fw = gk20a_request_firmware(g, name);
                if (!netlist_fw) {
-                       nvhost_warn(d, "failed to load netlist %s", name);
+                       gk20a_warn(d, "failed to load netlist %s", name);
                        continue;
                }
 
@@ -143,112 +143,112 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
 
                        switch (netlist->regions[i].region_id) {
                        case NETLIST_REGIONID_FECS_UCODE_DATA:
-                               nvhost_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
+                               gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
                                err = gr_gk20a_alloc_load_netlist_u32(
                                        src, size, &g->gr.ctx_vars.ucode.fecs.data);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_FECS_UCODE_INST:
-                               nvhost_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
+                               gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
                                err = gr_gk20a_alloc_load_netlist_u32(
                                        src, size, &g->gr.ctx_vars.ucode.fecs.inst);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_GPCCS_UCODE_DATA:
-                               nvhost_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
+                               gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
                                err = gr_gk20a_alloc_load_netlist_u32(
                                        src, size, &g->gr.ctx_vars.ucode.gpccs.data);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_GPCCS_UCODE_INST:
-                               nvhost_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
+                               gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
                                err = gr_gk20a_alloc_load_netlist_u32(
                                        src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_SW_BUNDLE_INIT:
-                               nvhost_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
+                               gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
                                err = gr_gk20a_alloc_load_netlist_av(
                                        src, size, &g->gr.ctx_vars.sw_bundle_init);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_SW_METHOD_INIT:
-                               nvhost_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
+                               gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
                                err = gr_gk20a_alloc_load_netlist_av(
                                        src, size, &g->gr.ctx_vars.sw_method_init);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_SW_CTX_LOAD:
-                               nvhost_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
+                               gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.sw_ctx_load);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_SW_NON_CTX_LOAD:
-                               nvhost_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
+                               gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
                                err = gr_gk20a_alloc_load_netlist_av(
                                        src, size, &g->gr.ctx_vars.sw_non_ctx_load);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_SYS:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_GPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_TPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_PPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_PM_SYS:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_PM_GPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
                                if (err)
                                        goto clean_up;
                                break;
                        case NETLIST_REGIONID_CTXREG_PM_TPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
                                err = gr_gk20a_alloc_load_netlist_aiv(
                                        src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
                                if (err)
@@ -256,35 +256,35 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                                break;
                        case NETLIST_REGIONID_BUFFER_SIZE:
                                g->gr.ctx_vars.buffer_size = *src;
-                               nvhost_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d",
+                               gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d",
                                        g->gr.ctx_vars.buffer_size);
                                break;
                        case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX:
                                g->gr.ctx_vars.regs_base_index = *src;
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %d",
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %d",
                                        g->gr.ctx_vars.regs_base_index);
                                break;
                        case NETLIST_REGIONID_MAJORV:
                                major_v = *src;
-                               nvhost_dbg_info("NETLIST_REGIONID_MAJORV : %d",
+                               gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d",
                                        major_v);
                                break;
                        case NETLIST_REGIONID_NETLIST_NUM:
                                netlist_num = *src;
-                               nvhost_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d",
+                               gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d",
                                        netlist_num);
                                break;
                        case NETLIST_REGIONID_CTXREG_PMPPC:
-                               nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC skipped");
+                               gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC skipped");
                                break;
                        default:
-                               nvhost_warn(d, "unrecognized region %d skipped", i);
+                               gk20a_warn(d, "unrecognized region %d skipped", i);
                                break;
                        }
                }
 
                if (net != NETLIST_FINAL && major_v != major_v_hw) {
-                       nvhost_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x",
+                       gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x",
                                name, major_v, major_v_hw);
                        goto clean_up;
                }
@@ -293,7 +293,7 @@ static int gr_gk20a_init_ctx_vars_fw(struct gk20a *g, struct gr_gk20a *gr)
                g->gr.netlist = net;
 
                release_firmware(netlist_fw);
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
                goto done;
 
 clean_up:
@@ -319,10 +319,10 @@ clean_up:
 
 done:
        if (g->gr.ctx_vars.valid) {
-               nvhost_dbg_info("netlist image %s loaded", name);
+               gk20a_dbg_info("netlist image %s loaded", name);
                return 0;
        } else {
-               nvhost_err(d, "failed to load netlist image!!");
+               gk20a_err(d, "failed to load netlist image!!");
                return err;
        }
 }
index fbb3a922b544b480c6179a16e1602b4e58598948..d4393439a5d6f51d348a617cc8facb5c29ad7eea 100644 (file)
@@ -32,7 +32,7 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
        char *reg_path   = NULL;
        char *value_path = NULL;
 
-       nvhost_dbg(dbg_fn | dbg_info,
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_info,
                   "querying grctx info from chiplib");
 
        g->gr.ctx_vars.dynamic = true;
@@ -247,10 +247,10 @@ int gr_gk20a_init_ctx_vars_sim(struct gk20a *g, struct gr_gk20a *gr)
        gk20a_sim_esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0,
                            &g->gr.ctx_vars.regs_base_index);
 
-       nvhost_dbg(dbg_info | dbg_fn, "finished querying grctx info from chiplib");
+       gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
        return 0;
 fail:
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                   "failed querying grctx info from chiplib");
        return err;
 
index 3999489b6b173e4a68777b6e55ecbb562fe035d4..68ddad01c86ff798c199b7573bc37c3443083afb 100644 (file)
@@ -87,81 +87,81 @@ void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
        int i;
 
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
                gk20a_readl(g, gr_fecs_os_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x",
                gk20a_readl(g, gr_fecs_cpuctl_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x",
                gk20a_readl(g, gr_fecs_idlestate_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x",
                gk20a_readl(g, gr_fecs_mailbox0_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x",
                gk20a_readl(g, gr_fecs_mailbox1_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x",
                gk20a_readl(g, gr_fecs_irqstat_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x",
                gk20a_readl(g, gr_fecs_irqmode_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x",
                gk20a_readl(g, gr_fecs_irqmask_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x",
                gk20a_readl(g, gr_fecs_irqdest_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x",
                gk20a_readl(g, gr_fecs_debug1_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x",
                gk20a_readl(g, gr_fecs_debuginfo_r()));
 
        for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++)
-               nvhost_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
                        i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
 
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x",
                gk20a_readl(g, gr_fecs_engctl_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x",
                gk20a_readl(g, gr_fecs_curctx_r()));
-       nvhost_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x",
                gk20a_readl(g, gr_fecs_nxtctx_r()));
 
        gk20a_writel(g, gr_fecs_icd_cmd_r(),
                gr_fecs_icd_cmd_opc_rreg_f() |
                gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
-       nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x",
                gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
        gk20a_writel(g, gr_fecs_icd_cmd_r(),
                gr_fecs_icd_cmd_opc_rreg_f() |
                gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
-       nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x",
                gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
        gk20a_writel(g, gr_fecs_icd_cmd_r(),
                gr_fecs_icd_cmd_opc_rreg_f() |
                gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
-       nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x",
                gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
        gk20a_writel(g, gr_fecs_icd_cmd_r(),
                gr_fecs_icd_cmd_opc_rreg_f() |
                gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
-       nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x",
                gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
        gk20a_writel(g, gr_fecs_icd_cmd_r(),
                gr_fecs_icd_cmd_opc_rreg_f() |
                gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
-       nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x",
                gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
        for (i = 0; i < 4; i++) {
                gk20a_writel(g, gr_fecs_icd_cmd_r(),
                        gr_fecs_icd_cmd_opc_rreg_f() |
                        gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC));
-               nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x",
                        gk20a_readl(g, gr_fecs_icd_rdata_r()));
 
                gk20a_writel(g, gr_fecs_icd_cmd_r(),
                        gr_fecs_icd_cmd_opc_rreg_f() |
                        gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP));
-               nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x",
                        gk20a_readl(g, gr_fecs_icd_rdata_r()));
        }
 }
@@ -172,7 +172,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
        const u32 *ucode_u32_data;
        u32 checksum;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) |
                                              gr_gpccs_dmemc_blk_f(0)  |
@@ -197,7 +197,7 @@ static void gr_gk20a_load_falcon_dmem(struct gk20a *g)
                gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]);
                checksum += ucode_u32_data[i];
        }
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 static void gr_gk20a_load_falcon_imem(struct gk20a *g)
@@ -207,7 +207,7 @@ static void gr_gk20a_load_falcon_imem(struct gk20a *g)
        u32 tag, i, pad_start, pad_end;
        u32 checksum;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        cfg = gk20a_readl(g, gr_fecs_cfg_r());
        fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg);
@@ -291,7 +291,7 @@ static int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
        bool ctxsw_active;
        bool gr_busy;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        do {
                /* fmodel: host gets fifo_engine_status(gr) from gr
@@ -309,7 +309,7 @@ static int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
                        gr_engine_status_value_busy_f();
 
                if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
-                       nvhost_dbg_fn("done");
+                       gk20a_dbg_fn("done");
                        return 0;
                }
 
@@ -319,7 +319,7 @@ static int gr_gk20a_wait_idle(struct gk20a *g, unsigned long end_jiffies,
        } while (time_before(jiffies, end_jiffies)
                        || !tegra_platform_is_silicon());
 
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                "timeout, ctxsw busy : %d, gr busy : %d",
                ctxsw_active, gr_busy);
 
@@ -333,7 +333,7 @@ static int gr_gk20a_ctx_reset(struct gk20a *g, u32 rst_mask)
                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
        u32 reg;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!tegra_platform_is_linsim()) {
                /* Force clocks on */
@@ -355,7 +355,7 @@ static int gr_gk20a_ctx_reset(struct gk20a *g, u32 rst_mask)
                } while (time_before(jiffies, end_jiffies));
 
                if (!time_before(jiffies, end_jiffies)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "failed to force the clocks on\n");
                        WARN_ON(1);
                }
@@ -418,7 +418,7 @@ static int gr_gk20a_ctx_reset(struct gk20a *g, u32 rst_mask)
                } while (time_before(jiffies, end_jiffies));
 
                if (!time_before(jiffies, end_jiffies))
-                       nvhost_warn(dev_from_gk20a(g),
+                       gk20a_warn(dev_from_gk20a(g),
                                   "failed to set power mode to auto\n");
        }
 
@@ -436,7 +436,7 @@ static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
        u32 check = WAIT_UCODE_LOOP;
        u32 reg;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        while (check == WAIT_UCODE_LOOP) {
                if (!time_before(jiffies, end_jiffies) &&
@@ -473,7 +473,7 @@ static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
                        /* do no success check */
                        break;
                default:
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "invalid success opcode 0x%x", opc_success);
 
                        check = WAIT_UCODE_ERROR;
@@ -505,7 +505,7 @@ static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
                        /* do no check on fail*/
                        break;
                default:
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "invalid fail opcode 0x%x", opc_fail);
                        check = WAIT_UCODE_ERROR;
                        break;
@@ -516,19 +516,19 @@ static int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
        }
 
        if (check == WAIT_UCODE_TIMEOUT) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "timeout waiting on ucode response");
                gk20a_fecs_dump_falcon_stats(g);
                return -1;
        } else if (check == WAIT_UCODE_ERROR) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "ucode method failed on mailbox=%d value=0x%08x",
                           mailbox_id, reg);
                gk20a_fecs_dump_falcon_stats(g);
                return -1;
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -608,14 +608,14 @@ int gr_gk20a_ctrl_ctxsw(struct gk20a *g, u32 fecs_method, u32 *ret)
  * are sent to the ucode in sequence, it can get into an undefined state. */
 int gr_gk20a_disable_ctxsw(struct gk20a *g)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
        return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_stop_ctxsw_v(), 0);
 }
 
 /* Start processing (continue) context switches at FECS */
 int gr_gk20a_enable_ctxsw(struct gk20a *g)
 {
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
        return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_start_ctxsw_v(), 0);
 }
 
@@ -626,7 +626,7 @@ static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
        u32 addr_hi;
        void *inst_ptr = NULL;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* flush gpu_va before commit */
        gk20a_mm_fb_flush(c->g);
@@ -639,11 +639,11 @@ static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va)
        addr_lo = u64_lo32(gpu_va) >> 12;
        addr_hi = u64_hi32(gpu_va);
 
-       mem_wr32(inst_ptr, ram_in_gr_wfi_target_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_gr_wfi_target_w(),
                 ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() |
                 ram_in_gr_wfi_ptr_lo_f(addr_lo));
 
-       mem_wr32(inst_ptr, ram_in_gr_wfi_ptr_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_gr_wfi_ptr_hi_w(),
                 ram_in_gr_wfi_ptr_hi_f(addr_hi));
 
        gk20a_mm_l2_invalidate(c->g);
@@ -665,7 +665,7 @@ int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
 {
        /* being defensive still... */
        if (ch_ctx->patch_ctx.cpu_va) {
-               nvhost_err(dev_from_gk20a(g), "nested ctx patch begin?");
+               gk20a_err(dev_from_gk20a(g), "nested ctx patch begin?");
                return -EBUSY;
        }
 
@@ -684,7 +684,7 @@ int gr_gk20a_ctx_patch_write_end(struct gk20a *g,
 {
        /* being defensive still... */
        if (!ch_ctx->patch_ctx.cpu_va) {
-               nvhost_err(dev_from_gk20a(g), "dangling ctx patch end?");
+               gk20a_err(dev_from_gk20a(g), "dangling ctx patch end?");
                return -EINVAL;
        }
 
@@ -713,7 +713,7 @@ int gr_gk20a_ctx_patch_write(struct gk20a *g,
                 * but be defensive still... */
                if (!ch_ctx->patch_ctx.cpu_va) {
                        int err;
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "per-write ctx patch begin?");
                        /* yes, gr_gk20a_ctx_patch_smpc causes this one */
                        err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
@@ -726,8 +726,8 @@ int gr_gk20a_ctx_patch_write(struct gk20a *g,
                patch_ptr = ch_ctx->patch_ctx.cpu_va;
                patch_slot = ch_ctx->patch_ctx.data_count * 2;
 
-               mem_wr32(patch_ptr, patch_slot++, addr);
-               mem_wr32(patch_ptr, patch_slot++, data);
+               gk20a_mem_wr32(patch_ptr, patch_slot++, addr);
+               gk20a_mem_wr32(patch_ptr, patch_slot++, data);
 
                ch_ctx->patch_ctx.data_count++;
 
@@ -747,7 +747,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
                                     >> ram_in_base_shift_v());
        u32 ret;
 
-       nvhost_dbg_info("bind channel %d inst ptr 0x%08x",
+       gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
                   c->hw_chid, inst_base_ptr);
 
        ret = gr_gk20a_submit_fecs_method_op(g,
@@ -764,7 +764,7 @@ static int gr_gk20a_fecs_ctx_bind_channel(struct gk20a *g,
                     .cond.ok = GR_IS_UCODE_OP_AND,
                     .cond.fail = GR_IS_UCODE_OP_AND});
        if (ret)
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "bind channel instance failed");
 
        return ret;
@@ -780,7 +780,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
        int ret = 0;
        void *ctx_ptr = NULL;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ctx_ptr = vmap(ch_ctx->gr_ctx.pages,
                        PAGE_ALIGN(ch_ctx->gr_ctx.size) >> PAGE_SHIFT,
@@ -802,7 +802,7 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
        if (disable_fifo) {
                ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
                if (ret) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "failed to disable gr engine activity\n");
                        goto clean_up;
                }
@@ -813,15 +813,15 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
        gk20a_mm_fb_flush(g);
        gk20a_mm_l2_flush(g, true);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_o(), 0,
                 ch_ctx->zcull_ctx.ctx_sw_mode);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, va);
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, va);
 
        if (disable_fifo) {
                ret = gk20a_fifo_enable_engine_activity(g, gr_info);
                if (ret) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "failed to enable gr engine activity\n");
                        goto clean_up;
                }
@@ -846,7 +846,7 @@ static int gr_gk20a_commit_global_cb_manager(struct gk20a *g,
        u32 temp;
        u32 cbm_cfg_size1, cbm_cfg_size2;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (patch) {
                int err;
@@ -916,7 +916,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
        u64 addr;
        u32 size;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        if (patch) {
                int err;
                err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
@@ -936,7 +936,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
        if (size == gr_scc_pagepool_total_pages_hwmax_value_v())
                size = gr_scc_pagepool_total_pages_hwmax_v();
 
-       nvhost_dbg_info("pagepool buffer addr : 0x%016llx, size : %d",
+       gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d",
                addr, size);
 
        g->ops.gr.commit_global_pagepool(g, ch_ctx, addr, size, patch);
@@ -949,7 +949,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 
        size = gr->bundle_cb_default_size;
 
-       nvhost_dbg_info("bundle cb addr : 0x%016llx, size : %d",
+       gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d",
                addr, size);
 
        g->ops.gr.commit_global_bundle_cb(g, ch_ctx, addr, size, patch);
@@ -960,7 +960,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
                (u64_hi32(ch_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) <<
                 (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
 
-       nvhost_dbg_info("attrib cb addr : 0x%016llx", addr);
+       gk20a_dbg_info("attrib cb addr : 0x%016llx", addr);
        g->ops.gr.commit_global_attrib_cb(g, ch_ctx, addr, patch);
 
        if (patch)
@@ -1009,7 +1009,7 @@ static void gr_gk20a_commit_global_bundle_cb(struct gk20a *g,
 
        data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
 
-       nvhost_dbg_info("bundle cb token limit : %d, state limit : %d",
+       gk20a_dbg_info("bundle cb token limit : %d, state limit : %d",
                   g->gr.bundle_cb_token_limit, data);
 
        gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
@@ -1029,7 +1029,7 @@ static int gr_gk20a_commit_global_timeslice(struct gk20a *g, struct channel_gk20
        u32 pe_vaf;
        u32 pe_vsc_vpc;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r());
        pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
@@ -1088,7 +1088,7 @@ int gr_gk20a_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr)
        if (!gr->map_tiles)
                return -1;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_writel(g, gr_crstr_map_table_cfg_r(),
                     gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
@@ -1261,7 +1261,7 @@ static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
        u32 map_beta[gr_pd_alpha_ratio_table__size_1_v()];
        u32 map_reg_used[gr_pd_alpha_ratio_table__size_1_v()];
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        memset(map_alpha, 0, gr_pd_alpha_ratio_table__size_1_v() * sizeof(u32));
        memset(map_beta, 0, gr_pd_alpha_ratio_table__size_1_v() * sizeof(u32));
@@ -1345,7 +1345,7 @@ static int gr_gk20a_ctx_state_floorsweep(struct gk20a *g)
        u32 max_ways_evict = INVALID_MAX_WAYS;
        u32 l1c_dbg_reg_val;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        for (tpc_index = 0; tpc_index < gr->max_tpc_per_gpc_count; tpc_index++) {
                for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
@@ -1443,7 +1443,7 @@ static int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
                >> ram_in_base_shift_v());
 
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ret = gr_gk20a_submit_fecs_method_op(g,
                (struct fecs_method_op_gk20a) {
@@ -1459,7 +1459,7 @@ static int gr_gk20a_fecs_ctx_image_save(struct channel_gk20a *c, u32 save_type)
                 });
 
        if (ret)
-               nvhost_err(dev_from_gk20a(g), "save context image failed");
+               gk20a_err(dev_from_gk20a(g), "save context image failed");
 
        return ret;
 }
@@ -1517,7 +1517,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
        void *gold_ptr = NULL;
        u32 err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* golden ctx is global to all channels. Although only the first
           channel initializes golden image, driver needs to prevent multiple
@@ -1561,14 +1561,14 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
        gk20a_mm_l2_flush(g, false);
 
        for (i = 0; i < ctx_header_words; i++) {
-               data = mem_rd32(ctx_ptr, i);
-               mem_wr32(gold_ptr, i, data);
+               data = gk20a_mem_rd32(ctx_ptr, i);
+               gk20a_mem_wr32(gold_ptr, i, data);
        }
 
-       mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_o(), 0,
+       gk20a_mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_o(), 0,
                 ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());
 
-       mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, 0);
+       gk20a_mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, 0);
 
        gr_gk20a_commit_inst(c, ch_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
 
@@ -1586,7 +1586,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 
                for (i = 0; i < gr->ctx_vars.golden_image_size / 4; i++)
                        gr->ctx_vars.local_golden_image[i] =
-                               mem_rd32(gold_ptr, i);
+                               gk20a_mem_rd32(gold_ptr, i);
        }
 
        gr_gk20a_commit_inst(c, ch_ctx->gr_ctx.gpu_va);
@@ -1600,9 +1600,9 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 
 clean_up:
        if (err)
-               nvhost_err(dev_from_gk20a(g), "fail");
+               gk20a_err(dev_from_gk20a(g), "fail");
        else
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
 
        if (gold_ptr)
                vunmap(gold_ptr);
@@ -1634,12 +1634,12 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
        if (!ctx_ptr)
                return -ENOMEM;
 
-       data = mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
+       data = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
        data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
        data |= enable_smpc_ctxsw ?
                ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
                ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
                 data);
 
        vunmap(ctx_ptr);
@@ -1661,7 +1661,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
        int ret = 0;
        void *ctx_ptr = NULL;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (gr->ctx_vars.local_golden_image == NULL)
                return -1;
@@ -1678,31 +1678,31 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
                return -ENOMEM;
 
        for (i = 0; i < gr->ctx_vars.golden_image_size / 4; i++)
-               mem_wr32(ctx_ptr, i, gr->ctx_vars.local_golden_image[i]);
+               gk20a_mem_wr32(ctx_ptr, i, gr->ctx_vars.local_golden_image[i]);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_save_ops_o(), 0, 0);
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_restore_ops_o(), 0, 0);
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_save_ops_o(), 0, 0);
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_restore_ops_o(), 0, 0);
 
        virt_addr_lo = u64_lo32(ch_ctx->patch_ctx.gpu_va);
        virt_addr_hi = u64_hi32(ch_ctx->patch_ctx.gpu_va);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_count_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_count_o(), 0,
                 ch_ctx->patch_ctx.data_count);
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_lo_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_lo_o(), 0,
                 virt_addr_lo);
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_hi_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_hi_o(), 0,
                 virt_addr_hi);
 
        /* no user for client managed performance counter ctx */
        ch_ctx->pm_ctx.ctx_sw_mode =
                ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
-       data = mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
+       data = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
        data = data & ~ctxsw_prog_main_image_pm_mode_m();
        data |= ch_ctx->pm_ctx.ctx_sw_mode;
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
                 data);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
 
        /* set priv access map */
        virt_addr_lo =
@@ -1710,17 +1710,17 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
        virt_addr_hi =
                 u64_hi32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
 
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
                 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f());
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
                 virt_addr_lo);
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
                 virt_addr_hi);
        /* disable verif features */
-       v = mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
+       v = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
        v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
        v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
-       mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
+       gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
 
 
        vunmap(ctx_ptr);
@@ -1749,7 +1749,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
                                  .cond.fail = GR_IS_UCODE_OP_SKIP});
 
                if (ret)
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "restore context image failed");
        }
 
@@ -1758,7 +1758,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
 
 static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0),
                     gr_fecs_ctxsw_mailbox_clear_value_f(~0));
@@ -1769,7 +1769,7 @@ static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
        gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1));
        gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1));
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
@@ -1791,7 +1791,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
                                        &iova,
                                        GFP_KERNEL);
        if (!ucode_info->inst_blk_desc.cpuva) {
-               nvhost_err(d, "failed to allocate memory\n");
+               gk20a_err(d, "failed to allocate memory\n");
                return -ENOMEM;
        }
 
@@ -1802,19 +1802,19 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
        inst_ptr = ucode_info->inst_blk_desc.cpuva;
 
        /* Set inst block */
-       mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
                 u64_lo32(vm->va_limit) | 0xFFF);
-       mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
                ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
 
        pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
        pde_addr_lo = u64_lo32(pde_addr >> 12);
        pde_addr_hi = u64_hi32(pde_addr);
-       mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
                ram_in_page_dir_base_target_vid_mem_f() |
                ram_in_page_dir_base_vol_true_f() |
                ram_in_page_dir_base_lo_f(pde_addr_lo));
-       mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
                ram_in_page_dir_base_hi_f(pde_addr_hi));
 
        /* Map ucode surface to GMMU */
@@ -1824,7 +1824,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
                                        0, /* flags */
                                        gk20a_mem_flag_read_only);
        if (!ucode_info->ucode_gpuva) {
-               nvhost_err(d, "failed to update gmmu ptes\n");
+               gk20a_err(d, "failed to update gmmu ptes\n");
                return -ENOMEM;
        }
 
@@ -1884,7 +1884,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 
        fecs_fw = gk20a_request_firmware(g, GK20A_FECS_UCODE_IMAGE);
        if (!fecs_fw) {
-               nvhost_err(d, "failed to load fecs ucode!!");
+               gk20a_err(d, "failed to load fecs ucode!!");
                return -ENOENT;
        }
 
@@ -1895,7 +1895,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
        gpccs_fw = gk20a_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE);
        if (!gpccs_fw) {
                release_firmware(fecs_fw);
-               nvhost_err(d, "failed to load gpccs ucode!!");
+               gk20a_err(d, "failed to load gpccs ucode!!");
                return -ENOENT;
        }
 
@@ -1921,7 +1921,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
                                        GFP_KERNEL,
                                        &attrs);
        if (!ucode_info->surface_desc.cpuva) {
-               nvhost_err(d, "memory allocation failed\n");
+               gk20a_err(d, "memory allocation failed\n");
                err = -ENOMEM;
                goto clean_up;
        }
@@ -1932,7 +1932,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
                                ucode_info->surface_desc.iova,
                                ucode_info->surface_desc.size);
        if (err) {
-               nvhost_err(d, "failed to create sg table\n");
+               gk20a_err(d, "failed to create sg table\n");
                goto clean_up;
        }
 
@@ -1940,7 +1940,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
        if (!buf) {
                release_firmware(fecs_fw);
                release_firmware(gpccs_fw);
-               nvhost_err(d, "failed to map surface desc buffer");
+               gk20a_err(d, "failed to map surface desc buffer");
                return -ENOMEM;
        }
 
@@ -1992,7 +1992,7 @@ static void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
                retries--;
        }
        if (!retries)
-               nvhost_err(dev_from_gk20a(g), "arbiter idle timeout");
+               gk20a_err(dev_from_gk20a(g), "arbiter idle timeout");
 
        gk20a_writel(g, gr_fecs_arb_ctx_adr_r(), 0x0);
 
@@ -2017,7 +2017,7 @@ static void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
                val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
        }
        if (!retries)
-               nvhost_err(dev_from_gk20a(g), "arbiter complete timeout");
+               gk20a_err(dev_from_gk20a(g), "arbiter complete timeout");
 
        gk20a_writel(g, gr_fecs_current_ctx_r(),
                        gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) |
@@ -2034,7 +2034,7 @@ static void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)
                val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
        }
        if (!retries)
-               nvhost_err(dev_from_gk20a(g), "arbiter complete timeout");
+               gk20a_err(dev_from_gk20a(g), "arbiter complete timeout");
 }
 
 static int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
@@ -2137,7 +2137,7 @@ static int gr_gk20a_load_ctxsw_ucode(struct gk20a *g, struct gr_gk20a *gr)
 {
        u32 ret;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (tegra_platform_is_linsim()) {
                gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
@@ -2166,7 +2166,7 @@ static int gr_gk20a_load_ctxsw_ucode(struct gk20a *g, struct gr_gk20a *gr)
                                      eUcodeHandshakeInitComplete,
                                      GR_IS_UCODE_OP_SKIP, 0);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g), "falcon ucode init timeout");
+               gk20a_err(dev_from_gk20a(g), "falcon ucode init timeout");
                return ret;
        }
 
@@ -2179,7 +2179,7 @@ static int gr_gk20a_load_ctxsw_ucode(struct gk20a *g, struct gr_gk20a *gr)
        gk20a_writel(g, gr_fecs_method_push_r(),
                     gr_fecs_method_push_adr_set_watchdog_timeout_f());
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -2197,12 +2197,12 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
                .cond.fail = GR_IS_UCODE_OP_SKIP,
                };
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        op.method.addr = gr_fecs_method_push_adr_discover_image_size_v();
        op.mailbox.ret = &golden_ctx_image_size;
        ret = gr_gk20a_submit_fecs_method_op(g, op);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "query golden image size failed");
                return ret;
        }
@@ -2210,7 +2210,7 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
        op.mailbox.ret = &zcull_ctx_image_size;
        ret = gr_gk20a_submit_fecs_method_op(g, op);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "query zcull ctx image size failed");
                return ret;
        }
@@ -2218,7 +2218,7 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
        op.mailbox.ret = &pm_ctx_image_size;
        ret = gr_gk20a_submit_fecs_method_op(g, op);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "query pm ctx image size failed");
                return ret;
        }
@@ -2235,7 +2235,7 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
 
        g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -2292,11 +2292,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        u32 pagepool_buffer_size = gr_scc_pagepool_total_pages_hwmax_value_v() *
                gr_scc_pagepool_total_pages_byte_granularity_v();
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
-       nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
+       gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
        err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR],
                                        cb_buffer_size);
@@ -2308,7 +2308,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                       &gr->global_ctx_buffer[CIRCULAR_VPR],
                                       cb_buffer_size);
 
-       nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
+       gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
 
        err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL],
                                        pagepool_buffer_size);
@@ -2320,7 +2320,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                       &gr->global_ctx_buffer[PAGEPOOL_VPR],
                                       pagepool_buffer_size);
 
-       nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
+       gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
 
        err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE],
                                        attr_buffer_size);
@@ -2332,7 +2332,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
                                       &gr->global_ctx_buffer[ATTRIBUTE_VPR],
                                       attr_buffer_size);
 
-       nvhost_dbg_info("golden_image_size : %d",
+       gk20a_dbg_info("golden_image_size : %d",
                   gr->ctx_vars.golden_image_size);
 
        err = gk20a_gr_alloc_ctx_buffer(pdev,
@@ -2341,7 +2341,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        if (err)
                goto clean_up;
 
-       nvhost_dbg_info("priv_access_map_size : %d",
+       gk20a_dbg_info("priv_access_map_size : %d",
                   gr->ctx_vars.priv_access_map_size);
 
        err = gk20a_gr_alloc_ctx_buffer(pdev,
@@ -2351,11 +2351,11 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        if (err)
                goto clean_up;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
  clean_up:
-       nvhost_err(dev_from_gk20a(g), "fail");
+       gk20a_err(dev_from_gk20a(g), "fail");
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
                if (gr->global_ctx_buffer[i].destroy) {
                        gr->global_ctx_buffer[i].destroy(pdev,
@@ -2379,7 +2379,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
                                &gr->global_ctx_buffer[i]);
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
@@ -2392,7 +2392,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        u64 size;
        u64 gpu_va;
        u32 i;
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* Circular Buffer */
        if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) {
@@ -2482,7 +2482,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
        u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
        u32 i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                if (g_bfr_va[i]) {
@@ -2507,7 +2507,7 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
        int err = 0;
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (gr->ctx_vars.buffer_size == 0)
                return 0;
@@ -2558,7 +2558,7 @@ static void gr_gk20a_free_channel_gr_ctx(struct channel_gk20a *c)
        struct device *d = dev_from_gk20a(g);
        DEFINE_DMA_ATTRS(attrs);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_gmmu_unmap(ch_vm, ch_ctx->gr_ctx.gpu_va,
                        ch_ctx->gr_ctx.size, gk20a_mem_flag_none);
@@ -2580,7 +2580,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
        int err = 0;
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        patch_ctx->size = 128 * sizeof(u32);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
@@ -2603,7 +2603,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
        gk20a_free_sgtable(&sgt);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
  err_free_sgtable:
@@ -2613,7 +2613,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
                patch_ctx->pages, patch_ctx->iova, &attrs);
        patch_ctx->pages = NULL;
        patch_ctx->iova = 0;
-       nvhost_err(dev_from_gk20a(g), "fail");
+       gk20a_err(dev_from_gk20a(g), "fail");
        return err;
 }
 
@@ -2622,7 +2622,7 @@ static void gr_gk20a_unmap_channel_patch_ctx(struct channel_gk20a *c)
        struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
        struct vm_gk20a *ch_vm = c->vm;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (patch_ctx->gpu_va)
                gk20a_gmmu_unmap(ch_vm, patch_ctx->gpu_va,
@@ -2638,7 +2638,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
        struct device *d = dev_from_gk20a(g);
        DEFINE_DMA_ATTRS(attrs);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gr_gk20a_unmap_channel_patch_ctx(c);
 
@@ -2691,18 +2691,18 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
        int err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* an address space needs to have been bound at this point.*/
        if (!gk20a_channel_as_bound(c)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "not bound to address space at time"
                           " of grctx allocation");
                return -EINVAL;
        }
 
        if (!g->ops.gr.is_valid_class(g, args->class_num)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "invalid obj class 0x%x", args->class_num);
                err = -EINVAL;
                goto out;
@@ -2712,7 +2712,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        if (ch_ctx->gr_ctx.pages == NULL) {
                err = gr_gk20a_alloc_channel_gr_ctx(g, c);
                if (err) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to allocate gr ctx buffer");
                        goto out;
                }
@@ -2720,7 +2720,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        } else {
                /*TBD: needs to be more subtle about which is being allocated
                * as some are allowed to be allocated along same channel */
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "too many classes alloc'd on same channel");
                err = -EINVAL;
                goto out;
@@ -2729,7 +2729,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        /* commit gr ctx buffer */
        err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx.gpu_va);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to commit gr ctx buffer");
                goto out;
        }
@@ -2738,7 +2738,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        if (ch_ctx->patch_ctx.pages == NULL) {
                err = gr_gk20a_alloc_channel_patch_ctx(g, c);
                if (err) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to allocate patch buffer");
                        goto out;
                }
@@ -2748,7 +2748,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        if (!ch_ctx->global_ctx_buffer_mapped) {
                err = gr_gk20a_map_global_ctx_buffers(g, c);
                if (err) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to map global ctx buffer");
                        goto out;
                }
@@ -2759,7 +2759,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        /* init golden image, ELPG enabled after this is done */
        err = gr_gk20a_init_golden_ctx_image(g, c);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to init golden ctx image");
                goto out;
        }
@@ -2769,7 +2769,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
                err = gr_gk20a_elpg_protected_call(g,
                        gr_gk20a_load_golden_ctx_image(g, c));
                if (err) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to load golden ctx image");
                        goto out;
                }
@@ -2779,14 +2779,14 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
 
        c->num_objects++;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 out:
        /* 1. gr_ctx, patch_ctx and global ctx buffer mapping
           can be reused so no need to release them.
           2. golden image init and load is a one time thing so if
           they pass, no need to undo. */
-       nvhost_err(dev_from_gk20a(g), "fail");
+       gk20a_err(dev_from_gk20a(g), "fail");
        return err;
 }
 
@@ -2795,7 +2795,7 @@ int gk20a_free_obj_ctx(struct channel_gk20a  *c,
 {
        unsigned long timeout = gk20a_get_gr_idle_timeout(c->g);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (c->num_objects == 0)
                return 0;
@@ -2819,7 +2819,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
        struct device *d = dev_from_gk20a(g);
        DEFINE_DMA_ATTRS(attrs);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gr_gk20a_free_global_ctx_buffers(g);
 
@@ -2927,7 +2927,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
        gr->max_zcull_per_gpc_count = proj_scal_litter_num_zcull_banks_v();
 
        if (!gr->gpc_count) {
-               nvhost_err(dev_from_gk20a(g), "gpc_count==0!");
+               gk20a_err(dev_from_gk20a(g), "gpc_count==0!");
                goto clean_up;
        }
 
@@ -3002,35 +3002,35 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
                gr->gpc_skip_mask[gpc_index] = gpc_new_skip_mask;
        }
 
-       nvhost_dbg_info("fbps: %d", gr->num_fbps);
-       nvhost_dbg_info("max_gpc_count: %d", gr->max_gpc_count);
-       nvhost_dbg_info("max_fbps_count: %d", gr->max_fbps_count);
-       nvhost_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
-       nvhost_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
-       nvhost_dbg_info("max_tpc_count: %d", gr->max_tpc_count);
-       nvhost_dbg_info("sys_count: %d", gr->sys_count);
-       nvhost_dbg_info("gpc_count: %d", gr->gpc_count);
-       nvhost_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc);
-       nvhost_dbg_info("tpc_count: %d", gr->tpc_count);
-       nvhost_dbg_info("ppc_count: %d", gr->ppc_count);
+       gk20a_dbg_info("fbps: %d", gr->num_fbps);
+       gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count);
+       gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count);
+       gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
+       gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
+       gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count);
+       gk20a_dbg_info("sys_count: %d", gr->sys_count);
+       gk20a_dbg_info("gpc_count: %d", gr->gpc_count);
+       gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc);
+       gk20a_dbg_info("tpc_count: %d", gr->tpc_count);
+       gk20a_dbg_info("ppc_count: %d", gr->ppc_count);
 
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-               nvhost_dbg_info("gpc_tpc_count[%d] : %d",
+               gk20a_dbg_info("gpc_tpc_count[%d] : %d",
                           gpc_index, gr->gpc_tpc_count[gpc_index]);
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-               nvhost_dbg_info("gpc_zcb_count[%d] : %d",
+               gk20a_dbg_info("gpc_zcb_count[%d] : %d",
                           gpc_index, gr->gpc_zcb_count[gpc_index]);
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-               nvhost_dbg_info("gpc_ppc_count[%d] : %d",
+               gk20a_dbg_info("gpc_ppc_count[%d] : %d",
                           gpc_index, gr->gpc_ppc_count[gpc_index]);
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
-               nvhost_dbg_info("gpc_skip_mask[%d] : %d",
+               gk20a_dbg_info("gpc_skip_mask[%d] : %d",
                           gpc_index, gr->gpc_skip_mask[gpc_index]);
        for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
                for (pes_index = 0;
                     pes_index < gr->pe_count_per_gpc;
                     pes_index++)
-                       nvhost_dbg_info("pes_tpc_count[%d][%d] : %d",
+                       gk20a_dbg_info("pes_tpc_count[%d][%d] : %d",
                                   pes_index, gpc_index,
                                   gr->pes_tpc_count[pes_index][gpc_index]);
 
@@ -3038,7 +3038,7 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
                for (pes_index = 0;
                     pes_index < gr->pe_count_per_gpc;
                     pes_index++)
-                       nvhost_dbg_info("pes_tpc_mask[%d][%d] : %d",
+                       gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d",
                                   pes_index, gpc_index,
                                   gr->pes_tpc_mask[pes_index][gpc_index]);
 
@@ -3047,16 +3047,16 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
        g->ops.gr.calc_global_ctx_buffer_size(g);
        gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v();
 
-       nvhost_dbg_info("bundle_cb_default_size: %d",
+       gk20a_dbg_info("bundle_cb_default_size: %d",
                   gr->bundle_cb_default_size);
-       nvhost_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
-       nvhost_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
-       nvhost_dbg_info("attrib_cb_default_size: %d",
+       gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
+       gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
+       gk20a_dbg_info("attrib_cb_default_size: %d",
                   gr->attrib_cb_default_size);
-       nvhost_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size);
-       nvhost_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size);
-       nvhost_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size);
-       nvhost_dbg_info("timeslice_mode: %d", gr->timeslice_mode);
+       gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size);
+       gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size);
+       gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size);
+       gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode);
 
        return 0;
 
@@ -3269,9 +3269,9 @@ clean_up:
        kfree(sorted_to_unsorted_gpc_map);
 
        if (ret)
-               nvhost_err(dev_from_gk20a(g), "fail");
+               gk20a_err(dev_from_gk20a(g), "fail");
        else
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
 
        return ret;
 }
@@ -3361,14 +3361,14 @@ static int gr_gk20a_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr,
 
        ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to disable gr engine activity\n");
                return ret;
        }
 
        ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to idle graphics\n");
                goto clean_up;
        }
@@ -3409,7 +3409,7 @@ static int gr_gk20a_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr,
 clean_up:
        ret = gk20a_fifo_enable_engine_activity(g, gr_info);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to enable gr engine activity\n");
        }
 
@@ -3427,14 +3427,14 @@ static int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
 
        ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to disable gr engine activity\n");
                return ret;
        }
 
        ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to idle graphics\n");
                goto clean_up;
        }
@@ -3466,7 +3466,7 @@ static int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
 clean_up:
        ret = gk20a_fifo_enable_engine_activity(g, gr_info);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to enable gr engine activity\n");
        }
 
@@ -3497,7 +3497,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
 
                                if (memcmp(c_tbl->color_l2, zbc_val->color_l2,
                                    sizeof(zbc_val->color_l2))) {
-                                       nvhost_err(dev_from_gk20a(g),
+                                       gk20a_err(dev_from_gk20a(g),
                                                "zbc l2 and ds color don't match with existing entries");
                                        return -EINVAL;
                                }
@@ -3553,7 +3553,7 @@ int gr_gk20a_add_zbc(struct gk20a *g, struct gr_gk20a *gr,
                }
                break;
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "invalid zbc table type %d", zbc_val->type);
                return -EINVAL;
        }
@@ -3579,14 +3579,14 @@ int gr_gk20a_clear_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
 
        ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to disable gr engine activity\n");
                return ret;
        }
 
        ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to idle graphics\n");
                goto clean_up;
        }
@@ -3642,7 +3642,7 @@ int gr_gk20a_clear_zbc_table(struct gk20a *g, struct gr_gk20a *gr)
 clean_up:
        ret = gk20a_fifo_enable_engine_activity(g, gr_info);
        if (ret) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to enable gr engine activity\n");
        }
 
@@ -3665,7 +3665,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                break;
        case GK20A_ZBC_TYPE_COLOR:
                if (index >= GK20A_ZBC_TABLE_SIZE) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "invalid zbc color table index\n");
                        return -EINVAL;
                }
@@ -3680,7 +3680,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                break;
        case GK20A_ZBC_TYPE_DEPTH:
                if (index >= GK20A_ZBC_TABLE_SIZE) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "invalid zbc depth table index\n");
                        return -EINVAL;
                }
@@ -3689,7 +3689,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
                query_params->ref_cnt = gr->zbc_dep_tbl[index].ref_cnt;
                break;
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                                "invalid zbc table type\n");
                return -EINVAL;
        }
@@ -3736,7 +3736,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
        if (!err)
                gr->max_default_color_index = 4;
        else {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "fail to load default zbc color table\n");
                return err;
        }
@@ -3755,7 +3755,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
        if (!err)
                gr->max_default_depth_index = 2;
        else {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "fail to load default zbc depth table\n");
                return err;
        }
@@ -3766,7 +3766,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
 int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
                        struct zbc_entry *zbc_val)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        return gr_gk20a_elpg_protected_call(g,
                gr_gk20a_add_zbc(g, gr, zbc_val));
@@ -3790,7 +3790,7 @@ void gr_gk20a_init_blcg_mode(struct gk20a *g, u32 mode, u32 engine)
                                therm_gate_ctrl_blk_clk_auto_f());
                break;
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "invalid blcg mode %d", mode);
                return;
        }
@@ -3825,7 +3825,7 @@ void gr_gk20a_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
                                therm_gate_ctrl_eng_clk_auto_f());
                break;
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "invalid elcg mode %d", mode);
        }
 
@@ -3869,7 +3869,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
        zcull_map_tiles = kzalloc(proj_scal_max_gpcs_v() *
                        proj_scal_max_tpc_per_gpc_v() * sizeof(u32), GFP_KERNEL);
        if (!zcull_map_tiles) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to allocate zcull temp buffers");
                return -ENOMEM;
        }
@@ -3877,7 +3877,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
                        proj_scal_max_tpc_per_gpc_v() * sizeof(u32), GFP_KERNEL);
 
        if (!zcull_bank_counters) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to allocate zcull temp buffers");
                kfree(zcull_map_tiles);
                return -ENOMEM;
@@ -3938,7 +3938,7 @@ static int gr_gk20a_zcull_init_hw(struct gk20a *g, struct gr_gk20a *gr)
 
                if (gpc_zcull_count != gr->max_zcull_per_gpc_count &&
                    gpc_zcull_count < gpc_tpc_count) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "zcull_banks (%d) less than tpcs (%d) for gpc (%d)",
                                gpc_zcull_count, gpc_tpc_count, gpc_index);
                        return -EINVAL;
@@ -4091,7 +4091,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
        u32 last_method_data = 0;
        u32 i, err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* slcg prod values */
        gr_gk20a_slcg_gr_load_gating_prod(g, g->slcg_enabled);
@@ -4249,7 +4249,7 @@ restore_fe_go_idle:
                goto out;
 
 out:
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -4279,7 +4279,7 @@ static int gk20a_init_gr_prepare(struct gk20a *g)
        if (!g->gr.ctx_vars.valid) {
                err = gr_gk20a_init_ctx_vars(g, &g->gr);
                if (err)
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to load gr init ctx");
        }
        return err;
@@ -4291,7 +4291,7 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
        bool fecs_scrubbing;
        bool gpccs_scrubbing;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        do {
                fecs_scrubbing = gk20a_readl(g, gr_fecs_dmactl_r()) &
@@ -4303,14 +4303,14 @@ static int gr_gk20a_wait_mem_scrubbing(struct gk20a *g)
                         gr_gpccs_dmactl_imem_scrubbing_m());
 
                if (!fecs_scrubbing && !gpccs_scrubbing) {
-                       nvhost_dbg_fn("done");
+                       gk20a_dbg_fn("done");
                        return 0;
                }
 
                udelay(GR_IDLE_CHECK_DEFAULT);
        } while (--retries || !tegra_platform_is_silicon());
 
-       nvhost_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+       gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
        return -ETIMEDOUT;
 }
 
@@ -4322,7 +4322,7 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
        u32 i, err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* enable interrupts */
        gk20a_writel(g, gr_intr_r(), ~0);
@@ -4360,9 +4360,9 @@ static int gk20a_init_gr_reset_enable_hw(struct gk20a *g)
 
 out:
        if (err)
-               nvhost_err(dev_from_gk20a(g), "fail");
+               gk20a_err(dev_from_gk20a(g), "fail");
        else
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -4394,8 +4394,8 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
                    PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >>
                    PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!data) {
-               nvhost_err(dev_from_gk20a(g),
-                          "failed to map priv access map memory");
+               gk20a_err(dev_from_gk20a(g),
+                         "failed to map priv access map memory");
                err = -ENOMEM;
                goto clean_up;
        }
@@ -4407,7 +4407,7 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
                map_bit = wl_addr_gk20a[w] >> 2;
                map_byte = map_bit >> 3;
                map_shift = map_bit & 0x7; /* i.e. 0-7 */
-               nvhost_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
+               gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
                  wl_addr_gk20a[w], map_byte, map_shift);
                ((u8 *)data)[map_byte] |= 1 << map_shift;
        }
@@ -4423,10 +4423,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
        struct gr_gk20a *gr = &g->gr;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (gr->sw_ready) {
-               nvhost_dbg_fn("skip init");
+               gk20a_dbg_fn("skip init");
                return 0;
        }
 
@@ -4447,7 +4447,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
        if (tegra_cpu_is_asim())
                gr->max_comptag_mem = 1; /* MBs worth of comptag coverage */
        else {
-               nvhost_dbg_info("total ram pages : %lu", totalram_pages);
+               gk20a_dbg_info("total ram pages : %lu", totalram_pages);
                gr->max_comptag_mem = totalram_pages
                                         >> (10 - (PAGE_SHIFT - 10));
        }
@@ -4473,11 +4473,11 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
        gr->remove_support = gk20a_remove_gr_support;
        gr->sw_ready = true;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
 clean_up:
-       nvhost_err(dev_from_gk20a(g), "fail");
+       gk20a_err(dev_from_gk20a(g), "fail");
        gk20a_remove_gr_support(gr);
        return err;
 }
@@ -4486,7 +4486,7 @@ int gk20a_init_gr_support(struct gk20a *g)
 {
        u32 err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        err = gk20a_init_gr_prepare(g);
        if (err)
@@ -4530,7 +4530,7 @@ struct gr_isr_data {
 
 void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
                gk20a_writel(g,
@@ -4579,7 +4579,7 @@ static void gk20a_gr_set_circular_buffer_size(struct gk20a *g, u32 data)
        u32 gpc_index, ppc_index, stride, val, offset;
        u32 cb_size = data * 4;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (cb_size > gr->attrib_cb_size)
                cb_size = gr->attrib_cb_size;
@@ -4631,7 +4631,7 @@ static void gk20a_gr_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
        u32 pd_ab_max_output;
        u32 alpha_cb_size = data * 4;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        /* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
                return; */
 
@@ -4685,7 +4685,7 @@ void gk20a_gr_reset(struct gk20a *g)
 static int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
                                          u32 class_num, u32 offset, u32 data)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (class_num == KEPLER_COMPUTE_A) {
                switch (offset << 2) {
@@ -4723,10 +4723,10 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
 {
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = &f->channel[isr_data->chid];
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        gk20a_set_error_notifier(ch,
                                NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT);
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                   "gr semaphore timeout\n");
        return -EINVAL;
 }
@@ -4736,11 +4736,11 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
 {
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = &f->channel[isr_data->chid];
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        gk20a_set_error_notifier(ch,
                                NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY);
        /* This is an unrecoverable error, reset is needed */
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                   "gr semaphore timeout\n");
        return -EINVAL;
 }
@@ -4752,7 +4752,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
                        isr_data->class_num, isr_data->offset,
                        isr_data->data_lo);
        if (ret)
-               nvhost_err(dev_from_gk20a(g), "invalid method class 0x%08x"
+               gk20a_err(dev_from_gk20a(g), "invalid method class 0x%08x"
                        ", offset 0x%08x address 0x%08x\n",
                        isr_data->class_num, isr_data->offset, isr_data->addr);
 
@@ -4764,10 +4764,10 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
 {
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = &f->channel[isr_data->chid];
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        gk20a_set_error_notifier(ch,
                                NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                   "invalid class 0x%08x, offset 0x%08x",
                   isr_data->class_num, isr_data->offset);
        return -EINVAL;
@@ -4778,11 +4778,11 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 {
        struct fifo_gk20a *f = &g->fifo;
        struct channel_gk20a *ch = &f->channel[isr_data->chid];
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_set_error_notifier(ch,
                        NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
-       nvhost_err(dev_from_gk20a(g),
+       gk20a_err(dev_from_gk20a(g),
                   "class error 0x%08x, offset 0x%08x",
                   isr_data->class_num, isr_data->offset);
        return -EINVAL;
@@ -4873,7 +4873,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
                        valid = is_valid_cyclestats_bar0_offset_gk20a(g,
                                                        op_elem->offset_bar0);
                        if (!valid) {
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                           "invalid cycletstats op offset: 0x%x\n",
                                           op_elem->offset_bar0);
 
@@ -4932,7 +4932,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
        }
        mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
 #endif
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        wake_up(&ch->notifier_wq);
        return 0;
 }
@@ -5001,7 +5001,7 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
        bool mmu_debug_mode_enabled = gk20a_mm_mmu_debug_mode_enabled(g);
        u32 dbgr_control0;
 
-       nvhost_dbg(dbg_intr | dbg_gpu_dbg, "locking down SM");
+       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locking down SM");
 
        /* assert stop trigger */
        dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
@@ -5022,7 +5022,7 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
                        ((global_esr & ~global_esr_mask) != 0);
 
                if (locked_down || !error_pending) {
-                       nvhost_dbg(dbg_intr | dbg_gpu_dbg, "locked down SM");
+                       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locked down SM");
 
                        /* de-assert stop trigger */
                        dbgr_control0 &= ~gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f();
@@ -5034,7 +5034,7 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
                /* if an mmu fault is pending and mmu debug mode is not
                 * enabled, the sm will never lock down. */
                if (!mmu_debug_mode_enabled && gk20a_fifo_mmu_fault_pending(g)) {
-                       nvhost_err(dev_from_gk20a(g), "mmu fault pending, sm will"
+                       gk20a_err(dev_from_gk20a(g), "mmu fault pending, sm will"
                                   " never lock down!");
                        return -EFAULT;
                }
@@ -5045,7 +5045,7 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g, u32 global_esr_mask)
        } while (time_before(jiffies, end_jiffies)
                        || !tegra_platform_is_silicon());
 
-       nvhost_err(dev_from_gk20a(g), "timed out while trying to lock down SM");
+       gk20a_err(dev_from_gk20a(g), "timed out while trying to lock down SM");
 
        return -EAGAIN;
 }
@@ -5094,7 +5094,7 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g,
        bool sm_debugger_attached = gk20a_gr_sm_debugger_attached(g);
        struct channel_gk20a *fault_ch;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
        warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r());
@@ -5105,19 +5105,19 @@ static int gk20a_gr_handle_sm_exception(struct gk20a *g,
                u32 tpc_exception_en = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
                tpc_exception_en &= ~gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
                gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), tpc_exception_en);
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "SM debugger attached");
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM debugger attached");
        }
 
        /* if a debugger is present and an error has occurred, do a warp sync */
        if (sm_debugger_attached && ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
-               nvhost_dbg(dbg_intr, "warp sync needed");
+               gk20a_dbg(gpu_dbg_intr, "warp sync needed");
                do_warp_sync = true;
        }
 
        if (do_warp_sync) {
                ret = gk20a_gr_lock_down_sm(g, global_mask);
                if (ret) {
-                       nvhost_err(dev_from_gk20a(g), "sm did not lock down!\n");
+                       gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n");
                        return ret;
                }
        }
@@ -5136,12 +5136,12 @@ static int gk20a_gr_handle_tpc_exception(struct gk20a *g,
        int ret = 0;
        u32 tpc_exception = gk20a_readl(g, gr_gpcs_tpcs_tpccs_tpc_exception_r());
 
-       nvhost_dbg(dbg_intr | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
 
        /* check if an sm exeption is pending  */
        if (gr_gpcs_tpcs_tpccs_tpc_exception_sm_v(tpc_exception) ==
                        gr_gpcs_tpcs_tpccs_tpc_exception_sm_pending_v()) {
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "SM exception pending");
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM exception pending");
                ret = gk20a_gr_handle_sm_exception(g, isr_data);
        }
 
@@ -5154,12 +5154,12 @@ static int gk20a_gr_handle_gpc_exception(struct gk20a *g,
        int ret = 0;
        u32 gpc_exception = gk20a_readl(g, gr_gpcs_gpccs_gpc_exception_r());
 
-       nvhost_dbg(dbg_intr | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
 
        /* check if tpc 0 has an exception */
        if (gr_gpcs_gpccs_gpc_exception_tpc_v(gpc_exception) ==
                        gr_gpcs_gpccs_gpc_exception_tpc_0_pending_v()) {
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "TPC exception pending");
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "TPC exception pending");
                ret = gk20a_gr_handle_tpc_exception(g, isr_data);
        }
 
@@ -5174,8 +5174,8 @@ int gk20a_gr_isr(struct gk20a *g)
        int need_reset = 0;
        u32 gr_intr = gk20a_readl(g, gr_intr_r());
 
-       nvhost_dbg_fn("");
-       nvhost_dbg(dbg_intr, "pgraph intr %08x", gr_intr);
+       gk20a_dbg_fn("");
+       gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr);
 
        if (!gr_intr)
                return 0;
@@ -5201,12 +5201,12 @@ int gk20a_gr_isr(struct gk20a *g)
        isr_data.chid =
                gk20a_gr_get_chid_from_ctx(g, isr_data.curr_ctx);
        if (isr_data.chid == -1) {
-               nvhost_err(dev_from_gk20a(g), "invalid channel ctx 0x%08x",
+               gk20a_err(dev_from_gk20a(g), "invalid channel ctx 0x%08x",
                           isr_data.curr_ctx);
                goto clean_up;
        }
 
-       nvhost_dbg(dbg_intr | dbg_gpu_dbg,
+       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
                "channel %d: addr 0x%08x, "
                "data 0x%08x 0x%08x,"
                "ctx 0x%08x, offset 0x%08x, "
@@ -5271,7 +5271,7 @@ int gk20a_gr_isr(struct gk20a *g)
         * register using set_falcon[4] */
        if (gr_intr & gr_intr_firmware_method_pending_f()) {
                need_reset |= true;
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "firmware method intr pending\n");
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n");
                gk20a_writel(g, gr_intr_r(),
                        gr_intr_firmware_method_reset_f());
                gr_intr &= ~gr_intr_firmware_method_pending_f();
@@ -5282,11 +5282,11 @@ int gk20a_gr_isr(struct gk20a *g)
                struct fifo_gk20a *f = &g->fifo;
                struct channel_gk20a *ch = &f->channel[isr_data.chid];
 
-               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "exception %08x\n", exception);
+               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception);
 
                if (exception & gr_exception_fe_m()) {
                        u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
-                       nvhost_dbg(dbg_intr, "fe warning %08x\n", fe);
+                       gk20a_dbg(gpu_dbg_intr, "fe warning %08x\n", fe);
                        gk20a_writel(g, gr_fe_hww_esr_r(), fe);
                }
 
@@ -5295,11 +5295,11 @@ int gk20a_gr_isr(struct gk20a *g)
                        u32 exception1 = gk20a_readl(g, gr_exception1_r());
                        u32 global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
 
-                       nvhost_dbg(dbg_intr | dbg_gpu_dbg, "GPC exception pending");
+                       gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC exception pending");
 
                        /* if no sm debugger is present, clean up the channel */
                        if (!gk20a_gr_sm_debugger_attached(g)) {
-                               nvhost_dbg(dbg_intr | dbg_gpu_dbg,
+                               gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
                                           "SM debugger not attached, clearing interrupt");
                                need_reset |= -EFAULT;
                        } else {
@@ -5329,7 +5329,7 @@ clean_up:
                gr_gpfifo_ctl_semaphore_access_f(1));
 
        if (gr_intr)
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "unhandled gr interrupt 0x%08x", gr_intr);
 
        return 0;
@@ -5340,7 +5340,7 @@ int gk20a_gr_nonstall_isr(struct gk20a *g)
        u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
        u32 clear_intr = 0;
 
-       nvhost_dbg(dbg_intr, "pgraph nonstall intr %08x", gr_intr);
+       gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
 
        if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
                gk20a_channel_semaphore_wakeup(g);
@@ -5409,7 +5409,7 @@ int gk20a_gr_suspend(struct gk20a *g)
                msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
        u32 ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
        if (ret)
@@ -5432,7 +5432,7 @@ int gk20a_gr_suspend(struct gk20a *g)
 
        gk20a_gr_flush_channel_tlb(&g->gr);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return ret;
 }
 
@@ -5453,7 +5453,7 @@ int gr_gk20a_decode_priv_addr(struct gk20a *g, u32 addr,
        u32 ppc_address;
        u32 ppc_broadcast_addr;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
        /* setup defaults */
        ppc_address = 0;
@@ -5507,7 +5507,7 @@ static int gr_gk20a_split_ppc_broadcast_addr(struct gk20a *g, u32 addr,
 {
     u32 ppc_num;
 
-    nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+    gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
     for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++)
            priv_addr_table[(*t)++] = pri_ppc_addr(pri_ppccs_addr_mask(addr),
@@ -5537,12 +5537,12 @@ static int gr_gk20a_create_priv_addr_table(struct gk20a *g,
        t = 0;
        *num_registers = 0;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
        err = gr_gk20a_decode_priv_addr(g, addr, &addr_type,
                                        &gpc_num, &tpc_num, &ppc_num, &be_num,
                                        &broadcast_flags);
-       nvhost_dbg(dbg_gpu_dbg, "addr_type = %d", addr_type);
+       gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
        if (err)
                return err;
 
@@ -5619,7 +5619,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
        u32 potential_offsets = proj_scal_litter_num_gpcs_v() *
                proj_scal_litter_num_tpc_per_gpc_v();
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
        /* implementation is crossed-up if either of these happen */
        if (max_offsets > potential_offsets)
@@ -5630,7 +5630,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
 
        priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
        if (IS_ERR_OR_NULL(priv_registers)) {
-               nvhost_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
+               gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
                err = PTR_ERR(priv_registers);
                goto cleanup;
        }
@@ -5649,7 +5649,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
                num_registers = 1;
 
        if (!g->gr.ctx_vars.local_golden_image) {
-               nvhost_dbg_fn("no context switch header info to work with");
+               gk20a_dbg_fn("no context switch header info to work with");
                err = -EINVAL;
                goto cleanup;
        }
@@ -5662,7 +5662,7 @@ int gr_gk20a_get_ctx_buffer_offsets(struct gk20a *g,
                                                  g->gr.ctx_vars.golden_image_size,
                                                  &priv_offset);
                if (err) {
-                       nvhost_dbg_fn("Could not determine priv_offset for addr:0x%x",
+                       gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x",
                                      addr); /*, grPriRegStr(addr)));*/
                        goto cleanup;
                }
@@ -5754,7 +5754,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 
        init_sm_dsm_reg_info();
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
        for (reg = 0; reg < _num_ovr_perf_regs; reg++) {
                for (gpc = 0; gpc < num_gpc; gpc++)  {
@@ -5768,7 +5768,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
                                /* reset the patch count from previous
                                   runs,if ucode has already processed
                                   it */
-                               tmp = mem_rd32(context +
+                               tmp = gk20a_mem_rd32(context +
                                       ctxsw_prog_main_image_patch_count_o(), 0);
 
                                if (!tmp)
@@ -5780,13 +5780,13 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
                                vaddr_lo = u64_lo32(ch_ctx->patch_ctx.gpu_va);
                                vaddr_hi = u64_hi32(ch_ctx->patch_ctx.gpu_va);
 
-                               mem_wr32(context +
+                               gk20a_mem_wr32(context +
                                         ctxsw_prog_main_image_patch_count_o(),
                                         0, ch_ctx->patch_ctx.data_count);
-                               mem_wr32(context +
+                               gk20a_mem_wr32(context +
                                         ctxsw_prog_main_image_patch_adr_lo_o(),
                                         0, vaddr_lo);
-                               mem_wr32(context +
+                               gk20a_mem_wr32(context +
                                         ctxsw_prog_main_image_patch_adr_hi_o(),
                                         0, vaddr_hi);
 
@@ -5813,7 +5813,7 @@ static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
        u32 gpc_tpc_addr;
        u32 gpc_tpc_stride;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "offset=0x%x", offset);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "offset=0x%x", offset);
 
        gpc = pri_get_gpc_num(offset);
        gpc_tpc_addr = pri_gpccs_addr_mask(offset);
@@ -5845,16 +5845,16 @@ static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
 
 static inline bool check_main_image_header_magic(void *context)
 {
-       u32 magic = mem_rd32(context +
+       u32 magic = gk20a_mem_rd32(context +
                             ctxsw_prog_main_image_magic_value_o(), 0);
-       nvhost_dbg(dbg_gpu_dbg, "main image magic=0x%x", magic);
+       gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic);
        return magic == ctxsw_prog_main_image_magic_value_v_value_v();
 }
 static inline bool check_local_header_magic(void *context)
 {
-       u32 magic = mem_rd32(context +
+       u32 magic = gk20a_mem_rd32(context +
                             ctxsw_prog_local_magic_value_o(), 0);
-       nvhost_dbg(dbg_gpu_dbg, "local magic=0x%x",  magic);
+       gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x",  magic);
        return magic == ctxsw_prog_local_magic_value_v_value_v();
 
 }
@@ -5925,7 +5925,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                else
                        return -EINVAL;
 
-               nvhost_dbg_info(" gpc = %d tpc = %d",
+               gk20a_dbg_info(" gpc = %d tpc = %d",
                                gpc_num, tpc_num);
        } else
                return -EINVAL;
@@ -5937,22 +5937,22 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
        context = context_buffer;
        /* sanity check main header */
        if (!check_main_image_header_magic(context)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "Invalid main header: magic value");
                return -EINVAL;
        }
-       num_gpcs = mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
+       num_gpcs = gk20a_mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
        if (gpc_num >= num_gpcs) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                   "GPC 0x%08x is greater than total count 0x%08x!\n",
                           gpc_num, num_gpcs);
                return -EINVAL;
        }
 
-       data32 = mem_rd32(context + ctxsw_prog_main_extended_buffer_ctl_o(), 0);
+       data32 = gk20a_mem_rd32(context + ctxsw_prog_main_extended_buffer_ctl_o(), 0);
        ext_priv_size   = ctxsw_prog_main_extended_buffer_ctl_size_v(data32);
        if (0 == ext_priv_size) {
-               nvhost_dbg_info(" No extended memory in context buffer");
+               gk20a_dbg_info(" No extended memory in context buffer");
                return -EINVAL;
        }
        ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32);
@@ -5964,7 +5964,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
        /* check local header magic */
        context += ctxsw_prog_ucode_header_size_in_bytes();
        if (!check_local_header_magic(context)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "Invalid local header: magic value\n");
                return -EINVAL;
        }
@@ -5986,7 +5986,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) {
                        sm_dsm_perf_reg_id = i;
 
-                       nvhost_dbg_info("register match: 0x%08x",
+                       gk20a_dbg_info("register match: 0x%08x",
                                        sm_dsm_perf_regs[i]);
 
                        chk_addr = (proj_gpc_base_v() +
@@ -5996,7 +5996,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                                   (sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask));
 
                        if (chk_addr != addr) {
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                   "Oops addr miss-match! : 0x%08x != 0x%08x\n",
                                           addr, chk_addr);
                                return -EINVAL;
@@ -6017,7 +6017,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                            (sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) {
                                sm_dsm_perf_ctrl_reg_id = i;
 
-                               nvhost_dbg_info("register match: 0x%08x",
+                               gk20a_dbg_info("register match: 0x%08x",
                                                sm_dsm_perf_ctrl_regs[i]);
 
                                chk_addr = (proj_gpc_base_v() +
@@ -6028,7 +6028,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
                                            tpc_gpc_mask));
 
                                if (chk_addr != addr) {
-                                       nvhost_err(dev_from_gk20a(g),
+                                       gk20a_err(dev_from_gk20a(g),
                                                   "Oops addr miss-match! : 0x%08x != 0x%08x\n",
                                                   addr, chk_addr);
                                        return -EINVAL;
@@ -6097,7 +6097,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
        /* last sanity check: did we somehow compute an offset outside the
         * extended buffer? */
        if (offset_to_segment > offset_to_segment_end) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
                           offset_to_segment, offset_to_segment_end);
                return -EINVAL;
@@ -6123,7 +6123,7 @@ gr_gk20a_process_context_buffer_priv_segment(struct gk20a *g,
        u32 ppc_num, tpc_num, tpc_addr, gpc_addr, ppc_addr;
        struct aiv_gk20a *reg;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
 
        if (!g->gr.ctx_vars.valid)
                return -EINVAL;
@@ -6240,7 +6240,7 @@ static int gr_gk20a_determine_ppc_configuration(struct gk20a *g,
             (litter_num_pes_per_gpc > 1)))
                return -EINVAL;
 
-       data32 = mem_rd32(context + ctxsw_prog_local_image_ppc_info_o(), 0);
+       data32 = gk20a_mem_rd32(context + ctxsw_prog_local_image_ppc_info_o(), 0);
 
        *num_ppcs = ctxsw_prog_local_image_ppc_info_num_ppcs_v(data32);
        *ppc_mask = ctxsw_prog_local_image_ppc_info_ppc_mask_v(data32);
@@ -6276,7 +6276,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
        void *context;
        u32 offset_to_segment;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
 
        err = gr_gk20a_decode_priv_addr(g, addr, &addr_type,
                                        &gpc_num, &tpc_num, &ppc_num, &be_num,
@@ -6286,20 +6286,20 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
        context = context_buffer;
        if (!check_main_image_header_magic(context)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "Invalid main header: magic value");
                return -EINVAL;
        }
-       num_gpcs = mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
+       num_gpcs = gk20a_mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
 
        /* Parse the FECS local header. */
        context += ctxsw_prog_ucode_header_size_in_bytes();
        if (!check_local_header_magic(context)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "Invalid FECS local header: magic value\n");
                return -EINVAL;
        }
-       data32 = mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
+       data32 = gk20a_mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
        sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
 
        /* If found in Ext buffer, ok.
@@ -6330,7 +6330,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
        }
 
        if ((gpc_num + 1) > num_gpcs)  {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                           "GPC %d not in this context buffer.\n",
                           gpc_num);
                return -EINVAL;
@@ -6340,12 +6340,12 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
        for (i = 0; i < num_gpcs; i++) {
                context += ctxsw_prog_ucode_header_size_in_bytes();
                if (!check_local_header_magic(context)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                   "Invalid GPCCS local header: magic value\n");
                        return -EINVAL;
 
                }
-               data32 = mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
+               data32 = gk20a_mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
                gpc_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
 
                err = gr_gk20a_determine_ppc_configuration(g, context,
@@ -6354,10 +6354,10 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
                if (err)
                        return err;
 
-               num_tpcs = mem_rd32(context + ctxsw_prog_local_image_num_tpcs_o(), 0);
+               num_tpcs = gk20a_mem_rd32(context + ctxsw_prog_local_image_num_tpcs_o(), 0);
 
                if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                           "GPC %d TPC %d not in this context buffer.\n",
                                   gpc_num, tpc_num);
                        return -EINVAL;
@@ -6393,7 +6393,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
                                                  num_tpcs) << 2);
                                }
                        } else {
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                           " Unknown address type.\n");
                                return -EINVAL;
                        }
@@ -6431,7 +6431,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
        u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops};
        int err, pass;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
                   num_ctx_wr_ops, num_ctx_rd_ops);
 
        /* disable channel switching.
@@ -6440,7 +6440,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
         */
        err = gr_gk20a_disable_ctxsw(g);
        if (err) {
-               nvhost_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+               gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
                /* this should probably be ctx-fatal... */
                goto cleanup;
        }
@@ -6451,7 +6451,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
        curr_gr_chid = gk20a_gr_get_chid_from_ctx(g, curr_gr_ctx);
        ch_is_curr_ctx = (curr_gr_chid != -1) && (ch->hw_chid == curr_gr_chid);
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx);
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx);
        if (ch_is_curr_ctx) {
                for (pass = 0; pass < 2; pass++) {
                        ctx_op_nr = 0;
@@ -6476,7 +6476,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                        v |= ctx_ops[i].value_lo;
                                        gk20a_writel(g, offset, v);
 
-                                       nvhost_dbg(dbg_gpu_dbg,
+                                       gk20a_dbg(gpu_dbg_gpu_dbg,
                                                   "direct wr: offset=0x%x v=0x%x",
                                                   offset, v);
 
@@ -6486,7 +6486,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                                v |= ctx_ops[i].value_hi;
                                                gk20a_writel(g, offset + 4, v);
 
-                                               nvhost_dbg(dbg_gpu_dbg,
+                                               gk20a_dbg(gpu_dbg_gpu_dbg,
                                                           "direct wr: offset=0x%x v=0x%x",
                                                           offset + 4, v);
                                        }
@@ -6495,7 +6495,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                        ctx_ops[i].value_lo =
                                                gk20a_readl(g, offset);
 
-                                       nvhost_dbg(dbg_gpu_dbg,
+                                       gk20a_dbg(gpu_dbg_gpu_dbg,
                                                   "direct rd: offset=0x%x v=0x%x",
                                                   offset, ctx_ops[i].value_lo);
 
@@ -6503,7 +6503,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                                ctx_ops[i].value_hi =
                                                        gk20a_readl(g, offset + 4);
 
-                                               nvhost_dbg(dbg_gpu_dbg,
+                                               gk20a_dbg(gpu_dbg_gpu_dbg,
                                                           "direct rd: offset=0x%x v=0x%x",
                                                           offset, ctx_ops[i].value_lo);
                                        } else
@@ -6561,7 +6561,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                                ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD),
                                                ctx_ops[i].quad);
                        if (err) {
-                               nvhost_dbg(dbg_gpu_dbg,
+                               gk20a_dbg(gpu_dbg_gpu_dbg,
                                           "ctx op invalid offset: offset=0x%x",
                                           ctx_ops[i].offset);
                                ctx_ops[i].status =
@@ -6580,22 +6580,22 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
                                if (offsets[j] >= g->gr.ctx_vars.golden_image_size)
                                        continue;
                                if (pass == 0) { /* write pass */
-                                       v = mem_rd32(ctx_ptr + offsets[j], 0);
+                                       v = gk20a_mem_rd32(ctx_ptr + offsets[j], 0);
                                        v &= ~ctx_ops[i].and_n_mask_lo;
                                        v |= ctx_ops[i].value_lo;
-                                       mem_wr32(ctx_ptr + offsets[j], 0, v);
+                                       gk20a_mem_wr32(ctx_ptr + offsets[j], 0, v);
 
-                                       nvhost_dbg(dbg_gpu_dbg,
+                                       gk20a_dbg(gpu_dbg_gpu_dbg,
                                                   "context wr: offset=0x%x v=0x%x",
                                                   offsets[j], v);
 
                                        if (ctx_ops[i].op == REGOP(WRITE_64)) {
-                                               v = mem_rd32(ctx_ptr + offsets[j] + 4, 0);
+                                               v = gk20a_mem_rd32(ctx_ptr + offsets[j] + 4, 0);
                                                v &= ~ctx_ops[i].and_n_mask_hi;
                                                v |= ctx_ops[i].value_hi;
-                                               mem_wr32(ctx_ptr + offsets[j] + 4, 0, v);
+                                               gk20a_mem_wr32(ctx_ptr + offsets[j] + 4, 0, v);
 
-                                               nvhost_dbg(dbg_gpu_dbg,
+                                               gk20a_dbg(gpu_dbg_gpu_dbg,
                                                           "context wr: offset=0x%x v=0x%x",
                                                           offsets[j] + 4, v);
                                        }
@@ -6607,16 +6607,16 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
                                } else { /* read pass */
                                        ctx_ops[i].value_lo =
-                                               mem_rd32(ctx_ptr + offsets[0], 0);
+                                               gk20a_mem_rd32(ctx_ptr + offsets[0], 0);
 
-                                       nvhost_dbg(dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
+                                       gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
                                                   offsets[0], ctx_ops[i].value_lo);
 
                                        if (ctx_ops[i].op == REGOP(READ_64)) {
                                                ctx_ops[i].value_hi =
-                                                       mem_rd32(ctx_ptr + offsets[0] + 4, 0);
+                                                       gk20a_mem_rd32(ctx_ptr + offsets[0] + 4, 0);
 
-                                               nvhost_dbg(dbg_gpu_dbg,
+                                               gk20a_dbg(gpu_dbg_gpu_dbg,
                                                           "context rd: offset=0x%x v=0x%x",
                                                           offsets[0] + 4, ctx_ops[i].value_hi);
                                        } else
@@ -6645,7 +6645,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
        if (restart_gr_ctxsw) {
                int tmp_err = gr_gk20a_enable_ctxsw(g);
                if (tmp_err) {
-                       nvhost_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+                       gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
                        err = tmp_err;
                }
        }
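
The gr_gk20a.c hunks above are a mechanical substitution: nvhost_dbg_fn/nvhost_dbg_info/nvhost_dbg/nvhost_err become gk20a_dbg_fn/gk20a_dbg_info/gk20a_dbg/gk20a_err, the dbg_* mask names gain a gpu_ prefix, and mem_rd32/mem_wr32 become gk20a_mem_rd32/gk20a_mem_wr32. A minimal sketch of the resulting call-site style follows; gr_example_intr_note() is a hypothetical function written only to illustrate the pattern and is not part of the driver, while every helper and register accessor it uses appears in the hunks above.

/*
 * Illustration only: hypothetical helper showing the converted call-site
 * style used throughout gr_gk20a.c after this patch.
 */
static int gr_example_intr_note(struct gk20a *g)
{
        u32 gr_intr = gk20a_readl(g, gr_intr_r());

        gk20a_dbg_fn("");                                /* was nvhost_dbg_fn() */
        gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
                  "pgraph intr %08x", gr_intr);          /* was nvhost_dbg()    */

        if (gr_intr & gr_intr_firmware_method_pending_f())
                gk20a_err(dev_from_gk20a(g),
                          "firmware method pending");    /* was nvhost_err()    */

        /* context-image accesses follow the same rename, e.g.
         * gk20a_mem_rd32(ctx_ptr + offset, 0) instead of mem_rd32(...) */

        gk20a_dbg_fn("done");
        return 0;
}
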
index 006ebf07f61bafa5bbbd6f05ea6e6b040b4489d0..dea740c2da1a0c4829bdb332ed0d30531f3c8f2b 100644 (file)
@@ -21,11 +21,11 @@ int gpu_init_hal(struct gk20a *g)
        u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl;
        switch (ver) {
        case GK20A_GPUID_GK20A:
-               nvhost_dbg_info("gk20a detected");
+               gk20a_dbg_info("gk20a detected");
                gk20a_init_hal(&g->ops);
                break;
        default:
-               nvhost_err(&g->dev->dev, "no support for %x", ver);
+               gk20a_err(&g->dev->dev, "no support for %x", ver);
                return -ENODEV;
        }
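
The hal.c hunk also shows that the replacement error helper takes a plain struct device * (here passed directly as &g->dev->dev, elsewhere obtained via dev_from_gk20a(g)). The actual definitions live in the gk20a headers and are not reproduced in this excerpt, so the declarations below are only a hedged sketch inferred from the call sites; parameter names, printf attributes and the exact macro bodies are assumptions.

/* Hedged sketch, not the real header: shapes inferred from the call sites. */
void gk20a_err(struct device *dev, const char *fmt, ...);
void gk20a_warn(struct device *dev, const char *fmt, ...);
void gk20a_dbg(u32 log_mask, const char *fmt, ...);

#define gk20a_dbg_fn(fmt, args...)   gk20a_dbg(gpu_dbg_fn, fmt, ##args)
#define gk20a_dbg_info(fmt, args...) gk20a_dbg(gpu_dbg_info, fmt, ##args)
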
 
index 2e0061d3214151b0ddc276e0324e6a7763e0e158..6c18cd33ad337e03f65829357e9af605bd391f59 100644 (file)
@@ -119,7 +119,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
 
        u32 compbit_backing_size;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (max_comptag_lines == 0) {
                gr->compbit_store.size = 0;
@@ -148,9 +148,9 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
        if (max_comptag_lines > hw_max_comptag_lines)
                max_comptag_lines = hw_max_comptag_lines;
 
-       nvhost_dbg_info("compbit backing store size : %d",
+       gk20a_dbg_info("compbit backing store size : %d",
                compbit_backing_size);
-       nvhost_dbg_info("max comptag lines : %d",
+       gk20a_dbg_info("max comptag lines : %d",
                max_comptag_lines);
 
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
@@ -158,7 +158,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
        gr->compbit_store.pages = dma_alloc_attrs(d, gr->compbit_store.size,
                                        &iova, GFP_KERNEL, &attrs);
        if (!gr->compbit_store.pages) {
-               nvhost_err(dev_from_gk20a(g), "failed to allocate"
+               gk20a_err(dev_from_gk20a(g), "failed to allocate"
                           "backing store for compbit : size %d",
                           compbit_backing_size);
                return -ENOMEM;
@@ -184,7 +184,7 @@ static int gk20a_ltc_clear_comptags(struct gk20a *g, u32 min, u32 max)
                ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(
                        gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (gr->compbit_store.size == 0)
                return 0;
@@ -220,7 +220,7 @@ static int gk20a_ltc_clear_comptags(struct gk20a *g, u32 min, u32 max)
                                        !tegra_platform_is_silicon());
 
                        if (!time_before(jiffies, end_jiffies)) {
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                           "comp tag clear timeout\n");
                                return -EBUSY;
                        }
@@ -339,7 +339,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
        gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
                compbit_base_post_divide);
 
-       nvhost_dbg(dbg_info | dbg_map | dbg_pte,
+       gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
                   "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
                   (u32)(compbit_store_base_iova >> 32),
                   (u32)(compbit_store_base_iova & 0xffffffff),
@@ -355,7 +355,7 @@ static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
        u32 data;
        s32 retry = 100;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* Make sure all previous writes are committed to the L2. There's no
           guarantee that writes are to DRAM. This will be a sysmembar internal
@@ -367,7 +367,7 @@ static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
 
                if (ltc_ltss_g_elpg_flush_v(data) ==
                    ltc_ltss_g_elpg_flush_pending_v()) {
-                       nvhost_dbg_info("g_elpg_flush 0x%x", data);
+                       gk20a_dbg_info("g_elpg_flush 0x%x", data);
                        retry--;
                        usleep_range(20, 40);
                } else
@@ -375,7 +375,7 @@ static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                            "g_elpg_flush too many retries");
 
 }
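
Several of the cache-controller and memory-management hunks route a single message through more than one debug category at once, for example gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte, so the line is printed when any of those categories is enabled. The fragment below is a usage sketch only: the helper and mask names come from this patch, while example_map_trace() and its argument values are invented for illustration.

/* Usage sketch: combining gpu_dbg_* categories on one gk20a_dbg() call. */
static void example_map_trace(u64 va, u64 pa)
{
        /* emitted when info, map or pte debugging is enabled */
        gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
                  "map va 0x%llx -> pa 0x%llx", va, pa);

        /* narrower trace, only visible with pte debugging enabled */
        gk20a_dbg(gpu_dbg_pte, "pte update for va 0x%llx", va);
}
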
index 7d1977298463ee561b547e64a6c37e3557f01449..1c61efc9787feb8a86b6875c8a8a9a6b8abb33cc 100644 (file)
@@ -258,7 +258,7 @@ static int gk20a_alloc_comptags(struct device *dev,
 
 static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        if (g->ops.fb.reset)
                g->ops.fb.reset(g);
 
@@ -275,7 +275,7 @@ void gk20a_remove_mm_support(struct mm_gk20a *mm)
        struct vm_gk20a *vm = &mm->bar1.vm;
        struct inst_desc *inst_block = &mm->bar1.inst_block;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (inst_block->cpuva)
                dma_free_coherent(d, inst_block->size,
@@ -291,10 +291,10 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
        struct mm_gk20a *mm = &g->mm;
        int i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (mm->sw_ready) {
-               nvhost_dbg_fn("skip init");
+               gk20a_dbg_fn("skip init");
                return 0;
        }
 
@@ -329,14 +329,14 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
        /*TBD: make channel vm size configurable */
        mm->channel.size = 1ULL << NV_GMMU_VA_RANGE;
 
-       nvhost_dbg_info("channel vm size: %dMB", (int)(mm->channel.size >> 20));
+       gk20a_dbg_info("channel vm size: %dMB", (int)(mm->channel.size >> 20));
 
-       nvhost_dbg_info("small page-size (%dKB) pte array: %dKB",
+       gk20a_dbg_info("small page-size (%dKB) pte array: %dKB",
                        gmmu_page_sizes[gmmu_page_size_small] >> 10,
                        (mm->page_table_sizing[gmmu_page_size_small].num_ptes *
                         gmmu_pte__size_v()) >> 10);
 
-       nvhost_dbg_info("big page-size (%dKB) pte array: %dKB",
+       gk20a_dbg_info("big page-size (%dKB) pte array: %dKB",
                        gmmu_page_sizes[gmmu_page_size_big] >> 10,
                        (mm->page_table_sizing[gmmu_page_size_big].num_ptes *
                         gmmu_pte__size_v()) >> 10);
@@ -347,7 +347,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
        mm->remove_support = gk20a_remove_mm_support;
        mm->sw_ready = true;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -358,7 +358,7 @@ static int gk20a_init_mm_setup_hw(struct gk20a *g)
        struct inst_desc *inst_block = &mm->bar1.inst_block;
        phys_addr_t inst_pa = inst_block->cpu_pa;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* set large page size in fb
         * note this is very early on, can we defer it ? */
@@ -376,7 +376,7 @@ static int gk20a_init_mm_setup_hw(struct gk20a *g)
        }
 
        inst_pa = (u32)(inst_pa >> bar1_instance_block_shift_gk20a());
-       nvhost_dbg_info("bar1 inst block ptr: 0x%08x",  (u32)inst_pa);
+       gk20a_dbg_info("bar1 inst block ptr: 0x%08x",  (u32)inst_pa);
 
        /* this is very early in init... can we defer this? */
        {
@@ -386,7 +386,7 @@ static int gk20a_init_mm_setup_hw(struct gk20a *g)
                             bus_bar1_block_ptr_f(inst_pa));
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -420,21 +420,21 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
        int err;
        struct page *pages;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        pages = alloc_pages(GFP_KERNEL, order);
        if (!pages) {
-               nvhost_dbg(dbg_pte, "alloc_pages failed\n");
+               gk20a_dbg(gpu_dbg_pte, "alloc_pages failed\n");
                goto err_out;
        }
        *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
-               nvhost_dbg(dbg_pte, "cannot allocate sg table");
+               gk20a_dbg(gpu_dbg_pte, "cannot allocate sg table");
                goto err_alloced;
        }
        err = sg_alloc_table(*sgt, 1, GFP_KERNEL);
        if (err) {
-               nvhost_dbg(dbg_pte, "sg_alloc_table failed\n");
+               gk20a_dbg(gpu_dbg_pte, "sg_alloc_table failed\n");
                goto err_sg_table;
        }
        sg_set_page((*sgt)->sgl, pages, len, 0);
@@ -457,7 +457,7 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
                            struct sg_table *sgt, u32 order,
                            size_t size)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        BUG_ON(sgt == NULL);
        free_pages((unsigned long)handle, order);
        sg_free_table(sgt);
@@ -490,20 +490,20 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
        struct page **pages;
        int err = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        *size = len;
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
        if (!pages) {
-               nvhost_err(d, "memory allocation failed\n");
+               gk20a_err(d, "memory allocation failed\n");
                goto err_out;
        }
 
        err = gk20a_get_sgtable_from_pages(d, sgt, pages,
                                iova, len);
        if (err) {
-               nvhost_err(d, "sgt allocation failed\n");
+               gk20a_err(d, "sgt allocation failed\n");
                goto err_free;
        }
 
@@ -528,7 +528,7 @@ static void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
        DEFINE_DMA_ATTRS(attrs);
        struct page **pages = (struct page **)handle;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        BUG_ON(sgt == NULL);
 
        iova = sg_dma_address(sgt->sgl);
@@ -546,7 +546,7 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 {
        int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages = (struct page **)handle;
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        *kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!(*kva))
@@ -557,7 +557,7 @@ static int map_gmmu_pages(void *handle, struct sg_table *sgt,
 
 static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        vunmap(va);
 }
 #endif
@@ -577,7 +577,7 @@ static int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
        struct sg_table *sgt;
        size_t size;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* allocate enough pages for the table */
        pte_order = vm->mm->page_table_sizing[gmmu_pgsz_idx].order;
@@ -586,7 +586,7 @@ static int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
        if (err)
                return err;
 
-       nvhost_dbg(dbg_pte, "pte = 0x%p, addr=%08llx, size %d",
+       gk20a_dbg(gpu_dbg_pte, "pte = 0x%p, addr=%08llx, size %d",
                        pte, gk20a_mm_iova_addr(sgt->sgl), pte_order);
 
        pte->ref = handle;
@@ -603,9 +603,9 @@ static inline void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 {
        *pde_lo = (u32)(addr_lo >> vm->mm->pde_stride_shift);
        *pde_hi = (u32)(addr_hi >> vm->mm->pde_stride_shift);
-       nvhost_dbg(dbg_pte, "addr_lo=0x%llx addr_hi=0x%llx pde_ss=%d",
+       gk20a_dbg(gpu_dbg_pte, "addr_lo=0x%llx addr_hi=0x%llx pde_ss=%d",
                   addr_lo, addr_hi, vm->mm->pde_stride_shift);
-       nvhost_dbg(dbg_pte, "pde_lo=%d pde_hi=%d",
+       gk20a_dbg(gpu_dbg_pte, "pde_lo=%d pde_hi=%d",
                   *pde_lo, *pde_hi);
 }
 
@@ -624,7 +624,7 @@ static inline u32 pte_index_from_vaddr(struct vm_gk20a *vm,
         * doesn't leak over into the high 32b */
        ret = (u32)(addr >> gmmu_page_shifts[pgsz_idx]);
 
-       nvhost_dbg(dbg_pte, "addr=0x%llx pte_i=0x%x", addr, ret);
+       gk20a_dbg(gpu_dbg_pte, "addr=0x%llx pte_i=0x%x", addr, ret);
        return ret;
 }
 
@@ -638,7 +638,7 @@ static inline void pte_space_page_offset_from_index(u32 i, u32 *pte_page,
        /* this offset is a pte offset, not a byte offset */
        *pte_offset = i & ((1<<9)-1);
 
-       nvhost_dbg(dbg_pte, "i=0x%x pte_page=0x%x pte_offset=0x%x",
+       gk20a_dbg(gpu_dbg_pte, "i=0x%x pte_page=0x%x pte_offset=0x%x",
                   i, *pte_page, *pte_offset);
 }
 
@@ -655,13 +655,13 @@ static int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
        struct page_table_gk20a *pte =
                vm->pdes.ptes[gmmu_pgsz_idx] + i;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* if it's already in place it's valid */
        if (pte->ref)
                return 0;
 
-       nvhost_dbg(dbg_pte, "alloc %dKB ptes for pde %d",
+       gk20a_dbg(gpu_dbg_pte, "alloc %dKB ptes for pde %d",
                   gmmu_page_sizes[gmmu_pgsz_idx]/1024, i);
 
        err = zalloc_gmmu_page_table_gk20a(vm, gmmu_pgsz_idx, pte);
@@ -761,7 +761,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
        mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
        if (!mapped_buffer) {
                mutex_unlock(&vm->update_gmmu_lock);
-               nvhost_err(d, "invalid addr to unmap 0x%llx", offset);
+               gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
                return;
        }
 
@@ -776,7 +776,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
                        udelay(50);
                }
                if (!retries)
-                       nvhost_err(d, "sync-unmap failed on 0x%llx",
+                       gk20a_err(d, "sync-unmap failed on 0x%llx",
                                                                offset);
                mutex_lock(&vm->update_gmmu_lock);
        }
@@ -817,7 +817,7 @@ static u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
        /* TBD: DIV_ROUND_UP -> undefined reference to __aeabi_uldivmod */
        size = (size + ((u64)gmmu_page_size - 1)) & ~((u64)gmmu_page_size - 1);
 
-       nvhost_dbg_info("size=0x%llx @ pgsz=%dKB", size,
+       gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
                        gmmu_page_sizes[gmmu_pgsz_idx]>>10);
 
        /* The vma allocator represents page accounting. */
@@ -826,13 +826,13 @@ static u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
        err = vma->alloc(vma, &start_page_nr, num_pages);
 
        if (err) {
-               nvhost_err(dev_from_vm(vm),
+               gk20a_err(dev_from_vm(vm),
                           "%s oom: sz=0x%llx", vma->name, size);
                return 0;
        }
 
        offset = (u64)start_page_nr << gmmu_page_shifts[gmmu_pgsz_idx];
-       nvhost_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
+       gk20a_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
 
        return offset;
 }
@@ -847,7 +847,7 @@ static void gk20a_vm_free_va(struct vm_gk20a *vm,
        u32 start_page_nr, num_pages;
        int err;
 
-       nvhost_dbg_info("%s free addr=0x%llx, size=0x%llx",
+       gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
                        vma->name, offset, size);
 
        start_page_nr = (u32)(offset >> page_shift);
@@ -855,7 +855,7 @@ static void gk20a_vm_free_va(struct vm_gk20a *vm,
 
        err = vma->free(vma, start_page_nr, num_pages);
        if (err) {
-               nvhost_err(dev_from_vm(vm),
+               gk20a_err(dev_from_vm(vm),
                           "not found: offset=0x%llx, sz=0x%llx",
                           offset, size);
        }
@@ -978,7 +978,7 @@ static int setup_buffer_kind_and_compression(struct device *d,
                bfr->kind_v = gmmu_pte_kind_pitch_v();
 
        if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
-               nvhost_err(d, "kind 0x%x not supported", bfr->kind_v);
+               gk20a_err(d, "kind 0x%x not supported", bfr->kind_v);
                return -EINVAL;
        }
 
@@ -989,7 +989,7 @@ static int setup_buffer_kind_and_compression(struct device *d,
                bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
                if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) {
                        /* shouldn't happen, but it is worth cross-checking */
-                       nvhost_err(d, "comptag kind 0x%x can't be"
+                       gk20a_err(d, "comptag kind 0x%x can't be"
                                   " downgraded to uncompressed kind",
                                   bfr->kind_v);
                        return -EINVAL;
@@ -999,7 +999,7 @@ static int setup_buffer_kind_and_compression(struct device *d,
        if (unlikely(kind_compressible &&
                     (gmmu_page_sizes[pgsz_idx] != 128*1024))) {
                /*
-               nvhost_warn(d, "comptags specified"
+               gk20a_warn(d, "comptags specified"
                " but pagesize being used doesn't support it");*/
                /* it is safe to fall back to uncompressed as
                   functionality is not harmed */
@@ -1024,7 +1024,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
        struct mapped_buffer_node *buffer;
 
        if (map_offset & gmmu_page_offset_masks[bfr->pgsz_idx]) {
-               nvhost_err(dev, "map offset must be buffer page size aligned 0x%llx",
+               gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx",
                           map_offset);
                return -EINVAL;
        }
@@ -1032,7 +1032,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
        /* find the space reservation */
        va_node = addr_to_reservation(vm, map_offset);
        if (!va_node) {
-               nvhost_warn(dev, "fixed offset mapping without space allocation");
+               gk20a_warn(dev, "fixed offset mapping without space allocation");
                return -EINVAL;
        }
 
@@ -1046,7 +1046,7 @@ static int validate_fixed_buffer(struct vm_gk20a *vm,
                s64 end = min(buffer->addr +
                        buffer->size, map_offset + bfr->size);
                if (end - begin > 0) {
-                       nvhost_warn(dev, "overlapping buffer map requested");
+                       gk20a_warn(dev, "overlapping buffer map requested");
                        return -EINVAL;
                }
        }
@@ -1073,7 +1073,7 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                map_offset = gk20a_vm_alloc_va(vm, size,
                                          pgsz_idx);
                if (!map_offset) {
-                       nvhost_err(d, "failed to allocate va space");
+                       gk20a_err(d, "failed to allocate va space");
                        err = -ENOMEM;
                        goto fail;
                }
@@ -1089,7 +1089,7 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                err = validate_gmmu_page_table_gk20a_locked(vm, i,
                                                            pgsz_idx);
                if (err) {
-                       nvhost_err(d, "failed to validate page table %d: %d",
+                       gk20a_err(d, "failed to validate page table %d: %d",
                                                           i, err);
                        goto fail;
                }
@@ -1104,13 +1104,13 @@ static u64 __locked_gmmu_map(struct vm_gk20a *vm,
                                      NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                                      rw_flag);
        if (err) {
-               nvhost_err(d, "failed to update ptes on map");
+               gk20a_err(d, "failed to update ptes on map");
                goto fail;
        }
 
        return map_offset;
  fail:
-       nvhost_err(d, "%s: failed with err=%d\n", __func__, err);
+       gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
        return 0;
 }
 
@@ -1196,7 +1196,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
        }
        kref_get(&mapped_buffer->ref);
 
-       nvhost_dbg(dbg_map,
+       gk20a_dbg(gpu_dbg_map,
                   "reusing as=%d pgsz=%d flags=0x%x ctags=%d "
                   "start=%d gv=0x%x,%08x -> 0x%x,%08x -> 0x%x,%08x "
                   "own_mem_ref=%d user_mapped=%d",
@@ -1258,7 +1258,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
                 * track the difference between those two cases we have
                 * to fail the mapping when we run out of SMMU space.
                 */
-               nvhost_warn(d, "oom allocating tracking buffer");
+               gk20a_warn(d, "oom allocating tracking buffer");
                goto clean_up;
        }
 
@@ -1281,7 +1281,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        /* validate/adjust bfr attributes */
        if (unlikely(bfr.pgsz_idx == -1)) {
-               nvhost_err(d, "unsupported page size detected");
+               gk20a_err(d, "unsupported page size detected");
                goto clean_up;
        }
 
@@ -1309,7 +1309,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        err = setup_buffer_kind_and_compression(d, flags, &bfr, bfr.pgsz_idx);
        if (unlikely(err)) {
-               nvhost_err(d, "failure setting up kind and compression");
+               gk20a_err(d, "failure setting up kind and compression");
                goto clean_up;
        }
 
@@ -1352,7 +1352,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        if (!map_offset)
                goto clean_up;
 
-       nvhost_dbg(dbg_map,
+       gk20a_dbg(gpu_dbg_map,
           "as=%d pgsz=%d "
           "kind=0x%x kind_uc=0x%x flags=0x%x "
           "ctags=%d start=%d gv=0x%x,%08x -> 0x%x,%08x -> 0x%x,%08x",
@@ -1369,12 +1369,12 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        {
                int i;
                struct scatterlist *sg = NULL;
-               nvhost_dbg(dbg_pte, "for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i)");
+               gk20a_dbg(gpu_dbg_pte, "for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i)");
                for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i ) {
                        u64 da = sg_dma_address(sg);
                        u64 pa = sg_phys(sg);
                        u64 len = sg->length;
-                       nvhost_dbg(dbg_pte, "i=%d pa=0x%x,%08x da=0x%x,%08x len=0x%x,%08x",
+                       gk20a_dbg(gpu_dbg_pte, "i=%d pa=0x%x,%08x da=0x%x,%08x len=0x%x,%08x",
                                   i, hi32(pa), lo32(pa), hi32(da), lo32(da),
                                   hi32(len), lo32(len));
                }
@@ -1385,7 +1385,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        /* TBD: check for multiple mapping of same buffer */
        mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
        if (!mapped_buffer) {
-               nvhost_warn(d, "oom allocating tracking buffer");
+               gk20a_warn(d, "oom allocating tracking buffer");
                goto clean_up;
        }
        mapped_buffer->dmabuf      = dmabuf;
@@ -1407,14 +1407,14 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        err = insert_mapped_buffer(&vm->mapped_buffers, mapped_buffer);
        if (err) {
-               nvhost_err(d, "failed to insert into mapped buffer tree");
+               gk20a_err(d, "failed to insert into mapped buffer tree");
                goto clean_up;
        }
        inserted = true;
        if (user_mapped)
                vm->num_user_mapped_buffers++;
 
-       nvhost_dbg_info("allocated va @ 0x%llx", map_offset);
+       gk20a_dbg_info("allocated va @ 0x%llx", map_offset);
 
        if (!va_allocated) {
                struct vm_reserved_va_node *va_node;
@@ -1447,7 +1447,7 @@ clean_up:
                gk20a_mm_unpin(d, dmabuf, bfr.sgt);
 
        mutex_unlock(&vm->update_gmmu_lock);
-       nvhost_dbg_info("err=%d\n", err);
+       gk20a_dbg_info("err=%d\n", err);
        return 0;
 }
 
@@ -1469,7 +1469,7 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
                                flags, rw_flag);
        mutex_unlock(&vm->update_gmmu_lock);
        if (!vaddr) {
-               nvhost_err(dev_from_vm(vm), "failed to allocate va space");
+               gk20a_err(dev_from_vm(vm), "failed to allocate va space");
                return 0;
        }
 
@@ -1609,7 +1609,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
        pde_range_from_vaddr_range(vm, first_vaddr, last_vaddr,
                                   &pde_lo, &pde_hi);
 
-       nvhost_dbg(dbg_pte, "size_idx=%d, pde_lo=%d, pde_hi=%d",
+       gk20a_dbg(gpu_dbg_pte, "size_idx=%d, pde_lo=%d, pde_hi=%d",
                   pgsz_idx, pde_lo, pde_hi);
 
        /* If ctag_offset !=0 add 1 else add 0.  The idea is to avoid a branch
@@ -1647,13 +1647,13 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                err = map_gmmu_pages(pte->ref, pte->sgt, &pte_kv_cur,
                                     pte->size);
                if (err) {
-                       nvhost_err(dev_from_vm(vm),
+                       gk20a_err(dev_from_vm(vm),
                                   "couldn't map ptes for update as=%d pte_ref_cnt=%d",
                                   vm_aspace_id(vm), pte->ref_cnt);
                        goto clean_up;
                }
 
-               nvhost_dbg(dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
+               gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
                for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
 
                        if (likely(sgt)) {
@@ -1685,7 +1685,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
                                pte->ref_cnt++;
 
-                               nvhost_dbg(dbg_pte,
+                               gk20a_dbg(gpu_dbg_pte,
                                           "pte_cur=%d addr=0x%x,%08x kind=%d"
                                           " ctag=%d vol=%d refs=%d"
                                           " [0x%08x,0x%08x]",
@@ -1704,13 +1704,13 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
                        } else {
                                pte->ref_cnt--;
-                               nvhost_dbg(dbg_pte,
+                               gk20a_dbg(gpu_dbg_pte,
                                           "pte_cur=%d ref=%d [0x0,0x0]",
                                           pte_cur, pte->ref_cnt);
                        }
 
-                       mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
-                       mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
+                       gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
+                       gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
                }
 
                unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
@@ -1736,7 +1736,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 
        smp_mb();
        vm->tlb_dirty = true;
-       nvhost_dbg_fn("set tlb dirty");
+       gk20a_dbg_fn("set tlb dirty");
 
        return 0;
 
@@ -1812,8 +1812,8 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 
        pde = pde_from_index(vm, i);
 
-       mem_wr32(pde, 0, pde_v[0]);
-       mem_wr32(pde, 1, pde_v[1]);
+       gk20a_mem_wr32(pde, 0, pde_v[0]);
+       gk20a_mem_wr32(pde, 1, pde_v[1]);
 
        smp_mb();
 
@@ -1823,7 +1823,7 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i)
 
        gk20a_mm_l2_invalidate(vm->mm->g);
 
-       nvhost_dbg(dbg_pte, "pde:%d = 0x%x,0x%08x\n", i, pde_v[1], pde_v[0]);
+       gk20a_dbg(gpu_dbg_pte, "pde:%d = 0x%x,0x%08x\n", i, pde_v[1], pde_v[0]);
 
        vm->tlb_dirty  = true;
 }
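
Besides the logging renames, the pte/pde update hunks above also switch the bare mem_wr32() word accessor to gk20a_mem_wr32(). Judging from the call sites (a kernel-mapped base pointer, a 32-bit word index, a 32-bit value), the assumed semantics are a plain word store at a word offset; the sketch below is for orientation only and is not the driver's actual helper, which may add tracing or barriers.

	/* illustrative sketch of the assumed gk20a_mem_wr32() semantics */
	static inline void gk20a_mem_wr32(void *ptr, u32 word_offset, u32 data)
	{
		/* write one 32-bit word at word index 'word_offset' into the mapping */
		((u32 *)ptr)[word_offset] = data;
	}
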
@@ -1873,7 +1873,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
                        gk20a_mem_flag_none);
 
                if (!page_vaddr) {
-                       nvhost_err(dev_from_vm(vm), "failed to remap clean buffers!");
+                       gk20a_err(dev_from_vm(vm), "failed to remap clean buffers!");
                        goto err_unmap;
                }
                vaddr += pgsz;
@@ -1919,7 +1919,7 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
                                mapped_buffer->va_allocated,
                                gk20a_mem_flag_none);
 
-       nvhost_dbg(dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
+       gk20a_dbg(gpu_dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
                   vm_aspace_id(vm), gmmu_page_sizes[mapped_buffer->pgsz_idx],
                   hi32(mapped_buffer->addr), lo32(mapped_buffer->addr),
                   mapped_buffer->own_mem_ref);
@@ -1953,7 +1953,7 @@ void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
        mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
        if (!mapped_buffer) {
                mutex_unlock(&vm->update_gmmu_lock);
-               nvhost_err(d, "invalid addr to unmap 0x%llx", offset);
+               gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
                return;
        }
        kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
@@ -1967,7 +1967,7 @@ static void gk20a_vm_remove_support(struct vm_gk20a *vm)
        struct vm_reserved_va_node *va_node, *va_node_tmp;
        struct rb_node *node;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
        mutex_lock(&vm->update_gmmu_lock);
 
        /* TBD: add a flag here for the unmap code to recognize teardown
@@ -2038,7 +2038,7 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
        char name[32];
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
@@ -2074,7 +2074,7 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
              vm->pdes.ptes[gmmu_page_size_big]))
                return -ENOMEM;
 
-       nvhost_dbg_info("init space for va_limit=0x%llx num_pdes=%d",
+       gk20a_dbg_info("init space for va_limit=0x%llx num_pdes=%d",
                   vm->va_limit, vm->pdes.num_pdes);
 
        /* allocate the page table directory */
@@ -2090,7 +2090,7 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share)
                                        vm->pdes.size);
                return -ENOMEM;
        }
-       nvhost_dbg(dbg_pte, "pdes.kv = 0x%p, pdes.phys = 0x%llx",
+       gk20a_dbg(gpu_dbg_pte, "pdes.kv = 0x%p, pdes.phys = 0x%llx",
                        vm->pdes.kv,
                        gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
        /* we could release vm->pdes.kv but it's only one page... */
@@ -2138,7 +2138,7 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 {
        struct vm_gk20a *vm = as_share->vm;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        vm->as_share = NULL;
 
@@ -2162,7 +2162,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
        struct vm_reserved_va_node *va_node;
        u64 vaddr_start = 0;
 
-       nvhost_dbg_fn("flags=0x%x pgsz=0x%x nr_pages=0x%x o/a=0x%llx",
+       gk20a_dbg_fn("flags=0x%x pgsz=0x%x nr_pages=0x%x o/a=0x%llx",
                        args->flags, args->page_size, args->pages,
                        args->o_a.offset);
 
@@ -2247,7 +2247,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
        struct vm_gk20a *vm = as_share->vm;
        struct vm_reserved_va_node *va_node;
 
-       nvhost_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
+       gk20a_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
                        args->pages, args->offset);
 
        /* determine pagesz idx */
@@ -2308,7 +2308,7 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
        int err = 0;
        struct vm_gk20a *vm = as_share->vm;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        ch->vm = vm;
        err = channel_gk20a_commit_va(ch);
@@ -2372,7 +2372,7 @@ int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
        struct dma_buf *dmabuf;
        u64 ret_va;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* get ref to the mem handle (released on unmap_locked) */
        dmabuf = dma_buf_get(dmabuf_fd);
@@ -2404,7 +2404,7 @@ int gk20a_vm_unmap_buffer(struct gk20a_as_share *as_share, u64 offset)
 {
        struct vm_gk20a *vm = as_share->vm;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_vm_unmap_user(vm, offset);
        return 0;
@@ -2428,7 +2428,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 
        mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
 
-       nvhost_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
+       gk20a_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
 
        vm->va_start = mm->pde_stride * 1;
        vm->va_limit = mm->bar1.aperture_size;
@@ -2455,7 +2455,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
              vm->pdes.ptes[gmmu_page_size_big]))
                return -ENOMEM;
 
-       nvhost_dbg_info("init space for bar1 va_limit=0x%llx num_pdes=%d",
+       gk20a_dbg_info("init space for bar1 va_limit=0x%llx num_pdes=%d",
                   vm->va_limit, vm->pdes.num_pdes);
 
 
@@ -2472,7 +2472,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
                                        vm->pdes.size);
                goto clean_up;
        }
-       nvhost_dbg(dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
+       gk20a_dbg(gpu_dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
                        vm->pdes.kv, gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
        /* we could release vm->pdes.kv but it's only one page... */
 
@@ -2480,7 +2480,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        pde_addr_lo = u64_lo32(pde_addr >> 12);
        pde_addr_hi = u64_hi32(pde_addr);
 
-       nvhost_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
+       gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
                (u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl),
                pde_addr_lo, pde_addr_hi);
 
@@ -2489,7 +2489,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
                                &iova, GFP_KERNEL);
        if (!inst_block->cpuva) {
-               nvhost_err(d, "%s: memory allocation failed\n", __func__);
+               gk20a_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -2497,7 +2497,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        inst_block->iova = iova;
        inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
        if (!inst_block->cpu_pa) {
-               nvhost_err(d, "%s: failed to get phys address\n", __func__);
+               gk20a_err(d, "%s: failed to get phys address\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -2505,26 +2505,26 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        inst_pa = inst_block->cpu_pa;
        inst_ptr = inst_block->cpuva;
 
-       nvhost_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p",
+       gk20a_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p",
                (u64)inst_pa, inst_ptr);
 
        memset(inst_ptr, 0, ram_fc_size_val_v());
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
                ram_in_page_dir_base_target_vid_mem_f() |
                ram_in_page_dir_base_vol_true_f() |
                ram_in_page_dir_base_lo_f(pde_addr_lo));
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
                ram_in_page_dir_base_hi_f(pde_addr_hi));
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
                 u64_lo32(vm->va_limit) | 0xFFF);
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
                ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
 
-       nvhost_dbg_info("bar1 inst block ptr: %08llx",  (u64)inst_pa);
+       gk20a_dbg_info("bar1 inst block ptr: %08llx",  (u64)inst_pa);
        gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_bar1",
                              1,/*start*/
                              (vm->va_limit >> 12) - 1 /* length*/,
@@ -2572,7 +2572,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
 
        mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
 
-       nvhost_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
+       gk20a_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
 
        vm->va_start  = GK20A_PMU_VA_START;
        vm->va_limit  = vm->va_start + mm->pmu.aperture_size;
@@ -2599,7 +2599,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
              vm->pdes.ptes[gmmu_page_size_big]))
                return -ENOMEM;
 
-       nvhost_dbg_info("init space for pmu va_limit=0x%llx num_pdes=%d",
+       gk20a_dbg_info("init space for pmu va_limit=0x%llx num_pdes=%d",
                   vm->va_limit, vm->pdes.num_pdes);
 
        /* allocate the page table directory */
@@ -2615,7 +2615,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
                                        vm->pdes.size);
                goto clean_up;
        }
-       nvhost_dbg_info("pmu pdes phys @ 0x%llx",
+       gk20a_dbg_info("pmu pdes phys @ 0x%llx",
                        (u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
        /* we could release vm->pdes.kv but it's only one page... */
 
@@ -2623,7 +2623,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        pde_addr_lo = u64_lo32(pde_addr >> 12);
        pde_addr_hi = u64_hi32(pde_addr);
 
-       nvhost_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
+       gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
                        (u64)pde_addr, pde_addr_lo, pde_addr_hi);
 
        /* allocate instance mem for pmu */
@@ -2631,7 +2631,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
                                &iova, GFP_KERNEL);
        if (!inst_block->cpuva) {
-               nvhost_err(d, "%s: memory allocation failed\n", __func__);
+               gk20a_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -2639,7 +2639,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        inst_block->iova = iova;
        inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
        if (!inst_block->cpu_pa) {
-               nvhost_err(d, "%s: failed to get phys address\n", __func__);
+               gk20a_err(d, "%s: failed to get phys address\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
@@ -2647,22 +2647,22 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        inst_pa = inst_block->cpu_pa;
        inst_ptr = inst_block->cpuva;
 
-       nvhost_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa);
+       gk20a_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa);
 
        memset(inst_ptr, 0, GK20A_PMU_INST_SIZE);
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
                ram_in_page_dir_base_target_vid_mem_f() |
                ram_in_page_dir_base_vol_true_f() |
                ram_in_page_dir_base_lo_f(pde_addr_lo));
 
-       mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
                ram_in_page_dir_base_hi_f(pde_addr_hi));
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
                 u64_lo32(vm->va_limit) | 0xFFF);
 
-       mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+       gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
                ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
 
        gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_pmu",
@@ -2700,7 +2700,7 @@ void gk20a_mm_fb_flush(struct gk20a *g)
        u32 data;
        s32 retry = 100;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        mutex_lock(&mm->l2_op_lock);
 
@@ -2719,7 +2719,7 @@ void gk20a_mm_fb_flush(struct gk20a *g)
                        flush_fb_flush_outstanding_true_v() ||
                    flush_fb_flush_pending_v(data) ==
                        flush_fb_flush_pending_busy_v()) {
-                               nvhost_dbg_info("fb_flush 0x%x", data);
+                               gk20a_dbg_info("fb_flush 0x%x", data);
                                retry--;
                                usleep_range(20, 40);
                } else
@@ -2727,7 +2727,7 @@ void gk20a_mm_fb_flush(struct gk20a *g)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "fb_flush too many retries");
 
        mutex_unlock(&mm->l2_op_lock);
@@ -2750,7 +2750,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
                        flush_l2_system_invalidate_outstanding_true_v() ||
                    flush_l2_system_invalidate_pending_v(data) ==
                        flush_l2_system_invalidate_pending_busy_v()) {
-                               nvhost_dbg_info("l2_system_invalidate 0x%x",
+                               gk20a_dbg_info("l2_system_invalidate 0x%x",
                                                data);
                                retry--;
                                usleep_range(20, 40);
@@ -2759,7 +2759,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "l2_system_invalidate too many retries");
 }
 
@@ -2777,7 +2777,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
        u32 data;
        s32 retry = 200;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        mutex_lock(&mm->l2_op_lock);
 
@@ -2793,7 +2793,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
                        flush_l2_flush_dirty_outstanding_true_v() ||
                    flush_l2_flush_dirty_pending_v(data) ==
                        flush_l2_flush_dirty_pending_busy_v()) {
-                               nvhost_dbg_info("l2_flush_dirty 0x%x", data);
+                               gk20a_dbg_info("l2_flush_dirty 0x%x", data);
                                retry--;
                                usleep_range(20, 40);
                } else
@@ -2801,7 +2801,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "l2_flush_dirty too many retries");
 
        if (invalidate)
@@ -2817,7 +2817,7 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
 {
        struct mapped_buffer_node *mapped_buffer;
 
-       nvhost_dbg_fn("gpu_va=0x%llx", gpu_va);
+       gk20a_dbg_fn("gpu_va=0x%llx", gpu_va);
 
        mutex_lock(&vm->update_gmmu_lock);
 
@@ -2844,7 +2844,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
        u32 data;
        s32 retry = 200;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        /* pagetables are considered sw states which are preserved after
           prepare_poweroff. When gk20a deinit releases those pagetables,
@@ -2874,7 +2874,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "wait mmu fifo space too many retries");
 
        gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
@@ -2897,7 +2897,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
        } while (retry >= 0 || !tegra_platform_is_silicon());
 
        if (retry < 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "mmu invalidate too many retries");
 
        mutex_unlock(&mm->tlb_lock);
@@ -2905,12 +2905,12 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 int gk20a_mm_suspend(struct gk20a *g)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_mm_fb_flush(g);
        gk20a_mm_l2_flush(g, true);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -2919,7 +2919,7 @@ void gk20a_mm_ltc_isr(struct gk20a *g)
        u32 intr;
 
        intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
-       nvhost_err(dev_from_gk20a(g), "ltc: %08x\n", intr);
+       gk20a_err(dev_from_gk20a(g), "ltc: %08x\n", intr);
        gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
 }
 
index 07c5535718236288dac8e2608fc7b6f07fb0a8af..9457e6c72b39f6eb0800475d278a91d82b8692cc 100644 (file)
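
The mm_gk20a.c hunks above and the pmu_gk20a.c hunks below follow one mechanical pattern: nvhost_dbg_fn()/nvhost_dbg()/nvhost_dbg_info() become gk20a_dbg_fn()/gk20a_dbg()/gk20a_dbg_info(), nvhost_err()/nvhost_warn() become gk20a_err()/gk20a_warn(), and the debug categories are renamed (dbg_pte -> gpu_dbg_pte, dbg_map -> gpu_dbg_map, dbg_pmu -> gpu_dbg_pmu). For orientation only, a mask-gated local debug layer of roughly this shape is sketched below; the bit assignments, the gk20a_dbg_mask variable and the use of pr_info()/dev_err() are assumptions for illustration, not the actual definitions in gk20a.h.

	/* illustrative sketch only -- not the driver's actual gk20a.h definitions */
	#include <linux/printk.h>
	#include <linux/device.h>

	/* hypothetical category bits mirroring the gpu_dbg_* names used in the hunks */
	enum {
		gpu_dbg_fn   = 1 << 0,
		gpu_dbg_info = 1 << 1,
		gpu_dbg_pte  = 1 << 2,
		gpu_dbg_map  = 1 << 3,
		gpu_dbg_pmu  = 1 << 4,
	};

	/* module-wide verbosity mask; a real driver would expose this, e.g. via debugfs */
	static u32 gk20a_dbg_mask = gpu_dbg_fn;

	#define gk20a_dbg(category, fmt, args...)				\
		do {								\
			if ((category) & gk20a_dbg_mask)			\
				pr_info("gk20a %s: " fmt "\n", __func__, ##args); \
		} while (0)

	#define gk20a_dbg_fn(fmt, args...)   gk20a_dbg(gpu_dbg_fn, fmt, ##args)
	#define gk20a_dbg_info(fmt, args...) gk20a_dbg(gpu_dbg_info, fmt, ##args)

	/* error/warn paths stay unconditional and are tied to the struct device */
	#define gk20a_err(dev, fmt, args...)  dev_err(dev, fmt "\n", ##args)
	#define gk20a_warn(dev, fmt, args...) dev_warn(dev, fmt "\n", ##args)

With a layer of this kind in place, gk20a's debug output no longer depends on the nvhost debug plumbing, which matches the intent stated in the commit message.
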
@@ -33,8 +33,8 @@
 
 #define GK20A_PMU_UCODE_IMAGE  "gpmu_ucode.bin"
 
-#define nvhost_dbg_pmu(fmt, arg...) \
-       nvhost_dbg(dbg_pmu, fmt, ##arg)
+#define gk20a_dbg_pmu(fmt, arg...) \
+       gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
 
 static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu);
 static int gk20a_pmu_get_elpg_residency_gating(struct gk20a *g,
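
Unlike the surrounding hunks, which rename call sites, the hunk above redefines pmu_gk20a.c's file-local wrapper: gk20a_dbg_pmu() now routes through gk20a_dbg() with the gpu_dbg_pmu category. A minimal usage sketch follows; the helper function and its parameters are invented for illustration.

	/* wrapper as defined in the hunk above */
	#define gk20a_dbg_pmu(fmt, arg...) \
		gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)

	/* hypothetical call site: the trace is emitted only when the
	 * gpu_dbg_pmu bit is set in the active debug mask */
	static void example_pmu_trace(u32 mutex_index, u32 token)
	{
		gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x", mutex_index, token);
	}
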
@@ -56,13 +56,13 @@ static void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
        u32 *dst_u32 = (u32*)dst;
 
        if (size == 0) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "size is zero");
                return;
        }
 
        if (src & 0x3) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "src (0x%08x) not 4-byte aligned", src);
                return;
        }
@@ -87,7 +87,7 @@ static void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
                data = gk20a_readl(g, pwr_falcon_dmemd_r(port));
                for (i = 0; i < bytes; i++) {
                        dst[(words << 2) + i] = ((u8 *)&data)[i];
-                       nvhost_dbg_pmu("read: dst_u8[%d]=0x%08x",
+                       gk20a_dbg_pmu("read: dst_u8[%d]=0x%08x",
                                        i, dst[(words << 2) + i]);
                }
        }
@@ -104,13 +104,13 @@ static void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
        u32 *src_u32 = (u32*)src;
 
        if (size == 0) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "size is zero");
                return;
        }
 
        if (dst & 0x3) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "dst (0x%08x) not 4-byte aligned", dst);
                return;
        }
@@ -141,7 +141,7 @@ static void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
        data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
        size = ALIGN(size, 4);
        if (data != dst + size) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "copy failed. bytes written %d, expected %d",
                        data - dst, size);
        }
@@ -166,7 +166,7 @@ static int pmu_idle(struct pmu_gk20a *pmu)
                }
 
                if (time_after_eq(jiffies, end_jiffies)) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "timeout waiting pmu idle : 0x%08x",
                                idle_stat);
                        return -EBUSY;
@@ -174,7 +174,7 @@ static int pmu_idle(struct pmu_gk20a *pmu)
                usleep_range(100, 200);
        } while (1);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -182,7 +182,7 @@ static void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable)
 {
        struct gk20a *g = pmu->g;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_writel(g, mc_intr_mask_0_r(),
                gk20a_readl(g, mc_intr_mask_0_r()) &
@@ -240,14 +240,14 @@ static void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable)
                        mc_intr_mask_0_pmu_enabled_f());
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 static int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
 {
        struct gk20a *g = pmu->g;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (enable) {
                int retries = GR_IDLE_CHECK_MAX / GR_IDLE_CHECK_DEFAULT;
@@ -259,14 +259,14 @@ static int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
                                 pwr_falcon_dmactl_imem_scrubbing_m());
 
                        if (!w) {
-                               nvhost_dbg_fn("done");
+                               gk20a_dbg_fn("done");
                                return 0;
                        }
                        udelay(GR_IDLE_CHECK_DEFAULT);
                } while (--retries || !tegra_platform_is_silicon());
 
                gk20a_disable(g, mc_enable_pwr_enabled_f());
-               nvhost_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+               gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
 
                return -ETIMEDOUT;
        } else {
@@ -281,7 +281,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable)
        u32 pmc_enable;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!enable) {
                pmc_enable = gk20a_readl(g, mc_enable_r());
@@ -305,7 +305,7 @@ static int pmu_enable(struct pmu_gk20a *pmu, bool enable)
                pmu_enable_irq(pmu, true);
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -343,7 +343,7 @@ static int pmu_bootstrap(struct pmu_gk20a *pmu)
        u64 addr_code, addr_data, addr_load;
        u32 i, blocks, addr_args;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_writel(g, pwr_falcon_itfen_r(),
                gk20a_readl(g, pwr_falcon_itfen_r()) |
@@ -443,7 +443,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu,
        index = find_first_zero_bit(pmu->pmu_seq_tbl,
                                sizeof(pmu->pmu_seq_tbl));
        if (index >= sizeof(pmu->pmu_seq_tbl)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "no free sequence available");
                mutex_unlock(&pmu->pmu_seq_lock);
                return -EAGAIN;
@@ -484,7 +484,7 @@ static int pmu_queue_init(struct pmu_queue *queue,
        queue->mutex_id = id;
        mutex_init(&queue->mutex);
 
-       nvhost_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
+       gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
                id, queue->index, queue->offset, queue->size);
 
        return 0;
@@ -588,7 +588,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
 
        if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
                BUG_ON(mutex->ref_cnt == 0);
-               nvhost_dbg_pmu("already acquired by owner : 0x%08x", *token);
+               gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token);
                mutex->ref_cnt++;
                return 0;
        }
@@ -599,7 +599,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
                        gk20a_readl(g, pwr_pmu_mutex_id_r()));
                if (data == pwr_pmu_mutex_id_value_init_v() ||
                    data == pwr_pmu_mutex_id_value_not_avail_v()) {
-                       nvhost_warn(dev_from_gk20a(g),
+                       gk20a_warn(dev_from_gk20a(g),
                                "fail to generate mutex token: val 0x%08x",
                                owner);
                        usleep_range(20, 40);
@@ -615,12 +615,12 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
 
                if (owner == data) {
                        mutex->ref_cnt = 1;
-                       nvhost_dbg_pmu("mutex acquired: id=%d, token=0x%x",
+                       gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x",
                                mutex->index, *token);
                        *token = owner;
                        return 0;
                } else {
-                       nvhost_dbg_info("fail to acquire mutex idx=0x%08x",
+                       gk20a_dbg_info("fail to acquire mutex idx=0x%08x",
                                mutex->index);
 
                        data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
@@ -656,7 +656,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
                gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
 
        if (*token != owner) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "requester 0x%08x NOT match owner 0x%08x",
                        *token, owner);
                return -EINVAL;
@@ -671,7 +671,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
                        pwr_pmu_mutex_id_release_value_f(owner));
                gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
 
-               nvhost_dbg_pmu("mutex released: id=%d, token=0x%x",
+               gk20a_dbg_pmu("mutex released: id=%d, token=0x%x",
                        mutex->index, *token);
        }
 
@@ -774,10 +774,10 @@ static bool pmu_queue_has_room(struct pmu_gk20a *pmu,
 static int pmu_queue_push(struct pmu_gk20a *pmu,
                        struct pmu_queue *queue, void *data, u32 size)
 {
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!queue->opened && queue->oflag == OFLAG_WRITE){
-               nvhost_err(dev_from_gk20a(pmu->g),
+               gk20a_err(dev_from_gk20a(pmu->g),
                        "queue not opened for write");
                return -EINVAL;
        }
@@ -796,7 +796,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
        *bytes_read = 0;
 
        if (!queue->opened && queue->oflag == OFLAG_READ){
-               nvhost_err(dev_from_gk20a(pmu->g),
+               gk20a_err(dev_from_gk20a(pmu->g),
                        "queue not opened for read");
                return -EINVAL;
        }
@@ -813,7 +813,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
                used = queue->offset + queue->size - tail;
 
        if (size > used) {
-               nvhost_warn(dev_from_gk20a(pmu->g),
+               gk20a_warn(dev_from_gk20a(pmu->g),
                        "queue size smaller than request read");
                size = used;
        }
@@ -829,10 +829,10 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu,
 {
        struct pmu_cmd cmd;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!queue->opened) {
-               nvhost_err(dev_from_gk20a(pmu->g),
+               gk20a_err(dev_from_gk20a(pmu->g),
                        "queue not opened");
                return;
        }
@@ -841,7 +841,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu,
                cmd.hdr.unit_id = PMU_UNIT_REWIND;
                cmd.hdr.size = PMU_CMD_HDR_SIZE;
                pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size);
-               nvhost_dbg_pmu("queue %d rewinded", queue->id);
+               gk20a_dbg_pmu("queue %d rewinded", queue->id);
        }
 
        queue->position = queue->offset;
@@ -884,7 +884,7 @@ static int pmu_queue_open_write(struct pmu_gk20a *pmu,
                BUG();
 
        if (!pmu_queue_has_room(pmu, queue, size, &rewind)) {
-               nvhost_err(dev_from_gk20a(pmu->g), "queue full");
+               gk20a_err(dev_from_gk20a(pmu->g), "queue full");
                return -EAGAIN;
        }
 
@@ -961,7 +961,7 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
 {
        struct gk20a_pmu_save_state save;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        gk20a_allocator_destroy(&pmu->dmem);
 
@@ -980,7 +980,7 @@ int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
 {
        struct pmu_gk20a *pmu = &g->pmu;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        pmu_enable_hw(pmu, true);
 
@@ -1003,7 +1003,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        DEFINE_DMA_ATTRS(attrs);
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (pmu->sw_ready) {
                for (i = 0; i < pmu->mutex_cnt; i++) {
@@ -1012,7 +1012,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                }
                pmu_seq_init(pmu);
 
-               nvhost_dbg_fn("skip init");
+               gk20a_dbg_fn("skip init");
                goto skip_init;
        }
 
@@ -1045,13 +1045,13 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        if (!g->pmu_fw) {
                g->pmu_fw = gk20a_request_firmware(g, GK20A_PMU_UCODE_IMAGE);
                if (!g->pmu_fw) {
-                       nvhost_err(d, "failed to load pmu ucode!!");
+                       gk20a_err(d, "failed to load pmu ucode!!");
                        err = -ENOENT;
                        goto err_free_seq;
                }
        }
 
-       nvhost_dbg_fn("firmware loaded");
+       gk20a_dbg_fn("firmware loaded");
 
        pmu->desc = (struct pmu_ucode_desc *)g->pmu_fw->data;
        pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
@@ -1069,7 +1069,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                        GFP_KERNEL,
                                        &attrs);
        if (!pmu->ucode.cpuva) {
-               nvhost_err(d, "failed to allocate memory\n");
+               gk20a_err(d, "failed to allocate memory\n");
                err = -ENOMEM;
                goto err_release_fw;
        }
@@ -1079,7 +1079,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                        &iova,
                                        GFP_KERNEL);
        if (!pmu->seq_buf.cpuva) {
-               nvhost_err(d, "failed to allocate memory\n");
+               gk20a_err(d, "failed to allocate memory\n");
                err = -ENOMEM;
                goto err_free_pmu_ucode;
        }
@@ -1092,7 +1092,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                pmu->ucode.iova,
                                GK20A_PMU_UCODE_SIZE_MAX);
        if (err) {
-               nvhost_err(d, "failed to allocate sg table\n");
+               gk20a_err(d, "failed to allocate sg table\n");
                goto err_free_seq_buf;
        }
 
@@ -1101,7 +1101,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                        0, /* flags */
                                        gk20a_mem_flag_read_only);
        if (!pmu->ucode.pmu_va) {
-               nvhost_err(d, "failed to map pmu ucode memory!!");
+               gk20a_err(d, "failed to map pmu ucode memory!!");
                goto err_free_ucode_sgt;
        }
 
@@ -1110,7 +1110,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                pmu->seq_buf.iova,
                                GK20A_PMU_SEQ_BUF_SIZE);
        if (err) {
-               nvhost_err(d, "failed to allocate sg table\n");
+               gk20a_err(d, "failed to allocate sg table\n");
                goto err_unmap_ucode;
        }
 
@@ -1119,13 +1119,13 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                                        0, /* flags */
                                        gk20a_mem_flag_none);
        if (!pmu->seq_buf.pmu_va) {
-               nvhost_err(d, "failed to map pmu ucode memory!!");
+               gk20a_err(d, "failed to map pmu ucode memory!!");
                goto err_free_seq_buf_sgt;
        }
 
        ptr = (u8 *)pmu->seq_buf.cpuva;
        if (!ptr) {
-               nvhost_err(d, "failed to map cpu ptr for zbc buffer");
+               gk20a_err(d, "failed to map cpu ptr for zbc buffer");
                goto err_unmap_seq_buf;
        }
 
@@ -1141,7 +1141,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        for (i = 0; i < (pmu->desc->app_start_offset +
                        pmu->desc->app_size) >> 2; i++)
-               mem_wr32(ucode_ptr, i, pmu->ucode_image[i]);
+               gk20a_mem_wr32(ucode_ptr, i, pmu->ucode_image[i]);
 
        gk20a_free_sgtable(&sgt_pmu_ucode);
        gk20a_free_sgtable(&sgt_seq_buf);
@@ -1158,7 +1158,7 @@ skip_init:
 
        pmu->remove_support = gk20a_remove_pmu_support;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 
  err_unmap_seq_buf:
@@ -1188,7 +1188,7 @@ skip_init:
  err_free_mutex:
        kfree(pmu->mutex);
  err:
-       nvhost_dbg_fn("fail");
+       gk20a_dbg_fn("fail");
        return err;
 }
 
@@ -1201,16 +1201,16 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
        struct pmu_gk20a *pmu = param;
        struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (status != 0) {
-               nvhost_err(dev_from_gk20a(g), "PGENG cmd aborted");
+               gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }
 
        if (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_FAILED) {
-               nvhost_err(dev_from_gk20a(g), "failed to load PGENG buffer");
+               gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
        }
 
        pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
@@ -1222,7 +1222,7 @@ int gk20a_init_pmu_setup_hw1(struct gk20a *g)
        struct pmu_gk20a *pmu = &g->pmu;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        pmu_reset(pmu);
 
@@ -1277,7 +1277,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        struct sg_table *sgt_pg_buf;
        dma_addr_t iova;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!support_gk20a_pmu())
                return 0;
@@ -1285,7 +1285,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        size = 0;
        err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to query fecs pg buffer size");
                return err;
        }
@@ -1295,7 +1295,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                                                &iova,
                                                GFP_KERNEL);
                if (!pmu->pg_buf.cpuva) {
-                       nvhost_err(d, "failed to allocate memory\n");
+                       gk20a_err(d, "failed to allocate memory\n");
                        err = -ENOMEM;
                        goto err;
                }
@@ -1308,7 +1308,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                                        pmu->pg_buf.iova,
                                        size);
                if (err) {
-                       nvhost_err(d, "failed to create sg table\n");
+                       gk20a_err(d, "failed to create sg table\n");
                        goto err_free_pg_buf;
                }
 
@@ -1318,7 +1318,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                                        0, /* flags */
                                        gk20a_mem_flag_none);
                if (!pmu->pg_buf.pmu_va) {
-                       nvhost_err(d, "failed to map fecs pg buffer");
+                       gk20a_err(d, "failed to map fecs pg buffer");
                        err = -ENOMEM;
                        goto err_free_sgtable;
                }
@@ -1344,7 +1344,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                                pmu->elpg_stat == PMU_ELPG_STAT_OFF)),
                        msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
        if (status == 0) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "PG_INIT_ACK failed, remaining timeout : 0x%lx", remain);
                pmu_dump_falcon_stats(pmu);
                return -EBUSY;
@@ -1352,14 +1352,14 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
 
        err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to bind pmu inst to gr");
                return err;
        }
 
        err = gr_gk20a_fecs_set_reglist_virual_addr(g, pmu->pg_buf.pmu_va);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to set pg buffer pmu va");
                return err;
        }
@@ -1384,7 +1384,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                        pmu->buf_loaded,
                        msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
        if (!pmu->buf_loaded) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "PGENG FECS buffer load failed, remaining timeout : 0x%lx",
                        remain);
                return -EBUSY;
@@ -1410,7 +1410,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                        pmu->buf_loaded,
                        msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
        if (!pmu->buf_loaded) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "PGENG ZBC buffer load failed, remaining timeout 0x%lx",
                        remain);
                return -EBUSY;
@@ -1468,7 +1468,7 @@ int gk20a_init_pmu_support(struct gk20a *g)
        struct pmu_gk20a *pmu = &g->pmu;
        u32 err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (pmu->initialized)
                return 0;
@@ -1498,32 +1498,32 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
        struct pmu_gk20a *pmu = param;
        struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (status != 0) {
-               nvhost_err(dev_from_gk20a(g), "ELPG cmd aborted");
+               gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }
 
        switch (elpg_msg->msg) {
        case PMU_PG_ELPG_MSG_INIT_ACK:
-               nvhost_dbg_pmu("INIT_PG is acknowledged from PMU");
+               gk20a_dbg_pmu("INIT_PG is acknowledged from PMU");
                pmu->elpg_ready = true;
                wake_up(&pmu->pg_wq);
                break;
        case PMU_PG_ELPG_MSG_ALLOW_ACK:
-               nvhost_dbg_pmu("ALLOW is acknowledged from PMU");
+               gk20a_dbg_pmu("ALLOW is acknowledged from PMU");
                pmu->elpg_stat = PMU_ELPG_STAT_ON;
                wake_up(&pmu->pg_wq);
                break;
        case PMU_PG_ELPG_MSG_DISALLOW_ACK:
-               nvhost_dbg_pmu("DISALLOW is acknowledged from PMU");
+               gk20a_dbg_pmu("DISALLOW is acknowledged from PMU");
                pmu->elpg_stat = PMU_ELPG_STAT_OFF;
                wake_up(&pmu->pg_wq);
                break;
        default:
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "unsupported ELPG message : 0x%04x", elpg_msg->msg);
        }
 
@@ -1535,17 +1535,17 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
 {
        struct pmu_gk20a *pmu = param;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (status != 0) {
-               nvhost_err(dev_from_gk20a(g), "ELPG cmd aborted");
+               gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }
 
        switch (msg->msg.pg.stat.sub_msg_id) {
        case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
-               nvhost_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
+               gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
                pmu->stat_dmem_offset = msg->msg.pg.stat.data;
                wake_up(&pmu->pg_wq);
                break;
@@ -1560,7 +1560,7 @@ static int pmu_init_powergating(struct pmu_gk20a *pmu)
        struct pmu_cmd cmd;
        u32 seq;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (tegra_cpu_is_asim()) {
                /* TBD: calculate threshold for silicon */
@@ -1628,7 +1628,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
        u32 data;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        pmu->perfmon_ready = 0;
 
@@ -1679,7 +1679,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
        pmu->sample_buffer = 0;
        err = pmu->dmem.alloc(&pmu->dmem, &pmu->sample_buffer, 2 * sizeof(u16));
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "failed to allocate perfmon sample buffer");
                return -ENOMEM;
        }
@@ -1733,7 +1733,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
                (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
 
        if (msg->hdr.unit_id != PMU_UNIT_INIT) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "expecting init msg");
                return -EINVAL;
        }
@@ -1742,7 +1742,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
                (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
 
        if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "expecting init msg");
                return -EINVAL;
        }
@@ -1799,7 +1799,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 
        err = pmu_queue_open_read(pmu, queue);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to open queue %d for read", queue->id);
                *status = err;
                return false;
@@ -1808,7 +1808,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
        err = pmu_queue_pop(pmu, queue, &msg->hdr,
                        PMU_MSG_HDR_SIZE, &bytes_read);
        if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to read msg from queue %d", queue->id);
                *status = err | -EINVAL;
                goto clean_up;
@@ -1820,7 +1820,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
                err = pmu_queue_pop(pmu, queue, &msg->hdr,
                                PMU_MSG_HDR_SIZE, &bytes_read);
                if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to read msg from queue %d", queue->id);
                        *status = err | -EINVAL;
                        goto clean_up;
@@ -1828,7 +1828,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
        }
 
        if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "read invalid unit_id %d from queue %d",
                        msg->hdr.unit_id, queue->id);
                        *status = -EINVAL;
@@ -1840,7 +1840,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
                err = pmu_queue_pop(pmu, queue, &msg->msg,
                        read_size, &bytes_read);
                if (err || bytes_read != read_size) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "fail to read msg from queue %d", queue->id);
                        *status = err;
                        goto clean_up;
@@ -1849,7 +1849,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 
        err = pmu_queue_close(pmu, queue, true);
        if (err) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to close queue %d", queue->id);
                *status = err;
                return false;
@@ -1860,7 +1860,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 clean_up:
        err = pmu_queue_close(pmu, queue, false);
        if (err)
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to close queue %d", queue->id);
        return false;
 }
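
The hunks above touch pmu_read_message(), which drains one message at a time from a PMU-to-host queue: open the queue for read, pop the fixed-size header, validate it, pop the variable-size payload, then close. Below is a minimal, self-contained sketch of that header-then-payload pop pattern; the queue type, the pop helper, and the size constants are illustrative stand-ins, not the gk20a definitions.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define MSG_HDR_SIZE  4
#define MSG_MAX_SIZE  64

struct msg_hdr  { unsigned char unit_id, size, ctrl_flags, seq_id; };
struct msg      { struct msg_hdr hdr; unsigned char body[MSG_MAX_SIZE]; };
struct msg_queue { const unsigned char *buf; size_t len, rd; };

/* Pop 'n' bytes from the queue; fail if fewer than 'n' bytes remain. */
static bool queue_pop(struct msg_queue *q, void *dst, size_t n)
{
        if (q->len - q->rd < n)
                return false;
        memcpy(dst, q->buf + q->rd, n);
        q->rd += n;
        return true;
}

/* Read one message: header first, then hdr.size - MSG_HDR_SIZE payload bytes. */
static bool read_one_message(struct msg_queue *q, struct msg *m)
{
        if (!queue_pop(q, &m->hdr, MSG_HDR_SIZE))
                return false;                           /* queue empty or truncated */
        if (m->hdr.size < MSG_HDR_SIZE ||
            m->hdr.size - MSG_HDR_SIZE > MSG_MAX_SIZE)
                return false;                           /* reject a bogus size field */
        return queue_pop(q, m->body, m->hdr.size - MSG_HDR_SIZE);
}

int main(void)
{
        /* one 6-byte message: 4-byte header (size = 6) plus 2 payload bytes */
        const unsigned char raw[] = { 0x07, 6, 0, 1, 0xaa, 0xbb };
        struct msg_queue q = { raw, sizeof(raw), 0 };
        struct msg m;

        return read_one_message(&q, &m) ? 0 : 1;
}
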
@@ -1872,19 +1872,19 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
        struct pmu_sequence *seq;
        int ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        seq = &pmu->seq[msg->hdr.seq_id];
        if (seq->state != PMU_SEQ_STATE_USED &&
            seq->state != PMU_SEQ_STATE_CANCELLED) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "msg for an unknown sequence %d", seq->id);
                return -EINVAL;
        }
 
        if (msg->hdr.unit_id == PMU_UNIT_RC &&
            msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "unhandled cmd: seq %d", seq->id);
        }
        else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
@@ -1899,7 +1899,7 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
                                                0);
                                }
                        } else {
-                               nvhost_err(dev_from_gk20a(g),
+                               gk20a_err(dev_from_gk20a(g),
                                        "sequence %d msg buffer too small",
                                        seq->id);
                        }
@@ -1921,7 +1921,7 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
 
        /* TBD: notify client waiting for available dmem */
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 }
@@ -1958,7 +1958,7 @@ static void pmu_save_zbc(struct gk20a *g, u32 entries)
        pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
                              &pmu->zbc_save_done, 1);
        if (!pmu->zbc_save_done)
-               nvhost_err(dev_from_gk20a(g), "ZBC save timeout");
+               gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
 }
 
 void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
@@ -2039,11 +2039,11 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu,
        struct gk20a *g = pmu->g;
        u32 rate;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        switch (msg->msg_type) {
        case PMU_PERFMON_MSG_ID_INCREASE_EVENT:
-               nvhost_dbg_pmu("perfmon increase event: "
+               gk20a_dbg_pmu("perfmon increase event: "
                        "state_id %d, ground_id %d, pct %d",
                        msg->gen.state_id, msg->gen.group_id, msg->gen.data);
                /* increase gk20a clock freq by 20% */
@@ -2051,7 +2051,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu,
                gk20a_clk_set_rate(g, rate * 6 / 5);
                break;
        case PMU_PERFMON_MSG_ID_DECREASE_EVENT:
-               nvhost_dbg_pmu("perfmon decrease event: "
+               gk20a_dbg_pmu("perfmon decrease event: "
                        "state_id %d, ground_id %d, pct %d",
                        msg->gen.state_id, msg->gen.group_id, msg->gen.data);
                /* decrease gk20a clock freq by 10% */
@@ -2060,7 +2060,7 @@ static int pmu_handle_perfmon_event(struct pmu_gk20a *pmu,
                break;
        case PMU_PERFMON_MSG_ID_INIT_EVENT:
                pmu->perfmon_ready = 1;
-               nvhost_dbg_pmu("perfmon init event");
+               gk20a_dbg_pmu("perfmon init event");
                break;
        default:
                break;
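
The perfmon handler above adjusts the GPU clock in response to load events: an increase event raises the current rate by 20% via rate * 6 / 5, and the decrease branch lowers it by 10% (the exact expression for the decrease is outside this hunk, so the 9/10 factor below is an assumption drawn from the comment). A small standalone sketch of that integer scaling:

#include <stdio.h>

static unsigned long scale_up_20pct(unsigned long rate_hz)
{
        return rate_hz * 6 / 5;          /* +20%, matches rate * 6 / 5 above */
}

static unsigned long scale_down_10pct(unsigned long rate_hz)
{
        return rate_hz * 9 / 10;         /* -10%, assumed from the comment above */
}

int main(void)
{
        unsigned long rate = 600000000UL;               /* example: 600 MHz */

        printf("up:   %lu\n", scale_up_20pct(rate));    /* 720000000 */
        printf("down: %lu\n", scale_down_10pct(rate));  /* 540000000 */
        return 0;
}
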
@@ -2077,7 +2077,7 @@ static int pmu_handle_event(struct pmu_gk20a *pmu, struct pmu_msg *msg)
 {
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        switch (msg->hdr.unit_id) {
        case PMU_UNIT_PERFMON:
@@ -2105,7 +2105,7 @@ static int pmu_process_message(struct pmu_gk20a *pmu)
        while (pmu_read_message(pmu,
                &pmu->queue[PMU_MESSAGE_QUEUE], &msg, &status)) {
 
-               nvhost_dbg_pmu("read msg hdr: "
+               gk20a_dbg_pmu("read msg hdr: "
                                "unit_id = 0x%08x, size = 0x%08x, "
                                "ctrl_flags = 0x%08x, seq_id = 0x%08x",
                                msg.hdr.unit_id, msg.hdr.size,
@@ -2153,29 +2153,29 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu)
        pmu_copy_from_dmem(pmu, pmu->stat_dmem_offset,
                (u8 *)&stats, sizeof(struct pmu_pg_stats), 0);
 
-       nvhost_dbg_pmu("pg_entry_start_timestamp : 0x%016llx",
+       gk20a_dbg_pmu("pg_entry_start_timestamp : 0x%016llx",
                stats.pg_entry_start_timestamp);
-       nvhost_dbg_pmu("pg_exit_start_timestamp : 0x%016llx",
+       gk20a_dbg_pmu("pg_exit_start_timestamp : 0x%016llx",
                stats.pg_exit_start_timestamp);
-       nvhost_dbg_pmu("pg_ingating_start_timestamp : 0x%016llx",
+       gk20a_dbg_pmu("pg_ingating_start_timestamp : 0x%016llx",
                stats.pg_ingating_start_timestamp);
-       nvhost_dbg_pmu("pg_ungating_start_timestamp : 0x%016llx",
+       gk20a_dbg_pmu("pg_ungating_start_timestamp : 0x%016llx",
                stats.pg_ungating_start_timestamp);
-       nvhost_dbg_pmu("pg_avg_entry_time_us : 0x%08x",
+       gk20a_dbg_pmu("pg_avg_entry_time_us : 0x%08x",
                stats.pg_avg_entry_time_us);
-       nvhost_dbg_pmu("pg_avg_exit_time_us : 0x%08x",
+       gk20a_dbg_pmu("pg_avg_exit_time_us : 0x%08x",
                stats.pg_avg_exit_time_us);
-       nvhost_dbg_pmu("pg_ingating_cnt : 0x%08x",
+       gk20a_dbg_pmu("pg_ingating_cnt : 0x%08x",
                stats.pg_ingating_cnt);
-       nvhost_dbg_pmu("pg_ingating_time_us : 0x%08x",
+       gk20a_dbg_pmu("pg_ingating_time_us : 0x%08x",
                stats.pg_ingating_time_us);
-       nvhost_dbg_pmu("pg_ungating_count : 0x%08x",
+       gk20a_dbg_pmu("pg_ungating_count : 0x%08x",
                stats.pg_ungating_count);
-       nvhost_dbg_pmu("pg_ungating_time_us 0x%08x: ",
+       gk20a_dbg_pmu("pg_ungating_time_us 0x%08x: ",
                stats.pg_ungating_time_us);
-       nvhost_dbg_pmu("pg_gating_cnt : 0x%08x",
+       gk20a_dbg_pmu("pg_gating_cnt : 0x%08x",
                stats.pg_gating_cnt);
-       nvhost_dbg_pmu("pg_gating_deny_cnt : 0x%08x",
+       gk20a_dbg_pmu("pg_gating_deny_cnt : 0x%08x",
                stats.pg_gating_deny_cnt);
 
        /*
@@ -2184,35 +2184,35 @@ static void pmu_dump_elpg_stats(struct pmu_gk20a *pmu)
        u32 i, val[20];
        pmu_copy_from_dmem(pmu, 0x66c,
                (u8 *)val, sizeof(val), 0);
-       nvhost_dbg_pmu("elpg log begin");
+       gk20a_dbg_pmu("elpg log begin");
        for (i = 0; i < 20; i++)
-               nvhost_dbg_pmu("0x%08x", val[i]);
-       nvhost_dbg_pmu("elpg log end");
+               gk20a_dbg_pmu("0x%08x", val[i]);
+       gk20a_dbg_pmu("elpg log end");
        */
 
-       nvhost_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
-       nvhost_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
-       nvhost_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
-       nvhost_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
                gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
-       nvhost_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
                gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
 
-       nvhost_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_count_r(3)));
-       nvhost_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_count_r(4)));
-       nvhost_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_count_r(7)));
 
        /*
         TBD: script can't generate those registers correctly
-       nvhost_dbg_pmu("pwr_pmu_idle_status_r(): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_idle_status_r(): 0x%08x",
                gk20a_readl(g, pwr_pmu_idle_status_r()));
-       nvhost_dbg_pmu("pwr_pmu_pg_ctrl_r(): 0x%08x",
+       gk20a_dbg_pmu("pwr_pmu_pg_ctrl_r(): 0x%08x",
                gk20a_readl(g, pwr_pmu_pg_ctrl_r()));
        */
 }
@@ -2222,117 +2222,117 @@ static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
        struct gk20a *g = pmu->g;
        int i;
 
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
                gk20a_readl(g, pwr_falcon_os_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
                gk20a_readl(g, pwr_falcon_cpuctl_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
                gk20a_readl(g, pwr_falcon_idlestate_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
                gk20a_readl(g, pwr_falcon_mailbox0_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
                gk20a_readl(g, pwr_falcon_mailbox1_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
                gk20a_readl(g, pwr_falcon_irqstat_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
                gk20a_readl(g, pwr_falcon_irqmode_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
                gk20a_readl(g, pwr_falcon_irqmask_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
                gk20a_readl(g, pwr_falcon_irqdest_r()));
 
        for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
                        i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
 
        for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
                        i, gk20a_readl(g, pwr_pmu_debug_r(i)));
 
        for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
                gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                        pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
                        pwr_pmu_falcon_icd_cmd_idx_f(i));
-               nvhost_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
                        i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
        }
 
        i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
-       nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
+       gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
        if (i != 0) {
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
                        gk20a_readl(g, pwr_pmu_bar0_addr_r()));
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
                        gk20a_readl(g, pwr_pmu_bar0_data_r()));
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
                        gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
-               nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
                        gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
        }
 
        i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
-       nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
+       gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
 
        i = gk20a_readl(g, pwr_falcon_exterrstat_r());
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
        if (pwr_falcon_exterrstat_valid_v(i) ==
                        pwr_falcon_exterrstat_valid_true_v()) {
-               nvhost_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
                        gk20a_readl(g, pwr_falcon_exterraddr_r()));
-               nvhost_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
                        gk20a_readl(g, top_fs_status_r()));
-               nvhost_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
                        gk20a_readl(g, mc_enable_r()));
        }
 
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
                gk20a_readl(g, pwr_falcon_engctl_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
                gk20a_readl(g, pwr_falcon_curctx_r()));
-       nvhost_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
                gk20a_readl(g, pwr_falcon_nxtctx_r()));
 
        gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
-       nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
                gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
        gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
-       nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
                gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
        gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
-       nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
                gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
        gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
-       nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
                gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
        gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
-       nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
+       gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
                gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
        for (i = 0; i < 4; i++) {
                gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                        pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                        pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
-               nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
                        gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
                gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                        pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                        pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
-               nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
+               gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
                        gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
        }
 
@@ -2347,7 +2347,7 @@ void gk20a_pmu_isr(struct gk20a *g)
        u32 intr, mask;
        bool recheck = false;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        mutex_lock(&pmu->isr_mutex);
 
@@ -2356,7 +2356,7 @@ void gk20a_pmu_isr(struct gk20a *g)
 
        intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
 
-       nvhost_dbg_pmu("received falcon interrupt: 0x%08x", intr);
+       gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr);
 
        if (!intr) {
                mutex_unlock(&pmu->isr_mutex);
@@ -2364,12 +2364,12 @@ void gk20a_pmu_isr(struct gk20a *g)
        }
 
        if (intr & pwr_falcon_irqstat_halt_true_f()) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "pmu halt intr not implemented");
                pmu_dump_falcon_stats(pmu);
        }
        if (intr & pwr_falcon_irqstat_exterr_true_f()) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "pmu exterr intr not implemented. Clearing interrupt.");
                pmu_dump_falcon_stats(pmu);
 
@@ -2451,7 +2451,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
        return true;
 
 invalid_cmd:
-       nvhost_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
+       gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
                "queue_id=%d,\n"
                "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
                "payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -2473,7 +2473,7 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
                msecs_to_jiffies(timeout);
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        queue = &pmu->queue[queue_id];
 
@@ -2494,10 +2494,10 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
 
 clean_up:
        if (err)
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "fail to write cmd to queue %d", queue_id);
        else
-               nvhost_dbg_fn("done");
+               gk20a_dbg_fn("done");
 
        return err;
 }
@@ -2512,7 +2512,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
        struct pmu_allocation *in = NULL, *out = NULL;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        BUG_ON(!cmd);
        BUG_ON(!seq_desc);
@@ -2589,12 +2589,12 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
        if (err)
                seq->state = PMU_SEQ_STATE_PENDING;
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 
        return 0;
 
 clean_up:
-       nvhost_dbg_fn("fail");
+       gk20a_dbg_fn("fail");
        if (in)
                pmu->dmem.free(&pmu->dmem, in->alloc.dmem.offset,
                        in->alloc.dmem.size);
@@ -2612,7 +2612,7 @@ static int gk20a_pmu_enable_elpg_locked(struct gk20a *g)
        struct pmu_cmd cmd;
        u32 seq, status;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
@@ -2630,7 +2630,7 @@ static int gk20a_pmu_enable_elpg_locked(struct gk20a *g)
 
        BUG_ON(status != 0);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -2641,7 +2641,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 
        int ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!pmu->elpg_ready || !pmu->initialized)
                goto exit;
@@ -2654,7 +2654,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 
        /* something is not right if we end up in following code path */
        if (unlikely(pmu->elpg_refcnt > 1)) {
-               nvhost_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
+               gk20a_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                            __func__, pmu->elpg_refcnt);
                WARN_ON(1);
        }
@@ -2680,7 +2680,7 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 exit_unlock:
        mutex_unlock(&pmu->elpg_mutex);
 exit:
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return ret;
 }
 
@@ -2689,7 +2689,7 @@ static void pmu_elpg_enable_allow(struct work_struct *work)
        struct pmu_gk20a *pmu = container_of(to_delayed_work(work),
                                        struct pmu_gk20a, elpg_enable);
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        mutex_lock(&pmu->elpg_mutex);
 
@@ -2704,7 +2704,7 @@ static void pmu_elpg_enable_allow(struct work_struct *work)
 
        mutex_unlock(&pmu->elpg_mutex);
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
 }
 
 static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
@@ -2714,7 +2714,7 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
        u32 seq;
        int ret = 0;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!pmu->elpg_ready || !pmu->initialized)
                return 0;
@@ -2726,7 +2726,7 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
 
        pmu->elpg_refcnt--;
        if (pmu->elpg_refcnt > 0) {
-               nvhost_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
+               gk20a_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                            __func__, pmu->elpg_refcnt);
                WARN_ON(1);
                ret = 0;
@@ -2746,7 +2746,7 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
                                      &pmu->elpg_stat, PMU_ELPG_STAT_ON);
 
                if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
-                       nvhost_err(dev_from_gk20a(g),
+                       gk20a_err(dev_from_gk20a(g),
                                "ELPG_ALLOW_ACK failed, elpg_stat=%d",
                                pmu->elpg_stat);
                        pmu_dump_elpg_stats(pmu);
@@ -2776,7 +2776,7 @@ static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
        pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
                              &pmu->elpg_stat, PMU_ELPG_STAT_OFF);
        if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) {
-               nvhost_err(dev_from_gk20a(g),
+               gk20a_err(dev_from_gk20a(g),
                        "ELPG_DISALLOW_ACK failed");
                pmu_dump_elpg_stats(pmu);
                pmu_dump_falcon_stats(pmu);
@@ -2795,7 +2795,7 @@ exit_reschedule:
 
 exit_unlock:
        mutex_unlock(&pmu->elpg_mutex);
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return ret;
 }
 
@@ -2809,7 +2809,7 @@ int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
        struct pmu_gk20a *pmu = &g->pmu;
        int err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (enable)
                err = pmu_perfmon_start_sampling(pmu);
@@ -2824,7 +2824,7 @@ int gk20a_pmu_destroy(struct gk20a *g)
        struct pmu_gk20a *pmu = &g->pmu;
        u32 elpg_ingating_time, elpg_ungating_time, gating_cnt;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        if (!support_gk20a_pmu())
                return 0;
@@ -2853,7 +2853,7 @@ int gk20a_pmu_destroy(struct gk20a *g)
                pmu->remove_support = NULL;
        }
 
-       nvhost_dbg_fn("done");
+       gk20a_dbg_fn("done");
        return 0;
 }
 
@@ -2985,7 +2985,7 @@ static int gk20a_pmu_ap_send_command(struct gk20a *g,
                break;
 
        default:
-               nvhost_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
+               gk20a_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
                        __func__, p_ap_cmd->cmn.cmd_id);
                return 0x2f;
        }
@@ -2994,7 +2994,7 @@ static int gk20a_pmu_ap_send_command(struct gk20a *g,
                        p_callback, pmu, &seq, ~0);
 
        if (!status) {
-               nvhost_dbg_pmu(
+               gk20a_dbg_pmu(
                        "%s: Unable to submit Adaptive Power Command %d\n",
                        __func__, p_ap_cmd->cmn.cmd_id);
                goto err_return;
@@ -3019,7 +3019,7 @@ static void ap_callback_init_and_enable_ctrl(
                        break;
 
                default:
-                       nvhost_dbg_pmu(
+                       gk20a_dbg_pmu(
                        "%s: Invalid Adaptive Power Message: %x\n",
                        __func__, msg->msg.pg.ap_msg.cmn.msg_id);
                        break;
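
All of the hunks in this file follow the same substitution: the nvhost_dbg_fn()/nvhost_dbg_pmu()/nvhost_err() wrappers are replaced with gk20a-local equivalents. The actual gk20a_dbg_fn()/gk20a_err() definitions live in the gk20a headers and are not part of this diff; the sketch below only illustrates the general shape of such driver-local debug helpers and is not the real implementation.

#include <stdio.h>
#include <stdarg.h>

static void gpu_dbg(const char *func, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "gpu dbg [%s]: ", func);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
}

/* Function-entry tracing and error reporting wrappers (illustrative names). */
#define gpu_dbg_fn(fmt, ...)  gpu_dbg(__func__, "fn " fmt, ##__VA_ARGS__)
#define gpu_err(fmt, ...)     gpu_dbg(__func__, "ERROR " fmt, ##__VA_ARGS__)

int main(void)
{
        gpu_dbg_fn("");
        gpu_err("queue %d not ready", 3);
        return 0;
}
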
index eac31f63d4b7bb16684213cf72f9e991c21158a2..acee82c38cc87343cf75579858f6d6e14462c8b6 100644 (file)
@@ -1,9 +1,7 @@
 /*
- * drivers/video/tegra/host/gk20a/priv_ring_gk20a.c
- *
  * GK20A priv ring
  *
- * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,9 +12,8 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/delay.h>       /* for mdelay */
@@ -65,7 +62,7 @@ void gk20a_priv_ring_isr(struct gk20a *g)
        status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
        status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
 
-       nvhost_dbg_info("ringmaster intr status0: 0x%08x,"
+       gk20a_dbg_info("ringmaster intr status0: 0x%08x,"
                "status1: 0x%08x", status0, status1);
 
        if (status0 & (0x1 | 0x2 | 0x4)) {
@@ -84,13 +81,13 @@ void gk20a_priv_ring_isr(struct gk20a *g)
        } while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry);
 
        if (retry <= 0)
-               nvhost_warn(dev_from_gk20a(g),
+               gk20a_warn(dev_from_gk20a(g),
                        "priv ringmaster cmd ack too many retries");
 
        status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
        status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
 
-       nvhost_dbg_info("ringmaster intr status0: 0x%08x,"
+       gk20a_dbg_info("ringmaster intr status0: 0x%08x,"
                " status1: 0x%08x", status0, status1);
 }
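
The ISR above issues a ringmaster command and then polls with a bounded retry count until the command register reads back as "no command", warning if the budget runs out. A generic sketch of that bounded-poll pattern follows; read_cmd() and CMD_NO_CMD stand in for the real register read and pri_ringmaster_command_cmd_no_cmd_v().

#include <stdbool.h>
#include <stdint.h>

#define CMD_NO_CMD  0u

static uint32_t fake_cmd = 3;                 /* pretend hardware state       */
static uint32_t read_cmd(void)
{
        if (fake_cmd)                         /* completes after a few polls  */
                fake_cmd--;
        return fake_cmd;
}

/* Returns true if the command drained before the retry budget ran out. */
static bool wait_cmd_ack(int max_retries)
{
        int retry = max_retries;
        uint32_t cmd;

        do {
                cmd = read_cmd();
        } while (cmd != CMD_NO_CMD && --retry);

        return retry > 0;
}

int main(void)
{
        return wait_cmd_ack(64) ? 0 : 1;
}
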
 
index 7a6d5ed3c83283787c5369ad328ff159c1f16a98..70cdaf3fab66dbd7cccd0c06822e3323b690590d 100644 (file)
@@ -395,7 +395,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
        bool skip_read_lo, skip_read_hi;
        bool ok;
 
-       nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
        ch = dbg_s->ch;
 
@@ -419,7 +419,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
                case REGOP(READ_32):
                        ops[i].value_hi = 0;
                        ops[i].value_lo = gk20a_readl(g, ops[i].offset);
-                       nvhost_dbg(dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
+                       gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
                                   ops[i].value_lo, ops[i].offset);
 
                        break;
@@ -429,7 +429,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
                        ops[i].value_hi =
                                gk20a_readl(g, ops[i].offset + 4);
 
-                       nvhost_dbg(dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
+                       gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
                                   ops[i].value_hi, ops[i].value_lo,
                                   ops[i].offset);
                break;
@@ -468,12 +468,12 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 
                        /* now update first 32bits */
                        gk20a_writel(g, ops[i].offset, data32_lo);
-                       nvhost_dbg(dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+                       gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
                                   data32_lo, ops[i].offset);
                        /* if desired, update second 32bits */
                        if (ops[i].op == REGOP(WRITE_64)) {
                                gk20a_writel(g, ops[i].offset + 4, data32_hi);
-                               nvhost_dbg(dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+                               gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
                                           data32_hi, ops[i].offset + 4);
 
                        }
@@ -501,7 +501,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
        }
 
  clean_up:
-       nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+       gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
        return err;
 
 }
@@ -522,7 +522,7 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
                break;
        default:
                op->status |= REGOP(STATUS_UNSUPPORTED_OP);
-               /*nvhost_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
+               /*gk20a_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
                err = -EINVAL;
                break;
        }
@@ -541,7 +541,7 @@ static int validate_reg_op_info(struct dbg_session_gk20a *dbg_s,
        */
        default:
                op->status |= REGOP(STATUS_INVALID_TYPE);
-               /*nvhost_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
+               /*gk20a_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
                err = -EINVAL;
                break;
        }
@@ -581,7 +581,7 @@ static bool check_whitelists(struct dbg_session_gk20a *dbg_s,
        } else if (op->type == REGOP(TYPE_GR_CTX)) {
                /* it's a context-relative op */
                if (!dbg_s->ch) {
-                       nvhost_err(dbg_s->dev, "can't perform ctx regop unless bound");
+                       gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound");
                        op->status = REGOP(STATUS_UNSUPPORTED_OP);
                        return -ENODEV;
                }
@@ -622,7 +622,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
 
        /* support only 24-bit 4-byte aligned offsets */
        if (offset & 0xFF000003) {
-               nvhost_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
+               gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
                op->status |= REGOP(STATUS_INVALID_OFFSET);
                return -EINVAL;
        }
@@ -651,7 +651,7 @@ static int validate_reg_op_offset(struct dbg_session_gk20a *dbg_s,
        }
 
        if (!valid) {
-               nvhost_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
+               gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
                op->status |= REGOP(STATUS_INVALID_OFFSET);
                return -EINVAL;
        }
@@ -686,7 +686,7 @@ static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
                ok &= !err;
        }
 
-       nvhost_dbg(dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d\n",
+       gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d\n",
                   *ctx_wr_count, *ctx_rd_count);
 
        return ok;
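
validate_reg_op_offset() above accepts only 24-bit, 4-byte-aligned register offsets; the single mask test offset & 0xFF000003 rejects out-of-range and misaligned values in one step. A small standalone version of that check, with example offsets chosen purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Any bit set in the top byte means the offset needs more than 24 bits;
 * any bit set in the low two bits means it is not 4-byte aligned. */
static bool regop_offset_ok(uint32_t offset)
{
        return (offset & 0xFF000003u) == 0;
}

int main(void)
{
        printf("%d\n", regop_offset_ok(0x00419e44));  /* 1: in range, aligned        */
        printf("%d\n", regop_offset_ok(0x00419e46));  /* 0: not 4-byte aligned       */
        printf("%d\n", regop_offset_ok(0x01000000));  /* 0: needs more than 24 bits  */
        return 0;
}
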
index b734ebfccf4b14b2f6f4bcf81dc77cab01f60cbd..028872453d0f55553ad2a95fabca3fc1413d51ec 100644 (file)
@@ -3,7 +3,7 @@
  *
  * GK20A Therm
  *
- * Copyright (c) 2011 - 2012, NVIDIA Corporation.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,8 +14,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include "../dev.h"
@@ -127,7 +128,7 @@ int gk20a_init_therm_support(struct gk20a *g)
 {
        u32 err;
 
-       nvhost_dbg_fn("");
+       gk20a_dbg_fn("");
 
        err = gk20a_init_therm_reset_enable_hw(g);
        if (err)