/* dumb allocator... */
static int generate_as_share_id(struct gk20a_as *as)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return ++as->last_share_id;
}
/* still dumb */
static void release_as_share_id(struct gk20a_as *as, int id)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return;
}
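/*
 * Illustrative sketch only (not part of this change): a wrap-safe share-id
 * allocator could use the kernel IDA instead of a bare counter, e.g.
 *
 *   int id = ida_simple_get(&as->share_ida, 1, 0, GFP_KERNEL);
 *   ...
 *   ida_simple_remove(&as->share_ida, id);
 *
 * where 'share_ida' is a hypothetical struct ida member of struct gk20a_as.
 */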
struct gk20a_as_share *as_share;
int err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
*out = 0;
as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
{
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (atomic_dec_return(&as_share->ref_cnt) > 0)
return 0;
int err = 0;
struct channel_gk20a *ch;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch || gk20a_channel_as_bound(ch))
struct gk20a_as_share *as_share,
struct nvhost_as_alloc_space_args *args)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gk20a_vm_alloc_space(as_share, args);
}
struct gk20a_as_share *as_share,
struct nvhost_as_free_space_args *args)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gk20a_vm_free_space(as_share, args);
}
{
int i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* ensure that padding is not set. this is required so that we can
 * safely use these fields later */
struct gk20a_as_share *as_share,
struct nvhost_as_map_buffer_args *args)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
&args->o_a.align,
args->flags, NV_KIND_DEFAULT);
struct gk20a_as_share *as_share,
struct nvhost_as_unmap_buffer_args *args)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gk20a_vm_unmap_buffer(as_share, args->offset);
}
struct gk20a *g;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
g = container_of(inode->i_cdev, struct gk20a, as.cdev);
err = gk20a_get_client(g);
if (err) {
- nvhost_dbg_fn("fail to get channel!");
+ gk20a_dbg_fn("fail to get channel!");
return err;
}
err = gk20a_as_alloc_share(&g->as, &as_share);
if (err) {
- nvhost_dbg_fn("failed to alloc share");
+ gk20a_dbg_fn("failed to alloc share");
gk20a_put_client(g);
return err;
}
int ret;
struct gk20a *g = gk20a_from_as(as_share->as);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ret = gk20a_as_release_share(as_share);
u32 addr_hi;
void *inst_ptr;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
inst_ptr = c->inst_block.cpuva;
if (!inst_ptr)
addr_lo = u64_lo32(addr >> 12);
addr_hi = u64_hi32(addr);
- nvhost_dbg_info("pde pa=0x%llx addr_lo=0x%x addr_hi=0x%x",
+ gk20a_dbg_info("pde pa=0x%llx addr_lo=0x%x addr_hi=0x%x",
(u64)addr, addr_lo, addr_hi);
- mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(addr_lo));
- mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
ram_in_page_dir_base_hi_f(addr_hi));
- mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
u64_lo32(c->vm->va_limit) | 0xFFF);
- mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
ram_in_adr_limit_hi_f(u64_hi32(c->vm->va_limit)));
gk20a_mm_l2_invalidate(c->g);
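/*
 * Worked example (illustrative, not part of the patch): for a page directory
 * base of addr = 0x104000000ULL,
 *
 *   addr_lo = u64_lo32(addr >> 12) = 0x00104000;   4KB-aligned low bits
 *   addr_hi = u64_hi32(addr)       = 0x00000001;   upper 32 bits, unshifted
 *
 * which are then packed via ram_in_page_dir_base_lo_f()/_hi_f() as above.
 */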
u32 addr_hi;
void *inst_ptr;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
inst_ptr = c->inst_block.cpuva;
if (!inst_ptr)
addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v());
addr_hi = u64_hi32(c->userd_iova);
- nvhost_dbg_info("channel %d : set ramfc userd 0x%16llx",
+ gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx",
c->hw_chid, (u64)c->userd_iova);
- mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
pbdma_userd_target_vid_mem_f() |
pbdma_userd_addr_f(addr_lo));
- mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_hi_w(),
pbdma_userd_target_vid_mem_f() |
pbdma_userd_hi_addr_f(addr_hi));
}
/* set new timeslice */
- mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
value | (shift << 12) |
fifo_eng_timeslice_enable_true_f());
{
void *inst_ptr;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
inst_ptr = c->inst_block.cpuva;
if (!inst_ptr)
memset(inst_ptr, 0, ram_fc_size_val_v());
- mem_wr32(inst_ptr, ram_fc_gp_base_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_w(),
pbdma_gp_base_offset_f(
u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s())));
- mem_wr32(inst_ptr, ram_fc_gp_base_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_gp_base_hi_w(),
pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) |
pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries)));
- mem_wr32(inst_ptr, ram_fc_signature_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_signature_w(),
pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f());
- mem_wr32(inst_ptr, ram_fc_formats_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_formats_w(),
pbdma_formats_gp_fermi0_f() |
pbdma_formats_pb_fermi1_f() |
pbdma_formats_mp_fermi0_f());
- mem_wr32(inst_ptr, ram_fc_pb_header_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_pb_header_w(),
pbdma_pb_header_priv_user_f() |
pbdma_pb_header_method_zero_f() |
pbdma_pb_header_subchannel_zero_f() |
pbdma_pb_header_first_true_f() |
pbdma_pb_header_type_inc_f());
- mem_wr32(inst_ptr, ram_fc_subdevice_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_subdevice_w(),
pbdma_subdevice_id_f(1) |
pbdma_subdevice_status_active_f() |
pbdma_subdevice_channel_dma_enable_f());
- mem_wr32(inst_ptr, ram_fc_target_w(), pbdma_target_engine_sw_f());
+ gk20a_mem_wr32(inst_ptr, ram_fc_target_w(), pbdma_target_engine_sw_f());
- mem_wr32(inst_ptr, ram_fc_acquire_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_acquire_w(),
pbdma_acquire_retry_man_2_f() |
pbdma_acquire_retry_exp_2_f() |
pbdma_acquire_timeout_exp_max_f() |
pbdma_acquire_timeout_man_max_f() |
pbdma_acquire_timeout_en_disable_f());
- mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_eng_timeslice_w(),
fifo_eng_timeslice_timeout_128_f() |
fifo_eng_timeslice_timescale_3_f() |
fifo_eng_timeslice_enable_true_f());
- mem_wr32(inst_ptr, ram_fc_pb_timeslice_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_pb_timeslice_w(),
fifo_pb_timeslice_timeout_16_f() |
fifo_pb_timeslice_timescale_0_f() |
fifo_pb_timeslice_enable_true_f());
- mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
+ gk20a_mem_wr32(inst_ptr, ram_fc_chid_w(), ram_fc_chid_id_f(c->hw_chid));
/* TBD: always priv mode? */
- mem_wr32(inst_ptr, ram_fc_hce_ctrl_w(),
+ gk20a_mem_wr32(inst_ptr, ram_fc_hce_ctrl_w(),
pbdma_hce_ctrl_hce_priv_mode_yes_f());
gk20a_mm_l2_invalidate(c->g);
{
BUG_ON(!c->userd_cpu_va);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
- mem_wr32(c->userd_cpu_va, ram_userd_put_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_get_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_ref_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_put_hi_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_ref_threshold_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_hi_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_get_hi_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_gp_get_w(), 0);
- mem_wr32(c->userd_cpu_va, ram_userd_gp_put_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_put_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_get_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_ref_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_put_hi_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_ref_threshold_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_top_level_get_hi_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_get_hi_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_get_w(), 0);
+ gk20a_mem_wr32(c->userd_cpu_va, ram_userd_gp_put_w(), 0);
gk20a_mm_l2_invalidate(c->g);
u32 inst_ptr = ch_gk20a->inst_block.cpu_pa
>> ram_in_base_shift_v();
- nvhost_dbg_info("bind channel %d inst ptr 0x%08x",
+ gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
ch_gk20a->hw_chid, inst_ptr);
ch_gk20a->bound = true;
{
struct gk20a *g = ch_gk20a->g;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (ch_gk20a->bound)
gk20a_writel(g, ccsr_channel_inst_r(ch_gk20a->hw_chid),
int err = 0;
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ch->inst_block.size = ram_in_alloc_size_v();
ch->inst_block.cpuva = dma_alloc_coherent(d,
&iova,
GFP_KERNEL);
if (!ch->inst_block.cpuva) {
- nvhost_err(d, "%s: memory allocation failed\n", __func__);
+ gk20a_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
ch->inst_block.cpu_pa = gk20a_get_phys_from_iova(d,
ch->inst_block.iova);
if (!ch->inst_block.cpu_pa) {
- nvhost_err(d, "%s: failed to get physical address\n", __func__);
+ gk20a_err(d, "%s: failed to get physical address\n", __func__);
err = -ENOMEM;
goto clean_up;
}
- nvhost_dbg_info("channel %d inst block physical addr: 0x%16llx",
+ gk20a_dbg_info("channel %d inst block physical addr: 0x%16llx",
ch->hw_chid, (u64)ch->inst_block.cpu_pa);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up:
- nvhost_err(d, "fail");
+ gk20a_err(d, "fail");
channel_gk20a_free_inst(g, ch);
return err;
}
|| !tegra_platform_is_silicon());
if (!channel_idle)
- nvhost_err(dev_from_gk20a(ch->g), "channel jobs not freed");
+ gk20a_err(dev_from_gk20a(ch->g), "channel jobs not freed");
return 0;
}
(u32)(nsec >> 32);
ch->error_notifier->info32 = error;
ch->error_notifier->status = 0xffff;
- nvhost_err(dev_from_gk20a(ch->g),
+ gk20a_err(dev_from_gk20a(ch->g),
"error notifier set to %d\n", error);
}
}
unsigned long timeout = gk20a_get_gr_idle_timeout(g);
struct dbg_session_gk20a *dbg_s;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* if engine reset was deferred, perform it now */
mutex_lock(&f->deferred_reset_mutex);
if (g->fifo.deferred_reset_pending) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "engine reset was"
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "engine reset was"
" deferred, running now");
fifo_gk20a_finish_mmu_fault_handling(g, g->fifo.mmu_fault_engines);
g->fifo.mmu_fault_engines = 0;
if (!gk20a_channel_as_bound(ch))
goto unbind;
- nvhost_dbg_info("freeing bound channel context, timeout=%ld",
+ gk20a_dbg_info("freeing bound channel context, timeout=%ld",
timeout);
gk20a_disable_channel(ch, finish && !ch->has_timedout, timeout);
ch = acquire_unused_channel(f);
if (ch == NULL) {
/* TBD: we want to make this virtualizable */
- nvhost_err(dev_from_gk20a(g), "out of hw chids");
+ gk20a_err(dev_from_gk20a(g), "out of hw chids");
return 0;
}
if (channel_gk20a_alloc_inst(g, ch)) {
ch->in_use = false;
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to open gk20a channel, out of inst mem");
return 0;
err = gk20a_get_client(g);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to get client ref");
return err;
}
err = gk20a_channel_busy(g->dev);
if (err) {
gk20a_put_client(g);
- nvhost_err(dev_from_gk20a(g), "failed to power on, %d", err);
+ gk20a_err(dev_from_gk20a(g), "failed to power on, %d", err);
return err;
}
ch = gk20a_open_new_channel(g);
gk20a_channel_idle(g->dev);
if (!ch) {
gk20a_put_client(g);
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to get f");
return -ENOMEM;
}
&iova,
GFP_KERNEL);
if (!q->mem.base_cpuva) {
- nvhost_err(d, "%s: memory allocation failed\n", __func__);
+ gk20a_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
err = gk20a_get_sgtable(d, &sgt,
q->mem.base_cpuva, q->mem.base_iova, size);
if (err) {
- nvhost_err(d, "%s: failed to create sg table\n", __func__);
+ gk20a_err(d, "%s: failed to create sg table\n", __func__);
goto clean_up;
}
0, /* flags */
gk20a_mem_flag_none);
if (!q->base_gpuva) {
- nvhost_err(d, "ch %d : failed to map gpu va"
+ gk20a_err(d, "ch %d : failed to map gpu va"
"for priv cmd buffer", c->hw_chid);
err = -ENOMEM;
goto clean_up_sgt;
for (i = 0; i < q->size / 4; i++) {
e = kzalloc(sizeof(struct priv_cmd_entry), GFP_KERNEL);
if (!e) {
- nvhost_err(d, "ch %d: fail to pre-alloc cmd entry",
+ gk20a_err(d, "ch %d: fail to pre-alloc cmd entry",
c->hw_chid);
err = -ENOMEM;
goto clean_up_sgt;
u32 size = orig_size;
bool no_retry = false;
- nvhost_dbg_fn("size %d", orig_size);
+ gk20a_dbg_fn("size %d", orig_size);
*entry = NULL;
if (q->put + size > q->size)
size = orig_size + (q->size - q->put);
- nvhost_dbg_info("ch %d: priv cmd queue get:put %d:%d",
+ gk20a_dbg_info("ch %d: priv cmd queue get:put %d:%d",
c->hw_chid, q->get, q->put);
TRY_AGAIN:
if (unlikely(list_empty(&q->free))) {
- nvhost_dbg_info("ch %d: run out of pre-alloc entries",
+ gk20a_dbg_info("ch %d: run out of pre-alloc entries",
c->hw_chid);
e = kzalloc(sizeof(struct priv_cmd_entry), GFP_KERNEL);
if (!e) {
- nvhost_err(dev_from_gk20a(c->g),
+ gk20a_err(dev_from_gk20a(c->g),
"ch %d: fail to allocate priv cmd entry",
c->hw_chid);
return -ENOMEM;
*entry = e;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
struct list_head *head = &q->head;
bool wrap_around, found = false;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* Find the most recent free entry. Free it and everything before it */
list_for_each_entry(e, head, list) {
- nvhost_dbg_info("ch %d: cmd entry get:put:wrap %d:%d:%d "
+ gk20a_dbg_info("ch %d: cmd entry get:put:wrap %d:%d:%d "
"curr get:put:wrap %d:%d:%d",
c->hw_chid, e->gp_get, e->gp_put, e->gp_wrap,
c->gpfifo.get, c->gpfifo.put, c->gpfifo.wrap);
if (found)
q->get = (e->ptr - q->mem.base_cpuva) + e->size;
else {
- nvhost_dbg_info("no free entry recycled");
+ gk20a_dbg_info("no free entry recycled");
return;
}
free_priv_cmdbuf(c, e);
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
/* an address space needs to have been bound at this point. */
if (!gk20a_channel_as_bound(c)) {
- nvhost_err(d,
+ gk20a_err(d,
"not bound to an address space at time of gpfifo"
" allocation. Attempting to create and bind to"
" one...");
c->ramfc.size = ram_in_ramfc_s() / 8;
if (c->gpfifo.cpu_va) {
- nvhost_err(d, "channel %d :"
+ gk20a_err(d, "channel %d :"
"gpfifo already allocated", c->hw_chid);
return -EEXIST;
}
&iova,
GFP_KERNEL);
if (!c->gpfifo.cpu_va) {
- nvhost_err(d, "%s: memory allocation failed\n", __func__);
+ gk20a_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
err = gk20a_get_sgtable(d, &sgt,
c->gpfifo.cpu_va, c->gpfifo.iova, c->gpfifo.size);
if (err) {
- nvhost_err(d, "%s: failed to allocate sg table\n", __func__);
+ gk20a_err(d, "%s: failed to allocate sg table\n", __func__);
goto clean_up;
}
0, /* flags */
gk20a_mem_flag_none);
if (!c->gpfifo.gpu_va) {
- nvhost_err(d, "channel %d : failed to map"
+ gk20a_err(d, "channel %d : failed to map"
" gpu_va for gpfifo", c->hw_chid);
err = -ENOMEM;
goto clean_up_sgt;
}
- nvhost_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
+ gk20a_dbg_info("channel %d : gpfifo_base 0x%016llx, size %d",
c->hw_chid, c->gpfifo.gpu_va, c->gpfifo.entry_num);
channel_gk20a_setup_ramfc(c, c->gpfifo.gpu_va, c->gpfifo.entry_num);
gk20a_free_sgtable(&sgt);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up_unmap:
c->gpfifo.cpu_va = NULL;
c->gpfifo.iova = 0;
memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
- nvhost_err(d, "fail");
+ gk20a_err(d, "fail");
return err;
}
c->userd_gpu_va + 4 * ram_userd_gp_put_w());
if (c->gpfifo.put != put) {
/*TBD: BUG_ON/teardown on this*/
- nvhost_err(dev_from_gk20a(g), "gp_put changed unexpectedly "
+ gk20a_err(dev_from_gk20a(g), "gp_put changed unexpectedly "
"since last update");
c->gpfifo.put = put;
return false; /* surprise! */
update_gp_get(g, c);
free_count = gp_free_count(c);
if (unlikely(!free_count)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"not enough gpfifo space");
return -EAGAIN;
}
c->userd_gpu_va + 4 * ram_userd_gp_put_w(),
c->gpfifo.put);
- nvhost_dbg_info("post-submit put %d, get %d, size %d",
+ gk20a_dbg_info("post-submit put %d, get %d, size %d",
c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
return 0;
g->ops.ltc.sync_debugfs(g);
#endif
- nvhost_dbg_info("channel %d", c->hw_chid);
+ gk20a_dbg_info("channel %d", c->hw_chid);
/* gk20a_channel_update releases this ref. */
gk20a_channel_busy(g->dev);
check_gp_put(g, c);
update_gp_get(g, c);
- nvhost_dbg_info("pre-submit put %d, get %d, size %d",
+ gk20a_dbg_info("pre-submit put %d, get %d, size %d",
c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
/* Invalidate tlb if it's dirty... */
}
if (err) {
- nvhost_err(d, "not enough gpfifo space");
+ gk20a_err(d, "not enough gpfifo space");
err = -EAGAIN;
goto clean_up;
}
c->userd_gpu_va + 4 * ram_userd_gp_put_w(),
c->gpfifo.put);
- nvhost_dbg_info("post-submit put %d, get %d, size %d",
+ gk20a_dbg_info("post-submit put %d, get %d, size %d",
c->gpfifo.put, c->gpfifo.get, c->gpfifo.entry_num);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return err;
clean_up:
- nvhost_err(d, "fail");
+ gk20a_err(d, "fail");
free_priv_cmdbuf(c, wait_cmd);
free_priv_cmdbuf(c, incr_cmd);
gk20a_channel_idle(g->dev);
return -ETIMEDOUT;
if (!(ch->last_submit_fence.valid && ch->last_submit_fence.wfi)) {
- nvhost_dbg_fn("issuing wfi, incr to finish the channel");
+ gk20a_dbg_fn("issuing wfi, incr to finish the channel");
err = gk20a_channel_submit_wfi(ch);
}
if (err)
BUG_ON(!(ch->last_submit_fence.valid && ch->last_submit_fence.wfi));
- nvhost_dbg_fn("waiting for channel to finish thresh:%d",
+ gk20a_dbg_fn("waiting for channel to finish thresh:%d",
ch->last_submit_fence.thresh);
err = ch->sync->wait_cpu(ch->sync, &ch->last_submit_fence, timeout);
dmabuf = dma_buf_get(id);
if (IS_ERR(dmabuf)) {
- nvhost_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
+ gk20a_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
id);
return -EINVAL;
}
data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
if (!data) {
- nvhost_err(&pdev->dev, "failed to map notifier memory");
+ gk20a_err(&pdev->dev, "failed to map notifier memory");
ret = -EINVAL;
goto cleanup_put;
}
unsigned long timeout;
int remain, ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (ch->has_timedout)
return -ETIMEDOUT;
dmabuf = dma_buf_get(id);
if (IS_ERR(dmabuf)) {
- nvhost_err(d, "invalid notifier nvmap handle 0x%lx",
+ gk20a_err(d, "invalid notifier nvmap handle 0x%lx",
id);
return -EINVAL;
}
notif = dma_buf_vmap(dmabuf);
if (!notif) {
- nvhost_err(d, "failed to map notifier memory");
+ gk20a_err(d, "failed to map notifier memory");
return -ENOMEM;
}
struct gk20a *g = ch->g;
struct gr_gk20a *gr = &g->gr;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gr_gk20a_bind_ctxsw_zcull(g, gr, ch,
args->gpu_va, args->mode);
struct device *d = dev_from_gk20a(g);
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* idle the engine by submitting WFI on non-KEPLER_C channel */
for (chid = 0; chid < f->num_channels; chid++) {
if (c->in_use && c->obj_class != KEPLER_C) {
err = gk20a_channel_submit_wfi(c);
if (err) {
- nvhost_err(d, "cannot idle channel %d\n",
+ gk20a_err(d, "cannot idle channel %d\n",
chid);
return err;
}
for (chid = 0; chid < f->num_channels; chid++) {
if (f->channel[chid].in_use) {
- nvhost_dbg_info("suspend channel %d", chid);
+ gk20a_dbg_info("suspend channel %d", chid);
/* disable channel */
gk20a_writel(g, ccsr_channel_r(chid),
gk20a_readl(g, ccsr_channel_r(chid)) |
}
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
u32 chid;
bool channels_in_use = false;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
for (chid = 0; chid < f->num_channels; chid++) {
if (f->channel[chid].in_use) {
- nvhost_dbg_info("resume channel %d", chid);
+ gk20a_dbg_info("resume channel %d", chid);
channel_gk20a_bind(&f->channel[chid]);
channels_in_use = true;
}
if (channels_in_use)
gk20a_fifo_update_runlist(g, 0, ~0, true, true);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
struct fifo_gk20a *f = &g->fifo;
u32 chid;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
for (chid = 0; chid < f->num_channels; chid++) {
struct channel_gk20a *c = g->fifo.channel+chid;
u32 size;
int ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (ch->has_timedout)
return -ETIMEDOUT;
{
u32 timeout =
(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
- nvhost_dbg(dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
timeout, ch->hw_chid);
ch->timeout_ms_max = timeout;
break;
bool timeout_debug_dump = !((u32)
((struct nvhost_set_timeout_ex_args *)buf)->flags &
(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
- nvhost_dbg(dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "setting timeout (%d ms) for chid %d",
timeout, ch->hw_chid);
ch->timeout_ms_max = timeout;
ch->timeout_debug_dump = timeout_debug_dump;
gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
if (wait_cmd == NULL) {
- nvhost_err(dev_from_gk20a(sp->c->g),
+ gk20a_err(dev_from_gk20a(sp->c->g),
"not enough priv cmd buffer space");
return -EAGAIN;
}
num_wait_cmds = nvhost_sync_num_pts(sync_fence);
gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
if (wait_cmd == NULL) {
- nvhost_err(dev_from_gk20a(c->g),
+ gk20a_err(dev_from_gk20a(c->g),
"not enough priv cmd buffer space");
sync_fence_put(sync_fence);
return -EAGAIN;
if (incr_cmd == NULL) {
gk20a_channel_idle(c->g->dev);
kfree(completed_waiter);
- nvhost_err(dev_from_gk20a(c->g),
+ gk20a_err(dev_from_gk20a(c->g),
"not enough priv cmd buffer space");
return -EAGAIN;
}
#include "hw_trim_gk20a.h"
#include "hw_timer_gk20a.h"
-#define nvhost_dbg_clk(fmt, arg...) \
- nvhost_dbg(dbg_clk, fmt, ##arg)
+#define gk20a_dbg_clk(fmt, arg...) \
+ gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
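/*
 * Not part of this patch: if a transition period were needed, hypothetical
 * compatibility aliases could keep old nvhost_* call sites compiling, e.g.
 *
 *   #define nvhost_dbg_clk(fmt, arg...)  gk20a_dbg_clk(fmt, ##arg)
 *
 * This series instead renames every call site directly, as shown throughout.
 */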
/* from vbios PLL info table */
struct pll_parms gpc_pll_params = {
BUG_ON(target_freq == NULL);
- nvhost_dbg_fn("request target freq %d MHz", *target_freq);
+ gk20a_dbg_fn("request target freq %d MHz", *target_freq);
ref_clk_f = pll->clk_in;
target_clk_f = *target_freq;
break;
}
}
- nvhost_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
+ gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
low_PL, pl_to_div[low_PL], high_PL, pl_to_div[high_PL]);
for (pl = low_PL; pl <= high_PL; pl++) {
goto found_match;
}
- nvhost_dbg_info("delta %d @ M %d, N %d, PL %d",
+ gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
delta, m, n, pl);
}
}
BUG_ON(best_delta == ~0);
if (best_fit && best_delta != 0)
- nvhost_dbg_clk("no best match for target @ %dMHz on gpc_pll",
+ gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
target_clk_f);
pll->M = best_M;
*target_freq = pll->freq;
- nvhost_dbg_clk("actual target freq %d MHz, M %d, N %d, PL %d(div%d)",
+ gk20a_dbg_clk("actual target freq %d MHz, M %d, N %d, PL %d(div%d)",
*target_freq, pll->M, pll->N, pll->PL, pl_to_div[pll->PL]);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
if (ramp_timeout <= 0) {
- nvhost_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
+ gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
return -ETIMEDOUT;
}
return 0;
u32 m, n, pl;
u32 nlo;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!tegra_platform_is_silicon())
return 0;
static int gk20a_init_clk_reset_enable_hw(struct gk20a *g)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return 0;
}
clk = clk_get_sys("tegra_gk20a", "gpu");
if (IS_ERR(clk)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to get tegra gpu clk tegra_gk20a/gpu");
return NULL;
}
struct clk *ref;
unsigned long ref_rate;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (clk->sw_ready) {
- nvhost_dbg_fn("skip init");
+ gk20a_dbg_fn("skip init");
return 0;
}
ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
if (IS_ERR(ref)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to get GPCPLL reference clock");
return -EINVAL;
}
clk->sw_ready = true;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
{
u32 data;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
data = set_field(data,
/* gpc_pll.freq is changed to new value here */
if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
&freq, true)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to set pll target for %d", freq);
return -EINVAL;
}
struct clk_gk20a *clk = &g->clk;
int err = 0;
- nvhost_dbg_fn("curr freq: %dMHz, target freq %dMHz", old_freq, freq);
+ gk20a_dbg_fn("curr freq: %dMHz, target freq %dMHz", old_freq, freq);
if ((freq == old_freq) && clk->gpc_pll.enabled)
return 0;
/* Just report the error but do not restore the PLL, since dvfs could have
   already changed the voltage even when it returns an error. */
if (err)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to set pll to %d", freq);
return err;
}
struct clk_gk20a *clk = &g->clk;
u32 err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
clk->g = g;
int err;
struct gk20a *g;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
g = container_of(inode->i_cdev,
struct gk20a, ctrl.cdev);
err = gk20a_get_client(g);
if (err) {
- nvhost_dbg_fn("fail to get channel!");
+ gk20a_dbg_fn("fail to get channel!");
return err;
}
{
struct platform_device *dev = filp->private_data;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_put_client(get_gk20a(dev));
return 0;
struct zbc_query_params *zbc_tbl;
int i, err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if ((_IOC_TYPE(cmd) != NVHOST_GPU_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) ||
break;
default:
- nvhost_err(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
+ gk20a_err(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
err = -ENOTTY;
break;
}
struct dbg_session_gk20a *dbg_s;
*_dbg_s = NULL;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
dbg_s = kzalloc(sizeof(*dbg_s), GFP_KERNEL);
if (!dbg_s)
pdev = g->dev;
dev = &pdev->dev;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", dev_name(dev));
err = alloc_session(&dbg_session);
if (err)
static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
gk20a_dbg_session_mutex_lock(dbg_s);
static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
gk20a_dbg_session_mutex_lock(dbg_s);
static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
gk20a_dbg_session_mutex_lock(dbg_s);
{
int ret = 0;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
if (!dbg_s->ch) {
- nvhost_err(dev_from_gk20a(dbg_s->g),
+ gk20a_err(dev_from_gk20a(dbg_s->g),
"no channel bound to dbg session\n");
return -EINVAL;
}
break;
default:
- nvhost_err(dev_from_gk20a(dbg_s->g),
+ gk20a_err(dev_from_gk20a(dbg_s->g),
"unrecognized dbg gpu events ctrl cmd: 0x%x",
args->cmd);
ret = -EINVAL;
unsigned int mask = 0;
struct dbg_session_gk20a *dbg_s = filep->private_data;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
if (dbg_s->dbg_events.events_enabled &&
dbg_s->dbg_events.num_pending_events > 0) {
- nvhost_dbg(dbg_gpu_dbg, "found pending event on session id %d",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
dbg_s->id);
- nvhost_dbg(dbg_gpu_dbg, "%d events pending",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
dbg_s->dbg_events.num_pending_events);
mask = (POLLPRI | POLLIN);
}
int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
}
int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
}
{
struct dbg_session_gk20a *dbg_s;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
/* guard against the session list being modified */
mutex_lock(&ch->dbg_s_lock);
list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
if (dbg_s->dbg_events.events_enabled) {
- nvhost_dbg(dbg_gpu_dbg, "posting event on session id %d",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
dbg_s->id);
- nvhost_dbg(dbg_gpu_dbg, "%d events pending",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
dbg_s->dbg_events.num_pending_events);
dbg_s->dbg_events.num_pending_events++;
struct channel_gk20a *ch_gk20a = dbg_s->ch;
struct gk20a *g = dbg_s->g;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
/* wasn't bound to start with ? */
if (!ch_gk20a) {
- nvhost_dbg(dbg_gpu_dbg | dbg_fn, "not bound already?");
+ gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "not bound already?");
return -ENODEV;
}
{
struct dbg_session_gk20a *dbg_s = filp->private_data;
- nvhost_dbg(dbg_gpu_dbg | dbg_fn, "%s", dev_name(dbg_s->dev));
+ gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", dev_name(dbg_s->dev));
/* unbind if it was bound */
if (!dbg_s->ch)
struct gk20a *g;
struct channel_gk20a *ch;
- nvhost_dbg(dbg_fn|dbg_gpu_dbg, "%s fd=%d",
+ gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
dev_name(dbg_s->dev), args->channel_fd);
if (args->channel_fd == ~0)
ch = gk20a_get_channel_from_file(args->channel_fd);
if (!ch) {
- nvhost_dbg_fn("no channel found for fd");
+ gk20a_dbg_fn("no channel found for fd");
fput(f);
return -EINVAL;
}
g = dbg_s->g;
- nvhost_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
+ gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
mutex_lock(&g->dbg_sessions_lock);
mutex_lock(&ch->dbg_s_lock);
u8 buf[NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE];
int err = 0;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
if ((_IOC_TYPE(cmd) != NVHOST_DBG_GPU_IOCTL_MAGIC) ||
(_IOC_NR(cmd) == 0) ||
case NVHOST_DBG_GPU_IOCTL_BIND_CHANNEL:
err = dbg_bind_channel_gk20a(dbg_s,
(struct nvhost_dbg_gpu_bind_channel_args *)buf);
- nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
break;
case NVHOST_DBG_GPU_IOCTL_REG_OPS:
err = nvhost_ioctl_channel_reg_ops(dbg_s,
(struct nvhost_dbg_gpu_exec_reg_ops_args *)buf);
- nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
break;
case NVHOST_DBG_GPU_IOCTL_POWERGATE:
err = nvhost_ioctl_powergate_gk20a(dbg_s,
(struct nvhost_dbg_gpu_powergate_args *)buf);
- nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
break;
case NVHOST_DBG_GPU_IOCTL_EVENTS_CTRL:
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"unrecognized dbg gpu ioctl cmd: 0x%x",
cmd);
err = -ENOTTY;
struct nvhost_dbg_gpu_reg_op *ops;
u64 ops_size = sizeof(ops[0]) * args->num_ops;
- nvhost_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
+ gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
if (!dbg_s->ops) {
- nvhost_err(dev, "can't call reg_ops on an unbound debugger session");
+ gk20a_err(dev, "can't call reg_ops on an unbound debugger session");
return -EINVAL;
}
if (!dbg_s->is_profiler && !dbg_s->ch) {
- nvhost_err(dev, "bind a channel before regops for a debugging session");
+ gk20a_err(dev, "bind a channel before regops for a debugging session");
return -EINVAL;
}
/* be sure that ctx info is in place */
if (!gr_context_info_available(dbg_s, &g->gr)) {
- nvhost_err(dev, "gr context data not available\n");
+ gk20a_err(dev, "gr context data not available\n");
return -ENODEV;
}
ops = kzalloc(ops_size, GFP_KERNEL);
if (!ops) {
- nvhost_err(dev, "Allocating memory failed!");
+ gk20a_err(dev, "Allocating memory failed!");
return -ENOMEM;
}
- nvhost_dbg_fn("Copying regops from userspace");
+ gk20a_dbg_fn("Copying regops from userspace");
if (copy_from_user(ops, (void *)(uintptr_t)args->ops, ops_size)) {
dev_err(dev, "copy_from_user failed!");
mutex_unlock(&g->dbg_sessions_lock);
if (err) {
- nvhost_err(dev, "dbg regops failed");
+ gk20a_err(dev, "dbg regops failed");
goto clean_up;
}
- nvhost_dbg_fn("Copying result to userspace");
+ gk20a_dbg_fn("Copying result to userspace");
if (copy_to_user((void *)(uintptr_t)args->ops, ops, ops_size)) {
dev_err(dev, "copy_to_user failed!");
/* This function must be called with g->dbg_sessions_lock held */
- nvhost_dbg(dbg_fn|dbg_gpu_dbg, "%s powergate mode = %d",
+ gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s powergate mode = %d",
dev_name(dbg_s->dev), powermode);
switch (powermode) {
if ((dbg_s->is_pg_disabled == false) &&
(g->dbg_powergating_disabled_refcount++ == 0)) {
- nvhost_dbg(dbg_gpu_dbg | dbg_fn, "module busy");
+ gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module busy");
gk20a_busy(g->dev);
gk20a_channel_busy(dbg_s->pdev);
gk20a_pmu_enable_elpg(g);
- nvhost_dbg(dbg_gpu_dbg | dbg_fn, "module idle");
+ gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
gk20a_channel_idle(dbg_s->pdev);
gk20a_idle(g->dev);
}
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"unrecognized dbg gpu powergate mode: 0x%x",
powermode);
err = -ENOTTY;
{
int err;
struct gk20a *g = get_gk20a(dbg_s->pdev);
- nvhost_dbg_fn("%s powergate mode = %d",
+ gk20a_dbg_fn("%s powergate mode = %d",
dev_name(dbg_s->dev), args->mode);
mutex_lock(&g->dbg_sessions_lock);
struct gk20a *g = get_gk20a(dbg_s->pdev);
struct channel_gk20a *ch_gk20a;
- nvhost_dbg_fn("%s smpc ctxsw mode = %d",
- dev_name(dbg_s->dev), args->mode);
+ gk20a_dbg_fn("%s smpc ctxsw mode = %d",
+ dev_name(dbg_s->dev), args->mode);
/* Take the global lock, since we'll be doing global regops */
mutex_lock(&g->dbg_sessions_lock);
ch_gk20a = dbg_s->ch;
if (!ch_gk20a) {
- nvhost_err(dev_from_gk20a(dbg_s->g),
- "no bound channel for smpc ctxsw mode update\n");
+ gk20a_err(dev_from_gk20a(dbg_s->g),
+ "no bound channel for smpc ctxsw mode update\n");
err = -EINVAL;
goto clean_up;
}
err = gr_gk20a_update_smpc_ctxsw_mode(g, ch_gk20a,
args->mode == NVHOST_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
if (err) {
- nvhost_err(dev_from_gk20a(dbg_s->g),
- "error (%d) during smpc ctxsw mode update\n", err);
+ gk20a_err(dev_from_gk20a(dbg_s->g),
+ "error (%d) during smpc ctxsw mode update\n", err);
goto clean_up;
}
/* The following regops are a hack/war to make up for the fact that we
gk20a_debug_output(o, "TOP: %016llx PUT: %016llx GET: %016llx "
"FETCH: %016llx\nHEADER: %08x COUNT: %08x\n"
"SYNCPOINT %08x %08x SEMAPHORE %08x %08x %08x %08x\n",
- (u64)mem_rd32(inst_ptr, ram_fc_pb_top_level_get_w()) +
- ((u64)mem_rd32(inst_ptr,
+ (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_top_level_get_w()) +
+ ((u64)gk20a_mem_rd32(inst_ptr,
ram_fc_pb_top_level_get_hi_w()) << 32ULL),
- (u64)mem_rd32(inst_ptr, ram_fc_pb_put_w()) +
- ((u64)mem_rd32(inst_ptr, ram_fc_pb_put_hi_w()) << 32ULL),
- (u64)mem_rd32(inst_ptr, ram_fc_pb_get_w()) +
- ((u64)mem_rd32(inst_ptr, ram_fc_pb_get_hi_w()) << 32ULL),
- (u64)mem_rd32(inst_ptr, ram_fc_pb_fetch_w()) +
- ((u64)mem_rd32(inst_ptr, ram_fc_pb_fetch_hi_w()) << 32ULL),
- mem_rd32(inst_ptr, ram_fc_pb_header_w()),
- mem_rd32(inst_ptr, ram_fc_pb_count_w()),
- mem_rd32(inst_ptr, ram_fc_syncpointa_w()),
- mem_rd32(inst_ptr, ram_fc_syncpointb_w()),
- mem_rd32(inst_ptr, ram_fc_semaphorea_w()),
- mem_rd32(inst_ptr, ram_fc_semaphoreb_w()),
- mem_rd32(inst_ptr, ram_fc_semaphorec_w()),
- mem_rd32(inst_ptr, ram_fc_semaphored_w()));
+ (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_put_w()) +
+ ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_put_hi_w()) << 32ULL),
+ (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_get_w()) +
+ ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_get_hi_w()) << 32ULL),
+ (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_fetch_w()) +
+ ((u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_fetch_hi_w()) << 32ULL),
+ gk20a_mem_rd32(inst_ptr, ram_fc_pb_header_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_pb_count_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_syncpointa_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_syncpointb_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_semaphorea_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_semaphoreb_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_semaphorec_w()),
+ gk20a_mem_rd32(inst_ptr, ram_fc_semaphored_w()));
gk20a_debug_output(o, "\n");
}
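/*
 * Illustrative note (not from the patch): each 64-bit pointer printed above
 * is reassembled from two 32-bit RAMFC words, e.g.
 *
 *   u64 top = (u64)gk20a_mem_rd32(inst_ptr, ram_fc_pb_top_level_get_w()) +
 *             ((u64)gk20a_mem_rd32(inst_ptr,
 *                     ram_fc_pb_top_level_get_hi_w()) << 32ULL);
 */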
static void fb_gk20a_reset(struct gk20a *g)
{
- nvhost_dbg_info("reset gk20a fb");
+ gk20a_dbg_info("reset gk20a fb");
gk20a_reset(g, mc_enable_pfb_enabled_f()
| mc_enable_l2_enabled_f()
u32 i;
u32 max_info_entries = top_device_info__size_1_v();
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* all we really care about finding is the graphics entry */
/* especially early on in sim it probably thinks it has more */
gr_info->runlist_id =
top_device_info_runlist_enum_v(table_entry);
- nvhost_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
+ gk20a_dbg_info("gr info: runlist_id %d", gr_info->runlist_id);
gr_info->engine_id =
top_device_info_engine_enum_v(table_entry);
- nvhost_dbg_info("gr info: engine_id %d", gr_info->engine_id);
+ gk20a_dbg_info("gr info: engine_id %d", gr_info->engine_id);
runlist_bit = 1 << gr_info->runlist_id;
for (pbdma_id = 0; pbdma_id < f->num_pbdma; pbdma_id++) {
- nvhost_dbg_info("gr info: pbdma_map[%d]=%d",
+ gk20a_dbg_info("gr info: pbdma_map[%d]=%d",
pbdma_id, f->pbdma_map[pbdma_id]);
if (f->pbdma_map[pbdma_id] & runlist_bit)
break;
}
if (pbdma_id == f->num_pbdma) {
- nvhost_err(d, "busted pbmda map");
+ gk20a_err(d, "busted pbmda map");
return -EINVAL;
}
gr_info->pbdma_id = pbdma_id;
}
if (gr_info->runlist_id == ~0) {
- nvhost_err(d, "busted device info");
+ gk20a_err(d, "busted device info");
return -EINVAL;
}
u32 runlist_id;
u32 i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (f->channel) {
int c;
get_exception_pbdma_info(g, eng_info);
e = &eng_info->pbdma_exception_info;
- nvhost_dbg_fn("pbdma_id %d, "
+ gk20a_dbg_fn("pbdma_id %d, "
"id_type %s, id %d, chan_status %d, "
"next_id_type %s, next_id %d, "
"chsw_in_progress %d",
get_exception_engine_info(g, eng_info);
e = &eng_info->engine_exception_info;
- nvhost_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
+ gk20a_dbg_fn("engine_id %d, id_type %s, id %d, ctx_status %d, "
"faulted %d, idle %d, ctxsw_in_progress %d, ",
eng_info->engine_id, e->id_is_chid ? "chid" : "tsgid",
e->id, e->ctx_status_v,
u32 i;
u64 runlist_size;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
f->max_runlists = fifo_eng_runlist_base__size_1_v();
f->runlist_info = kzalloc(sizeof(struct fifo_runlist_info_gk20a) *
Otherwise, one of them (cur_buffer) must have been pinned. */
runlist->cur_buffer = MAX_RUNLIST_BUFFERS;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up_runlist:
f->runlist_info = NULL;
clean_up:
- nvhost_dbg_fn("fail");
+ gk20a_dbg_fn("fail");
return -ENOMEM;
}
u32 timeout;
int i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* enable pmc pfifo */
gk20a_reset(g, mc_enable_pfifo_enabled_f()
| mc_enable_ce2_enabled_f());
fifo_eng_timeout_detection_enabled_f();
gk20a_writel(g, fifo_eng_timeout_r(), timeout);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
int chid, i, err = 0;
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (f->sw_ready) {
- nvhost_dbg_fn("skip init");
+ gk20a_dbg_fn("skip init");
return 0;
}
goto clean_up;
}
- nvhost_dbg(dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
+ gk20a_dbg(gpu_dbg_map, "userd bar1 va = 0x%llx", f->userd.gpu_va);
f->userd.size = f->userd_total_size;
f->sw_ready = true;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up:
- nvhost_dbg_fn("fail");
+ gk20a_dbg_fn("fail");
if (f->userd.gpu_va)
gk20a_gmmu_unmap(&g->mm.bar1.vm,
f->userd.gpu_va,
{
struct fifo_gk20a *f = &g->fifo;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* test write, read through bar1 @ userd region before
* turning on the snooping */
u32 bar1_vaddr = f->userd.gpu_va;
volatile u32 *cpu_vaddr = f->userd.cpuva;
- nvhost_dbg_info("test bar1 @ vaddr 0x%x",
+ gk20a_dbg_info("test bar1 @ vaddr 0x%x",
bar1_vaddr);
v = gk20a_bar1_readl(g, bar1_vaddr);
smp_mb();
if (v1 != gk20a_bar1_readl(g, bar1_vaddr)) {
- nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+ gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
return -EINVAL;
}
gk20a_bar1_writel(g, bar1_vaddr, v2);
if (v2 != gk20a_bar1_readl(g, bar1_vaddr)) {
- nvhost_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
+ gk20a_err(dev_from_gk20a(g), "bar1 broken @ gk20a!");
return -EINVAL;
}
/* is it visible to the cpu? */
if (*cpu_vaddr != v2) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"cpu didn't see bar1 write @ %p!",
cpu_vaddr);
}
fifo_bar1_base_ptr_f(f->userd.gpu_va >> 12) |
fifo_bar1_base_valid_true_f());
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
{
u32 fault_info_v;
- nvhost_dbg_fn("engine_id %d", engine_id);
+ gk20a_dbg_fn("engine_id %d", engine_id);
memset(f, 0, sizeof(*f));
static void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (engine_id == top_device_info_type_enum_graphics_v()) {
/* resetting engine using mc_enable_r() is not enough,
u32 intr;
intr = gk20a_readl(g, fifo_intr_chsw_error_r());
- nvhost_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
+ gk20a_err(dev_from_gk20a(g), "chsw: %08x\n", intr);
gk20a_fecs_dump_falcon_stats(g);
gk20a_writel(g, fifo_intr_chsw_error_r(), intr);
}
{
struct device *dev = dev_from_gk20a(g);
u32 fault_id = gk20a_readl(g, fifo_intr_mmu_fault_id_r());
- nvhost_err(dev, "dropped mmu fault (0x%08x)", fault_id);
+ gk20a_err(dev, "dropped mmu fault (0x%08x)", fault_id);
}
static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
if (!ch)
return verbose;
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"channel %d generated a mmu fault",
ch->hw_chid);
if (ch->error_notifier) {
unsigned long engine_mmu_id;
int i;
bool verbose = true;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
g->fifo.deferred_reset_pending = false;
f.engine_subid_desc,
f.client_desc,
f.fault_type_desc);
- nvhost_err(dev_from_gk20a(g), "mmu fault on engine %d, "
+ gk20a_err(dev_from_gk20a(g), "mmu fault on engine %d, "
"engine subid %d (%s), client %d (%s), "
"addr 0x%08x:0x%08x, type %d (%s), info 0x%08x,"
"inst_ptr 0x%llx\n",
if (type_ch) {
ch = g->fifo.channel + id;
} else {
- nvhost_err(dev_from_gk20a(g), "non-chid type not supported");
+ gk20a_err(dev_from_gk20a(g), "non-chid type not supported");
WARN_ON(1);
}
} else {
} else if (f.inst_ptr ==
g->mm.bar1.inst_block.cpu_pa) {
- nvhost_err(dev_from_gk20a(g), "mmu fault from bar1");
+ gk20a_err(dev_from_gk20a(g), "mmu fault from bar1");
} else if (f.inst_ptr ==
g->mm.pmu.inst_block.cpu_pa) {
- nvhost_err(dev_from_gk20a(g), "mmu fault from pmu");
+ gk20a_err(dev_from_gk20a(g), "mmu fault from pmu");
} else
- nvhost_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
+ gk20a_err(dev_from_gk20a(g), "couldn't locate channel for mmu fault");
}
if (g->fifo.deferred_reset_pending) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "sm debugger attached,"
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "sm debugger attached,"
" deferring channel recovery to channel free");
/* clear interrupt */
gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
!tegra_platform_is_silicon());
if (ret)
- nvhost_err(dev_from_gk20a(g), "mmu fault timeout");
+ gk20a_err(dev_from_gk20a(g), "mmu fault timeout");
/* release mmu fault trigger */
for_each_set_bit(engine_id, &engine_ids, 32)
GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000)) {
gk20a_set_error_notifier(ch,
NVHOST_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fifo sched ctxsw timeout error:"
"engine = %u, ch = %d", engine_id, id);
gk20a_fifo_recover(g, BIT(engine_id),
ch->timeout_debug_dump);
} else {
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"fifo is waiting for ctx switch for %d ms,"
"ch = %d\n",
ch->timeout_accumulated_ms,
return ch->timeout_debug_dump;
}
err:
- nvhost_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
+ gk20a_err(dev_from_gk20a(g), "fifo sched error : 0x%08x, engine=%u, %s=%d",
sched_error, engine_id, non_chid ? "non-ch" : "ch", id);
return true;
struct device *dev = dev_from_gk20a(g);
u32 handled = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (fifo_intr & fifo_intr_0_pio_error_pending_f()) {
/* pio mode is unused. this shouldn't happen, ever. */
/* should we clear it or just leave it pending? */
- nvhost_err(dev, "fifo pio error!\n");
+ gk20a_err(dev, "fifo pio error!\n");
BUG_ON(1);
}
if (fifo_intr & fifo_intr_0_bind_error_pending_f()) {
u32 bind_error = gk20a_readl(g, fifo_intr_bind_error_r());
- nvhost_err(dev, "fifo bind error: 0x%08x", bind_error);
+ gk20a_err(dev, "fifo bind error: 0x%08x", bind_error);
print_channel_reset_log = true;
handled |= fifo_intr_0_bind_error_pending_f();
}
if (print_channel_reset_log) {
int engine_id;
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"channel reset initated from %s", __func__);
for (engine_id = 0;
engine_id < g->fifo.max_engines;
engine_id++) {
- nvhost_dbg_fn("enum:%d -> engine_id:%d", engine_id,
+ gk20a_dbg_fn("enum:%d -> engine_id:%d", engine_id,
g->fifo.engine_info[engine_id].engine_id);
fifo_pbdma_exception_status(g,
&g->fifo.engine_info[engine_id]);
bool reset_device = false;
bool reset_channel = false;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
- nvhost_dbg(dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
+ gk20a_dbg(gpu_dbg_intr, "pbdma id intr pending %d %08x %08x", pbdma_id,
pbdma_intr_0, pbdma_intr_1);
if (pbdma_intr_0) {
if (f->intr.pbdma.device_fatal_0 & pbdma_intr_0) {
for (i = 0; i < fifo_intr_pbdma_id_status__size_1_v(); i++) {
if (fifo_intr_pbdma_id_status_f(pbdma_pending, i)) {
- nvhost_dbg(dbg_intr, "pbdma id %d intr pending", i);
+ gk20a_dbg(gpu_dbg_intr, "pbdma id %d intr pending", i);
clear_intr |=
gk20a_fifo_handle_pbdma_intr(dev, g, f, i);
}
* in a threaded interrupt context... */
mutex_lock(&g->fifo.intr.isr.mutex);
- nvhost_dbg(dbg_intr, "fifo isr %08x\n", fifo_intr);
+ gk20a_dbg(gpu_dbg_intr, "fifo isr %08x\n", fifo_intr);
/* handle runlist update */
if (fifo_intr & fifo_intr_0_runlist_event_pending_f()) {
u32 fifo_intr = gk20a_readl(g, fifo_intr_0_r());
u32 clear_intr = 0;
- nvhost_dbg(dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
+ gk20a_dbg(gpu_dbg_intr, "fifo nonstall isr %08x\n", fifo_intr);
if (fifo_intr & fifo_intr_0_channel_intr_pending_f())
clear_intr |= fifo_channel_isr(g, fifo_intr);
u32 elpg_off = 0;
u32 i;
- nvhost_dbg_fn("%d", hw_chid);
+ gk20a_dbg_fn("%d", hw_chid);
/* we have no idea which runlist we are using. lock all */
for (i = 0; i < g->fifo.max_runlists; i++)
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[hw_chid];
- nvhost_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
+ gk20a_err(dev_from_gk20a(g), "preempt channel %d timeout\n",
hw_chid);
/* forcefully reset all busy engines using this channel */
u32 elpg_off;
u32 enable;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* disable elpg if failed to acquire pmu mutex */
elpg_off = pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
else
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
u32 elpg_off;
u32 err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gr_stat =
gk20a_readl(g, fifo_engine_status_r(eng_info->engine_id));
pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
if (err) {
- nvhost_dbg_fn("failed");
+ gk20a_dbg_fn("failed");
if (gk20a_fifo_enable_engine_activity(g, eng_info))
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to enable gr engine activity\n");
} else {
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
return err;
}
old_buf = runlist->cur_buffer;
new_buf = !runlist->cur_buffer;
- nvhost_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
+ gk20a_dbg_info("runlist_id : %d, switch to new buffer 0x%16llx",
runlist_id, runlist->mem[new_buf].iova);
runlist_pa = gk20a_get_phys_from_iova(d, runlist->mem[new_buf].iova);
runlist_entry = runlist_entry_base;
for_each_set_bit(chid,
runlist->active_channels, f->num_channels) {
- nvhost_dbg_info("add channel %d to runlist", chid);
+ gk20a_dbg_info("add channel %d to runlist", chid);
runlist_entry[0] = chid;
runlist_entry[1] = 0;
runlist_entry += 2;
ret = gk20a_fifo_runlist_wait_pending(g, runlist_id);
if (ret == -ETIMEDOUT) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"runlist update timeout");
gk20a_fifo_runlist_reset_engines(g, runlist_id);
mutex_lock(&runlist->mutex);
if (ret)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"runlist update failed: %d", ret);
} else if (ret == -EINTR)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"runlist update interrupted");
}
int gk20a_fifo_suspend(struct gk20a *g)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* stop bar1 snooping */
gk20a_writel(g, fifo_bar1_base_r(),
gk20a_writel(g, fifo_intr_en_0_r(), 0);
gk20a_writel(g, fifo_intr_en_1_r(), 0);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
if (val & (bus_intr_0_pri_squash_m() |
bus_intr_0_pri_fecserr_m() |
bus_intr_0_pri_timeout_m())) {
- nvhost_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
gk20a_readl(g, top_fs_status_r()));
- nvhost_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
gk20a_readl(g, mc_enable_r()));
- nvhost_err(&g->dev->dev,
+ gk20a_err(&g->dev->dev,
"NV_PTIMER_PRI_TIMEOUT_SAVE_0: 0x%x\n",
gk20a_readl(g, timer_pri_timeout_save_0_r()));
- nvhost_err(&g->dev->dev,
+ gk20a_err(&g->dev->dev,
"NV_PTIMER_PRI_TIMEOUT_SAVE_1: 0x%x\n",
gk20a_readl(g, timer_pri_timeout_save_1_r()));
- nvhost_err(&g->dev->dev,
+ gk20a_err(&g->dev->dev,
"NV_PTIMER_PRI_TIMEOUT_FECS_ERRCODE: 0x%x\n",
gk20a_readl(g, timer_pri_timeout_fecs_errcode_r()));
}
if (val)
- nvhost_err(&g->dev->dev,
+ gk20a_err(&g->dev->dev,
"Unhandled pending pbus interrupt\n");
gk20a_writel(g, bus_intr_0_r(), val);
struct gk20a *g = dev_id;
u32 mc_intr_0;
- nvhost_dbg(dbg_intr, "interrupt thread launched");
+ gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
mc_intr_0 = gk20a_readl(g, mc_intr_0_r());
- nvhost_dbg(dbg_intr, "stall intr %08x\n", mc_intr_0);
+ gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);
if (mc_intr_0 & mc_intr_0_pgraph_pending_f())
gr_gk20a_elpg_protected_call(g, gk20a_gr_isr(g));
struct gk20a *g = dev_id;
u32 mc_intr_1;
- nvhost_dbg(dbg_intr, "interrupt thread launched");
+ gk20a_dbg(gpu_dbg_intr, "interrupt thread launched");
mc_intr_1 = gk20a_readl(g, mc_intr_1_r());
- nvhost_dbg(dbg_intr, "non-stall intr %08x\n", mc_intr_1);
+ gk20a_dbg(gpu_dbg_intr, "non-stall intr %08x\n", mc_intr_1);
if (mc_intr_1 & mc_intr_0_pfifo_pending_f())
gk20a_fifo_nonstall_isr(g);
struct gk20a *g = get_gk20a(dev);
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
#ifndef CONFIG_PM_RUNTIME
gk20a_pm_finalize_poweron(&dev->dev);
static void gk20a_deinit_client(struct platform_device *dev)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
#ifndef CONFIG_PM_RUNTIME
gk20a_pm_prepare_poweroff(&dev->dev);
#endif
struct gk20a *g = get_gk20a(dev);
int ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!g->power_on)
return 0;
(mc_boot_0_major_revision_v(mc_boot_0_value) << 4) |
mc_boot_0_minor_revision_v(mc_boot_0_value);
- nvhost_dbg_info("arch: %x, impl: %x, rev: %x\n",
+ gk20a_dbg_info("arch: %x, impl: %x, rev: %x\n",
g->gpu_characteristics.arch,
g->gpu_characteristics.impl,
g->gpu_characteristics.rev);
struct gk20a *g = get_gk20a(dev);
int err, nice_value;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (g->power_on)
return 0;
saving features (blcg/slcg) are enabled. For now, do it here. */
err = gk20a_init_clk_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a clk");
+ gk20a_err(&dev->dev, "failed to init gk20a clk");
goto done;
}
err = gk20a_init_fifo_reset_enable_hw(g);
if (err) {
- nvhost_err(&dev->dev, "failed to reset gk20a fifo");
+ gk20a_err(&dev->dev, "failed to reset gk20a fifo");
goto done;
}
err = gk20a_init_mm_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a mm");
+ gk20a_err(&dev->dev, "failed to init gk20a mm");
goto done;
}
err = gk20a_init_pmu_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a pmu");
+ gk20a_err(&dev->dev, "failed to init gk20a pmu");
goto done;
}
err = gk20a_init_fifo_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a fifo");
+ gk20a_err(&dev->dev, "failed to init gk20a fifo");
goto done;
}
err = gk20a_init_gr_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a gr");
+ gk20a_err(&dev->dev, "failed to init gk20a gr");
goto done;
}
err = gk20a_init_pmu_setup_hw2(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a pmu_hw2");
+ gk20a_err(&dev->dev, "failed to init gk20a pmu_hw2");
goto done;
}
err = gk20a_init_therm_support(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a therm");
+ gk20a_err(&dev->dev, "failed to init gk20a therm");
goto done;
}
err = gk20a_init_gpu_characteristics(g);
if (err) {
- nvhost_err(&dev->dev, "failed to init gk20a gpu characteristics");
+ gk20a_err(&dev->dev, "failed to init gk20a gpu characteristics");
goto done;
}
int err;
struct gk20a *g = get_gk20a(pdev);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
cdev_init(cdev, ops);
cdev->owner = THIS_MODULE;
return -ENODATA;
}
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
platform_set_drvdata(dev, platform);
static int __exit gk20a_remove(struct platform_device *dev)
{
struct gk20a *g = get_gk20a(dev);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
#ifdef CONFIG_INPUT_CFBOOST
cfb_remove_device(&dev->dev);
{
u32 pmc;
- nvhost_dbg(dbg_info, "pmc disable: %08x\n", units);
+ gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units);
spin_lock(&g->mc_enable_lock);
pmc = gk20a_readl(g, mc_enable_r());
{
u32 pmc;
- nvhost_dbg(dbg_info, "pmc enable: %08x\n", units);
+ gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units);
spin_lock(&g->mc_enable_lock);
pmc = gk20a_readl(g, mc_enable_r());
/* register accessors */
static inline void gk20a_writel(struct gk20a *g, u32 r, u32 v)
{
- nvhost_dbg(dbg_reg, " r=0x%x v=0x%x", r, v);
+ gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
writel(v, g->regs + r);
}
static inline u32 gk20a_readl(struct gk20a *g, u32 r)
{
u32 v = readl(g->regs + r);
- nvhost_dbg(dbg_reg, " r=0x%x v=0x%x", r, v);
+ gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
return v;
}
static inline void gk20a_bar1_writel(struct gk20a *g, u32 b, u32 v)
{
- nvhost_dbg(dbg_reg, " b=0x%x v=0x%x", b, v);
+ gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
writel(v, g->bar1 + b);
}
static inline u32 gk20a_bar1_readl(struct gk20a *g, u32 b)
{
u32 v = readl(g->bar1 + b);
- nvhost_dbg(dbg_reg, " b=0x%x v=0x%x", b, v);
+ gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
return v;
}
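/*
 * Usage sketch (illustrative only): a typical read-modify-write through these
 * accessors, matching the pattern used elsewhere in this series, e.g.
 *
 *   u32 data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
 *   data = set_field(data, field_m(), field_f(x));
 *   gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
 *
 * where field_m()/field_f() stand in for the relevant hw_*_gk20a.h helpers.
 */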
u32 i, major_v = ~0, major_v_hw, netlist_num;
int net, max, err = -ENOENT;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
#ifdef GK20A_NETLIST_IMAGE_FW_NAME
net = NETLIST_FINAL;
for (; net < max; net++) {
if (gr_gk20a_get_netlist_name(net, name) != 0) {
- nvhost_warn(d, "invalid netlist index %d", net);
+ gk20a_warn(d, "invalid netlist index %d", net);
continue;
}
netlist_fw = gk20a_request_firmware(g, name);
if (!netlist_fw) {
- nvhost_warn(d, "failed to load netlist %s", name);
+ gk20a_warn(d, "failed to load netlist %s", name);
continue;
}
switch (netlist->regions[i].region_id) {
case NETLIST_REGIONID_FECS_UCODE_DATA:
- nvhost_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
+ gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_DATA");
err = gr_gk20a_alloc_load_netlist_u32(
src, size, &g->gr.ctx_vars.ucode.fecs.data);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_FECS_UCODE_INST:
- nvhost_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
+ gk20a_dbg_info("NETLIST_REGIONID_FECS_UCODE_INST");
err = gr_gk20a_alloc_load_netlist_u32(
src, size, &g->gr.ctx_vars.ucode.fecs.inst);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_GPCCS_UCODE_DATA:
- nvhost_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
+ gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_DATA");
err = gr_gk20a_alloc_load_netlist_u32(
src, size, &g->gr.ctx_vars.ucode.gpccs.data);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_GPCCS_UCODE_INST:
- nvhost_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
+ gk20a_dbg_info("NETLIST_REGIONID_GPCCS_UCODE_INST");
err = gr_gk20a_alloc_load_netlist_u32(
src, size, &g->gr.ctx_vars.ucode.gpccs.inst);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_SW_BUNDLE_INIT:
- nvhost_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
+ gk20a_dbg_info("NETLIST_REGIONID_SW_BUNDLE_INIT");
err = gr_gk20a_alloc_load_netlist_av(
src, size, &g->gr.ctx_vars.sw_bundle_init);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_SW_METHOD_INIT:
- nvhost_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
+ gk20a_dbg_info("NETLIST_REGIONID_SW_METHOD_INIT");
err = gr_gk20a_alloc_load_netlist_av(
src, size, &g->gr.ctx_vars.sw_method_init);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_SW_CTX_LOAD:
- nvhost_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
+ gk20a_dbg_info("NETLIST_REGIONID_SW_CTX_LOAD");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.sw_ctx_load);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_SW_NON_CTX_LOAD:
- nvhost_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
+ gk20a_dbg_info("NETLIST_REGIONID_SW_NON_CTX_LOAD");
err = gr_gk20a_alloc_load_netlist_av(
src, size, &g->gr.ctx_vars.sw_non_ctx_load);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_SYS:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_SYS");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.sys);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_GPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.gpc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_TPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_TPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.tpc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_ZCULL_GPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_ZCULL_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.zcull_gpc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_PPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.ppc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_PM_SYS:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_SYS");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_sys);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_PM_GPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_GPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_gpc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_CTXREG_PM_TPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PM_TPC");
err = gr_gk20a_alloc_load_netlist_aiv(
src, size, &g->gr.ctx_vars.ctxsw_regs.pm_tpc);
if (err)
goto clean_up;
break;
case NETLIST_REGIONID_BUFFER_SIZE:
g->gr.ctx_vars.buffer_size = *src;
- nvhost_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d",
+ gk20a_dbg_info("NETLIST_REGIONID_BUFFER_SIZE : %d",
g->gr.ctx_vars.buffer_size);
break;
case NETLIST_REGIONID_CTXSW_REG_BASE_INDEX:
g->gr.ctx_vars.regs_base_index = *src;
- nvhost_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %d",
+ gk20a_dbg_info("NETLIST_REGIONID_CTXSW_REG_BASE_INDEX : %d",
g->gr.ctx_vars.regs_base_index);
break;
case NETLIST_REGIONID_MAJORV:
major_v = *src;
- nvhost_dbg_info("NETLIST_REGIONID_MAJORV : %d",
+ gk20a_dbg_info("NETLIST_REGIONID_MAJORV : %d",
major_v);
break;
case NETLIST_REGIONID_NETLIST_NUM:
netlist_num = *src;
- nvhost_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d",
+ gk20a_dbg_info("NETLIST_REGIONID_NETLIST_NUM : %d",
netlist_num);
break;
case NETLIST_REGIONID_CTXREG_PMPPC:
- nvhost_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC skipped");
+ gk20a_dbg_info("NETLIST_REGIONID_CTXREG_PMPPC skipped");
break;
default:
- nvhost_warn(d, "unrecognized region %d skipped", i);
+ gk20a_warn(d, "unrecognized region %d skipped", i);
break;
}
}
if (net != NETLIST_FINAL && major_v != major_v_hw) {
- nvhost_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x",
+ gk20a_dbg_info("skip %s: major_v 0x%08x doesn't match hw 0x%08x",
name, major_v, major_v_hw);
goto clean_up;
}
g->gr.netlist = net;
release_firmware(netlist_fw);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
goto done;
clean_up:
done:
if (g->gr.ctx_vars.valid) {
- nvhost_dbg_info("netlist image %s loaded", name);
+ gk20a_dbg_info("netlist image %s loaded", name);
return 0;
} else {
- nvhost_err(d, "failed to load netlist image!!");
+ gk20a_err(d, "failed to load netlist image!!");
return err;
}
}
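Each NETLIST_REGIONID_* case above hands the region payload to one of three small loaders (u32, av or aiv lists). A minimal sketch of what the u32 variant amounts to; the struct and helper names below are invented for illustration and do not match the driver's own definitions:
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative types and names only -- not the driver's definitions. */
struct u32_list_sketch {
	u32 count;
	u32 *l;
};

static int alloc_load_u32_list(u32 *src, u32 len, struct u32_list_sketch *list)
{
	/* len is in bytes: round up to whole words, then copy the payload */
	list->count = DIV_ROUND_UP(len, (u32)sizeof(u32));
	list->l = kcalloc(list->count, sizeof(u32), GFP_KERNEL);
	if (!list->l)
		return -ENOMEM;
	memcpy(list->l, src, len);
	return 0;
}
The av/aiv variants follow the same pattern with (addr, value) and (addr, index, value) element types.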
char *reg_path = NULL;
char *value_path = NULL;
- nvhost_dbg(dbg_fn | dbg_info,
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_info,
"querying grctx info from chiplib");
g->gr.ctx_vars.dynamic = true;
gk20a_sim_esc_readl(g, "GRCTX_GEN_CTX_REGS_BASE_INDEX", 0,
&g->gr.ctx_vars.regs_base_index);
- nvhost_dbg(dbg_info | dbg_fn, "finished querying grctx info from chiplib");
+ gk20a_dbg(gpu_dbg_info | gpu_dbg_fn, "finished querying grctx info from chiplib");
return 0;
fail:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed querying grctx info from chiplib");
return err;
{
int i;
- nvhost_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_os_r : %d",
gk20a_readl(g, gr_fecs_os_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_cpuctl_r : 0x%x",
gk20a_readl(g, gr_fecs_cpuctl_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_idlestate_r : 0x%x",
gk20a_readl(g, gr_fecs_idlestate_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox0_r : 0x%x",
gk20a_readl(g, gr_fecs_mailbox0_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_mailbox1_r : 0x%x",
gk20a_readl(g, gr_fecs_mailbox1_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_irqstat_r : 0x%x",
gk20a_readl(g, gr_fecs_irqstat_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmode_r : 0x%x",
gk20a_readl(g, gr_fecs_irqmode_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_irqmask_r : 0x%x",
gk20a_readl(g, gr_fecs_irqmask_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_irqdest_r : 0x%x",
gk20a_readl(g, gr_fecs_irqdest_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_debug1_r : 0x%x",
gk20a_readl(g, gr_fecs_debug1_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_debuginfo_r : 0x%x",
gk20a_readl(g, gr_fecs_debuginfo_r()));
for (i = 0; i < gr_fecs_ctxsw_mailbox__size_1_v(); i++)
- nvhost_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_ctxsw_mailbox_r(%d) : 0x%x",
i, gk20a_readl(g, gr_fecs_ctxsw_mailbox_r(i)));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_engctl_r : 0x%x",
gk20a_readl(g, gr_fecs_engctl_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_curctx_r : 0x%x",
gk20a_readl(g, gr_fecs_curctx_r()));
- nvhost_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "gr_fecs_nxtctx_r : 0x%x",
gk20a_readl(g, gr_fecs_nxtctx_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_IMB : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_DMB : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CSW : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_CTX : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_EXCI : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
for (i = 0; i < 4; i++) {
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_PC));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_PC : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
gk20a_writel(g, gr_fecs_icd_cmd_r(),
gr_fecs_icd_cmd_opc_rreg_f() |
gr_fecs_icd_cmd_idx_f(PMU_FALCON_REG_SP));
- nvhost_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "FECS_FALCON_REG_SP : 0x%x",
gk20a_readl(g, gr_fecs_icd_rdata_r()));
}
}
const u32 *ucode_u32_data;
u32 checksum;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_writel(g, gr_gpccs_dmemc_r(0), (gr_gpccs_dmemc_offs_f(0) |
gr_gpccs_dmemc_blk_f(0) |
gk20a_writel(g, gr_fecs_dmemd_r(0), ucode_u32_data[i]);
checksum += ucode_u32_data[i];
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
static void gr_gk20a_load_falcon_imem(struct gk20a *g)
u32 tag, i, pad_start, pad_end;
u32 checksum;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
cfg = gk20a_readl(g, gr_fecs_cfg_r());
fecs_imem_size = gr_fecs_cfg_imem_sz_v(cfg);
bool ctxsw_active;
bool gr_busy;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
do {
/* fmodel: host gets fifo_engine_status(gr) from gr
gr_engine_status_value_busy_f();
if (!gr_enabled || (!gr_busy && !ctxsw_active)) {
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
} while (time_before(jiffies, end_jiffies)
|| !tegra_platform_is_silicon());
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"timeout, ctxsw busy : %d, gr busy : %d",
ctxsw_active, gr_busy);
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
u32 reg;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!tegra_platform_is_linsim()) {
/* Force clocks on */
} while (time_before(jiffies, end_jiffies));
if (!time_before(jiffies, end_jiffies)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to force the clocks on\n");
WARN_ON(1);
}
} while (time_before(jiffies, end_jiffies));
if (!time_before(jiffies, end_jiffies))
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"failed to set power mode to auto\n");
}
u32 check = WAIT_UCODE_LOOP;
u32 reg;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
while (check == WAIT_UCODE_LOOP) {
if (!time_before(jiffies, end_jiffies) &&
/* do no success check */
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid success opcode 0x%x", opc_success);
check = WAIT_UCODE_ERROR;
/* do no check on fail*/
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid fail opcode 0x%x", opc_fail);
check = WAIT_UCODE_ERROR;
break;
}
if (check == WAIT_UCODE_TIMEOUT) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"timeout waiting on ucode response");
gk20a_fecs_dump_falcon_stats(g);
return -1;
} else if (check == WAIT_UCODE_ERROR) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"ucode method failed on mailbox=%d value=0x%08x",
mailbox_id, reg);
gk20a_fecs_dump_falcon_stats(g);
return -1;
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
* are sent to the ucode in sequence, it can get into an undefined state. */
int gr_gk20a_disable_ctxsw(struct gk20a *g)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_stop_ctxsw_v(), 0);
}
/* Start processing (continue) context switches at FECS */
int gr_gk20a_enable_ctxsw(struct gk20a *g)
{
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
return gr_gk20a_ctrl_ctxsw(g, gr_fecs_method_push_adr_start_ctxsw_v(), 0);
}
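Because back-to-back stop/start methods can leave the FECS ucode in an undefined state (per the comment above), callers are expected to pair these two helpers strictly and to bail out if the stop request fails. A hedged usage sketch; the error handling and any locking policy are illustrative, not the driver's:
/* Illustrative pairing only; real callers add their own locking and
 * recovery policy around the FECS stop/start methods. */
static int do_work_with_ctxsw_stopped(struct gk20a *g)
{
	int err = gr_gk20a_disable_ctxsw(g);
	if (err)
		return err;	/* do not touch ctx state if FECS did not stop */

	/* ... operate on context state while context switching is halted ... */

	return gr_gk20a_enable_ctxsw(g);
}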
u32 addr_hi;
void *inst_ptr = NULL;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* flush gpu_va before commit */
gk20a_mm_fb_flush(c->g);
addr_lo = u64_lo32(gpu_va) >> 12;
addr_hi = u64_hi32(gpu_va);
- mem_wr32(inst_ptr, ram_in_gr_wfi_target_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_gr_wfi_target_w(),
ram_in_gr_cs_wfi_f() | ram_in_gr_wfi_mode_virtual_f() |
ram_in_gr_wfi_ptr_lo_f(addr_lo));
- mem_wr32(inst_ptr, ram_in_gr_wfi_ptr_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_gr_wfi_ptr_hi_w(),
ram_in_gr_wfi_ptr_hi_f(addr_hi));
gk20a_mm_l2_invalidate(c->g);
{
/* being defensive still... */
if (ch_ctx->patch_ctx.cpu_va) {
- nvhost_err(dev_from_gk20a(g), "nested ctx patch begin?");
+ gk20a_err(dev_from_gk20a(g), "nested ctx patch begin?");
return -EBUSY;
}
{
/* being defensive still... */
if (!ch_ctx->patch_ctx.cpu_va) {
- nvhost_err(dev_from_gk20a(g), "dangling ctx patch end?");
+ gk20a_err(dev_from_gk20a(g), "dangling ctx patch end?");
return -EINVAL;
}
* but be defensive still... */
if (!ch_ctx->patch_ctx.cpu_va) {
int err;
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"per-write ctx patch begin?");
/* yes, gr_gk20a_ctx_patch_smpc causes this one */
err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
patch_ptr = ch_ctx->patch_ctx.cpu_va;
patch_slot = ch_ctx->patch_ctx.data_count * 2;
- mem_wr32(patch_ptr, patch_slot++, addr);
- mem_wr32(patch_ptr, patch_slot++, data);
+ gk20a_mem_wr32(patch_ptr, patch_slot++, addr);
+ gk20a_mem_wr32(patch_ptr, patch_slot++, data);
ch_ctx->patch_ctx.data_count++;
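The patch buffer is filled as (addr, data) word pairs, with data_count tracking how many pairs have been queued; that count is later written into the context header so the ucode knows how many entries to apply. Callers bracket a group of writes with the begin/end helpers seen above. A sketch of the expected call pattern, with placeholder register offsets and values:
/* Sketch of the begin/write/end bracket; the offsets and values are
 * placeholders, not meaningful hardware settings. */
static int patch_two_regs_sketch(struct gk20a *g, struct channel_ctx_gk20a *ch_ctx)
{
	int err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
	if (err)
		return err;

	/* each call appends one (addr, data) pair and bumps data_count */
	gr_gk20a_ctx_patch_write(g, ch_ctx, 0x00418810, 0x1, true);
	gr_gk20a_ctx_patch_write(g, ch_ctx, 0x00418e28, 0x0, true);

	return gr_gk20a_ctx_patch_write_end(g, ch_ctx);
}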
>> ram_in_base_shift_v());
u32 ret;
- nvhost_dbg_info("bind channel %d inst ptr 0x%08x",
+ gk20a_dbg_info("bind channel %d inst ptr 0x%08x",
c->hw_chid, inst_base_ptr);
ret = gr_gk20a_submit_fecs_method_op(g,
.cond.ok = GR_IS_UCODE_OP_AND,
.cond.fail = GR_IS_UCODE_OP_AND});
if (ret)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"bind channel instance failed");
return ret;
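gr_gk20a_submit_fecs_method_op takes a fecs_method_op_gk20a that describes the method to push, the mailbox to poll and clear, and the success/fail conditions; the bind call above shows only the condition fields. An illustrative, self-contained shape for the image-size query used later in this file; the mailbox and condition values are placeholders chosen for the sketch, not the driver's settings:
/* Illustrative shape of a fecs_method_op_gk20a initializer; mailbox and
 * condition values are placeholders for the sketch. */
static u32 query_golden_image_size_sketch(struct gk20a *g)
{
	u32 golden_ctx_image_size = 0;
	struct fecs_method_op_gk20a op = {
		.method.addr = gr_fecs_method_push_adr_discover_image_size_v(),
		.method.data = 0,
		.mailbox = { .id = 0, .data = 0, .clr = ~0,
			     .ret = &golden_ctx_image_size,
			     .ok = 0, .fail = 0 },
		.cond.ok   = GR_IS_UCODE_OP_AND,
		.cond.fail = GR_IS_UCODE_OP_SKIP,
	};

	if (gr_gk20a_submit_fecs_method_op(g, op))
		return 0;	/* query failed */
	return golden_ctx_image_size;
}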
int ret = 0;
void *ctx_ptr = NULL;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ctx_ptr = vmap(ch_ctx->gr_ctx.pages,
PAGE_ALIGN(ch_ctx->gr_ctx.size) >> PAGE_SHIFT,
if (disable_fifo) {
ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to disable gr engine activity\n");
goto clean_up;
}
gk20a_mm_fb_flush(g);
gk20a_mm_l2_flush(g, true);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_o(), 0,
ch_ctx->zcull_ctx.ctx_sw_mode);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, va);
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, va);
if (disable_fifo) {
ret = gk20a_fifo_enable_engine_activity(g, gr_info);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to enable gr engine activity\n");
goto clean_up;
}
u32 temp;
u32 cbm_cfg_size1, cbm_cfg_size2;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (patch) {
int err;
u64 addr;
u32 size;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (patch) {
int err;
err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx);
if (size == gr_scc_pagepool_total_pages_hwmax_value_v())
size = gr_scc_pagepool_total_pages_hwmax_v();
- nvhost_dbg_info("pagepool buffer addr : 0x%016llx, size : %d",
+ gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d",
addr, size);
g->ops.gr.commit_global_pagepool(g, ch_ctx, addr, size, patch);
size = gr->bundle_cb_default_size;
- nvhost_dbg_info("bundle cb addr : 0x%016llx, size : %d",
+ gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d",
addr, size);
g->ops.gr.commit_global_bundle_cb(g, ch_ctx, addr, size, patch);
(u64_hi32(ch_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) <<
(32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
- nvhost_dbg_info("attrib cb addr : 0x%016llx", addr);
+ gk20a_dbg_info("attrib cb addr : 0x%016llx", addr);
g->ops.gr.commit_global_attrib_cb(g, ch_ctx, addr, patch);
if (patch)
data = min_t(u32, data, g->gr.min_gpm_fifo_depth);
- nvhost_dbg_info("bundle cb token limit : %d, state limit : %d",
+ gk20a_dbg_info("bundle cb token limit : %d, state limit : %d",
g->gr.bundle_cb_token_limit, data);
gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg2_r(),
u32 pe_vaf;
u32 pe_vsc_vpc;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gpm_pd_cfg = gk20a_readl(g, gr_gpcs_gpm_pd_cfg_r());
pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r());
if (!gr->map_tiles)
return -1;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_writel(g, gr_crstr_map_table_cfg_r(),
gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) |
u32 map_beta[gr_pd_alpha_ratio_table__size_1_v()];
u32 map_reg_used[gr_pd_alpha_ratio_table__size_1_v()];
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
memset(map_alpha, 0, gr_pd_alpha_ratio_table__size_1_v() * sizeof(u32));
memset(map_beta, 0, gr_pd_alpha_ratio_table__size_1_v() * sizeof(u32));
u32 max_ways_evict = INVALID_MAX_WAYS;
u32 l1c_dbg_reg_val;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
for (tpc_index = 0; tpc_index < gr->max_tpc_per_gpc_count; tpc_index++) {
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
>> ram_in_base_shift_v());
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ret = gr_gk20a_submit_fecs_method_op(g,
(struct fecs_method_op_gk20a) {
});
if (ret)
- nvhost_err(dev_from_gk20a(g), "save context image failed");
+ gk20a_err(dev_from_gk20a(g), "save context image failed");
return ret;
}
void *gold_ptr = NULL;
u32 err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* golden ctx is global to all channels. Although only the first
channel initializes golden image, driver needs to prevent multiple
gk20a_mm_l2_flush(g, false);
for (i = 0; i < ctx_header_words; i++) {
- data = mem_rd32(ctx_ptr, i);
- mem_wr32(gold_ptr, i, data);
+ data = gk20a_mem_rd32(ctx_ptr, i);
+ gk20a_mem_wr32(gold_ptr, i, data);
}
- mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_o(), 0,
+ gk20a_mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_o(), 0,
ctxsw_prog_main_image_zcull_mode_no_ctxsw_v());
- mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, 0);
+ gk20a_mem_wr32(gold_ptr + ctxsw_prog_main_image_zcull_ptr_o(), 0, 0);
gr_gk20a_commit_inst(c, ch_ctx->global_ctx_buffer_va[GOLDEN_CTX_VA]);
for (i = 0; i < gr->ctx_vars.golden_image_size / 4; i++)
gr->ctx_vars.local_golden_image[i] =
- mem_rd32(gold_ptr, i);
+ gk20a_mem_rd32(gold_ptr, i);
}
gr_gk20a_commit_inst(c, ch_ctx->gr_ctx.gpu_va);
clean_up:
if (err)
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
else
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
if (gold_ptr)
vunmap(gold_ptr);
if (!ctx_ptr)
return -ENOMEM;
- data = mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
+ data = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
data = data & ~ctxsw_prog_main_image_pm_smpc_mode_m();
data |= enable_smpc_ctxsw ?
ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f() :
ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f();
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
data);
vunmap(ctx_ptr);
int ret = 0;
void *ctx_ptr = NULL;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (gr->ctx_vars.local_golden_image == NULL)
return -1;
return -ENOMEM;
for (i = 0; i < gr->ctx_vars.golden_image_size / 4; i++)
- mem_wr32(ctx_ptr, i, gr->ctx_vars.local_golden_image[i]);
+ gk20a_mem_wr32(ctx_ptr, i, gr->ctx_vars.local_golden_image[i]);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_save_ops_o(), 0, 0);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_restore_ops_o(), 0, 0);
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_save_ops_o(), 0, 0);
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_num_restore_ops_o(), 0, 0);
virt_addr_lo = u64_lo32(ch_ctx->patch_ctx.gpu_va);
virt_addr_hi = u64_hi32(ch_ctx->patch_ctx.gpu_va);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_count_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_count_o(), 0,
ch_ctx->patch_ctx.data_count);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_lo_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_lo_o(), 0,
virt_addr_lo);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_hi_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_patch_adr_hi_o(), 0,
virt_addr_hi);
/* no user for client managed performance counter ctx */
ch_ctx->pm_ctx.ctx_sw_mode =
ctxsw_prog_main_image_pm_mode_no_ctxsw_f();
- data = mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
+ data = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0);
data = data & ~ctxsw_prog_main_image_pm_mode_m();
data |= ch_ctx->pm_ctx.ctx_sw_mode;
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_o(), 0,
data);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
/* set priv access map */
virt_addr_lo =
virt_addr_hi =
u64_hi32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f());
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
virt_addr_lo);
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
virt_addr_hi);
/* disable verif features */
- v = mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
+ v = gk20a_mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
- mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
+ gk20a_mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
vunmap(ctx_ptr);
.cond.fail = GR_IS_UCODE_OP_SKIP});
if (ret)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"restore context image failed");
}
static void gr_gk20a_start_falcon_ucode(struct gk20a *g)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_writel(g, gr_fecs_ctxsw_mailbox_clear_r(0),
gr_fecs_ctxsw_mailbox_clear_value_f(~0));
gk20a_writel(g, gr_gpccs_cpuctl_r(), gr_gpccs_cpuctl_startcpu_f(1));
gk20a_writel(g, gr_fecs_cpuctl_r(), gr_fecs_cpuctl_startcpu_f(1));
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
&iova,
GFP_KERNEL);
if (!ucode_info->inst_blk_desc.cpuva) {
- nvhost_err(d, "failed to allocate memory\n");
+ gk20a_err(d, "failed to allocate memory\n");
return -ENOMEM;
}
inst_ptr = ucode_info->inst_blk_desc.cpuva;
/* Set inst block */
- mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
u64_lo32(vm->va_limit) | 0xFFF);
- mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
pde_addr = gk20a_mm_iova_addr(vm->pdes.sgt->sgl);
pde_addr_lo = u64_lo32(pde_addr >> 12);
pde_addr_hi = u64_hi32(pde_addr);
- mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(pde_addr_lo));
- mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
ram_in_page_dir_base_hi_f(pde_addr_hi));
/* Map ucode surface to GMMU */
0, /* flags */
gk20a_mem_flag_read_only);
if (!ucode_info->ucode_gpuva) {
- nvhost_err(d, "failed to update gmmu ptes\n");
+ gk20a_err(d, "failed to update gmmu ptes\n");
return -ENOMEM;
}
fecs_fw = gk20a_request_firmware(g, GK20A_FECS_UCODE_IMAGE);
if (!fecs_fw) {
- nvhost_err(d, "failed to load fecs ucode!!");
+ gk20a_err(d, "failed to load fecs ucode!!");
return -ENOENT;
}
gpccs_fw = gk20a_request_firmware(g, GK20A_GPCCS_UCODE_IMAGE);
if (!gpccs_fw) {
release_firmware(fecs_fw);
- nvhost_err(d, "failed to load gpccs ucode!!");
+ gk20a_err(d, "failed to load gpccs ucode!!");
return -ENOENT;
}
GFP_KERNEL,
&attrs);
if (!ucode_info->surface_desc.cpuva) {
- nvhost_err(d, "memory allocation failed\n");
+ gk20a_err(d, "memory allocation failed\n");
err = -ENOMEM;
goto clean_up;
}
ucode_info->surface_desc.iova,
ucode_info->surface_desc.size);
if (err) {
- nvhost_err(d, "failed to create sg table\n");
+ gk20a_err(d, "failed to create sg table\n");
goto clean_up;
}
if (!buf) {
release_firmware(fecs_fw);
release_firmware(gpccs_fw);
- nvhost_err(d, "failed to map surface desc buffer");
+ gk20a_err(d, "failed to map surface desc buffer");
return -ENOMEM;
}
retries--;
}
if (!retries)
- nvhost_err(dev_from_gk20a(g), "arbiter idle timeout");
+ gk20a_err(dev_from_gk20a(g), "arbiter idle timeout");
gk20a_writel(g, gr_fecs_arb_ctx_adr_r(), 0x0);
val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
}
if (!retries)
- nvhost_err(dev_from_gk20a(g), "arbiter complete timeout");
+ gk20a_err(dev_from_gk20a(g), "arbiter complete timeout");
gk20a_writel(g, gr_fecs_current_ctx_r(),
gr_fecs_current_ctx_ptr_f(inst_ptr >> 12) |
val = gk20a_readl(g, gr_fecs_arb_ctx_cmd_r());
}
if (!retries)
- nvhost_err(dev_from_gk20a(g), "arbiter complete timeout");
+ gk20a_err(dev_from_gk20a(g), "arbiter complete timeout");
}
static int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
{
u32 ret;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (tegra_platform_is_linsim()) {
gk20a_writel(g, gr_fecs_ctxsw_mailbox_r(7),
eUcodeHandshakeInitComplete,
GR_IS_UCODE_OP_SKIP, 0);
if (ret) {
- nvhost_err(dev_from_gk20a(g), "falcon ucode init timeout");
+ gk20a_err(dev_from_gk20a(g), "falcon ucode init timeout");
return ret;
}
gk20a_writel(g, gr_fecs_method_push_r(),
gr_fecs_method_push_adr_set_watchdog_timeout_f());
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
.cond.fail = GR_IS_UCODE_OP_SKIP,
};
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
op.method.addr = gr_fecs_method_push_adr_discover_image_size_v();
op.mailbox.ret = &golden_ctx_image_size;
ret = gr_gk20a_submit_fecs_method_op(g, op);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"query golden image size failed");
return ret;
}
op.mailbox.ret = &zcull_ctx_image_size;
ret = gr_gk20a_submit_fecs_method_op(g, op);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"query zcull ctx image size failed");
return ret;
}
op.mailbox.ret = &pm_ctx_image_size;
ret = gr_gk20a_submit_fecs_method_op(g, op);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"query pm ctx image size failed");
return ret;
}
g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
u32 pagepool_buffer_size = gr_scc_pagepool_total_pages_hwmax_value_v() *
gr_scc_pagepool_total_pages_byte_granularity_v();
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
- nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
+ gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR],
cb_buffer_size);
&gr->global_ctx_buffer[CIRCULAR_VPR],
cb_buffer_size);
- nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
+ gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL],
pagepool_buffer_size);
&gr->global_ctx_buffer[PAGEPOOL_VPR],
pagepool_buffer_size);
- nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
+ gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE],
attr_buffer_size);
&gr->global_ctx_buffer[ATTRIBUTE_VPR],
attr_buffer_size);
- nvhost_dbg_info("golden_image_size : %d",
+ gk20a_dbg_info("golden_image_size : %d",
gr->ctx_vars.golden_image_size);
err = gk20a_gr_alloc_ctx_buffer(pdev,
if (err)
goto clean_up;
- nvhost_dbg_info("priv_access_map_size : %d",
+ gk20a_dbg_info("priv_access_map_size : %d",
gr->ctx_vars.priv_access_map_size);
err = gk20a_gr_alloc_ctx_buffer(pdev,
if (err)
goto clean_up;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up:
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
if (gr->global_ctx_buffer[i].destroy) {
gr->global_ctx_buffer[i].destroy(pdev,
&gr->global_ctx_buffer[i]);
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
u64 size;
u64 gpu_va;
u32 i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* Circular Buffer */
if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) {
u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
u32 i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
if (g_bfr_va[i]) {
int err = 0;
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (gr->ctx_vars.buffer_size == 0)
return 0;
struct device *d = dev_from_gk20a(g);
DEFINE_DMA_ATTRS(attrs);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_gmmu_unmap(ch_vm, ch_ctx->gr_ctx.gpu_va,
ch_ctx->gr_ctx.size, gk20a_mem_flag_none);
int err = 0;
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
patch_ctx->size = 128 * sizeof(u32);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
gk20a_free_sgtable(&sgt);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
err_free_sgtable:
patch_ctx->pages, patch_ctx->iova, &attrs);
patch_ctx->pages = NULL;
patch_ctx->iova = 0;
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
return err;
}
struct patch_desc *patch_ctx = &c->ch_ctx.patch_ctx;
struct vm_gk20a *ch_vm = c->vm;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (patch_ctx->gpu_va)
gk20a_gmmu_unmap(ch_vm, patch_ctx->gpu_va,
struct device *d = dev_from_gk20a(g);
DEFINE_DMA_ATTRS(attrs);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gr_gk20a_unmap_channel_patch_ctx(c);
struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
int err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* an address space needs to have been bound at this point.*/
if (!gk20a_channel_as_bound(c)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"not bound to address space at time"
" of grctx allocation");
return -EINVAL;
}
if (!g->ops.gr.is_valid_class(g, args->class_num)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid obj class 0x%x", args->class_num);
err = -EINVAL;
goto out;
if (ch_ctx->gr_ctx.pages == NULL) {
err = gr_gk20a_alloc_channel_gr_ctx(g, c);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to allocate gr ctx buffer");
goto out;
}
} else {
/*TBD: needs to be more subtle about which is being allocated
* as some are allowed to be allocated along same channel */
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"too many classes alloc'd on same channel");
err = -EINVAL;
goto out;
/* commit gr ctx buffer */
err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx.gpu_va);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to commit gr ctx buffer");
goto out;
}
if (ch_ctx->patch_ctx.pages == NULL) {
err = gr_gk20a_alloc_channel_patch_ctx(g, c);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to allocate patch buffer");
goto out;
}
if (!ch_ctx->global_ctx_buffer_mapped) {
err = gr_gk20a_map_global_ctx_buffers(g, c);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to map global ctx buffer");
goto out;
}
/* init golden image, ELPG enabled after this is done */
err = gr_gk20a_init_golden_ctx_image(g, c);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to init golden ctx image");
goto out;
}
err = gr_gk20a_elpg_protected_call(g,
gr_gk20a_load_golden_ctx_image(g, c));
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to load golden ctx image");
goto out;
}
c->num_objects++;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
out:
/* 1. gr_ctx, patch_ctx and global ctx buffer mapping
can be reused so no need to release them.
2. golden image init and load is a one time thing so if
they pass, no need to undo. */
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
return err;
}
{
unsigned long timeout = gk20a_get_gr_idle_timeout(c->g);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (c->num_objects == 0)
return 0;
struct device *d = dev_from_gk20a(g);
DEFINE_DMA_ATTRS(attrs);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gr_gk20a_free_global_ctx_buffers(g);
gr->max_zcull_per_gpc_count = proj_scal_litter_num_zcull_banks_v();
if (!gr->gpc_count) {
- nvhost_err(dev_from_gk20a(g), "gpc_count==0!");
+ gk20a_err(dev_from_gk20a(g), "gpc_count==0!");
goto clean_up;
}
gr->gpc_skip_mask[gpc_index] = gpc_new_skip_mask;
}
- nvhost_dbg_info("fbps: %d", gr->num_fbps);
- nvhost_dbg_info("max_gpc_count: %d", gr->max_gpc_count);
- nvhost_dbg_info("max_fbps_count: %d", gr->max_fbps_count);
- nvhost_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
- nvhost_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
- nvhost_dbg_info("max_tpc_count: %d", gr->max_tpc_count);
- nvhost_dbg_info("sys_count: %d", gr->sys_count);
- nvhost_dbg_info("gpc_count: %d", gr->gpc_count);
- nvhost_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc);
- nvhost_dbg_info("tpc_count: %d", gr->tpc_count);
- nvhost_dbg_info("ppc_count: %d", gr->ppc_count);
+ gk20a_dbg_info("fbps: %d", gr->num_fbps);
+ gk20a_dbg_info("max_gpc_count: %d", gr->max_gpc_count);
+ gk20a_dbg_info("max_fbps_count: %d", gr->max_fbps_count);
+ gk20a_dbg_info("max_tpc_per_gpc_count: %d", gr->max_tpc_per_gpc_count);
+ gk20a_dbg_info("max_zcull_per_gpc_count: %d", gr->max_zcull_per_gpc_count);
+ gk20a_dbg_info("max_tpc_count: %d", gr->max_tpc_count);
+ gk20a_dbg_info("sys_count: %d", gr->sys_count);
+ gk20a_dbg_info("gpc_count: %d", gr->gpc_count);
+ gk20a_dbg_info("pe_count_per_gpc: %d", gr->pe_count_per_gpc);
+ gk20a_dbg_info("tpc_count: %d", gr->tpc_count);
+ gk20a_dbg_info("ppc_count: %d", gr->ppc_count);
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
- nvhost_dbg_info("gpc_tpc_count[%d] : %d",
+ gk20a_dbg_info("gpc_tpc_count[%d] : %d",
gpc_index, gr->gpc_tpc_count[gpc_index]);
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
- nvhost_dbg_info("gpc_zcb_count[%d] : %d",
+ gk20a_dbg_info("gpc_zcb_count[%d] : %d",
gpc_index, gr->gpc_zcb_count[gpc_index]);
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
- nvhost_dbg_info("gpc_ppc_count[%d] : %d",
+ gk20a_dbg_info("gpc_ppc_count[%d] : %d",
gpc_index, gr->gpc_ppc_count[gpc_index]);
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
- nvhost_dbg_info("gpc_skip_mask[%d] : %d",
+ gk20a_dbg_info("gpc_skip_mask[%d] : %d",
gpc_index, gr->gpc_skip_mask[gpc_index]);
for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++)
for (pes_index = 0;
pes_index < gr->pe_count_per_gpc;
pes_index++)
- nvhost_dbg_info("pes_tpc_count[%d][%d] : %d",
+ gk20a_dbg_info("pes_tpc_count[%d][%d] : %d",
pes_index, gpc_index,
gr->pes_tpc_count[pes_index][gpc_index]);
for (pes_index = 0;
pes_index < gr->pe_count_per_gpc;
pes_index++)
- nvhost_dbg_info("pes_tpc_mask[%d][%d] : %d",
+ gk20a_dbg_info("pes_tpc_mask[%d][%d] : %d",
pes_index, gpc_index,
gr->pes_tpc_mask[pes_index][gpc_index]);
g->ops.gr.calc_global_ctx_buffer_size(g);
gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v();
- nvhost_dbg_info("bundle_cb_default_size: %d",
+ gk20a_dbg_info("bundle_cb_default_size: %d",
gr->bundle_cb_default_size);
- nvhost_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
- nvhost_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
- nvhost_dbg_info("attrib_cb_default_size: %d",
+ gk20a_dbg_info("min_gpm_fifo_depth: %d", gr->min_gpm_fifo_depth);
+ gk20a_dbg_info("bundle_cb_token_limit: %d", gr->bundle_cb_token_limit);
+ gk20a_dbg_info("attrib_cb_default_size: %d",
gr->attrib_cb_default_size);
- nvhost_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size);
- nvhost_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size);
- nvhost_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size);
- nvhost_dbg_info("timeslice_mode: %d", gr->timeslice_mode);
+ gk20a_dbg_info("attrib_cb_size: %d", gr->attrib_cb_size);
+ gk20a_dbg_info("alpha_cb_default_size: %d", gr->alpha_cb_default_size);
+ gk20a_dbg_info("alpha_cb_size: %d", gr->alpha_cb_size);
+ gk20a_dbg_info("timeslice_mode: %d", gr->timeslice_mode);
return 0;
kfree(sorted_to_unsorted_gpc_map);
if (ret)
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
else
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return ret;
}
ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to disable gr engine activity\n");
return ret;
}
ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to idle graphics\n");
goto clean_up;
}
clean_up:
ret = gk20a_fifo_enable_engine_activity(g, gr_info);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to enable gr engine activity\n");
}
ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to disable gr engine activity\n");
return ret;
}
ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to idle graphics\n");
goto clean_up;
}
clean_up:
ret = gk20a_fifo_enable_engine_activity(g, gr_info);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to enable gr engine activity\n");
}
if (memcmp(c_tbl->color_l2, zbc_val->color_l2,
sizeof(zbc_val->color_l2))) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"zbc l2 and ds color don't match with existing entries");
return -EINVAL;
}
}
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid zbc table type %d", zbc_val->type);
return -EINVAL;
}
ret = gk20a_fifo_disable_engine_activity(g, gr_info, true);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to disable gr engine activity\n");
return ret;
}
ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to idle graphics\n");
goto clean_up;
}
clean_up:
ret = gk20a_fifo_enable_engine_activity(g, gr_info);
if (ret) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to enable gr engine activity\n");
}
break;
case GK20A_ZBC_TYPE_COLOR:
if (index >= GK20A_ZBC_TABLE_SIZE) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid zbc color table index\n");
return -EINVAL;
}
break;
case GK20A_ZBC_TYPE_DEPTH:
if (index >= GK20A_ZBC_TABLE_SIZE) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid zbc depth table index\n");
return -EINVAL;
}
query_params->ref_cnt = gr->zbc_dep_tbl[index].ref_cnt;
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid zbc table type\n");
return -EINVAL;
}
if (!err)
gr->max_default_color_index = 4;
else {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to load default zbc color table\n");
return err;
}
if (!err)
gr->max_default_depth_index = 2;
else {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to load default zbc depth table\n");
return err;
}
int gk20a_gr_zbc_set_table(struct gk20a *g, struct gr_gk20a *gr,
struct zbc_entry *zbc_val)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
return gr_gk20a_elpg_protected_call(g,
gr_gk20a_add_zbc(g, gr, zbc_val));
therm_gate_ctrl_blk_clk_auto_f());
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid blcg mode %d", mode);
return;
}
therm_gate_ctrl_eng_clk_auto_f());
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid elcg mode %d", mode);
}
zcull_map_tiles = kzalloc(proj_scal_max_gpcs_v() *
proj_scal_max_tpc_per_gpc_v() * sizeof(u32), GFP_KERNEL);
if (!zcull_map_tiles) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to allocate zcull temp buffers");
return -ENOMEM;
}
proj_scal_max_tpc_per_gpc_v() * sizeof(u32), GFP_KERNEL);
if (!zcull_bank_counters) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to allocate zcull temp buffers");
kfree(zcull_map_tiles);
return -ENOMEM;
if (gpc_zcull_count != gr->max_zcull_per_gpc_count &&
gpc_zcull_count < gpc_tpc_count) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"zcull_banks (%d) less than tpcs (%d) for gpc (%d)",
gpc_zcull_count, gpc_tpc_count, gpc_index);
return -EINVAL;
u32 last_method_data = 0;
u32 i, err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* slcg prod values */
gr_gk20a_slcg_gr_load_gating_prod(g, g->slcg_enabled);
goto out;
out:
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
if (!g->gr.ctx_vars.valid) {
err = gr_gk20a_init_ctx_vars(g, &g->gr);
if (err)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to load gr init ctx");
}
return err;
bool fecs_scrubbing;
bool gpccs_scrubbing;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
do {
fecs_scrubbing = gk20a_readl(g, gr_fecs_dmactl_r()) &
gr_gpccs_dmactl_imem_scrubbing_m());
if (!fecs_scrubbing && !gpccs_scrubbing) {
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
udelay(GR_IDLE_CHECK_DEFAULT);
} while (--retries || !tegra_platform_is_silicon());
- nvhost_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+ gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
return -ETIMEDOUT;
}
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
u32 i, err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* enable interrupts */
gk20a_writel(g, gr_intr_r(), ~0);
out:
if (err)
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
else
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >>
PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
if (!data) {
- nvhost_err(dev_from_gk20a(g),
- "failed to map priv access map memory");
+ gk20a_err(dev_from_gk20a(g),
+ "failed to map priv access map memory");
err = -ENOMEM;
goto clean_up;
}
map_bit = wl_addr_gk20a[w] >> 2;
map_byte = map_bit >> 3;
map_shift = map_bit & 0x7; /* i.e. 0-7 */
- nvhost_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
+ gk20a_dbg_info("access map addr:0x%x byte:0x%x bit:%d",
wl_addr_gk20a[w], map_byte, map_shift);
((u8 *)data)[map_byte] |= 1 << map_shift;
}
struct gr_gk20a *gr = &g->gr;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (gr->sw_ready) {
- nvhost_dbg_fn("skip init");
+ gk20a_dbg_fn("skip init");
return 0;
}
if (tegra_cpu_is_asim())
gr->max_comptag_mem = 1; /* MBs worth of comptag coverage */
else {
- nvhost_dbg_info("total ram pages : %lu", totalram_pages);
+ gk20a_dbg_info("total ram pages : %lu", totalram_pages);
gr->max_comptag_mem = totalram_pages
>> (10 - (PAGE_SHIFT - 10));
}
gr->remove_support = gk20a_remove_gr_support;
gr->sw_ready = true;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up:
- nvhost_err(dev_from_gk20a(g), "fail");
+ gk20a_err(dev_from_gk20a(g), "fail");
gk20a_remove_gr_support(gr);
return err;
}
{
u32 err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
err = gk20a_init_gr_prepare(g);
if (err)
void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) {
gk20a_writel(g,
u32 gpc_index, ppc_index, stride, val, offset;
u32 cb_size = data * 4;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (cb_size > gr->attrib_cb_size)
cb_size = gr->attrib_cb_size;
u32 pd_ab_max_output;
u32 alpha_cb_size = data * 4;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* if (NO_ALPHA_BETA_TIMESLICE_SUPPORT_DEF)
return; */
static int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
u32 class_num, u32 offset, u32 data)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (class_num == KEPLER_COMPUTE_A) {
switch (offset << 2) {
{
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[isr_data->chid];
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_set_error_notifier(ch,
NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT);
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"gr semaphore timeout\n");
return -EINVAL;
}
{
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[isr_data->chid];
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_set_error_notifier(ch,
NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY);
/* This is an unrecoverable error, reset is needed */
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"gr semaphore timeout\n");
return -EINVAL;
}
isr_data->class_num, isr_data->offset,
isr_data->data_lo);
if (ret)
- nvhost_err(dev_from_gk20a(g), "invalid method class 0x%08x"
+ gk20a_err(dev_from_gk20a(g), "invalid method class 0x%08x"
", offset 0x%08x address 0x%08x\n",
isr_data->class_num, isr_data->offset, isr_data->addr);
{
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[isr_data->chid];
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_set_error_notifier(ch,
NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid class 0x%08x, offset 0x%08x",
isr_data->class_num, isr_data->offset);
return -EINVAL;
{
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[isr_data->chid];
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_set_error_notifier(ch,
NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"class error 0x%08x, offset 0x%08x",
isr_data->class_num, isr_data->offset);
return -EINVAL;
valid = is_valid_cyclestats_bar0_offset_gk20a(g,
op_elem->offset_bar0);
if (!valid) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"invalid cycletstats op offset: 0x%x\n",
op_elem->offset_bar0);
}
mutex_unlock(&ch->cyclestate.cyclestate_buffer_mutex);
#endif
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
wake_up(&ch->notifier_wq);
return 0;
}
bool mmu_debug_mode_enabled = gk20a_mm_mmu_debug_mode_enabled(g);
u32 dbgr_control0;
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "locking down SM");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locking down SM");
/* assert stop trigger */
dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
((global_esr & ~global_esr_mask) != 0);
if (locked_down || !error_pending) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "locked down SM");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "locked down SM");
/* de-assert stop trigger */
dbgr_control0 &= ~gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f();
/* if an mmu fault is pending and mmu debug mode is not
* enabled, the sm will never lock down. */
if (!mmu_debug_mode_enabled && gk20a_fifo_mmu_fault_pending(g)) {
- nvhost_err(dev_from_gk20a(g), "mmu fault pending, sm will"
+ gk20a_err(dev_from_gk20a(g), "mmu fault pending, sm will"
" never lock down!");
return -EFAULT;
}
} while (time_before(jiffies, end_jiffies)
|| !tegra_platform_is_silicon());
- nvhost_err(dev_from_gk20a(g), "timed out while trying to lock down SM");
+ gk20a_err(dev_from_gk20a(g), "timed out while trying to lock down SM");
return -EAGAIN;
}
bool sm_debugger_attached = gk20a_gr_sm_debugger_attached(g);
struct channel_gk20a *fault_ch;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
warp_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_warp_esr_r());
u32 tpc_exception_en = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
tpc_exception_en &= ~gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();
gk20a_writel(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r(), tpc_exception_en);
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "SM debugger attached");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM debugger attached");
}
/* if a debugger is present and an error has occurred, do a warp sync */
if (sm_debugger_attached && ((warp_esr != 0) || ((global_esr & ~global_mask) != 0))) {
- nvhost_dbg(dbg_intr, "warp sync needed");
+ gk20a_dbg(gpu_dbg_intr, "warp sync needed");
do_warp_sync = true;
}
if (do_warp_sync) {
ret = gk20a_gr_lock_down_sm(g, global_mask);
if (ret) {
- nvhost_err(dev_from_gk20a(g), "sm did not lock down!\n");
+ gk20a_err(dev_from_gk20a(g), "sm did not lock down!\n");
return ret;
}
}
int ret = 0;
u32 tpc_exception = gk20a_readl(g, gr_gpcs_tpcs_tpccs_tpc_exception_r());
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
/* check if an sm exception is pending */
if (gr_gpcs_tpcs_tpccs_tpc_exception_sm_v(tpc_exception) ==
gr_gpcs_tpcs_tpccs_tpc_exception_sm_pending_v()) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "SM exception pending");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "SM exception pending");
ret = gk20a_gr_handle_sm_exception(g, isr_data);
}
int ret = 0;
u32 gpc_exception = gk20a_readl(g, gr_gpcs_gpccs_gpc_exception_r());
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "");
/* check if tpc 0 has an exception */
if (gr_gpcs_gpccs_gpc_exception_tpc_v(gpc_exception) ==
gr_gpcs_gpccs_gpc_exception_tpc_0_pending_v()) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "TPC exception pending");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "TPC exception pending");
ret = gk20a_gr_handle_tpc_exception(g, isr_data);
}
int need_reset = 0;
u32 gr_intr = gk20a_readl(g, gr_intr_r());
- nvhost_dbg_fn("");
- nvhost_dbg(dbg_intr, "pgraph intr %08x", gr_intr);
+ gk20a_dbg_fn("");
+ gk20a_dbg(gpu_dbg_intr, "pgraph intr %08x", gr_intr);
if (!gr_intr)
return 0;
isr_data.chid =
gk20a_gr_get_chid_from_ctx(g, isr_data.curr_ctx);
if (isr_data.chid == -1) {
- nvhost_err(dev_from_gk20a(g), "invalid channel ctx 0x%08x",
+ gk20a_err(dev_from_gk20a(g), "invalid channel ctx 0x%08x",
isr_data.curr_ctx);
goto clean_up;
}
- nvhost_dbg(dbg_intr | dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
"channel %d: addr 0x%08x, "
"data 0x%08x 0x%08x,"
"ctx 0x%08x, offset 0x%08x, "
* register using set_falcon[4] */
if (gr_intr & gr_intr_firmware_method_pending_f()) {
need_reset |= true;
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "firmware method intr pending\n");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "firmware method intr pending\n");
gk20a_writel(g, gr_intr_r(),
gr_intr_firmware_method_reset_f());
gr_intr &= ~gr_intr_firmware_method_pending_f();
struct fifo_gk20a *f = &g->fifo;
struct channel_gk20a *ch = &f->channel[isr_data.chid];
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "exception %08x\n", exception);
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "exception %08x\n", exception);
if (exception & gr_exception_fe_m()) {
u32 fe = gk20a_readl(g, gr_fe_hww_esr_r());
- nvhost_dbg(dbg_intr, "fe warning %08x\n", fe);
+ gk20a_dbg(gpu_dbg_intr, "fe warning %08x\n", fe);
gk20a_writel(g, gr_fe_hww_esr_r(), fe);
}
u32 exception1 = gk20a_readl(g, gr_exception1_r());
u32 global_esr = gk20a_readl(g, gr_gpc0_tpc0_sm_hww_global_esr_r());
- nvhost_dbg(dbg_intr | dbg_gpu_dbg, "GPC exception pending");
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "GPC exception pending");
/* if no sm debugger is present, clean up the channel */
if (!gk20a_gr_sm_debugger_attached(g)) {
- nvhost_dbg(dbg_intr | dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
"SM debugger not attached, clearing interrupt");
need_reset |= -EFAULT;
} else {
gr_gpfifo_ctl_semaphore_access_f(1));
if (gr_intr)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"unhandled gr interrupt 0x%08x", gr_intr);
return 0;
u32 gr_intr = gk20a_readl(g, gr_intr_nonstall_r());
u32 clear_intr = 0;
- nvhost_dbg(dbg_intr, "pgraph nonstall intr %08x", gr_intr);
+ gk20a_dbg(gpu_dbg_intr, "pgraph nonstall intr %08x", gr_intr);
if (gr_intr & gr_intr_nonstall_trap_pending_f()) {
gk20a_channel_semaphore_wakeup(g);
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
u32 ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ret = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
if (ret)
gk20a_gr_flush_channel_tlb(&g->gr);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return ret;
}
u32 ppc_address;
u32 ppc_broadcast_addr;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
/* setup defaults */
ppc_address = 0;
{
u32 ppc_num;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
for (ppc_num = 0; ppc_num < g->gr.pe_count_per_gpc; ppc_num++)
priv_addr_table[(*t)++] = pri_ppc_addr(pri_ppccs_addr_mask(addr),
t = 0;
*num_registers = 0;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
err = gr_gk20a_decode_priv_addr(g, addr, &addr_type,
&gpc_num, &tpc_num, &ppc_num, &be_num,
&broadcast_flags);
- nvhost_dbg(dbg_gpu_dbg, "addr_type = %d", addr_type);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "addr_type = %d", addr_type);
if (err)
return err;
u32 potential_offsets = proj_scal_litter_num_gpcs_v() *
proj_scal_litter_num_tpc_per_gpc_v();
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
/* implementation is crossed-up if either of these happen */
if (max_offsets > potential_offsets)
priv_registers = kzalloc(sizeof(u32) * potential_offsets, GFP_KERNEL);
if (IS_ERR_OR_NULL(priv_registers)) {
- nvhost_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
+ gk20a_dbg_fn("failed alloc for potential_offsets=%d", potential_offsets);
err = PTR_ERR(priv_registers);
goto cleanup;
}
num_registers = 1;
if (!g->gr.ctx_vars.local_golden_image) {
- nvhost_dbg_fn("no context switch header info to work with");
+ gk20a_dbg_fn("no context switch header info to work with");
err = -EINVAL;
goto cleanup;
}
g->gr.ctx_vars.golden_image_size,
&priv_offset);
if (err) {
- nvhost_dbg_fn("Could not determine priv_offset for addr:0x%x",
+ gk20a_dbg_fn("Could not determine priv_offset for addr:0x%x",
addr); /*, grPriRegStr(addr)));*/
goto cleanup;
}
init_sm_dsm_reg_info();
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
for (reg = 0; reg < _num_ovr_perf_regs; reg++) {
for (gpc = 0; gpc < num_gpc; gpc++) {
/* reset the patch count from previous
runs, if ucode has already processed
it */
- tmp = mem_rd32(context +
+ tmp = gk20a_mem_rd32(context +
ctxsw_prog_main_image_patch_count_o(), 0);
if (!tmp)
vaddr_lo = u64_lo32(ch_ctx->patch_ctx.gpu_va);
vaddr_hi = u64_hi32(ch_ctx->patch_ctx.gpu_va);
- mem_wr32(context +
+ gk20a_mem_wr32(context +
ctxsw_prog_main_image_patch_count_o(),
0, ch_ctx->patch_ctx.data_count);
- mem_wr32(context +
+ gk20a_mem_wr32(context +
ctxsw_prog_main_image_patch_adr_lo_o(),
0, vaddr_lo);
- mem_wr32(context +
+ gk20a_mem_wr32(context +
ctxsw_prog_main_image_patch_adr_hi_o(),
0, vaddr_hi);
u32 gpc_tpc_addr;
u32 gpc_tpc_stride;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "offset=0x%x", offset);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "offset=0x%x", offset);
gpc = pri_get_gpc_num(offset);
gpc_tpc_addr = pri_gpccs_addr_mask(offset);
static inline bool check_main_image_header_magic(void *context)
{
- u32 magic = mem_rd32(context +
+ u32 magic = gk20a_mem_rd32(context +
ctxsw_prog_main_image_magic_value_o(), 0);
- nvhost_dbg(dbg_gpu_dbg, "main image magic=0x%x", magic);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "main image magic=0x%x", magic);
return magic == ctxsw_prog_main_image_magic_value_v_value_v();
}
static inline bool check_local_header_magic(void *context)
{
- u32 magic = mem_rd32(context +
+ u32 magic = gk20a_mem_rd32(context +
ctxsw_prog_local_magic_value_o(), 0);
- nvhost_dbg(dbg_gpu_dbg, "local magic=0x%x", magic);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "local magic=0x%x", magic);
return magic == ctxsw_prog_local_magic_value_v_value_v();
}
else
return -EINVAL;
- nvhost_dbg_info(" gpc = %d tpc = %d",
+ gk20a_dbg_info(" gpc = %d tpc = %d",
gpc_num, tpc_num);
} else
return -EINVAL;
context = context_buffer;
/* sanity check main header */
if (!check_main_image_header_magic(context)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Invalid main header: magic value");
return -EINVAL;
}
- num_gpcs = mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
+ num_gpcs = gk20a_mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
if (gpc_num >= num_gpcs) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"GPC 0x%08x is greater than total count 0x%08x!\n",
gpc_num, num_gpcs);
return -EINVAL;
}
- data32 = mem_rd32(context + ctxsw_prog_main_extended_buffer_ctl_o(), 0);
+ data32 = gk20a_mem_rd32(context + ctxsw_prog_main_extended_buffer_ctl_o(), 0);
ext_priv_size = ctxsw_prog_main_extended_buffer_ctl_size_v(data32);
if (0 == ext_priv_size) {
- nvhost_dbg_info(" No extended memory in context buffer");
+ gk20a_dbg_info(" No extended memory in context buffer");
return -EINVAL;
}
ext_priv_offset = ctxsw_prog_main_extended_buffer_ctl_offset_v(data32);
/* check local header magic */
context += ctxsw_prog_ucode_header_size_in_bytes();
if (!check_local_header_magic(context)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Invalid local header: magic value\n");
return -EINVAL;
}
if ((addr & tpc_gpc_mask) == (sm_dsm_perf_regs[i] & tpc_gpc_mask)) {
sm_dsm_perf_reg_id = i;
- nvhost_dbg_info("register match: 0x%08x",
+ gk20a_dbg_info("register match: 0x%08x",
sm_dsm_perf_regs[i]);
chk_addr = (proj_gpc_base_v() +
(sm_dsm_perf_regs[sm_dsm_perf_reg_id] & tpc_gpc_mask));
if (chk_addr != addr) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Oops addr miss-match! : 0x%08x != 0x%08x\n",
addr, chk_addr);
return -EINVAL;
(sm_dsm_perf_ctrl_regs[i] & tpc_gpc_mask)) {
sm_dsm_perf_ctrl_reg_id = i;
- nvhost_dbg_info("register match: 0x%08x",
+ gk20a_dbg_info("register match: 0x%08x",
sm_dsm_perf_ctrl_regs[i]);
chk_addr = (proj_gpc_base_v() +
tpc_gpc_mask));
if (chk_addr != addr) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Oops addr miss-match! : 0x%08x != 0x%08x\n",
addr, chk_addr);
return -EINVAL;
/* last sanity check: did we somehow compute an offset outside the
* extended buffer? */
if (offset_to_segment > offset_to_segment_end) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
offset_to_segment, offset_to_segment_end);
return -EINVAL;
u32 ppc_num, tpc_num, tpc_addr, gpc_addr, ppc_addr;
struct aiv_gk20a *reg;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "pri_addr=0x%x", pri_addr);
if (!g->gr.ctx_vars.valid)
return -EINVAL;
(litter_num_pes_per_gpc > 1)))
return -EINVAL;
- data32 = mem_rd32(context + ctxsw_prog_local_image_ppc_info_o(), 0);
+ data32 = gk20a_mem_rd32(context + ctxsw_prog_local_image_ppc_info_o(), 0);
*num_ppcs = ctxsw_prog_local_image_ppc_info_num_ppcs_v(data32);
*ppc_mask = ctxsw_prog_local_image_ppc_info_ppc_mask_v(data32);
void *context;
u32 offset_to_segment;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "addr=0x%x", addr);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "addr=0x%x", addr);
err = gr_gk20a_decode_priv_addr(g, addr, &addr_type,
&gpc_num, &tpc_num, &ppc_num, &be_num,
context = context_buffer;
if (!check_main_image_header_magic(context)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Invalid main header: magic value");
return -EINVAL;
}
- num_gpcs = mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
+ num_gpcs = gk20a_mem_rd32(context + ctxsw_prog_main_image_num_gpcs_o(), 0);
/* Parse the FECS local header. */
context += ctxsw_prog_ucode_header_size_in_bytes();
if (!check_local_header_magic(context)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Invalid FECS local header: magic value\n");
return -EINVAL;
}
- data32 = mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
+ data32 = gk20a_mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
sys_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
/* If found in Ext buffer, ok.
}
if ((gpc_num + 1) > num_gpcs) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"GPC %d not in this context buffer.\n",
gpc_num);
return -EINVAL;
for (i = 0; i < num_gpcs; i++) {
context += ctxsw_prog_ucode_header_size_in_bytes();
if (!check_local_header_magic(context)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"Invalid GPCCS local header: magic value\n");
return -EINVAL;
}
- data32 = mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
+ data32 = gk20a_mem_rd32(context + ctxsw_prog_local_priv_register_ctl_o(), 0);
gpc_priv_offset = ctxsw_prog_local_priv_register_ctl_offset_v(data32);
err = gr_gk20a_determine_ppc_configuration(g, context,
if (err)
return err;
- num_tpcs = mem_rd32(context + ctxsw_prog_local_image_num_tpcs_o(), 0);
+ num_tpcs = gk20a_mem_rd32(context + ctxsw_prog_local_image_num_tpcs_o(), 0);
if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"GPC %d TPC %d not in this context buffer.\n",
gpc_num, tpc_num);
return -EINVAL;
num_tpcs) << 2);
}
} else {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
" Unknown address type.\n");
return -EINVAL;
}
u32 ctx_op_nr, num_ctx_ops[2] = {num_ctx_wr_ops, num_ctx_rd_ops};
int err, pass;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "wr_ops=%d rd_ops=%d",
num_ctx_wr_ops, num_ctx_rd_ops);
/* disable channel switching.
*/
err = gr_gk20a_disable_ctxsw(g);
if (err) {
- nvhost_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+ gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
/* this should probably be ctx-fatal... */
goto cleanup;
}
curr_gr_chid = gk20a_gr_get_chid_from_ctx(g, curr_gr_ctx);
ch_is_curr_ctx = (curr_gr_chid != -1) && (ch->hw_chid == curr_gr_chid);
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx);
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "is curr ctx=%d", ch_is_curr_ctx);
if (ch_is_curr_ctx) {
for (pass = 0; pass < 2; pass++) {
ctx_op_nr = 0;
v |= ctx_ops[i].value_lo;
gk20a_writel(g, offset, v);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"direct wr: offset=0x%x v=0x%x",
offset, v);
v |= ctx_ops[i].value_hi;
gk20a_writel(g, offset + 4, v);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"direct wr: offset=0x%x v=0x%x",
offset + 4, v);
}
ctx_ops[i].value_lo =
gk20a_readl(g, offset);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"direct rd: offset=0x%x v=0x%x",
offset, ctx_ops[i].value_lo);
ctx_ops[i].value_hi =
gk20a_readl(g, offset + 4);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"direct rd: offset=0x%x v=0x%x",
offset, ctx_ops[i].value_lo);
} else
ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD),
ctx_ops[i].quad);
if (err) {
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"ctx op invalid offset: offset=0x%x",
ctx_ops[i].offset);
ctx_ops[i].status =
if (offsets[j] >= g->gr.ctx_vars.golden_image_size)
continue;
if (pass == 0) { /* write pass */
- v = mem_rd32(ctx_ptr + offsets[j], 0);
+ v = gk20a_mem_rd32(ctx_ptr + offsets[j], 0);
v &= ~ctx_ops[i].and_n_mask_lo;
v |= ctx_ops[i].value_lo;
- mem_wr32(ctx_ptr + offsets[j], 0, v);
+ gk20a_mem_wr32(ctx_ptr + offsets[j], 0, v);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"context wr: offset=0x%x v=0x%x",
offsets[j], v);
if (ctx_ops[i].op == REGOP(WRITE_64)) {
- v = mem_rd32(ctx_ptr + offsets[j] + 4, 0);
+ v = gk20a_mem_rd32(ctx_ptr + offsets[j] + 4, 0);
v &= ~ctx_ops[i].and_n_mask_hi;
v |= ctx_ops[i].value_hi;
- mem_wr32(ctx_ptr + offsets[j] + 4, 0, v);
+ gk20a_mem_wr32(ctx_ptr + offsets[j] + 4, 0, v);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"context wr: offset=0x%x v=0x%x",
offsets[j] + 4, v);
}
} else { /* read pass */
ctx_ops[i].value_lo =
- mem_rd32(ctx_ptr + offsets[0], 0);
+ gk20a_mem_rd32(ctx_ptr + offsets[0], 0);
- nvhost_dbg(dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "context rd: offset=0x%x v=0x%x",
offsets[0], ctx_ops[i].value_lo);
if (ctx_ops[i].op == REGOP(READ_64)) {
ctx_ops[i].value_hi =
- mem_rd32(ctx_ptr + offsets[0] + 4, 0);
+ gk20a_mem_rd32(ctx_ptr + offsets[0] + 4, 0);
- nvhost_dbg(dbg_gpu_dbg,
+ gk20a_dbg(gpu_dbg_gpu_dbg,
"context rd: offset=0x%x v=0x%x",
offsets[0] + 4, ctx_ops[i].value_hi);
} else
if (restart_gr_ctxsw) {
int tmp_err = gr_gk20a_enable_ctxsw(g);
if (tmp_err) {
- nvhost_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+ gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
err = tmp_err;
}
}
u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl;
switch (ver) {
case GK20A_GPUID_GK20A:
- nvhost_dbg_info("gk20a detected");
+ gk20a_dbg_info("gk20a detected");
gk20a_init_hal(&g->ops);
break;
default:
- nvhost_err(&g->dev->dev, "no support for %x", ver);
+ gk20a_err(&g->dev->dev, "no support for %x", ver);
return -ENODEV;
}
u32 compbit_backing_size;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (max_comptag_lines == 0) {
gr->compbit_store.size = 0;
if (max_comptag_lines > hw_max_comptag_lines)
max_comptag_lines = hw_max_comptag_lines;
- nvhost_dbg_info("compbit backing store size : %d",
+ gk20a_dbg_info("compbit backing store size : %d",
compbit_backing_size);
- nvhost_dbg_info("max comptag lines : %d",
+ gk20a_dbg_info("max comptag lines : %d",
max_comptag_lines);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
gr->compbit_store.pages = dma_alloc_attrs(d, gr->compbit_store.size,
&iova, GFP_KERNEL, &attrs);
if (!gr->compbit_store.pages) {
- nvhost_err(dev_from_gk20a(g), "failed to allocate"
+ gk20a_err(dev_from_gk20a(g), "failed to allocate"
"backing store for compbit : size %d",
compbit_backing_size);
return -ENOMEM;
ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(
gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (gr->compbit_store.size == 0)
return 0;
!tegra_platform_is_silicon());
if (!time_before(jiffies, end_jiffies)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"comp tag clear timeout\n");
return -EBUSY;
}
gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
compbit_base_post_divide);
- nvhost_dbg(dbg_info | dbg_map | dbg_pte,
+ gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
(u32)(compbit_store_base_iova >> 32),
(u32)(compbit_store_base_iova & 0xffffffff),
u32 data;
s32 retry = 100;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* Make sure all previous writes are committed to the L2. There's no
guarantee that writes are to DRAM. This will be a sysmembar internal
if (ltc_ltss_g_elpg_flush_v(data) ==
ltc_ltss_g_elpg_flush_pending_v()) {
- nvhost_dbg_info("g_elpg_flush 0x%x", data);
+ gk20a_dbg_info("g_elpg_flush 0x%x", data);
retry--;
usleep_range(20, 40);
} else
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"g_elpg_flush too many retries");
}
static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (g->ops.fb.reset)
g->ops.fb.reset(g);
struct vm_gk20a *vm = &mm->bar1.vm;
struct inst_desc *inst_block = &mm->bar1.inst_block;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (inst_block->cpuva)
dma_free_coherent(d, inst_block->size,
struct mm_gk20a *mm = &g->mm;
int i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (mm->sw_ready) {
- nvhost_dbg_fn("skip init");
+ gk20a_dbg_fn("skip init");
return 0;
}
/*TBD: make channel vm size configurable */
mm->channel.size = 1ULL << NV_GMMU_VA_RANGE;
- nvhost_dbg_info("channel vm size: %dMB", (int)(mm->channel.size >> 20));
+ gk20a_dbg_info("channel vm size: %dMB", (int)(mm->channel.size >> 20));
- nvhost_dbg_info("small page-size (%dKB) pte array: %dKB",
+ gk20a_dbg_info("small page-size (%dKB) pte array: %dKB",
gmmu_page_sizes[gmmu_page_size_small] >> 10,
(mm->page_table_sizing[gmmu_page_size_small].num_ptes *
gmmu_pte__size_v()) >> 10);
- nvhost_dbg_info("big page-size (%dKB) pte array: %dKB",
+ gk20a_dbg_info("big page-size (%dKB) pte array: %dKB",
gmmu_page_sizes[gmmu_page_size_big] >> 10,
(mm->page_table_sizing[gmmu_page_size_big].num_ptes *
gmmu_pte__size_v()) >> 10);
mm->remove_support = gk20a_remove_mm_support;
mm->sw_ready = true;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
struct inst_desc *inst_block = &mm->bar1.inst_block;
phys_addr_t inst_pa = inst_block->cpu_pa;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* set large page size in fb
* note this is very early on, can we defer it ? */
}
inst_pa = (u32)(inst_pa >> bar1_instance_block_shift_gk20a());
- nvhost_dbg_info("bar1 inst block ptr: 0x%08x", (u32)inst_pa);
+ gk20a_dbg_info("bar1 inst block ptr: 0x%08x", (u32)inst_pa);
/* this is very early in init... can we defer this? */
{
bus_bar1_block_ptr_f(inst_pa));
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
int err;
struct page *pages;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
pages = alloc_pages(GFP_KERNEL, order);
if (!pages) {
- nvhost_dbg(dbg_pte, "alloc_pages failed\n");
+ gk20a_dbg(gpu_dbg_pte, "alloc_pages failed\n");
goto err_out;
}
*sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
- nvhost_dbg(dbg_pte, "cannot allocate sg table");
+ gk20a_dbg(gpu_dbg_pte, "cannot allocate sg table");
goto err_alloced;
}
err = sg_alloc_table(*sgt, 1, GFP_KERNEL);
if (err) {
- nvhost_dbg(dbg_pte, "sg_alloc_table failed\n");
+ gk20a_dbg(gpu_dbg_pte, "sg_alloc_table failed\n");
goto err_sg_table;
}
sg_set_page((*sgt)->sgl, pages, len, 0);
struct sg_table *sgt, u32 order,
size_t size)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
BUG_ON(sgt == NULL);
free_pages((unsigned long)handle, order);
sg_free_table(sgt);
struct page **pages;
int err = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
*size = len;
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
pages = dma_alloc_attrs(d, len, &iova, GFP_KERNEL, &attrs);
if (!pages) {
- nvhost_err(d, "memory allocation failed\n");
+ gk20a_err(d, "memory allocation failed\n");
goto err_out;
}
err = gk20a_get_sgtable_from_pages(d, sgt, pages,
iova, len);
if (err) {
- nvhost_err(d, "sgt allocation failed\n");
+ gk20a_err(d, "sgt allocation failed\n");
goto err_free;
}
DEFINE_DMA_ATTRS(attrs);
struct page **pages = (struct page **)handle;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
BUG_ON(sgt == NULL);
iova = sg_dma_address(sgt->sgl);
{
int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page **pages = (struct page **)handle;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
*kva = vmap(pages, count, 0, pgprot_dmacoherent(PAGE_KERNEL));
if (!(*kva))
static void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
vunmap(va);
}
#endif
struct sg_table *sgt;
size_t size;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* allocate enough pages for the table */
pte_order = vm->mm->page_table_sizing[gmmu_pgsz_idx].order;
if (err)
return err;
- nvhost_dbg(dbg_pte, "pte = 0x%p, addr=%08llx, size %d",
+ gk20a_dbg(gpu_dbg_pte, "pte = 0x%p, addr=%08llx, size %d",
pte, gk20a_mm_iova_addr(sgt->sgl), pte_order);
pte->ref = handle;
{
*pde_lo = (u32)(addr_lo >> vm->mm->pde_stride_shift);
*pde_hi = (u32)(addr_hi >> vm->mm->pde_stride_shift);
- nvhost_dbg(dbg_pte, "addr_lo=0x%llx addr_hi=0x%llx pde_ss=%d",
+ gk20a_dbg(gpu_dbg_pte, "addr_lo=0x%llx addr_hi=0x%llx pde_ss=%d",
addr_lo, addr_hi, vm->mm->pde_stride_shift);
- nvhost_dbg(dbg_pte, "pde_lo=%d pde_hi=%d",
+ gk20a_dbg(gpu_dbg_pte, "pde_lo=%d pde_hi=%d",
*pde_lo, *pde_hi);
}
* doesn't leak over into the high 32b */
ret = (u32)(addr >> gmmu_page_shifts[pgsz_idx]);
- nvhost_dbg(dbg_pte, "addr=0x%llx pte_i=0x%x", addr, ret);
+ gk20a_dbg(gpu_dbg_pte, "addr=0x%llx pte_i=0x%x", addr, ret);
return ret;
}
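/* Worked example (illustrative, with an assumed shift): if the big-page
 * shift is 17 (128 KB pages, matching the 128*1024 check used later for
 * compressible kinds) and addr has already been reduced to an offset of
 * 0x1260000 within its PDE, the PTE index comes out as
 * 0x1260000 >> 17 = 0x93 (147), which indeed fits in the low 32 bits. */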
/* this offset is a pte offset, not a byte offset */
*pte_offset = i & ((1<<9)-1);
- nvhost_dbg(dbg_pte, "i=0x%x pte_page=0x%x pte_offset=0x%x",
+ gk20a_dbg(gpu_dbg_pte, "i=0x%x pte_page=0x%x pte_offset=0x%x",
i, *pte_page, *pte_offset);
}
struct page_table_gk20a *pte =
vm->pdes.ptes[gmmu_pgsz_idx] + i;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* if it's already in place it's valid */
if (pte->ref)
return 0;
- nvhost_dbg(dbg_pte, "alloc %dKB ptes for pde %d",
+ gk20a_dbg(gpu_dbg_pte, "alloc %dKB ptes for pde %d",
gmmu_page_sizes[gmmu_pgsz_idx]/1024, i);
err = zalloc_gmmu_page_table_gk20a(vm, gmmu_pgsz_idx, pte);
mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
if (!mapped_buffer) {
mutex_unlock(&vm->update_gmmu_lock);
- nvhost_err(d, "invalid addr to unmap 0x%llx", offset);
+ gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
return;
}
udelay(50);
}
if (!retries)
- nvhost_err(d, "sync-unmap failed on 0x%llx",
+ gk20a_err(d, "sync-unmap failed on 0x%llx",
offset);
mutex_lock(&vm->update_gmmu_lock);
}
/* TBD: DIV_ROUND_UP -> undefined reference to __aeabi_uldivmod */
size = (size + ((u64)gmmu_page_size - 1)) & ~((u64)gmmu_page_size - 1);
- nvhost_dbg_info("size=0x%llx @ pgsz=%dKB", size,
+ gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
gmmu_page_sizes[gmmu_pgsz_idx]>>10);
/* The vma allocator represents page accounting. */
err = vma->alloc(vma, &start_page_nr, num_pages);
if (err) {
- nvhost_err(dev_from_vm(vm),
+ gk20a_err(dev_from_vm(vm),
"%s oom: sz=0x%llx", vma->name, size);
return 0;
}
offset = (u64)start_page_nr << gmmu_page_shifts[gmmu_pgsz_idx];
- nvhost_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
+ gk20a_dbg_fn("%s found addr: 0x%llx", vma->name, offset);
return offset;
}
u32 start_page_nr, num_pages;
int err;
- nvhost_dbg_info("%s free addr=0x%llx, size=0x%llx",
+ gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
vma->name, offset, size);
start_page_nr = (u32)(offset >> page_shift);
err = vma->free(vma, start_page_nr, num_pages);
if (err) {
- nvhost_err(dev_from_vm(vm),
+ gk20a_err(dev_from_vm(vm),
"not found: offset=0x%llx, sz=0x%llx",
offset, size);
}
bfr->kind_v = gmmu_pte_kind_pitch_v();
if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
- nvhost_err(d, "kind 0x%x not supported", bfr->kind_v);
+ gk20a_err(d, "kind 0x%x not supported", bfr->kind_v);
return -EINVAL;
}
bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
if (unlikely(bfr->uc_kind_v == gmmu_pte_kind_invalid_v())) {
/* shouldn't happen, but it is worth cross-checking */
- nvhost_err(d, "comptag kind 0x%x can't be"
+ gk20a_err(d, "comptag kind 0x%x can't be"
" downgraded to uncompressed kind",
bfr->kind_v);
return -EINVAL;
if (unlikely(kind_compressible &&
(gmmu_page_sizes[pgsz_idx] != 128*1024))) {
/*
- nvhost_warn(d, "comptags specified"
+ gk20a_warn(d, "comptags specified"
" but pagesize being used doesn't support it");*/
/* it is safe to fall back to uncompressed as
functionality is not harmed */
struct mapped_buffer_node *buffer;
if (map_offset & gmmu_page_offset_masks[bfr->pgsz_idx]) {
- nvhost_err(dev, "map offset must be buffer page size aligned 0x%llx",
+ gk20a_err(dev, "map offset must be buffer page size aligned 0x%llx",
map_offset);
return -EINVAL;
}
/* find the space reservation */
va_node = addr_to_reservation(vm, map_offset);
if (!va_node) {
- nvhost_warn(dev, "fixed offset mapping without space allocation");
+ gk20a_warn(dev, "fixed offset mapping without space allocation");
return -EINVAL;
}
s64 end = min(buffer->addr +
buffer->size, map_offset + bfr->size);
if (end - begin > 0) {
- nvhost_warn(dev, "overlapping buffer map requested");
+ gk20a_warn(dev, "overlapping buffer map requested");
return -EINVAL;
}
}
map_offset = gk20a_vm_alloc_va(vm, size,
pgsz_idx);
if (!map_offset) {
- nvhost_err(d, "failed to allocate va space");
+ gk20a_err(d, "failed to allocate va space");
err = -ENOMEM;
goto fail;
}
err = validate_gmmu_page_table_gk20a_locked(vm, i,
pgsz_idx);
if (err) {
- nvhost_err(d, "failed to validate page table %d: %d",
+ gk20a_err(d, "failed to validate page table %d: %d",
i, err);
goto fail;
}
NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
rw_flag);
if (err) {
- nvhost_err(d, "failed to update ptes on map");
+ gk20a_err(d, "failed to update ptes on map");
goto fail;
}
return map_offset;
fail:
- nvhost_err(d, "%s: failed with err=%d\n", __func__, err);
+ gk20a_err(d, "%s: failed with err=%d\n", __func__, err);
return 0;
}
}
kref_get(&mapped_buffer->ref);
- nvhost_dbg(dbg_map,
+ gk20a_dbg(gpu_dbg_map,
"reusing as=%d pgsz=%d flags=0x%x ctags=%d "
"start=%d gv=0x%x,%08x -> 0x%x,%08x -> 0x%x,%08x "
"own_mem_ref=%d user_mapped=%d",
* track the difference between those two cases we have
* to fail the mapping when we run out of SMMU space.
*/
- nvhost_warn(d, "oom allocating tracking buffer");
+ gk20a_warn(d, "oom allocating tracking buffer");
goto clean_up;
}
/* validate/adjust bfr attributes */
if (unlikely(bfr.pgsz_idx == -1)) {
- nvhost_err(d, "unsupported page size detected");
+ gk20a_err(d, "unsupported page size detected");
goto clean_up;
}
err = setup_buffer_kind_and_compression(d, flags, &bfr, bfr.pgsz_idx);
if (unlikely(err)) {
- nvhost_err(d, "failure setting up kind and compression");
+ gk20a_err(d, "failure setting up kind and compression");
goto clean_up;
}
if (!map_offset)
goto clean_up;
- nvhost_dbg(dbg_map,
+ gk20a_dbg(gpu_dbg_map,
"as=%d pgsz=%d "
"kind=0x%x kind_uc=0x%x flags=0x%x "
"ctags=%d start=%d gv=0x%x,%08x -> 0x%x,%08x -> 0x%x,%08x",
{
int i;
struct scatterlist *sg = NULL;
- nvhost_dbg(dbg_pte, "for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i)");
+ gk20a_dbg(gpu_dbg_pte, "for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i)");
for_each_sg(bfr.sgt->sgl, sg, bfr.sgt->nents, i ) {
u64 da = sg_dma_address(sg);
u64 pa = sg_phys(sg);
u64 len = sg->length;
- nvhost_dbg(dbg_pte, "i=%d pa=0x%x,%08x da=0x%x,%08x len=0x%x,%08x",
+ gk20a_dbg(gpu_dbg_pte, "i=%d pa=0x%x,%08x da=0x%x,%08x len=0x%x,%08x",
i, hi32(pa), lo32(pa), hi32(da), lo32(da),
hi32(len), lo32(len));
}
/* TBD: check for multiple mapping of same buffer */
mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
if (!mapped_buffer) {
- nvhost_warn(d, "oom allocating tracking buffer");
+ gk20a_warn(d, "oom allocating tracking buffer");
goto clean_up;
}
mapped_buffer->dmabuf = dmabuf;
err = insert_mapped_buffer(&vm->mapped_buffers, mapped_buffer);
if (err) {
- nvhost_err(d, "failed to insert into mapped buffer tree");
+ gk20a_err(d, "failed to insert into mapped buffer tree");
goto clean_up;
}
inserted = true;
if (user_mapped)
vm->num_user_mapped_buffers++;
- nvhost_dbg_info("allocated va @ 0x%llx", map_offset);
+ gk20a_dbg_info("allocated va @ 0x%llx", map_offset);
if (!va_allocated) {
struct vm_reserved_va_node *va_node;
gk20a_mm_unpin(d, dmabuf, bfr.sgt);
mutex_unlock(&vm->update_gmmu_lock);
- nvhost_dbg_info("err=%d\n", err);
+ gk20a_dbg_info("err=%d\n", err);
return 0;
}
flags, rw_flag);
mutex_unlock(&vm->update_gmmu_lock);
if (!vaddr) {
- nvhost_err(dev_from_vm(vm), "failed to allocate va space");
+ gk20a_err(dev_from_vm(vm), "failed to allocate va space");
return 0;
}
pde_range_from_vaddr_range(vm, first_vaddr, last_vaddr,
&pde_lo, &pde_hi);
- nvhost_dbg(dbg_pte, "size_idx=%d, pde_lo=%d, pde_hi=%d",
+ gk20a_dbg(gpu_dbg_pte, "size_idx=%d, pde_lo=%d, pde_hi=%d",
pgsz_idx, pde_lo, pde_hi);
/* If ctag_offset !=0 add 1 else add 0. The idea is to avoid a branch
err = map_gmmu_pages(pte->ref, pte->sgt, &pte_kv_cur,
pte->size);
if (err) {
- nvhost_err(dev_from_vm(vm),
+ gk20a_err(dev_from_vm(vm),
"couldn't map ptes for update as=%d pte_ref_cnt=%d",
vm_aspace_id(vm), pte->ref_cnt);
goto clean_up;
}
- nvhost_dbg(dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
+ gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
if (likely(sgt)) {
pte->ref_cnt++;
- nvhost_dbg(dbg_pte,
+ gk20a_dbg(gpu_dbg_pte,
"pte_cur=%d addr=0x%x,%08x kind=%d"
" ctag=%d vol=%d refs=%d"
" [0x%08x,0x%08x]",
} else {
pte->ref_cnt--;
- nvhost_dbg(dbg_pte,
+ gk20a_dbg(gpu_dbg_pte,
"pte_cur=%d ref=%d [0x0,0x0]",
pte_cur, pte->ref_cnt);
}
- mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
- mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
+ gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 0, pte_w[0]);
+ gk20a_mem_wr32(pte_kv_cur + pte_cur*8, 1, pte_w[1]);
}
unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
smp_mb();
vm->tlb_dirty = true;
- nvhost_dbg_fn("set tlb dirty");
+ gk20a_dbg_fn("set tlb dirty");
return 0;
pde = pde_from_index(vm, i);
- mem_wr32(pde, 0, pde_v[0]);
- mem_wr32(pde, 1, pde_v[1]);
+ gk20a_mem_wr32(pde, 0, pde_v[0]);
+ gk20a_mem_wr32(pde, 1, pde_v[1]);
smp_mb();
gk20a_mm_l2_invalidate(vm->mm->g);
- nvhost_dbg(dbg_pte, "pde:%d = 0x%x,0x%08x\n", i, pde_v[1], pde_v[0]);
+ gk20a_dbg(gpu_dbg_pte, "pde:%d = 0x%x,0x%08x\n", i, pde_v[1], pde_v[0]);
vm->tlb_dirty = true;
}
gk20a_mem_flag_none);
if (!page_vaddr) {
- nvhost_err(dev_from_vm(vm), "failed to remap clean buffers!");
+ gk20a_err(dev_from_vm(vm), "failed to remap clean buffers!");
goto err_unmap;
}
vaddr += pgsz;
mapped_buffer->va_allocated,
gk20a_mem_flag_none);
- nvhost_dbg(dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
+ gk20a_dbg(gpu_dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
vm_aspace_id(vm), gmmu_page_sizes[mapped_buffer->pgsz_idx],
hi32(mapped_buffer->addr), lo32(mapped_buffer->addr),
mapped_buffer->own_mem_ref);
mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
if (!mapped_buffer) {
mutex_unlock(&vm->update_gmmu_lock);
- nvhost_err(d, "invalid addr to unmap 0x%llx", offset);
+ gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
return;
}
kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
struct vm_reserved_va_node *va_node, *va_node_tmp;
struct rb_node *node;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
mutex_lock(&vm->update_gmmu_lock);
/* TBD: add a flag here for the unmap code to recognize teardown
char name[32];
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
vm->pdes.ptes[gmmu_page_size_big]))
return -ENOMEM;
- nvhost_dbg_info("init space for va_limit=0x%llx num_pdes=%d",
+ gk20a_dbg_info("init space for va_limit=0x%llx num_pdes=%d",
vm->va_limit, vm->pdes.num_pdes);
/* allocate the page table directory */
vm->pdes.size);
return -ENOMEM;
}
- nvhost_dbg(dbg_pte, "pdes.kv = 0x%p, pdes.phys = 0x%llx",
+ gk20a_dbg(gpu_dbg_pte, "pdes.kv = 0x%p, pdes.phys = 0x%llx",
vm->pdes.kv,
gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
/* we could release vm->pdes.kv but it's only one page... */
{
struct vm_gk20a *vm = as_share->vm;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
vm->as_share = NULL;
struct vm_reserved_va_node *va_node;
u64 vaddr_start = 0;
- nvhost_dbg_fn("flags=0x%x pgsz=0x%x nr_pages=0x%x o/a=0x%llx",
+ gk20a_dbg_fn("flags=0x%x pgsz=0x%x nr_pages=0x%x o/a=0x%llx",
args->flags, args->page_size, args->pages,
args->o_a.offset);
struct vm_gk20a *vm = as_share->vm;
struct vm_reserved_va_node *va_node;
- nvhost_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
+ gk20a_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
args->pages, args->offset);
/* determine pagesz idx */
int err = 0;
struct vm_gk20a *vm = as_share->vm;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
ch->vm = vm;
err = channel_gk20a_commit_va(ch);
struct dma_buf *dmabuf;
u64 ret_va;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* get ref to the mem handle (released on unmap_locked) */
dmabuf = dma_buf_get(dmabuf_fd);
{
struct vm_gk20a *vm = as_share->vm;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_vm_unmap_user(vm, offset);
return 0;
mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
- nvhost_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
+ gk20a_dbg_info("bar1 vm size = 0x%x", mm->bar1.aperture_size);
vm->va_start = mm->pde_stride * 1;
vm->va_limit = mm->bar1.aperture_size;
vm->pdes.ptes[gmmu_page_size_big]))
return -ENOMEM;
- nvhost_dbg_info("init space for bar1 va_limit=0x%llx num_pdes=%d",
+ gk20a_dbg_info("init space for bar1 va_limit=0x%llx num_pdes=%d",
vm->va_limit, vm->pdes.num_pdes);
vm->pdes.size);
goto clean_up;
}
- nvhost_dbg(dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
+ gk20a_dbg(gpu_dbg_pte, "bar 1 pdes.kv = 0x%p, pdes.phys = 0x%llx",
vm->pdes.kv, gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
/* we could release vm->pdes.kv but it's only one page... */
pde_addr_lo = u64_lo32(pde_addr >> 12);
pde_addr_hi = u64_hi32(pde_addr);
- nvhost_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
+ gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
(u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl),
pde_addr_lo, pde_addr_hi);
inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
&iova, GFP_KERNEL);
if (!inst_block->cpuva) {
- nvhost_err(d, "%s: memory allocation failed\n", __func__);
+ gk20a_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
inst_block->iova = iova;
inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
if (!inst_block->cpu_pa) {
- nvhost_err(d, "%s: failed to get phys address\n", __func__);
+ gk20a_err(d, "%s: failed to get phys address\n", __func__);
err = -ENOMEM;
goto clean_up;
}
inst_pa = inst_block->cpu_pa;
inst_ptr = inst_block->cpuva;
- nvhost_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p",
+ gk20a_dbg_info("bar1 inst block physical phys = 0x%llx, kv = 0x%p",
(u64)inst_pa, inst_ptr);
memset(inst_ptr, 0, ram_fc_size_val_v());
- mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(pde_addr_lo));
- mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
ram_in_page_dir_base_hi_f(pde_addr_hi));
- mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
u64_lo32(vm->va_limit) | 0xFFF);
- mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
- nvhost_dbg_info("bar1 inst block ptr: %08llx", (u64)inst_pa);
+ gk20a_dbg_info("bar1 inst block ptr: %08llx", (u64)inst_pa);
gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_bar1",
1,/*start*/
(vm->va_limit >> 12) - 1 /* length*/,
mm->pmu.aperture_size = GK20A_PMU_VA_SIZE;
- nvhost_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
+ gk20a_dbg_info("pmu vm size = 0x%x", mm->pmu.aperture_size);
vm->va_start = GK20A_PMU_VA_START;
vm->va_limit = vm->va_start + mm->pmu.aperture_size;
vm->pdes.ptes[gmmu_page_size_big]))
return -ENOMEM;
- nvhost_dbg_info("init space for pmu va_limit=0x%llx num_pdes=%d",
+ gk20a_dbg_info("init space for pmu va_limit=0x%llx num_pdes=%d",
vm->va_limit, vm->pdes.num_pdes);
/* allocate the page table directory */
vm->pdes.size);
goto clean_up;
}
- nvhost_dbg_info("pmu pdes phys @ 0x%llx",
+ gk20a_dbg_info("pmu pdes phys @ 0x%llx",
(u64)gk20a_mm_iova_addr(vm->pdes.sgt->sgl));
/* we could release vm->pdes.kv but it's only one page... */
pde_addr_lo = u64_lo32(pde_addr >> 12);
pde_addr_hi = u64_hi32(pde_addr);
- nvhost_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
+ gk20a_dbg_info("pde pa=0x%llx pde_addr_lo=0x%x pde_addr_hi=0x%x",
(u64)pde_addr, pde_addr_lo, pde_addr_hi);
/* allocate instance mem for pmu */
inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
&iova, GFP_KERNEL);
if (!inst_block->cpuva) {
- nvhost_err(d, "%s: memory allocation failed\n", __func__);
+ gk20a_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
inst_block->iova = iova;
inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
if (!inst_block->cpu_pa) {
- nvhost_err(d, "%s: failed to get phys address\n", __func__);
+ gk20a_err(d, "%s: failed to get phys address\n", __func__);
err = -ENOMEM;
goto clean_up;
}
inst_pa = inst_block->cpu_pa;
inst_ptr = inst_block->cpuva;
- nvhost_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa);
+ gk20a_dbg_info("pmu inst block physical addr: 0x%llx", (u64)inst_pa);
memset(inst_ptr, 0, GK20A_PMU_INST_SIZE);
- mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
ram_in_page_dir_base_target_vid_mem_f() |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(pde_addr_lo));
- mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
ram_in_page_dir_base_hi_f(pde_addr_hi));
- mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
u64_lo32(vm->va_limit) | 0xFFF);
- mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
+ gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
ram_in_adr_limit_hi_f(u64_hi32(vm->va_limit)));
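/* The four writes above are the third copy of the same instance-block
 * setup in this patch (channel, BAR1, PMU). Purely as an illustration of
 * the shared pattern (the helper name below is hypothetical, not part of
 * the driver), it could be factored as:
 *
 *	static void init_inst_block_page_dir(void *inst_ptr,
 *					     u64 pde_addr, u64 va_limit)
 *	{
 *		u32 lo = u64_lo32(pde_addr >> 12);
 *		u32 hi = u64_hi32(pde_addr);
 *
 *		gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_lo_w(),
 *			ram_in_page_dir_base_target_vid_mem_f() |
 *			ram_in_page_dir_base_vol_true_f() |
 *			ram_in_page_dir_base_lo_f(lo));
 *		gk20a_mem_wr32(inst_ptr, ram_in_page_dir_base_hi_w(),
 *			ram_in_page_dir_base_hi_f(hi));
 *		gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_lo_w(),
 *			u64_lo32(va_limit) | 0xFFF);
 *		gk20a_mem_wr32(inst_ptr, ram_in_adr_limit_hi_w(),
 *			ram_in_adr_limit_hi_f(u64_hi32(va_limit)));
 *	}
 */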
gk20a_allocator_init(&vm->vma[gmmu_page_size_small], "gk20a_pmu",
u32 data;
s32 retry = 100;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
mutex_lock(&mm->l2_op_lock);
flush_fb_flush_outstanding_true_v() ||
flush_fb_flush_pending_v(data) ==
flush_fb_flush_pending_busy_v()) {
- nvhost_dbg_info("fb_flush 0x%x", data);
+ gk20a_dbg_info("fb_flush 0x%x", data);
retry--;
usleep_range(20, 40);
} else
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"fb_flush too many retries");
mutex_unlock(&mm->l2_op_lock);
flush_l2_system_invalidate_outstanding_true_v() ||
flush_l2_system_invalidate_pending_v(data) ==
flush_l2_system_invalidate_pending_busy_v()) {
- nvhost_dbg_info("l2_system_invalidate 0x%x",
+ gk20a_dbg_info("l2_system_invalidate 0x%x",
data);
retry--;
usleep_range(20, 40);
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"l2_system_invalidate too many retries");
}
u32 data;
s32 retry = 200;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
mutex_lock(&mm->l2_op_lock);
flush_l2_flush_dirty_outstanding_true_v() ||
flush_l2_flush_dirty_pending_v(data) ==
flush_l2_flush_dirty_pending_busy_v()) {
- nvhost_dbg_info("l2_flush_dirty 0x%x", data);
+ gk20a_dbg_info("l2_flush_dirty 0x%x", data);
retry--;
usleep_range(20, 40);
} else
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"l2_flush_dirty too many retries");
if (invalidate)
{
struct mapped_buffer_node *mapped_buffer;
- nvhost_dbg_fn("gpu_va=0x%llx", gpu_va);
+ gk20a_dbg_fn("gpu_va=0x%llx", gpu_va);
mutex_lock(&vm->update_gmmu_lock);
u32 data;
s32 retry = 200;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
/* pagetables are considered sw states which are preserved after
prepare_poweroff. When gk20a deinit releases those pagetables,
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"wait mmu fifo space too many retries");
gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
} while (retry >= 0 || !tegra_platform_is_silicon());
if (retry < 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"mmu invalidate too many retries");
mutex_unlock(&mm->tlb_lock);
int gk20a_mm_suspend(struct gk20a *g)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_mm_fb_flush(g);
gk20a_mm_l2_flush(g, true);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
u32 intr;
intr = gk20a_readl(g, ltc_ltc0_ltss_intr_r());
- nvhost_err(dev_from_gk20a(g), "ltc: %08x\n", intr);
+ gk20a_err(dev_from_gk20a(g), "ltc: %08x\n", intr);
gk20a_writel(g, ltc_ltc0_ltss_intr_r(), intr);
}
#define GK20A_PMU_UCODE_IMAGE "gpmu_ucode.bin"
-#define nvhost_dbg_pmu(fmt, arg...) \
- nvhost_dbg(dbg_pmu, fmt, ##arg)
+#define gk20a_dbg_pmu(fmt, arg...) \
+ gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
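/* With the wrapper renamed, call sites only change prefix. A hedged usage
 * sketch (the format string is invented for illustration; the fields are
 * ones referenced elsewhere in this patch):
 *
 *	gk20a_dbg_pmu("app_start_offset 0x%x app_size 0x%x",
 *		pmu->desc->app_start_offset, pmu->desc->app_size);
 */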
static void pmu_dump_falcon_stats(struct pmu_gk20a *pmu);
static int gk20a_pmu_get_elpg_residency_gating(struct gk20a *g,
u32 *dst_u32 = (u32*)dst;
if (size == 0) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"size is zero");
return;
}
if (src & 0x3) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"src (0x%08x) not 4-byte aligned", src);
return;
}
data = gk20a_readl(g, pwr_falcon_dmemd_r(port));
for (i = 0; i < bytes; i++) {
dst[(words << 2) + i] = ((u8 *)&data)[i];
- nvhost_dbg_pmu("read: dst_u8[%d]=0x%08x",
+ gk20a_dbg_pmu("read: dst_u8[%d]=0x%08x",
i, dst[(words << 2) + i]);
}
}
u32 *src_u32 = (u32*)src;
if (size == 0) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"size is zero");
return;
}
if (dst & 0x3) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"dst (0x%08x) not 4-byte aligned", dst);
return;
}
data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
size = ALIGN(size, 4);
if (data != dst + size) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"copy failed. bytes written %d, expected %d",
data - dst, size);
}
}
if (time_after_eq(jiffies, end_jiffies)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"timeout waiting pmu idle : 0x%08x",
idle_stat);
return -EBUSY;
usleep_range(100, 200);
} while (1);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
{
struct gk20a *g = pmu->g;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_writel(g, mc_intr_mask_0_r(),
gk20a_readl(g, mc_intr_mask_0_r()) &
mc_intr_mask_0_pmu_enabled_f());
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
static int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
{
struct gk20a *g = pmu->g;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (enable) {
int retries = GR_IDLE_CHECK_MAX / GR_IDLE_CHECK_DEFAULT;
pwr_falcon_dmactl_imem_scrubbing_m());
if (!w) {
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
udelay(GR_IDLE_CHECK_DEFAULT);
} while (--retries || !tegra_platform_is_silicon());
gk20a_disable(g, mc_enable_pwr_enabled_f());
- nvhost_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+ gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
return -ETIMEDOUT;
} else {
u32 pmc_enable;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!enable) {
pmc_enable = gk20a_readl(g, mc_enable_r());
pmu_enable_irq(pmu, true);
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
u64 addr_code, addr_data, addr_load;
u32 i, blocks, addr_args;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_writel(g, pwr_falcon_itfen_r(),
gk20a_readl(g, pwr_falcon_itfen_r()) |
index = find_first_zero_bit(pmu->pmu_seq_tbl,
sizeof(pmu->pmu_seq_tbl));
if (index >= sizeof(pmu->pmu_seq_tbl)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"no free sequence available");
mutex_unlock(&pmu->pmu_seq_lock);
return -EAGAIN;
queue->mutex_id = id;
mutex_init(&queue->mutex);
- nvhost_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
+ gk20a_dbg_pmu("queue %d: index %d, offset 0x%08x, size 0x%08x",
id, queue->index, queue->offset, queue->size);
return 0;
if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
BUG_ON(mutex->ref_cnt == 0);
- nvhost_dbg_pmu("already acquired by owner : 0x%08x", *token);
+ gk20a_dbg_pmu("already acquired by owner : 0x%08x", *token);
mutex->ref_cnt++;
return 0;
}
gk20a_readl(g, pwr_pmu_mutex_id_r()));
if (data == pwr_pmu_mutex_id_value_init_v() ||
data == pwr_pmu_mutex_id_value_not_avail_v()) {
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"fail to generate mutex token: val 0x%08x",
owner);
usleep_range(20, 40);
if (owner == data) {
mutex->ref_cnt = 1;
- nvhost_dbg_pmu("mutex acquired: id=%d, token=0x%x",
+ gk20a_dbg_pmu("mutex acquired: id=%d, token=0x%x",
mutex->index, *token);
*token = owner;
return 0;
} else {
- nvhost_dbg_info("fail to acquire mutex idx=0x%08x",
+ gk20a_dbg_info("fail to acquire mutex idx=0x%08x",
mutex->index);
data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
if (*token != owner) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"requester 0x%08x NOT match owner 0x%08x",
*token, owner);
return -EINVAL;
pwr_pmu_mutex_id_release_value_f(owner));
gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);
- nvhost_dbg_pmu("mutex released: id=%d, token=0x%x",
+ gk20a_dbg_pmu("mutex released: id=%d, token=0x%x",
mutex->index, *token);
}
static int pmu_queue_push(struct pmu_gk20a *pmu,
struct pmu_queue *queue, void *data, u32 size)
{
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!queue->opened && queue->oflag == OFLAG_WRITE){
- nvhost_err(dev_from_gk20a(pmu->g),
+ gk20a_err(dev_from_gk20a(pmu->g),
"queue not opened for write");
return -EINVAL;
}
*bytes_read = 0;
if (!queue->opened && queue->oflag == OFLAG_READ){
- nvhost_err(dev_from_gk20a(pmu->g),
+ gk20a_err(dev_from_gk20a(pmu->g),
"queue not opened for read");
return -EINVAL;
}
used = queue->offset + queue->size - tail;
if (size > used) {
- nvhost_warn(dev_from_gk20a(pmu->g),
+ gk20a_warn(dev_from_gk20a(pmu->g),
"queue size smaller than request read");
size = used;
}
{
struct pmu_cmd cmd;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!queue->opened) {
- nvhost_err(dev_from_gk20a(pmu->g),
+ gk20a_err(dev_from_gk20a(pmu->g),
"queue not opened");
return;
}
cmd.hdr.unit_id = PMU_UNIT_REWIND;
cmd.hdr.size = PMU_CMD_HDR_SIZE;
pmu_queue_push(pmu, queue, &cmd, cmd.hdr.size);
- nvhost_dbg_pmu("queue %d rewinded", queue->id);
+ gk20a_dbg_pmu("queue %d rewinded", queue->id);
}
queue->position = queue->offset;
BUG();
if (!pmu_queue_has_room(pmu, queue, size, &rewind)) {
- nvhost_err(dev_from_gk20a(pmu->g), "queue full");
+ gk20a_err(dev_from_gk20a(pmu->g), "queue full");
return -EAGAIN;
}
{
struct gk20a_pmu_save_state save;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
gk20a_allocator_destroy(&pmu->dmem);
{
struct pmu_gk20a *pmu = &g->pmu;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
pmu_enable_hw(pmu, true);
DEFINE_DMA_ATTRS(attrs);
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (pmu->sw_ready) {
for (i = 0; i < pmu->mutex_cnt; i++) {
}
pmu_seq_init(pmu);
- nvhost_dbg_fn("skip init");
+ gk20a_dbg_fn("skip init");
goto skip_init;
}
if (!g->pmu_fw) {
g->pmu_fw = gk20a_request_firmware(g, GK20A_PMU_UCODE_IMAGE);
if (!g->pmu_fw) {
- nvhost_err(d, "failed to load pmu ucode!!");
+ gk20a_err(d, "failed to load pmu ucode!!");
err = -ENOENT;
goto err_free_seq;
}
}
- nvhost_dbg_fn("firmware loaded");
+ gk20a_dbg_fn("firmware loaded");
pmu->desc = (struct pmu_ucode_desc *)g->pmu_fw->data;
pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
GFP_KERNEL,
&attrs);
if (!pmu->ucode.cpuva) {
- nvhost_err(d, "failed to allocate memory\n");
+ gk20a_err(d, "failed to allocate memory\n");
err = -ENOMEM;
goto err_release_fw;
}
&iova,
GFP_KERNEL);
if (!pmu->seq_buf.cpuva) {
- nvhost_err(d, "failed to allocate memory\n");
+ gk20a_err(d, "failed to allocate memory\n");
err = -ENOMEM;
goto err_free_pmu_ucode;
}
pmu->ucode.iova,
GK20A_PMU_UCODE_SIZE_MAX);
if (err) {
- nvhost_err(d, "failed to allocate sg table\n");
+ gk20a_err(d, "failed to allocate sg table\n");
goto err_free_seq_buf;
}
0, /* flags */
gk20a_mem_flag_read_only);
if (!pmu->ucode.pmu_va) {
- nvhost_err(d, "failed to map pmu ucode memory!!");
+ gk20a_err(d, "failed to map pmu ucode memory!!");
goto err_free_ucode_sgt;
}
pmu->seq_buf.iova,
GK20A_PMU_SEQ_BUF_SIZE);
if (err) {
- nvhost_err(d, "failed to allocate sg table\n");
+ gk20a_err(d, "failed to allocate sg table\n");
goto err_unmap_ucode;
}
0, /* flags */
gk20a_mem_flag_none);
if (!pmu->seq_buf.pmu_va) {
- nvhost_err(d, "failed to map pmu ucode memory!!");
+ gk20a_err(d, "failed to map pmu ucode memory!!");
goto err_free_seq_buf_sgt;
}
ptr = (u8 *)pmu->seq_buf.cpuva;
if (!ptr) {
- nvhost_err(d, "failed to map cpu ptr for zbc buffer");
+ gk20a_err(d, "failed to map cpu ptr for zbc buffer");
goto err_unmap_seq_buf;
}
for (i = 0; i < (pmu->desc->app_start_offset +
pmu->desc->app_size) >> 2; i++)
- mem_wr32(ucode_ptr, i, pmu->ucode_image[i]);
+ gk20a_mem_wr32(ucode_ptr, i, pmu->ucode_image[i]);
gk20a_free_sgtable(&sgt_pmu_ucode);
gk20a_free_sgtable(&sgt_seq_buf);
pmu->remove_support = gk20a_remove_pmu_support;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
err_unmap_seq_buf:
err_free_mutex:
kfree(pmu->mutex);
err:
- nvhost_dbg_fn("fail");
+ gk20a_dbg_fn("fail");
return err;
}
struct pmu_gk20a *pmu = param;
struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (status != 0) {
- nvhost_err(dev_from_gk20a(g), "PGENG cmd aborted");
+ gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted");
/* TBD: disable ELPG */
return;
}
if (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_FAILED) {
- nvhost_err(dev_from_gk20a(g), "failed to load PGENG buffer");
+ gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
}
pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
struct pmu_gk20a *pmu = &g->pmu;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
pmu_reset(pmu);
struct sg_table *sgt_pg_buf;
dma_addr_t iova;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!support_gk20a_pmu())
return 0;
size = 0;
err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to query fecs pg buffer size");
return err;
}
&iova,
GFP_KERNEL);
if (!pmu->pg_buf.cpuva) {
- nvhost_err(d, "failed to allocate memory\n");
+ gk20a_err(d, "failed to allocate memory\n");
err = -ENOMEM;
goto err;
}
pmu->pg_buf.iova,
size);
if (err) {
- nvhost_err(d, "failed to create sg table\n");
+ gk20a_err(d, "failed to create sg table\n");
goto err_free_pg_buf;
}
0, /* flags */
gk20a_mem_flag_none);
if (!pmu->pg_buf.pmu_va) {
- nvhost_err(d, "failed to map fecs pg buffer");
+ gk20a_err(d, "failed to map fecs pg buffer");
err = -ENOMEM;
goto err_free_sgtable;
}
pmu->elpg_stat == PMU_ELPG_STAT_OFF)),
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
if (status == 0) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"PG_INIT_ACK failed, remaining timeout : 0x%lx", remain);
pmu_dump_falcon_stats(pmu);
return -EBUSY;
err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to bind pmu inst to gr");
return err;
}
err = gr_gk20a_fecs_set_reglist_virual_addr(g, pmu->pg_buf.pmu_va);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to set pg buffer pmu va");
return err;
}
pmu->buf_loaded,
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
if (!pmu->buf_loaded) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"PGENG FECS buffer load failed, remaining timeout : 0x%lx",
remain);
return -EBUSY;
pmu->buf_loaded,
msecs_to_jiffies(gk20a_get_gr_idle_timeout(g)));
if (!pmu->buf_loaded) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"PGENG ZBC buffer load failed, remaining timeout 0x%lx",
remain);
return -EBUSY;
struct pmu_gk20a *pmu = &g->pmu;
u32 err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (pmu->initialized)
return 0;
struct pmu_gk20a *pmu = param;
struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (status != 0) {
- nvhost_err(dev_from_gk20a(g), "ELPG cmd aborted");
+ gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
/* TBD: disable ELPG */
return;
}
switch (elpg_msg->msg) {
case PMU_PG_ELPG_MSG_INIT_ACK:
- nvhost_dbg_pmu("INIT_PG is acknowledged from PMU");
+ gk20a_dbg_pmu("INIT_PG is acknowledged from PMU");
pmu->elpg_ready = true;
wake_up(&pmu->pg_wq);
break;
case PMU_PG_ELPG_MSG_ALLOW_ACK:
- nvhost_dbg_pmu("ALLOW is acknowledged from PMU");
+ gk20a_dbg_pmu("ALLOW is acknowledged from PMU");
pmu->elpg_stat = PMU_ELPG_STAT_ON;
wake_up(&pmu->pg_wq);
break;
case PMU_PG_ELPG_MSG_DISALLOW_ACK:
- nvhost_dbg_pmu("DISALLOW is acknowledged from PMU");
+ gk20a_dbg_pmu("DISALLOW is acknowledged from PMU");
pmu->elpg_stat = PMU_ELPG_STAT_OFF;
wake_up(&pmu->pg_wq);
break;
default:
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"unsupported ELPG message : 0x%04x", elpg_msg->msg);
}
{
struct pmu_gk20a *pmu = param;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (status != 0) {
- nvhost_err(dev_from_gk20a(g), "ELPG cmd aborted");
+ gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
/* TBD: disable ELPG */
return;
}
switch (msg->msg.pg.stat.sub_msg_id) {
case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
- nvhost_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
+ gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
pmu->stat_dmem_offset = msg->msg.pg.stat.data;
wake_up(&pmu->pg_wq);
break;
struct pmu_cmd cmd;
u32 seq;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (tegra_cpu_is_asim()) {
/* TBD: calculate threshold for silicon */
u32 data;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
pmu->perfmon_ready = 0;
pmu->sample_buffer = 0;
err = pmu->dmem.alloc(&pmu->dmem, &pmu->sample_buffer, 2 * sizeof(u16));
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"failed to allocate perfmon sample buffer");
return -ENOMEM;
}
(u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
if (msg->hdr.unit_id != PMU_UNIT_INIT) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"expecting init msg");
return -EINVAL;
}
(u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"expecting init msg");
return -EINVAL;
}
err = pmu_queue_open_read(pmu, queue);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to open queue %d for read", queue->id);
*status = err;
return false;
err = pmu_queue_pop(pmu, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
goto clean_up;
err = pmu_queue_pop(pmu, queue, &msg->hdr,
PMU_MSG_HDR_SIZE, &bytes_read);
if (err || bytes_read != PMU_MSG_HDR_SIZE) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to read msg from queue %d", queue->id);
*status = err | -EINVAL;
goto clean_up;
}
if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"read invalid unit_id %d from queue %d",
msg->hdr.unit_id, queue->id);
*status = -EINVAL;
err = pmu_queue_pop(pmu, queue, &msg->msg,
read_size, &bytes_read);
if (err || bytes_read != read_size) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to read msg from queue %d", queue->id);
*status = err;
goto clean_up;
err = pmu_queue_close(pmu, queue, true);
if (err) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to close queue %d", queue->id);
*status = err;
return false;
clean_up:
err = pmu_queue_close(pmu, queue, false);
if (err)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to close queue %d", queue->id);
return false;
}
struct pmu_sequence *seq;
int ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
seq = &pmu->seq[msg->hdr.seq_id];
if (seq->state != PMU_SEQ_STATE_USED &&
seq->state != PMU_SEQ_STATE_CANCELLED) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"msg for an unknown sequence %d", seq->id);
return -EINVAL;
}
if (msg->hdr.unit_id == PMU_UNIT_RC &&
msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"unhandled cmd: seq %d", seq->id);
}
else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
0);
}
} else {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"sequence %d msg buffer too small",
seq->id);
}
/* TBD: notify client waiting for available dmem */
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
&pmu->zbc_save_done, 1);
if (!pmu->zbc_save_done)
- nvhost_err(dev_from_gk20a(g), "ZBC save timeout");
+ gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
}
void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
struct gk20a *g = pmu->g;
u32 rate;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
switch (msg->msg_type) {
case PMU_PERFMON_MSG_ID_INCREASE_EVENT:
- nvhost_dbg_pmu("perfmon increase event: "
+ gk20a_dbg_pmu("perfmon increase event: "
"state_id %d, ground_id %d, pct %d",
msg->gen.state_id, msg->gen.group_id, msg->gen.data);
/* increase gk20a clock freq by 20% */
gk20a_clk_set_rate(g, rate * 6 / 5);
break;
case PMU_PERFMON_MSG_ID_DECREASE_EVENT:
- nvhost_dbg_pmu("perfmon decrease event: "
+ gk20a_dbg_pmu("perfmon decrease event: "
"state_id %d, ground_id %d, pct %d",
msg->gen.state_id, msg->gen.group_id, msg->gen.data);
/* decrease gk20a clock freq by 10% */
break;
case PMU_PERFMON_MSG_ID_INIT_EVENT:
pmu->perfmon_ready = 1;
- nvhost_dbg_pmu("perfmon init event");
+ gk20a_dbg_pmu("perfmon init event");
break;
default:
break;
{
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
switch (msg->hdr.unit_id) {
case PMU_UNIT_PERFMON:
while (pmu_read_message(pmu,
&pmu->queue[PMU_MESSAGE_QUEUE], &msg, &status)) {
- nvhost_dbg_pmu("read msg hdr: "
+ gk20a_dbg_pmu("read msg hdr: "
"unit_id = 0x%08x, size = 0x%08x, "
"ctrl_flags = 0x%08x, seq_id = 0x%08x",
msg.hdr.unit_id, msg.hdr.size,
pmu_copy_from_dmem(pmu, pmu->stat_dmem_offset,
(u8 *)&stats, sizeof(struct pmu_pg_stats), 0);
- nvhost_dbg_pmu("pg_entry_start_timestamp : 0x%016llx",
+ gk20a_dbg_pmu("pg_entry_start_timestamp : 0x%016llx",
stats.pg_entry_start_timestamp);
- nvhost_dbg_pmu("pg_exit_start_timestamp : 0x%016llx",
+ gk20a_dbg_pmu("pg_exit_start_timestamp : 0x%016llx",
stats.pg_exit_start_timestamp);
- nvhost_dbg_pmu("pg_ingating_start_timestamp : 0x%016llx",
+ gk20a_dbg_pmu("pg_ingating_start_timestamp : 0x%016llx",
stats.pg_ingating_start_timestamp);
- nvhost_dbg_pmu("pg_ungating_start_timestamp : 0x%016llx",
+ gk20a_dbg_pmu("pg_ungating_start_timestamp : 0x%016llx",
stats.pg_ungating_start_timestamp);
- nvhost_dbg_pmu("pg_avg_entry_time_us : 0x%08x",
+ gk20a_dbg_pmu("pg_avg_entry_time_us : 0x%08x",
stats.pg_avg_entry_time_us);
- nvhost_dbg_pmu("pg_avg_exit_time_us : 0x%08x",
+ gk20a_dbg_pmu("pg_avg_exit_time_us : 0x%08x",
stats.pg_avg_exit_time_us);
- nvhost_dbg_pmu("pg_ingating_cnt : 0x%08x",
+ gk20a_dbg_pmu("pg_ingating_cnt : 0x%08x",
stats.pg_ingating_cnt);
- nvhost_dbg_pmu("pg_ingating_time_us : 0x%08x",
+ gk20a_dbg_pmu("pg_ingating_time_us : 0x%08x",
stats.pg_ingating_time_us);
- nvhost_dbg_pmu("pg_ungating_count : 0x%08x",
+ gk20a_dbg_pmu("pg_ungating_count : 0x%08x",
stats.pg_ungating_count);
- nvhost_dbg_pmu("pg_ungating_time_us 0x%08x: ",
+ gk20a_dbg_pmu("pg_ungating_time_us 0x%08x: ",
stats.pg_ungating_time_us);
- nvhost_dbg_pmu("pg_gating_cnt : 0x%08x",
+ gk20a_dbg_pmu("pg_gating_cnt : 0x%08x",
stats.pg_gating_cnt);
- nvhost_dbg_pmu("pg_gating_deny_cnt : 0x%08x",
+ gk20a_dbg_pmu("pg_gating_deny_cnt : 0x%08x",
stats.pg_gating_deny_cnt);
/*
u32 i, val[20];
pmu_copy_from_dmem(pmu, 0x66c,
(u8 *)val, sizeof(val), 0);
- nvhost_dbg_pmu("elpg log begin");
+ gk20a_dbg_pmu("elpg log begin");
for (i = 0; i < 20; i++)
- nvhost_dbg_pmu("0x%08x", val[i]);
- nvhost_dbg_pmu("elpg log end");
+ gk20a_dbg_pmu("0x%08x", val[i]);
+ gk20a_dbg_pmu("elpg log end");
*/
- nvhost_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_mask_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
- nvhost_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
- nvhost_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
- nvhost_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
- nvhost_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_pg_intren_r(0): 0x%08x",
gk20a_readl(g, pwr_pmu_pg_intren_r(0)));
- nvhost_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_count_r(3): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(3)));
- nvhost_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_count_r(4): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(4)));
- nvhost_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_count_r(7): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_count_r(7)));
/*
TBD: script can't generate those registers correctly
- nvhost_dbg_pmu("pwr_pmu_idle_status_r(): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_idle_status_r(): 0x%08x",
gk20a_readl(g, pwr_pmu_idle_status_r()));
- nvhost_dbg_pmu("pwr_pmu_pg_ctrl_r(): 0x%08x",
+ gk20a_dbg_pmu("pwr_pmu_pg_ctrl_r(): 0x%08x",
gk20a_readl(g, pwr_pmu_pg_ctrl_r()));
*/
}
struct gk20a *g = pmu->g;
int i;
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
gk20a_readl(g, pwr_falcon_os_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
gk20a_readl(g, pwr_falcon_cpuctl_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
gk20a_readl(g, pwr_falcon_idlestate_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
gk20a_readl(g, pwr_falcon_mailbox0_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
gk20a_readl(g, pwr_falcon_mailbox1_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqstat_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqmode_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqmask_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
gk20a_readl(g, pwr_falcon_irqdest_r()));
for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_debug_r(i)));
for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
pwr_pmu_falcon_icd_cmd_idx_f(i));
- nvhost_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
}
i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
if (i != 0) {
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_addr_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_data_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
}
i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
- nvhost_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
+ gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
i = gk20a_readl(g, pwr_falcon_exterrstat_r());
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
if (pwr_falcon_exterrstat_valid_v(i) ==
pwr_falcon_exterrstat_valid_true_v()) {
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
gk20a_readl(g, pwr_falcon_exterraddr_r()));
- nvhost_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "top_fs_status_r : 0x%x",
gk20a_readl(g, top_fs_status_r()));
- nvhost_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
gk20a_readl(g, mc_enable_r()));
}
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
gk20a_readl(g, pwr_falcon_engctl_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
gk20a_readl(g, pwr_falcon_curctx_r()));
- nvhost_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
gk20a_readl(g, pwr_falcon_nxtctx_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
for (i = 0; i < 4; i++) {
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
- nvhost_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
+ gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
}
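/*
 * Illustrative sketch: the falcon dump above repeats one ICD access pattern,
 * write an "rreg" opcode plus a register index to pwr_pmu_falcon_icd_cmd_r(),
 * then read the latched value from pwr_pmu_falcon_icd_rdata_r(). A hypothetical
 * helper that factors this out is shown below; the helper name is an
 * assumption, the register accessors are the ones used above.
 */
static u32 pmu_falcon_icd_rreg(struct gk20a *g, u32 reg_idx)
{
	/* select which falcon register the ICD interface should expose */
	gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
		pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
		pwr_pmu_falcon_icd_cmd_idx_f(reg_idx));
	/* the selected register's value is now readable from rdata */
	return gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r());
}
/* e.g. pmu_falcon_icd_rreg(g, PMU_FALCON_REG_PC) replaces one write/read pair */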
u32 intr, mask;
bool recheck = false;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
mutex_lock(&pmu->isr_mutex);
intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
- nvhost_dbg_pmu("received falcon interrupt: 0x%08x", intr);
+ gk20a_dbg_pmu("received falcon interrupt: 0x%08x", intr);
if (!intr) {
mutex_unlock(&pmu->isr_mutex);
}
if (intr & pwr_falcon_irqstat_halt_true_f()) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"pmu halt intr not implemented");
pmu_dump_falcon_stats(pmu);
}
if (intr & pwr_falcon_irqstat_exterr_true_f()) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"pmu exterr intr not implemented. Clearing interrupt.");
pmu_dump_falcon_stats(pmu);
return true;
invalid_cmd:
- nvhost_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
+ gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
"queue_id=%d,\n"
"cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
"payload in=%p, in_size=%d, in_offset=%d,\n"
msecs_to_jiffies(timeout);
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
queue = &pmu->queue[queue_id];
clean_up:
if (err)
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"fail to write cmd to queue %d", queue_id);
else
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return err;
}
struct pmu_allocation *in = NULL, *out = NULL;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
BUG_ON(!cmd);
BUG_ON(!seq_desc);
if (err)
seq->state = PMU_SEQ_STATE_PENDING;
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
clean_up:
- nvhost_dbg_fn("fail");
+ gk20a_dbg_fn("fail");
if (in)
pmu->dmem.free(&pmu->dmem, in->alloc.dmem.offset,
in->alloc.dmem.size);
struct pmu_cmd cmd;
u32 seq, status;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
memset(&cmd, 0, sizeof(struct pmu_cmd));
cmd.hdr.unit_id = PMU_UNIT_PG;
BUG_ON(status != 0);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
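/*
 * Illustrative sketch: after an ELPG command is posted (zeroed struct pmu_cmd,
 * PMU_UNIT_PG header, gk20a_pmu_cmd_post as above), the caller waits for the
 * ack to flip pmu->elpg_stat. The wait helper below reuses
 * pmu_wait_message_cond(), gk20a_get_gr_idle_timeout() and PMU_ELPG_STAT_ON
 * exactly as they appear later in this patch; the helper itself and its name
 * are hypothetical.
 */
static int pmu_wait_elpg_on(struct gk20a *g, struct pmu_gk20a *pmu)
{
	/* block until the PMU acknowledges ELPG_ALLOW, or the timeout expires */
	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
			&pmu->elpg_stat, PMU_ELPG_STAT_ON);
	if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
		gk20a_err(dev_from_gk20a(g), "ELPG_ALLOW_ACK failed");
		return -ETIMEDOUT;
	}
	return 0;
}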
int ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!pmu->elpg_ready || !pmu->initialized)
goto exit;
/* something is not right if we end up in following code path */
if (unlikely(pmu->elpg_refcnt > 1)) {
- nvhost_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
+ gk20a_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
__func__, pmu->elpg_refcnt);
WARN_ON(1);
}
exit_unlock:
mutex_unlock(&pmu->elpg_mutex);
exit:
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return ret;
}
struct pmu_gk20a *pmu = container_of(to_delayed_work(work),
struct pmu_gk20a, elpg_enable);
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
mutex_lock(&pmu->elpg_mutex);
mutex_unlock(&pmu->elpg_mutex);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
}
static int gk20a_pmu_disable_elpg_defer_enable(struct gk20a *g, bool enable)
u32 seq;
int ret = 0;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!pmu->elpg_ready || !pmu->initialized)
return 0;
pmu->elpg_refcnt--;
if (pmu->elpg_refcnt > 0) {
- nvhost_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
+ gk20a_warn(dev_from_gk20a(g), "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
__func__, pmu->elpg_refcnt);
WARN_ON(1);
ret = 0;
&pmu->elpg_stat, PMU_ELPG_STAT_ON);
if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"ELPG_ALLOW_ACK failed, elpg_stat=%d",
pmu->elpg_stat);
pmu_dump_elpg_stats(pmu);
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
&pmu->elpg_stat, PMU_ELPG_STAT_OFF);
if (pmu->elpg_stat != PMU_ELPG_STAT_OFF) {
- nvhost_err(dev_from_gk20a(g),
+ gk20a_err(dev_from_gk20a(g),
"ELPG_DISALLOW_ACK failed");
pmu_dump_elpg_stats(pmu);
pmu_dump_falcon_stats(pmu);
exit_unlock:
mutex_unlock(&pmu->elpg_mutex);
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return ret;
}
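/*
 * Illustrative sketch: ELPG on/off is reference counted (hence the
 * elpg_refcnt mismatch warnings above), so callers bracket work that must run
 * with power-gating disengaged in balanced disable/enable pairs. The function
 * names follow the driver's apparent convention (cf.
 * gk20a_pmu_disable_elpg_defer_enable above); the caller itself is
 * hypothetical.
 */
static int do_work_with_elpg_off(struct gk20a *g)
{
	int err;

	/* take an "ELPG off" reference for the duration of the operation */
	err = gk20a_pmu_disable_elpg(g);
	if (err)
		return err;

	/* ... work that requires graphics power-gating to stay off ... */

	/* drop the reference; ELPG may re-engage once the count reaches zero */
	gk20a_pmu_enable_elpg(g);
	return 0;
}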
struct pmu_gk20a *pmu = &g->pmu;
int err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (enable)
err = pmu_perfmon_start_sampling(pmu);
struct pmu_gk20a *pmu = &g->pmu;
u32 elpg_ingating_time, elpg_ungating_time, gating_cnt;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
if (!support_gk20a_pmu())
return 0;
pmu->remove_support = NULL;
}
- nvhost_dbg_fn("done");
+ gk20a_dbg_fn("done");
return 0;
}
break;
default:
- nvhost_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
+ gk20a_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
__func__, p_ap_cmd->cmn.cmd_id);
return 0x2f;
}
p_callback, pmu, &seq, ~0);
if (!status) {
- nvhost_dbg_pmu(
+ gk20a_dbg_pmu(
"%s: Unable to submit Adaptive Power Command %d\n",
__func__, p_ap_cmd->cmn.cmd_id);
goto err_return;
break;
default:
- nvhost_dbg_pmu(
+ gk20a_dbg_pmu(
"%s: Invalid Adaptive Power Message: %x\n",
__func__, msg->msg.pg.ap_msg.cmn.msg_id);
break;
/*
- * drivers/video/tegra/host/gk20a/priv_ring_gk20a.c
- *
* GK20A priv ring
*
- * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/delay.h> /* for mdelay */
status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
- nvhost_dbg_info("ringmaster intr status0: 0x%08x,"
+ gk20a_dbg_info("ringmaster intr status0: 0x%08x,"
"status1: 0x%08x", status0, status1);
if (status0 & (0x1 | 0x2 | 0x4)) {
} while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry);
if (retry <= 0)
- nvhost_warn(dev_from_gk20a(g),
+ gk20a_warn(dev_from_gk20a(g),
"priv ringmaster cmd ack too many retries");
status0 = gk20a_readl(g, pri_ringmaster_intr_status0_r());
status1 = gk20a_readl(g, pri_ringmaster_intr_status1_r());
- nvhost_dbg_info("ringmaster intr status0: 0x%08x,"
+ gk20a_dbg_info("ringmaster intr status0: 0x%08x,"
" status1: 0x%08x", status0, status1);
}
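/*
 * Illustrative sketch: the retry loop above polls the ringmaster command
 * register until the hardware clears the command field (no_cmd) or the retry
 * budget runs out. A hypothetical version of that poll is shown below;
 * pri_ringmaster_command_r() and pri_ringmaster_command_cmd_v() are assumed
 * accessor names, while pri_ringmaster_command_cmd_no_cmd_v(), gk20a_warn()
 * and udelay() (via <linux/delay.h>) are used as above.
 */
static void priv_ring_wait_cmd_ack(struct gk20a *g)
{
	u32 cmd;
	int retry = 100;

	do {
		/* re-read the command field until hardware clears it */
		cmd = pri_ringmaster_command_cmd_v(
			gk20a_readl(g, pri_ringmaster_command_r()));
		udelay(20);
	} while (cmd != pri_ringmaster_command_cmd_no_cmd_v() && --retry);

	if (retry <= 0)
		gk20a_warn(dev_from_gk20a(g),
			"priv ringmaster cmd ack too many retries");
}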
bool skip_read_lo, skip_read_hi;
bool ok;
- nvhost_dbg(dbg_fn | dbg_gpu_dbg, "");
+ gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
ch = dbg_s->ch;
case REGOP(READ_32):
ops[i].value_hi = 0;
ops[i].value_lo = gk20a_readl(g, ops[i].offset);
- nvhost_dbg(dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "read_32 0x%08x from 0x%08x",
ops[i].value_lo, ops[i].offset);
break;
ops[i].value_hi =
gk20a_readl(g, ops[i].offset + 4);
- nvhost_dbg(dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "read_64 0x%08x:%08x from 0x%08x",
ops[i].value_hi, ops[i].value_lo,
ops[i].offset);
break;
/* now update first 32bits */
gk20a_writel(g, ops[i].offset, data32_lo);
- nvhost_dbg(dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
data32_lo, ops[i].offset);
/* if desired, update second 32bits */
if (ops[i].op == REGOP(WRITE_64)) {
gk20a_writel(g, ops[i].offset + 4, data32_hi);
- nvhost_dbg(dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "Wrote 0x%08x to 0x%08x ",
data32_hi, ops[i].offset + 4);
}
}
clean_up:
- nvhost_dbg(dbg_gpu_dbg, "ret=%d", err);
+ gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
return err;
}
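/*
 * Illustrative sketch: 64-bit regops are handled above as two 32-bit BAR0
 * accesses, low word at the given offset and high word at offset + 4. A
 * hypothetical helper showing that composition for the read path; the helper
 * name is an assumption, gk20a_readl() is the accessor used above.
 */
static u64 regop_read_64(struct gk20a *g, u32 offset)
{
	/* the two halves are fetched separately over the 32-bit register bus */
	u32 lo = gk20a_readl(g, offset);
	u32 hi = gk20a_readl(g, offset + 4);

	return ((u64)hi << 32) | lo;
}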
break;
default:
op->status |= REGOP(STATUS_UNSUPPORTED_OP);
- /*nvhost_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
+ /*gk20a_err(dbg_s->dev, "Invalid regops op %d!", op->op);*/
err = -EINVAL;
break;
}
*/
default:
op->status |= REGOP(STATUS_INVALID_TYPE);
- /*nvhost_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
+ /*gk20a_err(dbg_s->dev, "Invalid regops type %d!", op->type);*/
err = -EINVAL;
break;
}
} else if (op->type == REGOP(TYPE_GR_CTX)) {
/* it's a context-relative op */
if (!dbg_s->ch) {
- nvhost_err(dbg_s->dev, "can't perform ctx regop unless bound");
+ gk20a_err(dbg_s->dev, "can't perform ctx regop unless bound");
op->status = REGOP(STATUS_UNSUPPORTED_OP);
return -ENODEV;
}
/* support only 24-bit 4-byte aligned offsets */
if (offset & 0xFF000003) {
- nvhost_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
+ gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
op->status |= REGOP(STATUS_INVALID_OFFSET);
return -EINVAL;
}
}
if (!valid) {
- nvhost_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
+ gk20a_err(dbg_s->dev, "invalid regop offset: 0x%x\n", offset);
op->status |= REGOP(STATUS_INVALID_OFFSET);
return -EINVAL;
}
ok &= !err;
}
- nvhost_dbg(dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d\n",
+ gk20a_dbg(gpu_dbg_gpu_dbg, "ctx_wrs:%d ctx_rds:%d\n",
*ctx_wr_count, *ctx_rd_count);
return ok;
*
* GK20A Therm
*
- * Copyright (c) 2011 - 2012, NVIDIA Corporation.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "../dev.h"
{
u32 err;
- nvhost_dbg_fn("");
+ gk20a_dbg_fn("");
err = gk20a_init_therm_reset_enable_hw(g);
if (err)