struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
u32 virt_addr_lo;
u32 virt_addr_hi;
- u32 i;
+ u32 i, v;
int ret = 0;
void *ctx_ptr = NULL;
ch_ctx->pm_ctx.ctx_sw_mode);
mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
+	/* set priv access map: point the ctxsw image at the global
+	 * whitelist buffer and switch FECS into use-map mode */
+ virt_addr_lo =
+ u64_lo32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
+ virt_addr_hi =
+ u64_hi32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
+
+ mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
+ ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f());
+ mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
+ virt_addr_lo);
+ mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
+ virt_addr_hi);
+ /* disable verif features */
+ v = mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
+ v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
+ v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
+ mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
+
vunmap(ctx_ptr);
gk20a_mm_l2_invalidate(g);
BUG_ON(g->gr.ctx_vars.zcull_ctxsw_image_size != zcull_ctx_image_size);
}
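+	/* one bit per 32-bit register: a 512 KiB map (4 Mi bits) is
+	 * enough to cover 16 MiB of priv register address space */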
+ g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
+
nvhost_dbg_fn("done");
return 0;
}
gr->global_ctx_buffer[GOLDEN_CTX].size =
gr->ctx_vars.golden_image_size;
+ nvhost_dbg_info("priv_access_map_size : %d",
+ gr->ctx_vars.priv_access_map_size);
+
+ mem = nvhost_memmgr_alloc(memmgr, gr->ctx_vars.priv_access_map_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ 0);
+ if (IS_ERR(mem))
+ goto clean_up;
+
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref = mem;
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+ gr->ctx_vars.priv_access_map_size;
+
nvhost_dbg_fn("done");
return 0;
goto clean_up;
g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
+ /* Priv register Access Map */
+ gpu_va = gk20a_vm_map(ch_vm, memmgr,
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref,
+ /*offset_align, flags, kind*/
+ 0, 0, 0, NULL, false,
+ mem_flag_none);
+ if (!gpu_va)
+ goto clean_up;
+ g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
+
c->ch_ctx.global_ctx_buffer_mapped = true;
return 0;
c->first_init = true;
}
gk20a_mm_l2_invalidate(g);
+
c->num_objects++;
nvhost_dbg_fn("done");
return 0;
}
+/*
+ * XXX Merge this list with the debugger/profiler
+ * session regops whitelists?
+ */
+static const u32 wl_addr_gk20a[] = {
+ /* this list must be sorted (low to high) */
+ 0x404468, /* gr_pri_mme_max_instructions */
+ 0x418800, /* gr_pri_gpcs_setup_debug */
+ 0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg */
+ 0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg */
+ 0x419e10, /* gr_pri_gpcs_tpcs_sm_dbgr_control0 */
+ 0x419f78, /* gr_pri_gpcs_tpcs_sm_disp_ctrl */
+};
+
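+/*
+ * One bit per 32-bit priv register, set when the register may be
+ * written through firmware methods (set_falcon[4]); FECS consults
+ * this map and raises a firmware_method interrupt on a miss.
+ */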
+static int gr_gk20a_init_access_map(struct gk20a *g)
+{
+ struct gr_gk20a *gr = &g->gr;
+ struct mem_handle *mem;
+ void *data;
+ u32 w, page, nr_pages =
+ DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
+ PAGE_SIZE);
+
+ mem = gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref;
+
+ for (page = 0; page < nr_pages; page++) {
+ data = nvhost_memmgr_kmap(mem, page);
+ if (!data) {
+ nvhost_err(dev_from_gk20a(g),
+ "failed to map priv access map memory");
+ return -ENOMEM;
+ }
+ memset(data, 0x0, PAGE_SIZE);
+
+		/* linear rescan of the whole whitelist per page: fine
+		 * only while ARRAY_SIZE(wl_addr_gk20a) stays small */
+ for (w = 0; w < ARRAY_SIZE(wl_addr_gk20a); w++) {
+ u32 map_bit, map_byte, map_shift;
+ u32 map_page, pb_idx;
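+			/* worked example (4 KiB pages): addr 0x419e10
+			 * -> map_bit 0x106784, map_byte 0x20cf0
+			 * -> page 0x20, byte 0xcf0, bit 4 */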
+ map_bit = wl_addr_gk20a[w] >> 2;
+ map_byte = map_bit >> 3;
+ map_page = map_byte >> PAGE_SHIFT;
+ if (map_page != page)
+ continue;
+ map_shift = map_bit & 0x7; /* i.e. 0-7 */
+ pb_idx = (map_byte & ~PAGE_MASK);
+ nvhost_dbg_info(
+ "access map addr:0x%x pg:%d pb:%d bit:%d",
+ wl_addr_gk20a[w], map_page, pb_idx, map_shift);
+ ((u8 *)data)[pb_idx] |= (1 << map_shift);
+ }
+ /* uncached on cpu side, so no need to flush? */
+ nvhost_memmgr_kunmap(mem, page, data);
+ }
+
+ return 0;
+}
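+
+/*
+ * Illustrative lookup (not part of this patch): with the map built
+ * above, a register address addr is whitelisted iff
+ *
+ *	bit = addr >> 2;
+ *	map[bit >> 3] & (1 << (bit & 7))
+ *
+ * which is the test FECS effectively performs for set_falcon[4].
+ */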
+
static int gk20a_init_gr_setup_sw(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
if (err)
goto clean_up;
+ err = gr_gk20a_init_access_map(g);
+ if (err)
+ goto clean_up;
+
mutex_init(&gr->ctx_mutex);
spin_lock_init(&gr->ch_tlb_lock);
gr_intr &= ~gr_intr_class_error_pending_f();
}
+ /* this one happens if someone tries to hit a non-whitelisted
+ * register using set_falcon[4] */
+ if (gr_intr & gr_intr_firmware_method_pending_f()) {
+		need_reset |= true;
+		nvhost_dbg(dbg_intr | dbg_gpu_dbg,
+			   "firmware method intr pending\n");
+ gk20a_writel(g, gr_intr_r(),
+ gr_intr_firmware_method_reset_f());
+ gr_intr &= ~gr_intr_firmware_method_pending_f();
+ }
+
if (gr_intr & gr_intr_exception_pending_f()) {
u32 exception = gk20a_readl(g, gr_exception_r());
struct fifo_gk20a *f = &g->fifo;