rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: gk20a implement priv access map
author Ken Adams <kadams@nvidia.com>
Tue, 25 Feb 2014 00:30:00 +0000 (16:30 -0800)
committer Matthew Pedro <mapedro@nvidia.com>
Tue, 18 Mar 2014 03:27:50 +0000 (20:27 -0700)
This change enables a whitelist for priv register accesses on gk20a
through the set_falcon[4] path (used by usermode drivers).

Bug 1375360

Change-Id: I18274097fddaab0a15a8ad59f1d23f9e974a50e7
Signed-off-by: Ken Adams <kadams@nvidia.com>
Reviewed-on: http://git-master/r/375651
(cherry picked from commit afd7fec44674af2569ac6443cf245e25786cc335)
Reviewed-on: http://git-master/r/376912
GVS: Gerrit_Virtual_Submit
Reviewed-by: Matthew Pedro <mapedro@nvidia.com>
Tested-by: Matthew Pedro <mapedro@nvidia.com>
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.h
drivers/video/tegra/host/gk20a/hw_ctxsw_prog_gk20a.h
drivers/video/tegra/host/gk20a/hw_gr_gk20a.h

index 17cd032340da7366b973793534d36bc20df94bbe..15ec8167788c57343b97196c3811ffbe75712a96 100644 (file)
@@ -1572,7 +1572,7 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
        struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
        u32 virt_addr_lo;
        u32 virt_addr_hi;
-       u32 i;
+       u32 i, v;
        int ret = 0;
        void *ctx_ptr = NULL;
 
@@ -1616,6 +1616,25 @@ static int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
                ch_ctx->pm_ctx.ctx_sw_mode);
        mem_wr32(ctx_ptr + ctxsw_prog_main_image_pm_ptr_o(), 0, 0);
 
+       /* set priv access map */
+       virt_addr_lo =
+                u64_lo32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
+       virt_addr_hi =
+                u64_hi32(ch_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]);
+
+       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_config_o(), 0,
+                ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f());
+       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_lo_o(), 0,
+                virt_addr_lo);
+       mem_wr32(ctx_ptr + ctxsw_prog_main_image_priv_access_map_addr_hi_o(), 0,
+                virt_addr_hi);
+       /* disable verif features */
+       v = mem_rd32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0);
+       v = v & ~(ctxsw_prog_main_image_misc_options_verif_features_m());
+       v = v | ctxsw_prog_main_image_misc_options_verif_features_disabled_f();
+       mem_wr32(ctx_ptr + ctxsw_prog_main_image_misc_options_o(), 0, v);
+
+
        vunmap(ctx_ptr);
 
        gk20a_mm_l2_invalidate(g);
@@ -2124,6 +2143,8 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
                BUG_ON(g->gr.ctx_vars.zcull_ctxsw_image_size != zcull_ctx_image_size);
        }
 
+       g->gr.ctx_vars.priv_access_map_size = 512 * 1024;
+
        nvhost_dbg_fn("done");
        return 0;
 }
@@ -2253,6 +2274,20 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        gr->global_ctx_buffer[GOLDEN_CTX].size =
                gr->ctx_vars.golden_image_size;
 
+       nvhost_dbg_info("priv_access_map_size : %d",
+                  gr->ctx_vars.priv_access_map_size);
+
+       mem = nvhost_memmgr_alloc(memmgr, gr->ctx_vars.priv_access_map_size,
+                                 DEFAULT_ALLOC_ALIGNMENT,
+                                 DEFAULT_ALLOC_FLAGS,
+                                 0);
+       if (IS_ERR(mem))
+               goto clean_up;
+
+       gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref = mem;
+       gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+               gr->ctx_vars.priv_access_map_size;
+
        nvhost_dbg_fn("done");
        return 0;
 
@@ -2346,6 +2381,16 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
                goto clean_up;
        g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
 
+       /* Priv register Access Map */
+       gpu_va = gk20a_vm_map(ch_vm, memmgr,
+                             gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref,
+                             /*offset_align, flags, kind*/
+                             0, 0, 0, NULL, false,
+                             mem_flag_none);
+       if (!gpu_va)
+               goto clean_up;
+       g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
+
        c->ch_ctx.global_ctx_buffer_mapped = true;
        return 0;
 
@@ -2630,6 +2675,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
                c->first_init = true;
        }
        gk20a_mm_l2_invalidate(g);
+
        c->num_objects++;
 
        nvhost_dbg_fn("done");
@@ -4441,6 +4487,63 @@ out:
        return 0;
 }
 
+/*
+ * XXX Merge this list with the debugger/profiler
+ * session regops whitelists?
+ */
+static u32 wl_addr_gk20a[] = {
+       /* this list must be sorted (low to high) */
+       0x404468, /* gr_pri_mme_max_instructions       */
+       0x418800, /* gr_pri_gpcs_setup_debug           */
+       0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg      */
+       0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg     */
+       0x419e10, /* gr_pri_gpcs_tpcs_sm_dbgr_control0 */
+       0x419f78, /* gr_pri_gpcs_tpcs_sm_disp_ctrl     */
+};
+
+static int gr_gk20a_init_access_map(struct gk20a *g)
+{
+       struct gr_gk20a *gr = &g->gr;
+       struct mem_handle *mem;
+       void *data;
+       u32 w, page, nr_pages =
+               DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
+                            PAGE_SIZE);
+
+       mem = gr->global_ctx_buffer[PRIV_ACCESS_MAP].ref;
+
+       for (page = 0; page < nr_pages; page++) {
+               data = nvhost_memmgr_kmap(mem, page);
+               if (!data) {
+                       nvhost_err(dev_from_gk20a(g),
+                                  "failed to map priv access map memory");
+                       return -ENOMEM;
+               }
+               memset(data, 0x0, PAGE_SIZE);
+
+               /* no good unless ARRAY_SIZE(w) == something small */
+               for (w = 0; w < ARRAY_SIZE(wl_addr_gk20a); w++) {
+                       u32 map_bit, map_byte, map_shift;
+                       u32 map_page, pb_idx;
+                       map_bit = wl_addr_gk20a[w] >> 2;
+                       map_byte = map_bit >> 3;
+                       map_page = map_byte >> PAGE_SHIFT;
+                       if (map_page != page)
+                               continue;
+                       map_shift = map_bit & 0x7; /* i.e. 0-7 */
+                       pb_idx = (map_byte & ~PAGE_MASK);
+                       nvhost_dbg_info(
+                               "access map addr:0x%x pg:%d pb:%d bit:%d",
+                               wl_addr_gk20a[w], map_page, pb_idx, map_shift);
+                       ((u8 *)data)[pb_idx] |= (1 << map_shift);
+               }
+               /* uncached on cpu side, so no need to flush? */
+               nvhost_memmgr_kunmap(mem, page, data);
+       }
+
+       return 0;
+}
+
 static int gk20a_init_gr_setup_sw(struct gk20a *g)
 {
        struct gr_gk20a *gr = &g->gr;
@@ -4486,6 +4589,10 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
        if (err)
                goto clean_up;
 
+       err = gr_gk20a_init_access_map(g);
+       if (err)
+               goto clean_up;
+
        mutex_init(&gr->ctx_mutex);
        spin_lock_init(&gr->ch_tlb_lock);
 
@@ -5227,6 +5334,16 @@ int gk20a_gr_isr(struct gk20a *g)
                gr_intr &= ~gr_intr_class_error_pending_f();
        }
 
+       /* this one happens if someone tries to hit a non-whitelisted
+        * register using set_falcon[4] */
+       if (gr_intr & gr_intr_firmware_method_pending_f()) {
+               need_reset |= true;
+               nvhost_dbg(dbg_intr | dbg_gpu_dbg, "firmware method intr pending\n");
+               gk20a_writel(g, gr_intr_r(),
+                       gr_intr_firmware_method_reset_f());
+               gr_intr &= ~gr_intr_firmware_method_pending_f();
+       }
+
        if (gr_intr & gr_intr_exception_pending_f()) {
                u32 exception = gk20a_readl(g, gr_exception_r());
                struct fifo_gk20a *f = &g->fifo;
index ce726e9eb2a074827dd508473a0041712bbba0a9..2d138cf12e6580421f7fa89ffcbd600dbdb09155 100644 (file)
@@ -39,7 +39,8 @@ enum /* global_ctx_buffer */ {
        PAGEPOOL_VPR            = 4,
        ATTRIBUTE_VPR           = 5,
        GOLDEN_CTX              = 6,
-       NR_GLOBAL_CTX_BUF       = 7
+       PRIV_ACCESS_MAP         = 7,
+       NR_GLOBAL_CTX_BUF       = 8
 };
 
 /* either ATTRIBUTE or ATTRIBUTE_VPR maps to ATTRIBUTE_VA */
@@ -48,7 +49,8 @@ enum  /*global_ctx_buffer_va */ {
        PAGEPOOL_VA             = 1,
        ATTRIBUTE_VA            = 2,
        GOLDEN_CTX_VA           = 3,
-       NR_GLOBAL_CTX_BUF_VA    = 4
+       PRIV_ACCESS_MAP_VA      = 4,
+       NR_GLOBAL_CTX_BUF_VA    = 5
 };
 
 enum {
@@ -173,6 +175,8 @@ struct gr_gk20a {
 
                u32 buffer_header_size;
 
+               u32 priv_access_map_size;
+
                struct gr_ucode_gk20a ucode;
 
                struct av_list_gk20a  sw_bundle_init;
index 44617ba4286648ee03929e886102313036c354ce..745cfd47dd896e71870af4e2002824005bc7f1e5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -118,6 +118,46 @@ static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void)
 {
        return 0x600dc0de;
 }
+static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void)
+{
+       return 0x000000a0;
+}
+static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void)
+{
+       return 0x0;
+}
+static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_none_f(void)
+{
+       return 0x1;
+}
+static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void)
+{
+       return 0x2;
+}
+static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void)
+{
+       return 0x000000a4;
+}
+static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void)
+{
+       return 0x000000a8;
+}
+static inline u32 ctxsw_prog_main_image_misc_options_o(void)
+{
+       return 0x0000003c;
+}
+static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void)
+{
+       return 0x1 << 3;
+}
+static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void)
+{
+       return 0x0;
+}
+static inline u32 ctxsw_prog_main_image_misc_options_verif_features_enabled_f(void)
+{
+       return 0x8;
+}
 static inline u32 ctxsw_prog_local_priv_register_ctl_o(void)
 {
        return 0x0000000c;
index 1247a93e987ce8817522fbf746d2e9892fc25b36..99d03313c35d5693c7d2348005c1d21e4e1adcff 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -122,6 +122,14 @@ static inline u32 gr_intr_exception_reset_f(void)
 {
        return 0x200000;
 }
+static inline u32 gr_intr_firmware_method_pending_f(void)
+{
+       return 0x100;
+}
+static inline u32 gr_intr_firmware_method_reset_f(void)
+{
+       return 0x100;
+}
 static inline u32 gr_intr_nonstall_r(void)
 {
        return 0x00400120;