rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: gk20a: add read only flag for pmu firmware
author: Deepak Nibade <dnibade@nvidia.com>
Wed, 18 Sep 2013 07:46:26 +0000 (13:16 +0530)
committer: Ajay Nandakumar <anandakumarm@nvidia.com>
Thu, 3 Oct 2013 13:47:42 +0000 (19:17 +0530)
- add new parameter 'rw_flag' to functions gk20a_vm_map() and
  update_gmmu_ptes() to pass read/write only flag
- update_gmmu_ptes() then sets GMMU read/write only attributes
  based on 'rw_flag'
- add read only flag 'mem_flag_read_only' to vm->map() while
  loading pmu firmware
- add 'mem_flag_none' in all other calls to vm->map()

Bug 1309863

Change-Id: I840dfebf4c93746eb726fa99bba821d60285404b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/276189
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Tested-by: Sachin Nikam <snikam@nvidia.com>
(cherry picked from commit e72ab3af0d3bf9acffdfe899e8bc3622b5fa7578)
Signed-off-by: Ajay Nandakumar <anandakumarm@nvidia.com>
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.h
drivers/video/tegra/host/gk20a/pmu_gk20a.c

index 6c584c448811999996e78f83cd57c9121fcd220f..8e846e7fda346a8aa143afb1ae19f6919cebb104 100644 (file)
@@ -682,7 +682,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        q->base_gva = ch_vm->map(ch_vm, memmgr,
                        q->mem.ref,
                         /*offset_align, flags, kind*/
-                       0, 0, 0, NULL, false);
+                       0, 0, 0, NULL, false, mem_flag_none);
        if (!q->base_gva) {
                nvhost_err(d, "ch %d : failed to map gpu va"
                           "for priv cmd buffer", c->hw_chid);
@@ -983,7 +983,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
        c->gpfifo.gpu_va = ch_vm->map(ch_vm, memmgr,
                                c->gpfifo.mem.ref,
                                /*offset_align, flags, kind*/
-                               0, 0, 0, NULL, false);
+                               0, 0, 0, NULL, false, mem_flag_none);
        if (!c->gpfifo.gpu_va) {
                nvhost_err(d, "channel %d : failed to map"
                           " gpu_va for gpfifo", c->hw_chid);
index 9fa173c25ebd85bf625a662dbb6607c8a161e340..cff380434a44191c4746618ff7a290985fb9aab2 100644 (file)
@@ -510,7 +510,8 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
                                            /*offset_align, flags, kind*/
                                            4096, 0, 0,
                                            &f->userd.mem.sgt,
-                                           false);
+                                           false,
+                                           mem_flag_none);
        f->userd.cpu_pa = gk20a_mm_iova_addr(f->userd.mem.sgt->sgl);
        nvhost_dbg(dbg_map, "userd physical address : 0x%08llx - 0x%08llx",
                        f->userd.cpu_pa, f->userd.cpu_pa + f->userd_total_size);
index b526e31abf5a6866683ff7bf5751891a95c86169..569efddead1d6d96bc2471a610fcdd1209586a69 100644 (file)
@@ -1843,7 +1843,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
                            /*offset_align, flags, kind*/
                            0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
-                           NULL, false);
+                           NULL, false, mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[CIRCULAR_VA] = gpu_va;
@@ -1857,7 +1857,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
                            /*offset_align, flags, kind*/
                            0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
-                           NULL, false);
+                           NULL, false, mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[ATTRIBUTE_VA] = gpu_va;
@@ -1871,7 +1871,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
                            /*offset_align, flags, kind*/
                            0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
-                           NULL, false);
+                           NULL, false, mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -1880,7 +1880,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        gpu_va = ch_vm->map(ch_vm, memmgr,
                            gr->global_ctx_buffer[GOLDEN_CTX].ref,
                            /*offset_align, flags, kind*/
-                           0, 0, 0, NULL, false);
+                           0, 0, 0, NULL, false, mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
@@ -1944,7 +1944,8 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
        gr_ctx->gpu_va = ch_vm->map(ch_vm, memmgr,
                gr_ctx->mem.ref,
                /*offset_align, flags, kind*/
-               0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0, NULL, false);
+               0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0, NULL, false,
+               mem_flag_none);
        if (!gr_ctx->gpu_va) {
                nvhost_memmgr_put(memmgr, gr_ctx->mem.ref);
                return -ENOMEM;
@@ -1984,7 +1985,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
        patch_ctx->gpu_va = ch_vm->map(ch_vm, memmgr,
                                patch_ctx->mem.ref,
                                /*offset_align, flags, kind*/
-                               0, 0, 0, NULL, false);
+                               0, 0, 0, NULL, false, mem_flag_none);
        if (!patch_ctx->gpu_va)
                goto clean_up;
 
index 616b022c6c9d0fc1710af264527e03333cbb9a30..963be1ab14f3c455f63f0539e7ef979a87530a8e 100644 (file)
@@ -107,7 +107,8 @@ static int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
 static int update_gmmu_ptes(struct vm_gk20a *vm,
                            enum gmmu_pgsz_gk20a pgsz_idx, struct sg_table *sgt,
                            u64 first_vaddr, u64 last_vaddr,
-                           u8 kind_v, u32 ctag_offset, bool cacheable);
+                           u8 kind_v, u32 ctag_offset, bool cacheable,
+                           int rw_flag);
 static void update_gmmu_pde(struct vm_gk20a *vm, u32 i);
 
 
@@ -917,7 +918,8 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
                        u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
                        u32 kind,
                        struct sg_table **sgt,
-                       bool user_mapped)
+                       bool user_mapped,
+                       int rw_flag)
 {
        struct gk20a *g = gk20a_from_vm(vm);
        struct nvhost_allocator *ctag_allocator = &g->gr.comp_tags;
@@ -984,7 +986,7 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
        }
 
        /* pin buffer to get phys/iovmm addr */
-       bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, mem_flag_none);
+       bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, rw_flag);
        if (IS_ERR(bfr.sgt)) {
                /* Falling back to physical is actually possible
                 * here in many cases if we use 4K phys pages in the
@@ -1178,7 +1180,8 @@ static u64 gk20a_vm_map(struct vm_gk20a *vm,
                               map_offset, map_offset + bfr.size - 1,
                               bfr.kind_v,
                               bfr.ctag_offset,
-                              flags & NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE);
+                              flags & NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                              rw_flag);
        if (err) {
                nvhost_err(d, "failed to update ptes on map");
                goto clean_up;
@@ -1224,7 +1227,8 @@ static int update_gmmu_ptes(struct vm_gk20a *vm,
                            struct sg_table *sgt,
                            u64 first_vaddr, u64 last_vaddr,
                            u8 kind_v, u32 ctag_offset,
-                           bool cacheable)
+                           bool cacheable,
+                           int rw_flag)
 {
        int err;
        u32 pde_lo, pde_hi, pde_i;
@@ -1299,6 +1303,14 @@ static int update_gmmu_ptes(struct vm_gk20a *vm,
                                        gmmu_pte_kind_f(kind_v) |
                                        gmmu_pte_comptagline_f(ctag);
 
+                               if (rw_flag == mem_flag_read_only) {
+                                       pte_w[0] |= gmmu_pte_read_only_true_f()
+                                            | gmmu_pte_write_disable_true_f();
+                               } else if (rw_flag == mem_flag_write_only) {
+                                       pte_w[0] |=
+                                               gmmu_pte_read_disable_true_f();
+                               }
+
                                if (!cacheable)
                                        pte_w[1] |= gmmu_pte_vol_true_f();
 
@@ -1470,7 +1482,8 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
                               0, /* n/a for unmap */
                               mapped_buffer->addr,
                               mapped_buffer->addr + mapped_buffer->size - 1,
-                              0, 0, false /* n/a for unmap */);
+                              0, 0, false /* n/a for unmap */,
+                              mem_flag_none);
 
        /* detect which if any pdes/ptes can now be released */
 
@@ -1806,7 +1819,8 @@ static int gk20a_as_map_buffer(struct nvhost_as_share *as_share,
        }
 
        ret_va = vm->map(vm, memmgr, r, *offset_align,
-                       flags, 0/*no kind here, to be removed*/, NULL, true);
+                       flags, 0/*no kind here, to be removed*/, NULL, true,
+                       mem_flag_none);
        *offset_align = ret_va;
        if (!ret_va)
                err = -EINVAL;
index 4db90c9b80bf7070b400d9d295dd11f10552023b..6b3f963fda83ac370707070fb3345380c881c22d 100644 (file)
@@ -192,7 +192,8 @@ struct vm_gk20a {
                   u32 flags /*NVHOST_MAP_BUFFER_FLAGS_*/,
                   u32 kind,
                   struct sg_table **sgt,
-                  bool user_mapped);
+                  bool user_mapped,
+                  int rw_flag);
 
        /* unmap handle from kernel */
        void (*unmap)(struct vm_gk20a *vm,
index 736dddfe73dca198ba589907bd0c16ebcbe4589c..af51f75ee39ff66e124fb494f80f3aaa73323a33 100644 (file)
@@ -1045,7 +1045,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        pmu->ucode.pmu_va = vm->map(vm, memmgr, pmu->ucode.mem.ref,
                        /*offset_align, flags, kind*/
-                       0, 0, 0, NULL, false);
+                       0, 0, 0, NULL, false, mem_flag_read_only);
        if (!pmu->ucode.pmu_va) {
                nvhost_err(d, "failed to map pmu ucode memory!!");
                return err;
@@ -1076,7 +1076,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        pmu->pg_buf.pmu_va = vm->map(vm, memmgr, pmu->pg_buf.mem.ref,
                         /*offset_align, flags, kind*/
-                       0, 0, 0, NULL, false);
+                       0, 0, 0, NULL, false, mem_flag_none);
        if (!pmu->pg_buf.pmu_va) {
                nvhost_err(d, "failed to map fecs pg buffer");
                err = -ENOMEM;
@@ -1096,7 +1096,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        pmu->seq_buf.pmu_va = vm->map(vm, memmgr, pmu->seq_buf.mem.ref,
                        /*offset_align, flags, kind*/
-                       0, 0, 0, NULL, false);
+                       0, 0, 0, NULL, false, mem_flag_none);
        if (!pmu->seq_buf.pmu_va) {
                nvhost_err(d, "failed to map zbc buffer");
                err = -ENOMEM;