q->base_gva = ch_vm->map(ch_vm, memmgr,
q->mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!q->base_gva) {
nvhost_err(d, "ch %d : failed to map gpu va "
"for priv cmd buffer", c->hw_chid);
c->gpfifo.gpu_va = ch_vm->map(ch_vm, memmgr,
c->gpfifo.mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!c->gpfifo.gpu_va) {
nvhost_err(d, "channel %d : failed to map"
" gpu_va for gpfifo", c->hw_chid);
/*offset_align, flags, kind*/
4096, 0, 0,
&f->userd.mem.sgt,
- false);
+ false,
+ mem_flag_none);
f->userd.cpu_pa = gk20a_mm_iova_addr(f->userd.mem.sgt->sgl);
nvhost_dbg(dbg_map, "userd physical address : 0x%08llx - 0x%08llx",
f->userd.cpu_pa, f->userd.cpu_pa + f->userd_total_size);
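/*
 * userd stays mem_flag_none: presumably both CPU and GPU update it at
 * runtime (e.g. the GP_GET/GP_PUT pointers), so a read-only GPU
 * mapping would fault normal submission.
 */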
gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
- NULL, false);
+ NULL, false, mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[CIRCULAR_VA] = gpu_va;
gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
- NULL, false);
+ NULL, false, mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[ATTRIBUTE_VA] = gpu_va;
gpu_va = ch_vm->map(ch_vm, memmgr, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0,
- NULL, false);
+ NULL, false, mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[PAGEPOOL_VA] = gpu_va;
gpu_va = ch_vm->map(ch_vm, memmgr,
gr->global_ctx_buffer[GOLDEN_CTX].ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
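/*
 * The circular, attribute, page pool and golden context buffers all
 * stay mem_flag_none, presumably because the GPU writes into them
 * while a context runs; only buffers the GPU merely fetches (like the
 * PMU ucode below) are candidates for mem_flag_read_only.
 */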
gr_ctx->gpu_va = ch_vm->map(ch_vm, memmgr,
gr_ctx->mem.ref,
/*offset_align, flags, kind*/
- 0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0, NULL, false);
+ 0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, 0, NULL, false,
+ mem_flag_none);
if (!gr_ctx->gpu_va) {
nvhost_memmgr_put(memmgr, gr_ctx->mem.ref);
return -ENOMEM;
patch_ctx->gpu_va = ch_vm->map(ch_vm, memmgr,
patch_ctx->mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!patch_ctx->gpu_va)
goto clean_up;
static int update_gmmu_ptes(struct vm_gk20a *vm,
enum gmmu_pgsz_gk20a pgsz_idx, struct sg_table *sgt,
u64 first_vaddr, u64 last_vaddr,
- u8 kind_v, u32 ctag_offset, bool cacheable);
+ u8 kind_v, u32 ctag_offset, bool cacheable,
+ int rw_flag);
static void update_gmmu_pde(struct vm_gk20a *vm, u32 i);
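/*
 * As the hunks below show, rw_flag travels two ways: gk20a_vm_map()
 * forwards it to nvhost_memmgr_pin(), presumably so the DMA mapping
 * direction matches the GPU's access, and update_gmmu_ptes() encodes
 * it into the PTE read/write-disable bits.
 */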
u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
u32 kind,
struct sg_table **sgt,
- bool user_mapped)
+ bool user_mapped,
+ int rw_flag)
{
struct gk20a *g = gk20a_from_vm(vm);
struct nvhost_allocator *ctag_allocator = &g->gr.comp_tags;
}
/* pin buffer to get phys/iovmm addr */
- bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, mem_flag_none);
+ bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, rw_flag);
if (IS_ERR(bfr.sgt)) {
/* Falling back to physical is actually possible
* here in many cases if we use 4K phys pages in the
map_offset, map_offset + bfr.size - 1,
bfr.kind_v,
bfr.ctag_offset,
- flags & NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE);
+ flags & NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+ rw_flag);
if (err) {
nvhost_err(d, "failed to update ptes on map");
goto clean_up;
struct sg_table *sgt,
u64 first_vaddr, u64 last_vaddr,
u8 kind_v, u32 ctag_offset,
- bool cacheable)
+ bool cacheable,
+ int rw_flag)
{
int err;
u32 pde_lo, pde_hi, pde_i;
gmmu_pte_kind_f(kind_v) |
gmmu_pte_comptagline_f(ctag);
+ if (rw_flag == mem_flag_read_only) {
+ pte_w[0] |= gmmu_pte_read_only_true_f()
+ | gmmu_pte_write_disable_true_f();
+ } else if (rw_flag == mem_flag_write_only) {
+ pte_w[0] |=
+ gmmu_pte_read_disable_true_f();
+ }
+
if (!cacheable)
pte_w[1] |= gmmu_pte_vol_true_f();
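/*
 * A sketch, not part of the patch: the flag-to-PTE translation above
 * could be factored into a helper. Only identifiers already visible in
 * this diff are used; the helper name itself is hypothetical.
 */
static inline u32 pte_rw_bits(int rw_flag)
{
	if (rw_flag == mem_flag_read_only)
		return gmmu_pte_read_only_true_f() |
			gmmu_pte_write_disable_true_f();
	if (rw_flag == mem_flag_write_only)
		return gmmu_pte_read_disable_true_f();
	return 0; /* mem_flag_none: PTE stays read/write */
}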
0, /* n/a for unmap */
mapped_buffer->addr,
mapped_buffer->addr + mapped_buffer->size - 1,
- 0, 0, false /* n/a for unmap */);
+ 0, 0, false /* n/a for unmap */,
+ mem_flag_none);
/* detect which if any pdes/ptes can now be released */
}
ret_va = vm->map(vm, memmgr, r, *offset_align,
- flags, 0/*no kind here, to be removed*/, NULL, true);
+ flags, 0/*no kind here, to be removed*/, NULL, true,
+ mem_flag_none);
*offset_align = ret_va;
if (!ret_va)
err = -EINVAL;
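/*
 * This is the userspace AS-map path; the ioctl carries no read/write
 * intent, so the mapping conservatively stays mem_flag_none.
 */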
u32 flags /*NVHOST_MAP_BUFFER_FLAGS_*/,
u32 kind,
struct sg_table **sgt,
- bool user_mapped);
+ bool user_mapped,
+ int rw_flag);
/* unmap handle from kernel */
void (*unmap)(struct vm_gk20a *vm,
pmu->ucode.pmu_va = vm->map(vm, memmgr, pmu->ucode.mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_read_only);
if (!pmu->ucode.pmu_va) {
nvhost_err(d, "failed to map pmu ucode memory!!");
return err;
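/*
 * The PMU ucode image is the one mapping switched to
 * mem_flag_read_only by this patch, presumably because the PMU only
 * fetches the ucode and the read-only PTE guards it against stray
 * writes.
 */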
pmu->pg_buf.pmu_va = vm->map(vm, memmgr, pmu->pg_buf.mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!pmu->pg_buf.pmu_va) {
nvhost_err(d, "failed to map fecs pg buffer");
err = -ENOMEM;
pmu->seq_buf.pmu_va = vm->map(vm, memmgr, pmu->seq_buf.mem.ref,
/*offset_align, flags, kind*/
- 0, 0, 0, NULL, false);
+ 0, 0, 0, NULL, false, mem_flag_none);
if (!pmu->seq_buf.pmu_va) {
nvhost_err(d, "failed to map zbc buffer");
err = -ENOMEM;
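/*
 * The PG and sequence buffers stay mem_flag_none, presumably because
 * the PMU/FECS write into them at runtime.
 */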