&ucode_info->surface_desc.sgt,
ucode_info->surface_desc.size,
0, /* flags */
- gk20a_mem_flag_read_only);
+ gk20a_mem_flag_read_only,
+ false);
if (!ucode_info->surface_desc.gpu_va) {
gk20a_err(d, "failed to update gmmu ptes\n");
return -ENOMEM;
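/*
 * Sketch of the updated mapper prototype, reconstructed from the
 * definition hunk further below (return type assumed to be u64, as
 * every caller assigns the result to a u64 GPU VA):
 *
 *	u64 gk20a_gmmu_map(struct vm_gk20a *vm,
 *			   struct sg_table **sgt,
 *			   u64 size,
 *			   u32 flags,
 *			   int rw_flag,
 *			   bool priv);
 *
 * The new trailing 'bool priv' requests a privileged GPU mapping;
 * the ucode surface above stays unprivileged (false).
 */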
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, true);
if (!gpu_va)
goto clean_up;
g_bfr_va[CIRCULAR_VA] = gpu_va;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, false);
if (!gpu_va)
goto clean_up;
g_bfr_va[ATTRIBUTE_VA] = gpu_va;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, true);
if (!gpu_va)
goto clean_up;
g_bfr_va[PAGEPOOL_VA] = gpu_va;
sgt = gr->global_ctx_buffer[GOLDEN_CTX].mem.sgt;
size = gr->global_ctx_buffer[GOLDEN_CTX].mem.size;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, true);
if (!gpu_va)
goto clean_up;
g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.sgt;
size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, true);
if (!gpu_va)
goto clean_up;
g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
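/*
 * The map-check-assign pattern above repeats for every global context
 * buffer. The helper below is hypothetical (not part of the patch) and
 * only illustrates how the new priv flag is threaded through: the
 * circular, pagepool, golden-context and priv-access-map buffers are
 * mapped privileged, the attribute buffer is not.
 */
static int map_global_ctx_buffer(struct vm_gk20a *ch_vm,
				 struct sg_table **sgt, u64 size,
				 u32 flags, bool priv, u64 *va_out)
{
	/* forward priv to the mapper; a zero GPU VA indicates failure */
	u64 gpu_va = gk20a_gmmu_map(ch_vm, sgt, size, flags,
				    gk20a_mem_flag_none, priv);

	if (!gpu_va)
		return -ENOMEM;
	*va_out = gpu_va;
	return 0;
}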
gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none, true);
if (!gr_ctx->mem.gpu_va)
goto err_free_mem;
u8 kind_v, u32 ctag_offset, bool cacheable,
bool unmapped_pte, int rw_flag,
bool sparse,
- u32 flags);
+ bool priv);
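/*
 * The declaration above (update_gmmu_ptes_locked() in this file) swaps
 * the opaque 'u32 flags' for an explicit 'bool priv'. The flags word
 * was previously forwarded to get_iova_addr() and down the page-table
 * walk; after this change get_iova_addr() receives a literal 0 and the
 * walk only carries the decision of whether to set the privilege bit
 * in the final PTE. Assumed full declaration, with the leading
 * parameters elided here:
 *
 *	static int update_gmmu_ptes_locked(struct vm_gk20a *vm, ...,
 *					   u8 kind_v, u32 ctag_offset,
 *					   bool cacheable,
 *					   bool unmapped_pte, int rw_flag,
 *					   bool sparse,
 *					   bool priv);
 */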
static int __must_check gk20a_init_system_vm(struct mm_gk20a *mm);
static int __must_check gk20a_init_bar1_vm(struct mm_gk20a *mm);
static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm);
int rw_flag,
bool clear_ctags,
bool sparse,
+ bool priv,
struct vm_gk20a_mapping_batch *batch)
{
int err = 0;
NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE,
rw_flag,
sparse,
- flags);
+ priv);
if (err) {
gk20a_err(d, "failed to update ptes on map");
goto fail_validate;
flags, rw_flag,
clear_ctags,
false,
+ false,
batch);
if (!map_offset)
goto clean_up;
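/*
 * Assumed shape of the updated internal mapper. Only the trailing
 * parameters (rw_flag, clear_ctags, sparse, priv, batch) are confirmed
 * by this patch; the leading ones are reconstructed and may differ:
 *
 *	u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 *				  u64 map_offset,
 *				  struct sg_table *sgt,
 *				  u64 buffer_offset,
 *				  u64 size,
 *				  int pgsz_idx,
 *				  u8 kind_v,
 *				  u32 ctag_offset,
 *				  u32 flags,
 *				  int rw_flag,
 *				  bool clear_ctags,
 *				  bool sparse,
 *				  bool priv,        (new)
 *				  struct vm_gk20a_mapping_batch *batch);
 *
 * User buffer mappings, as in the hunk above, always pass priv = false.
 */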
gk20a_mem_flag_read_only,
false, /* clear_ctags */
false, /* sparse */
+ false, /* priv */
NULL); /* mapping_batch handle */
if (!mapped_buffer->ctag_map_win_addr) {
struct sg_table **sgt,
u64 size,
u32 flags,
- int rw_flag)
+ int rw_flag,
+ bool priv)
{
struct gk20a *g = gk20a_from_vm(vm);
u64 vaddr;
flags, rw_flag,
false, /* clear_ctags */
false, /* sparse */
+ priv, /* priv */
NULL); /* mapping_batch handle */
mutex_unlock(&vm->update_gmmu_lock);
if (!vaddr) {
if (err)
return err;
- mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0, gk20a_mem_flag_none);
+ mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+ gk20a_mem_flag_none, false);
if (!mem->gpu_va) {
err = -ENOMEM;
goto fail_free;
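/*
 * Generic kernel allocations mapped through this path default to
 * priv = false; callers that need a privileged mapping (e.g. the GR
 * context and global context buffers above) call gk20a_gmmu_map()
 * directly with priv = true.
 */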
u64 *iova,
u32 kind_v, u32 *ctag,
bool cacheable, bool unmapped_pte,
- int rw_flag, bool sparse, u32 flags)
+ int rw_flag, bool sparse, bool priv)
{
struct gk20a *g = gk20a_from_vm(vm);
bool small_valid, big_valid;
u64 *iova,
u32 kind_v, u32 *ctag,
bool cacheable, bool unmapped_pte,
- int rw_flag, bool sparse, u32 flags)
+ int rw_flag, bool sparse, bool priv)
{
struct gk20a *g = gk20a_from_vm(vm);
u32 ctag_granularity = g->ops.fb.compression_page_size(g);
gmmu_pte_address_sys_f(*iova
>> gmmu_pte_address_shift_v());
+ if (priv)
+ pte_w[0] |= gmmu_pte_privilege_true_f();
+
pte_w[1] = gmmu_pte_aperture_video_memory_f() |
gmmu_pte_kind_f(kind_v) |
gmmu_pte_comptagline_f(*ctag / ctag_granularity);
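/*
 * Illustrative sketch of how the privilege bit lands in the first PTE
 * word for a valid system-memory page. pte_word0() is hypothetical and
 * gmmu_pte_valid_true_f() is assumed from the generated hardware
 * headers; the other field helpers all appear in this patch. With the
 * bit set, only privileged accesses may touch the page.
 */
static u32 pte_word0(u64 iova, bool priv)
{
	u32 w = gmmu_pte_valid_true_f() |
		gmmu_pte_address_sys_f(iova >> gmmu_pte_address_shift_v());

	if (priv)
		w |= gmmu_pte_privilege_true_f();
	return w;
}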
int rw_flag,
bool sparse,
int lvl,
- u32 flags)
+ bool priv)
{
const struct gk20a_mmu_level *l = &vm->mmu_levels[lvl];
const struct gk20a_mmu_level *next_l = &vm->mmu_levels[lvl+1];
err = l->update_entry(vm, pte, pde_i, pgsz_idx,
sgl, offset, iova,
kind_v, ctag, cacheable, unmapped_pte,
- rw_flag, sparse, flags);
+ rw_flag, sparse, priv);
if (err)
return err;
gpu_va,
next,
kind_v, ctag, cacheable, unmapped_pte,
- rw_flag, sparse, lvl+1, flags);
+ rw_flag, sparse, lvl+1, priv);
unmap_gmmu_pages(next_pte);
if (err)
bool cacheable, bool unmapped_pte,
int rw_flag,
bool sparse,
- u32 flags)
+ bool priv)
{
struct gk20a *g = gk20a_from_vm(vm);
int ctag_granularity = g->ops.fb.compression_page_size(g);
gk20a_dbg(gpu_dbg_pte, "size_idx=%d, iova=%llx, buffer offset %lld, nents %d",
pgsz_idx,
- sgt ? g->ops.mm.get_iova_addr(vm->mm->g, sgt->sgl, flags)
+ sgt ? g->ops.mm.get_iova_addr(vm->mm->g, sgt->sgl, 0)
: 0ULL,
buffer_offset,
sgt ? sgt->nents : 0);
return -EINVAL;
if (sgt) {
- iova = g->ops.mm.get_iova_addr(vm->mm->g, sgt->sgl, flags);
+ iova = g->ops.mm.get_iova_addr(vm->mm->g, sgt->sgl, 0);
if (!vm->mm->bypass_smmu && iova) {
iova += space_to_skip;
} else {
&iova,
gpu_va, gpu_end,
kind_v, &ctag,
- cacheable, unmapped_pte, rw_flag, sparse, 0, flags);
+ cacheable, unmapped_pte, rw_flag, sparse, 0, priv);
unmap_gmmu_pages(&vm->pdb);
smp_mb();
gk20a_mem_flag_none,
false,
true,
+ false,
NULL);
if (!map_offset) {
mutex_unlock(&vm->update_gmmu_lock);
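/*
 * For reference, the trailing arguments of the sparse-mapping call
 * above are positional: rw_flag = gk20a_mem_flag_none,
 * clear_ctags = false, sparse = true, priv = false (new), batch = NULL.
 * Sparse placeholder mappings are never privileged.
 */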