gk20a_dbg_fn("");
- ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
- PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+ ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+ PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
0, pgprot_dmacoherent(PAGE_KERNEL));
if (!ctx_ptr)
return -ENOMEM;
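
CPU access to these buffers goes through a temporary vmap() because they are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so no kernel virtual address exists; dma_alloc_attrs() hands back an array of struct page pointers instead. A minimal sketch of the recurring pattern, assuming only the mem_desc fields this diff touches (the helper names are hypothetical, not part of the patch):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Hypothetical helper: make a NO_KERNEL_MAPPING allocation CPU-visible. */
    static void *mem_desc_map_cpu(struct mem_desc *mem)
    {
            /* mem->pages is the page array returned by dma_alloc_attrs() */
            return vmap(mem->pages,
                        PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
    }

    /* Pairs with mem_desc_map_cpu(); callers vunmap() as soon as the
     * CPU update is done. */
    static void mem_desc_unmap_cpu(void *cpu_va)
    {
            vunmap(cpu_va);
    }
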
(u64_hi32(ch_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) <<
(32 - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
- size = gr->global_ctx_buffer[PAGEPOOL].size /
+ size = gr->global_ctx_buffer[PAGEPOOL].mem.size /
gr_scc_pagepool_total_pages_byte_granularity_v();
if (size == g->ops.gr.pagepool_default_size(g))
if (err)
goto clean_up;
- gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].pages,
- PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].size) >>
+ gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].mem.pages,
+ PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].mem.size) >>
PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
if (!gold_ptr)
goto clean_up;
- ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
- PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+ ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+ PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
0, pgprot_dmacoherent(PAGE_KERNEL));
if (!ctx_ptr)
goto clean_up;
gk20a_mem_rd32(gold_ptr, i);
}
- gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+ gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
gr->ctx_vars.golden_image_initialized = true;
/* Flush and invalidate before cpu update. */
g->ops.mm.l2_flush(g, true);
- ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
- PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+ ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+ PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
0, pgprot_dmacoherent(PAGE_KERNEL));
if (!ctx_ptr)
return -ENOMEM;
/* Flush and invalidate before cpu update. */
g->ops.mm.l2_flush(g, true);
- ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
- PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+ ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+ PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
0, pgprot_dmacoherent(PAGE_KERNEL));
if (!ctx_ptr)
return -ENOMEM;
return 0;
}
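
The change below begins the core of the refactor: each buffer's ad-hoc pages/iova/size/sgt/attrs fields are folded into a single embedded mem member, and allocation/freeing go through the common gk20a_gmmu_alloc_attr()/gk20a_gmmu_free_attr() helpers. Inferred purely from the members this diff dereferences (pages, sgt, size, gpu_va), the aggregate looks roughly like this; the authoritative definition lives in the nvgpu mm headers:

    /* Assumed shape of the consolidated descriptor; members not visible
     * in this diff are omitted. */
    struct mem_desc {
            struct page **pages;    /* CPU pages backing the DMA allocation */
            struct sg_table *sgt;   /* scatter-gather table for GMMU mapping */
            size_t size;            /* allocation size in bytes */
            u64 gpu_va;             /* GPU virtual address once mapped */
    };
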
-static void gk20a_gr_destroy_ctx_buffer(struct platform_device *pdev,
+static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
struct gr_ctx_buffer_desc *desc)
{
- struct device *dev = &pdev->dev;
if (!desc)
return;
- if (desc->sgt) {
- gk20a_free_sgtable(&desc->sgt);
- desc->sgt = NULL;
- }
- if (desc->pages) {
- dma_free_attrs(dev, desc->size, desc->pages,
- desc->iova, &desc->attrs);
- desc->pages = NULL;
- }
+ gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem);
}
-static int gk20a_gr_alloc_ctx_buffer(struct platform_device *pdev,
+static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
struct gr_ctx_buffer_desc *desc,
size_t size)
{
- struct device *dev = &pdev->dev;
- DEFINE_DMA_ATTRS(attrs);
- dma_addr_t iova;
int err = 0;
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-
- desc->pages = dma_alloc_attrs(&pdev->dev, size, &iova,
- GFP_KERNEL, &attrs);
- if (!desc->pages)
- return -ENOMEM;
+ err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+ size, &desc->mem);
+ if (err)
+ return err;
- desc->iova = iova;
- desc->size = size;
- desc->attrs = attrs;
desc->destroy = gk20a_gr_destroy_ctx_buffer;
- err = gk20a_get_sgtable_from_pages(&pdev->dev, &desc->sgt, desc->pages,
- desc->iova, desc->size);
- if (err) {
- dma_free_attrs(dev, desc->size, desc->pages,
- desc->iova, &desc->attrs);
- memset(desc, 0, sizeof(*desc));
- }
return err;
}
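
For reference, the helper the hunk above switches to plausibly folds together exactly the sequence it deletes: a dma_alloc_attrs() call with the requested attribute, followed by building an sg-table over the resulting pages. A sketch under that assumption (the real gk20a_gmmu_alloc_attr() lives in mm_gk20a.c and may differ in detail):

    static int gmmu_alloc_attr_sketch(struct gk20a *g, enum dma_attr attr,
                                      size_t size, struct mem_desc *mem)
    {
            struct device *d = dev_from_gk20a(g);
            DEFINE_DMA_ATTRS(attrs);
            dma_addr_t iova;
            int err;

            dma_set_attr(attr, &attrs);

            /* With DMA_ATTR_NO_KERNEL_MAPPING this returns a page array. */
            mem->pages = dma_alloc_attrs(d, size, &iova, GFP_KERNEL, &attrs);
            if (!mem->pages)
                    return -ENOMEM;

            err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
                                               iova, size);
            if (err) {
                    dma_free_attrs(d, size, mem->pages, iova, &attrs);
                    memset(mem, 0, sizeof(*mem));
                    return err;
            }

            mem->size = size;
            return 0;
    }

Callers then deal only with the descriptor: on any error the whole allocation is torn down in one gk20a_gmmu_free_attr() call, which is what lets gk20a_gr_alloc_ctx_buffer() above shed its unwind code.
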
gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
- err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR],
+ err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
cb_buffer_size);
if (err)
goto clean_up;
gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
- err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL],
+ err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
pagepool_buffer_size);
if (err)
goto clean_up;
gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
- err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE],
+ err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
attr_buffer_size);
if (err)
goto clean_up;
gk20a_dbg_info("golden_image_size : %d",
gr->ctx_vars.golden_image_size);
- err = gk20a_gr_alloc_ctx_buffer(pdev,
+ err = gk20a_gr_alloc_ctx_buffer(g,
&gr->global_ctx_buffer[GOLDEN_CTX],
gr->ctx_vars.golden_image_size);
if (err)
gk20a_dbg_info("priv_access_map_size : %d",
gr->ctx_vars.priv_access_map_size);
- err = gk20a_gr_alloc_ctx_buffer(pdev,
+ err = gk20a_gr_alloc_ctx_buffer(g,
&gr->global_ctx_buffer[PRIV_ACCESS_MAP],
gr->ctx_vars.priv_access_map_size);
gk20a_err(dev_from_gk20a(g), "fail");
for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
if (gr->global_ctx_buffer[i].destroy) {
- gr->global_ctx_buffer[i].destroy(pdev,
+ gr->global_ctx_buffer[i].destroy(g,
&gr->global_ctx_buffer[i]);
}
}
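
Because the destroy callback is invoked through a function pointer (see the loop above), its stored type must change along with the call sites. A sketch of the descriptor after this patch, inferred from the new callback signature and the .mem accesses in this diff; the real definition is in the gr_gk20a header:

    struct gr_ctx_buffer_desc {
            void (*destroy)(struct gk20a *g,
                            struct gr_ctx_buffer_desc *desc);
            struct mem_desc mem;    /* replaces pages/iova/size/sgt/attrs */
    };
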
static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
{
- struct platform_device *pdev = g->dev;
struct gr_gk20a *gr = &g->gr;
- DEFINE_DMA_ATTRS(attrs);
u32 i;
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
- gr->global_ctx_buffer[i].destroy(pdev,
+ gr->global_ctx_buffer[i].destroy(g,
&gr->global_ctx_buffer[i]);
}
gk20a_dbg_fn("");
/* Circular Buffer */
- if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) {
- sgt = gr->global_ctx_buffer[CIRCULAR].sgt;
- size = gr->global_ctx_buffer[CIRCULAR].size;
+ if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
+ sgt = gr->global_ctx_buffer[CIRCULAR].mem.sgt;
+ size = gr->global_ctx_buffer[CIRCULAR].mem.size;
} else {
- sgt = gr->global_ctx_buffer[CIRCULAR_VPR].sgt;
- size = gr->global_ctx_buffer[CIRCULAR_VPR].size;
+ sgt = gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt;
+ size = gr->global_ctx_buffer[CIRCULAR_VPR].mem.size;
}
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
g_bfr_size[CIRCULAR_VA] = size;
/* Attribute Buffer */
- if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt == NULL)) {
- sgt = gr->global_ctx_buffer[ATTRIBUTE].sgt;
- size = gr->global_ctx_buffer[ATTRIBUTE].size;
+ if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
+ sgt = gr->global_ctx_buffer[ATTRIBUTE].mem.sgt;
+ size = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
} else {
- sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt;
- size = gr->global_ctx_buffer[ATTRIBUTE_VPR].size;
+ sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt;
+ size = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.size;
}
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
g_bfr_size[ATTRIBUTE_VA] = size;
/* Page Pool */
- if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].sgt == NULL)) {
- sgt = gr->global_ctx_buffer[PAGEPOOL].sgt;
- size = gr->global_ctx_buffer[PAGEPOOL].size;
+ if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
+ sgt = gr->global_ctx_buffer[PAGEPOOL].mem.sgt;
+ size = gr->global_ctx_buffer[PAGEPOOL].mem.size;
} else {
- sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].sgt;
- size = gr->global_ctx_buffer[PAGEPOOL_VPR].size;
+ sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt;
+ size = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.size;
}
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
g_bfr_size[PAGEPOOL_VA] = size;
/* Golden Image */
- sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt;
- size = gr->global_ctx_buffer[GOLDEN_CTX].size;
+ sgt = gr->global_ctx_buffer[GOLDEN_CTX].mem.sgt;
+ size = gr->global_ctx_buffer[GOLDEN_CTX].mem.size;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
gk20a_mem_flag_none);
if (!gpu_va)
g_bfr_size[GOLDEN_CTX_VA] = size;
/* Priv register Access Map */
- sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].sgt;
- size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+ sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.sgt;
+ size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
gk20a_mem_flag_none);
if (!gpu_va)
for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
if (g_bfr_va[i]) {
gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
- gr->global_ctx_buffer[i].size,
+ gr->global_ctx_buffer[i].mem.size,
gk20a_mem_flag_none);
g_bfr_va[i] = 0;
}
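
The three map sites above repeat one selection rule: a VPR channel uses the VPR variant of a global buffer only if that variant was actually allocated (non-NULL sgt); otherwise it falls back to the normal buffer. A hypothetical helper capturing the rule, with names that are not part of the patch:

    static struct mem_desc *pick_global_ctx_mem(struct gr_gk20a *gr,
                                                bool vpr, int idx, int vpr_idx)
    {
            /* the VPR variant exists only if it was allocated */
            if (vpr && gr->global_ctx_buffer[vpr_idx].mem.sgt)
                    return &gr->global_ctx_buffer[vpr_idx].mem;
            return &gr->global_ctx_buffer[idx].mem;
    }

Usage would be, e.g., pick_global_ctx_mem(gr, c->vpr, CIRCULAR, CIRCULAR_VPR) for the circular buffer case.
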
{
struct gr_ctx_desc *gr_ctx = NULL;
struct gr_gk20a *gr = &g->gr;
- struct device *d = dev_from_gk20a(g);
- struct sg_table *sgt;
- DEFINE_DMA_ATTRS(attrs);
int err = 0;
- dma_addr_t iova;
gk20a_dbg_fn("");
if (!gr_ctx)
return -ENOMEM;
- gr_ctx->size = gr->ctx_vars.buffer_total_size;
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
- gr_ctx->pages = dma_alloc_attrs(d, gr_ctx->size,
- &iova, GFP_KERNEL, &attrs);
- if (!gr_ctx->pages) {
- err = -ENOMEM;
- goto err_free_ctx;
- }
-
- gr_ctx->iova = iova;
- err = gk20a_get_sgtable_from_pages(d, &sgt, gr_ctx->pages,
- gr_ctx->iova, gr_ctx->size);
+ err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+ gr->ctx_vars.buffer_total_size,
+ &gr_ctx->mem);
if (err)
- goto err_free;
-
- gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size,
- NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- gk20a_mem_flag_none);
- if (!gr_ctx->gpu_va)
- goto err_free_sgt;
+ goto err_free_ctx;
- gk20a_free_sgtable(&sgt);
+ gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
+ NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+ gk20a_mem_flag_none);
+ if (!gr_ctx->mem.gpu_va)
+ goto err_free_mem;
*__gr_ctx = gr_ctx;
return 0;
- err_free_sgt:
- gk20a_free_sgtable(&sgt);
- err_free:
- dma_free_attrs(d, gr_ctx->size,
- gr_ctx->pages, gr_ctx->iova, &attrs);
- gr_ctx->pages = NULL;
- gr_ctx->iova = 0;
+ err_free_mem:
+ gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
err_free_ctx:
kfree(gr_ctx);
gr_ctx = NULL;
void gr_gk20a_free_gr_ctx(struct gk20a *g,
struct vm_gk20a *vm, struct gr_ctx_desc *gr_ctx)
{
- struct device *d = dev_from_gk20a(g);
- DEFINE_DMA_ATTRS(attrs);
-
gk20a_dbg_fn("");
- if (!gr_ctx || !gr_ctx->gpu_va)
+ if (!gr_ctx || !gr_ctx->mem.gpu_va)
return;
- gk20a_gmmu_unmap(vm, gr_ctx->gpu_va,
- gr_ctx->size, gk20a_mem_flag_none);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
- dma_free_attrs(d, gr_ctx->size,
- gr_ctx->pages, gr_ctx->iova, &attrs);
- gr_ctx->pages = NULL;
- gr_ctx->iova = 0;
+ gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
+ gr_ctx->mem.size, gk20a_mem_flag_none);
+ gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
kfree(gr_ctx);
}
}
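
Teardown ordering in gr_gk20a_free_gr_ctx() above is deliberate, since each step dereferences state the next one releases. Annotated restatement of the same sequence:

    /* 1. remove the GMMU mapping while gr_ctx->mem is still valid */
    gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
                     gr_ctx->mem.size, gk20a_mem_flag_none);
    /* 2. release the DMA allocation tracked in gr_ctx->mem */
    gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
    /* 3. only now free the descriptor itself */
    kfree(gr_ctx);
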
/* commit gr ctx buffer */
- err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+ err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
if (err) {
gk20a_err(dev_from_gk20a(g),
"fail to commit gr ctx buffer");
DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
PAGE_SIZE);
- data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].pages,
- PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >>
+ data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.pages,
+ PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size) >>
PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
if (!data) {
gk20a_err(dev_from_gk20a(g),
/* would have been a variant of gr_gk20a_apply_instmem_overrides */
/* recoded in-place instead. */
- ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
- PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+ ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+ PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
0, pgprot_dmacoherent(PAGE_KERNEL));
if (!ctx_ptr) {
err = -ENOMEM;
attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
- gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
+ gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
- gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
+ gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
- gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
+ gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
gk20a_dbg_info("priv access map size : %d",
gr->ctx_vars.priv_access_map_size);
- gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
gr->ctx_vars.priv_access_map_size;
return 0;
/* Circular Buffer */
gpu_va = gk20a_vm_alloc_va(ch_vm,
- gr->global_ctx_buffer[CIRCULAR].size, 0);
+ gr->global_ctx_buffer[CIRCULAR].mem.size, 0);
if (!gpu_va)
goto clean_up;
g_bfr_va[CIRCULAR_VA] = gpu_va;
- g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].size;
+ g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
/* Attribute Buffer */
gpu_va = gk20a_vm_alloc_va(ch_vm,
- gr->global_ctx_buffer[ATTRIBUTE].size, 0);
+ gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0);
if (!gpu_va)
goto clean_up;
g_bfr_va[ATTRIBUTE_VA] = gpu_va;
- g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].size;
+ g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
/* Page Pool */
gpu_va = gk20a_vm_alloc_va(ch_vm,
- gr->global_ctx_buffer[PAGEPOOL].size, 0);
+ gr->global_ctx_buffer[PAGEPOOL].mem.size, 0);
if (!gpu_va)
goto clean_up;
g_bfr_va[PAGEPOOL_VA] = gpu_va;
- g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].size;
+ g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
/* Priv register Access Map */
gpu_va = gk20a_vm_alloc_va(ch_vm,
- gr->global_ctx_buffer[PRIV_ACCESS_MAP].size, 0);
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0);
if (!gpu_va)
goto clean_up;
g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
g_bfr_size[PRIV_ACCESS_MAP_VA] =
- gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+ gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
msg.handle = platform->virt_handle;
if (!gr_ctx)
return -ENOMEM;
- gr_ctx->size = gr->ctx_vars.buffer_total_size;
- gr_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->size, 0);
+ gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
+ gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->mem.size, 0);
- if (!gr_ctx->gpu_va) {
+ if (!gr_ctx->mem.gpu_va) {
kfree(gr_ctx);
return -ENOMEM;
}
msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
msg.handle = platform->virt_handle;
p->handle = c->virt_ctx;
- p->gr_ctx_va = gr_ctx->gpu_va;
+ p->gr_ctx_va = gr_ctx->mem.gpu_va;
p->class_num = c->obj_class;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
if (err || msg.ret) {
- kfree(gr_ctx);
- gk20a_vm_free_va(ch_vm, gr_ctx->gpu_va, gr_ctx->size, 0);
+ gk20a_vm_free_va(ch_vm, gr_ctx->mem.gpu_va,
+ gr_ctx->mem.size, 0);
+ kfree(gr_ctx);
err = -ENOMEM;
} else
c->ch_ctx.gr_ctx = gr_ctx;
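
On the virtualized path the guest never allocates backing memory itself: it reserves a GPU VA locally with gk20a_vm_alloc_va() and asks the server to allocate and map the real context there. A condensed sketch of the RPC above; the wrapper name and the explicit platform parameter are assumptions for illustration, while the message fields are the ones the diff shows:

    static int vgpu_gr_ctx_alloc_rpc(struct gk20a_platform *platform,
                                     struct channel_gk20a *c,
                                     struct gr_ctx_desc *gr_ctx)
    {
            struct tegra_vgpu_cmd_msg msg;
            struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
            int err;

            msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
            msg.handle = platform->virt_handle;     /* vgpu connection handle */
            p->handle = c->virt_ctx;                /* server-side channel handle */
            p->gr_ctx_va = gr_ctx->mem.gpu_va;      /* guest-reserved GPU VA */
            p->class_num = c->obj_class;

            err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
            return (err || msg.ret) ? -ENOMEM : 0;
    }
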
gk20a_dbg_fn("");
- if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->gpu_va) {
+ if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->mem.gpu_va) {
struct tegra_vgpu_cmd_msg msg;
struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
int err;
err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
WARN_ON(err || msg.ret);
- gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->gpu_va,
- ch_ctx->gr_ctx->size, 0);
- ch_ctx->gr_ctx->gpu_va = 0;
+ gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->mem.gpu_va,
+ ch_ctx->gr_ctx->mem.size, 0);
+ ch_ctx->gr_ctx->mem.gpu_va = 0;
kfree(ch_ctx->gr_ctx);
}
}
}
/* commit gr ctx buffer */
- err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+ err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
if (err) {
gk20a_err(dev_from_gk20a(g),
"fail to commit gr ctx buffer");