/* ..then release mapped memory */
gk20a_deinit_cde_img(cde_ctx);
gk20a_gmmu_unmap(vm, cde_ctx->backing_store_vaddr,
- g->gr.compbit_store.size, 1);
+ g->gr.compbit_store.mem.size, 1);
/* housekeeping on app */
list_del(&cde_ctx->list);
new_data = cde_ctx->compbit_size;
break;
case TYPE_PARAM_BACKINGSTORE_SIZE:
- new_data = g->gr.compbit_store.size;
+ new_data = g->gr.compbit_store.mem.size;
break;
case TYPE_PARAM_SOURCE_SMMU_ADDR:
new_data = gk20a_mm_gpuva_to_iova_base(cde_ctx->vm,
}
gk20a_dbg(gpu_dbg_cde, "cde: buffer=cbc, size=%zu, gpuva=%llx\n",
- g->gr.compbit_store.size, cde_ctx->backing_store_vaddr);
+ g->gr.compbit_store.mem.size, cde_ctx->backing_store_vaddr);
gk20a_dbg(gpu_dbg_cde, "cde: buffer=compbits, size=%llu, gpuva=%llx\n",
cde_ctx->compbit_size, cde_ctx->compbit_vaddr);
}
/* map backing store to gpu virtual space */
- vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.sgt,
- g->gr.compbit_store.size,
+ vaddr = gk20a_gmmu_map(ch->vm, &gr->compbit_store.mem.sgt,
+ g->gr.compbit_store.mem.size,
NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
gk20a_mem_flag_read_only);
return 0;
err_init_cde_img:
- gk20a_gmmu_unmap(ch->vm, vaddr, g->gr.compbit_store.size, 1);
+ gk20a_gmmu_unmap(ch->vm, vaddr, g->gr.compbit_store.mem.size, 1);
err_map_backingstore:
err_alloc_gpfifo:
gk20a_vm_put(ch->vm);
size_t compbit_backing_size)
{
struct gr_gk20a *gr = &g->gr;
- int order = order_base_2(compbit_backing_size >> PAGE_SHIFT);
- struct page *pages;
- struct sg_table *sgt;
- int err = 0;
-
- /* allocate pages */
- pages = alloc_pages(GFP_KERNEL, order);
- if (!pages) {
- gk20a_dbg(gpu_dbg_pte, "alloc_pages failed\n");
- err = -ENOMEM;
- goto err_alloc_pages;
- }
-
- /* clean up the pages */
- memset(page_address(pages), 0, compbit_backing_size);
- /* allocate room for placing the pages pointer.. */
- gr->compbit_store.pages =
- kzalloc(sizeof(*gr->compbit_store.pages), GFP_KERNEL);
- if (!gr->compbit_store.pages) {
- gk20a_dbg(gpu_dbg_pte, "failed to allocate pages struct");
- err = -ENOMEM;
- goto err_alloc_compbit_store;
- }
-
- err = gk20a_get_sgtable_from_pages(&g->dev->dev, &sgt, &pages, 0,
- compbit_backing_size);
- if (err) {
- gk20a_dbg(gpu_dbg_pte, "could not get sg table for pages\n");
- goto err_alloc_sg_table;
- }
-
- /* store the parameters to gr structure */
- *gr->compbit_store.pages = pages;
- gr->compbit_store.base_iova = sg_phys(sgt->sgl);
- gr->compbit_store.size = compbit_backing_size;
- gr->compbit_store.sgt = sgt;
-
- return 0;
-
-err_alloc_sg_table:
- kfree(gr->compbit_store.pages);
- gr->compbit_store.pages = NULL;
-err_alloc_compbit_store:
- __free_pages(pages, order);
-err_alloc_pages:
- return err;
+ return gk20a_gmmu_alloc_attr(g, DMA_ATTR_FORCE_CONTIGUOUS,
+ compbit_backing_size,
+ &gr->compbit_store.mem);
}
static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
size_t compbit_backing_size)
{
- struct device *d = dev_from_gk20a(g);
struct gr_gk20a *gr = &g->gr;
- DEFINE_DMA_ATTRS(attrs);
- dma_addr_t iova;
- int err;
-
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-
- gr->compbit_store.pages =
- dma_alloc_attrs(d, compbit_backing_size, &iova,
- GFP_KERNEL, &attrs);
- if (!gr->compbit_store.pages) {
- gk20a_err(dev_from_gk20a(g), "failed to allocate backing store for compbit : size %zu",
- compbit_backing_size);
- return -ENOMEM;
- }
-
- gr->compbit_store.base_iova = iova;
- gr->compbit_store.size = compbit_backing_size;
- err = gk20a_get_sgtable_from_pages(d,
- &gr->compbit_store.sgt,
- gr->compbit_store.pages, iova,
- compbit_backing_size);
- if (err) {
- gk20a_err(dev_from_gk20a(g), "failed to allocate sgt for backing store");
- return err;
- }
- return 0;
+ /*
+  * Allocate the compbit backing store through the common
+  * gk20a_gmmu_alloc_attr() helper instead of open-coded
+  * dma_alloc_attrs() + sgt construction; the buffer (and its
+  * size/sgt, read elsewhere as compbit_store.mem.size and
+  * compbit_store.mem.sgt) now lives in gr->compbit_store.mem.
+  * DMA_ATTR_NO_KERNEL_MAPPING is preserved from the removed code.
+  * NOTE(review): assumes the helper reports its own allocation
+  * failures, since the gk20a_err() messages are dropped — confirm.
+  */
+ return gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+ compbit_backing_size,
+ &gr->compbit_store.mem);
}
static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
u32 compbit_base_post_divide;
u64 compbit_base_post_multiply64;
- u64 compbit_store_base_iova;
+ u64 compbit_store_iova;
u64 compbit_base_post_divide64;
if (tegra_platform_is_linsim())
- compbit_store_base_iova = gr->compbit_store.base_iova;
+ compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
else
- compbit_store_base_iova = gk20a_mm_smmu_vaddr_translate(g,
- gr->compbit_store.base_iova);
+ compbit_store_iova = gk20a_mm_iova_addr(g,
+ gr->compbit_store.mem.sgt->sgl);
- compbit_base_post_divide64 = compbit_store_base_iova >>
+ compbit_base_post_divide64 = compbit_store_iova >>
ltc_ltcs_ltss_cbc_base_alignment_shift_v();
do_div(compbit_base_post_divide64, g->ltc_count);
compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
g->ltc_count) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();
- if (compbit_base_post_multiply64 < compbit_store_base_iova)
+ if (compbit_base_post_multiply64 < compbit_store_iova)
compbit_base_post_divide++;
/* Bug 1477079 indicates sw adjustment on the posted divided base. */
gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
- (u32)(compbit_store_base_iova >> 32),
- (u32)(compbit_store_base_iova & 0xffffffff),
+ (u32)(compbit_store_iova >> 32),
+ (u32)(compbit_store_iova & 0xffffffff),
compbit_base_post_divide);
gr->compbit_store.base_hw = compbit_base_post_divide;