addr_hi = u64_hi32(c->userd_iova);
nvhost_dbg_info("channel %d : set ramfc userd 0x%16llx",
- c->hw_chid, c->userd_iova);
+ c->hw_chid, (u64)c->userd_iova);
mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
pbdma_userd_target_vid_mem_f() |
{
struct device *d = dev_from_gk20a(g);
int err = 0;
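+ /* Take the DMA address in a local dma_addr_t and copy it into the
+  * descriptor after a successful allocation. This assumes the iova
+  * field is declared with a type that may differ from dma_addr_t, so
+  * its address cannot be handed to dma_alloc_coherent() directly.
+  * The same pattern is applied to every allocation in this change. */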
+ dma_addr_t iova;
nvhost_dbg_fn("");
ch->inst_block.size = ram_in_alloc_size_v();
ch->inst_block.cpuva = dma_alloc_coherent(d,
ch->inst_block.size,
- &ch->inst_block.iova,
+ &iova,
GFP_KERNEL);
if (!ch->inst_block.cpuva) {
nvhost_err(d, "%s: memory allocation failed\n", __func__);
goto clean_up;
}
+ ch->inst_block.iova = iova;
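+ /* The hardware is assumed to be programmed with the physical address
+  * of the instance block, so the IOMMU address is translated back via
+  * gk20a_get_phys_from_iova() before use. */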
ch->inst_block.cpu_pa = gk20a_get_phys_from_iova(d,
ch->inst_block.iova);
if (!ch->inst_block.cpu_pa) {
}
nvhost_dbg_info("channel %d inst block physical addr: 0x%16llx",
- ch->hw_chid, ch->inst_block.cpu_pa);
+ ch->hw_chid, (u64)ch->inst_block.cpu_pa);
nvhost_dbg_fn("done");
return 0;
u32 i = 0, size;
int err = 0;
struct sg_table *sgt;
+ dma_addr_t iova;
/* Kernel can insert gpfifos before and after user gpfifos.
Before user gpfifos, kernel inserts fence_wait, which takes
c->gpfifo.entry_num * 2 * 10 * sizeof(u32) / 3);
q->mem.base_cpuva = dma_alloc_coherent(d, size,
- &q->mem.base_iova,
+ &iova,
GFP_KERNEL);
if (!q->mem.base_cpuva) {
nvhost_err(d, "%s: memory allocation failed\n", __func__);
goto clean_up;
}
+ q->mem.base_iova = iova;
q->mem.size = size;
err = gk20a_get_sgtable(d, &sgt,
u32 gpfifo_size;
int err = 0;
struct sg_table *sgt;
+ dma_addr_t iova;
/* Kernel can insert one extra gpfifo entry before user submitted gpfifos
and another one after, for internal usage. Triple the requested size. */
c->gpfifo.size = gpfifo_size * sizeof(struct gpfifo);
c->gpfifo.cpu_va = (struct gpfifo *)dma_alloc_coherent(d,
c->gpfifo.size,
- &c->gpfifo.iova,
+ &iova,
GFP_KERNEL);
if (!c->gpfifo.cpu_va) {
nvhost_err(d, "%s: memory allocation failed\n", __func__);
goto clean_up;
}
+ c->gpfifo.iova = iova;
c->gpfifo.entry_num = gpfifo_size;
c->gpfifo.get = c->gpfifo.put = 0;
runlist_size = ram_rl_entry_size_v() * f->num_channels;
for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
+ dma_addr_t iova;
+
runlist->mem[i].cpuva =
dma_alloc_coherent(d,
runlist_size,
- &runlist->mem[i].iova,
+ &iova,
GFP_KERNEL);
if (!runlist->mem[i].cpuva) {
dev_err(d, "memory allocation failed\n");
goto clean_up_runlist;
}
+ runlist->mem[i].iova = iova;
runlist->mem[i].size = runlist_size;
}
mutex_init(&runlist->mutex);
struct fifo_gk20a *f = &g->fifo;
struct device *d = dev_from_gk20a(g);
int chid, i, err = 0;
+ dma_addr_t iova;
nvhost_dbg_fn("");
f->userd.cpuva = dma_alloc_coherent(d,
f->userd_total_size,
- &f->userd.iova,
+ &iova,
GFP_KERNEL);
if (!f->userd.cpuva) {
dev_err(d, "memory allocation failed\n");
goto clean_up;
}
+ f->userd.iova = iova;
err = gk20a_get_sgtable(d, &f->userd.sgt,
f->userd.cpuva, f->userd.iova,
f->userd_total_size);
u32 pde_addr_lo;
u32 pde_addr_hi;
u64 pde_addr;
+ dma_addr_t iova;
/* Alloc mem of inst block */
p_ucode_info->inst_blk_desc.size = ram_in_alloc_size_v();
p_ucode_info->inst_blk_desc.cpuva = dma_alloc_coherent(d,
p_ucode_info->inst_blk_desc.size,
- &p_ucode_info->inst_blk_desc.iova,
+ &iova,
GFP_KERNEL);
if (!p_ucode_info->inst_blk_desc.cpuva) {
nvhost_err(d, "failed to allocate memory\n");
return -ENOMEM;
}
+ p_ucode_info->inst_blk_desc.iova = iova;
p_ucode_info->inst_blk_desc.cpu_pa = gk20a_get_phys_from_iova(d,
p_ucode_info->inst_blk_desc.iova);
u8 *p_buf;
u32 ucode_size;
int err = 0;
+ dma_addr_t iova;
DEFINE_DMA_ATTRS(attrs);
fecs_fw = gk20a_request_firmware(g, GK20A_FECS_UCODE_IMAGE);
dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
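+ /* dma_alloc_attrs() reports the bus address through a dma_addr_t *
+  * out-parameter just like dma_alloc_coherent(), so the same local
+  * handle is used for the attributed allocation as well. */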
p_ucode_info->surface_desc.cpuva = dma_alloc_attrs(d,
p_ucode_info->surface_desc.size,
- &p_ucode_info->surface_desc.iova,
+ &iova,
GFP_KERNEL,
&attrs);
if (!p_ucode_info->surface_desc.cpuva) {
goto clean_up;
}
+ p_ucode_info->surface_desc.iova = iova;
err = gk20a_get_sgtable(d, &p_ucode_info->surface_desc.sgt,
p_ucode_info->surface_desc.cpuva,
p_ucode_info->surface_desc.iova,
struct sg_table *sgt;
DEFINE_DMA_ATTRS(attrs);
int err = 0;
+ dma_addr_t iova;
nvhost_dbg_fn("");
gr_ctx->size = gr->ctx_vars.buffer_total_size;
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
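+ /* With DMA_ATTR_NO_KERNEL_MAPPING the allocation is returned as an
+  * opaque cookie (treated here as pages) rather than a kernel virtual
+  * address, which is why the result lands in gr_ctx->pages and the
+  * sgtable is built from pages below. The DMA handle still comes back
+  * through the dma_addr_t out-parameter, hence the local iova. */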
gr_ctx->pages = dma_alloc_attrs(d, gr_ctx->size,
- &gr_ctx->iova, GFP_KERNEL, &attrs);
+ &iova, GFP_KERNEL, &attrs);
if (!gr_ctx->pages)
return -ENOMEM;
+ gr_ctx->iova = iova;
err = gk20a_get_sgtable_from_pages(d, &sgt, gr_ctx->pages,
gr_ctx->iova, gr_ctx->size);
if (err)
DEFINE_DMA_ATTRS(attrs);
struct sg_table *sgt;
int err = 0;
+ dma_addr_t iova;
nvhost_dbg_fn("");
patch_ctx->size = 128 * sizeof(u32);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
patch_ctx->pages = dma_alloc_attrs(d, patch_ctx->size,
- &patch_ctx->iova, GFP_KERNEL,
+ &iova, GFP_KERNEL,
&attrs);
if (!patch_ctx->pages)
return -ENOMEM;
+ patch_ctx->iova = iova;
err = gk20a_get_sgtable_from_pages(d, &sgt, patch_ctx->pages,
patch_ctx->iova, patch_ctx->size);
if (err)
static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
{
struct device *d = dev_from_gk20a(g);
+ dma_addr_t iova;
gr->mmu_wr_mem_size = gr->mmu_rd_mem_size = 0x1000;
gr->mmu_wr_mem.size = gr->mmu_wr_mem_size;
gr->mmu_wr_mem.cpuva = dma_zalloc_coherent(d, gr->mmu_wr_mem_size,
- &gr->mmu_wr_mem.iova, GFP_KERNEL);
+ &iova, GFP_KERNEL);
if (!gr->mmu_wr_mem.cpuva)
goto err;
+ gr->mmu_wr_mem.iova = iova;
+
gr->mmu_rd_mem.size = gr->mmu_rd_mem_size;
gr->mmu_rd_mem.cpuva = dma_zalloc_coherent(d, gr->mmu_rd_mem_size,
- &gr->mmu_rd_mem.iova, GFP_KERNEL);
+ &iova, GFP_KERNEL);
if (!gr->mmu_rd_mem.cpuva)
goto err_free_wr_mem;
+ gr->mmu_rd_mem.iova = iova;
+
return 0;
err_free_wr_mem:
struct gk20a *g = mm->g;
u32 pgsz = gmmu_page_sizes[pgsz_idx];
u32 i;
+ dma_addr_t iova;
/* allocate the zero page if the va does not already have one */
if (!vm->zero_page_cpuva) {
int err = 0;
vm->zero_page_cpuva = dma_alloc_coherent(&g->dev->dev,
mm->big_page_size,
- &vm->zero_page_iova,
+ &iova,
GFP_KERNEL);
if (!vm->zero_page_cpuva) {
dev_err(&g->dev->dev, "failed to allocate zero page\n");
return -ENOMEM;
}
+ vm->zero_page_iova = iova;
err = gk20a_get_sgtable(&g->dev->dev, &vm->zero_page_sgt,
vm->zero_page_cpuva, vm->zero_page_iova,
mm->big_page_size);
u64 pde_addr;
u32 pde_addr_lo;
u32 pde_addr_hi;
+ dma_addr_t iova;
vm->mm = mm;
/* allocate instance mem for bar1 */
inst_block->size = ram_in_alloc_size_v();
inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
- &inst_block->iova, GFP_KERNEL);
+ &iova, GFP_KERNEL);
if (!inst_block->cpuva) {
nvhost_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
+ inst_block->iova = iova;
inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
if (!inst_block->cpu_pa) {
nvhost_err(d, "%s: failed to get phys address\n", __func__);
u64 pde_addr;
u32 pde_addr_lo;
u32 pde_addr_hi;
+ dma_addr_t iova;
vm->mm = mm;
/* allocate instance mem for pmu */
inst_block->size = GK20A_PMU_INST_SIZE;
inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
- &inst_block->iova, GFP_KERNEL);
+ &iova, GFP_KERNEL);
if (!inst_block->cpuva) {
nvhost_err(d, "%s: memory allocation failed\n", __func__);
err = -ENOMEM;
goto clean_up;
}
+ inst_block->iova = iova;
inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
if (!inst_block->cpu_pa) {
nvhost_err(d, "%s: failed to get phys address\n", __func__);
struct sg_table *sgt_pmu_ucode;
struct sg_table *sgt_seq_buf;
DEFINE_DMA_ATTRS(attrs);
+ dma_addr_t iova;
nvhost_dbg_fn("");
dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
pmu->ucode.cpuva = dma_alloc_attrs(d, GK20A_PMU_UCODE_SIZE_MAX,
- &pmu->ucode.iova,
+ &iova,
GFP_KERNEL,
&attrs);
if (!pmu->ucode.cpuva) {
goto err_release_fw;
}
+ pmu->ucode.iova = iova;
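+ /* The single local handle is reused for the seq_buf allocation below;
+  * the ucode address has already been saved into its descriptor. */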
pmu->seq_buf.cpuva = dma_alloc_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
- &pmu->seq_buf.iova,
+ &iova,
GFP_KERNEL);
if (!pmu->seq_buf.cpuva) {
nvhost_err(d, "failed to allocate memory\n");
goto err_free_pmu_ucode;
}
+ pmu->seq_buf.iova = iova;
init_waitqueue_head(&pmu->pg_wq);
err = gk20a_get_sgtable(d, &sgt_pmu_ucode,
bool status;
u32 size;
struct sg_table *sgt_pg_buf;
+ dma_addr_t iova;
nvhost_dbg_fn("");
if (!pmu->sw_ready) {
pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
- &pmu->pg_buf.iova,
+ &iova,
GFP_KERNEL);
if (!pmu->pg_buf.cpuva) {
nvhost_err(d, "failed to allocate memory\n");
goto err;
}
+ pmu->pg_buf.iova = iova;
pmu->pg_buf.size = size;
err = gk20a_get_sgtable(d, &sgt_pg_buf,