video: tegra: host: Fix dma_addr_t vs u64 errors
author    Terje Bergstrom <tbergstrom@nvidia.com>  Mon, 3 Feb 2014 17:53:55 +0000 (19:53 +0200)
committer Terje Bergstrom <tbergstrom@nvidia.com>  Wed, 5 Feb 2014 06:32:01 +0000 (22:32 -0800)
dma_alloc_*() requires a dma_addr_t pointer, but we always pass it a
pointer to a u64 field.

Change-Id: If48220b4e34dd91bd92171f2e57934d0b6dd611d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/362951

drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/pmu_gk20a.c
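
The fix, repeated across the five files below, is mechanical: dma_alloc_coherent() and dma_alloc_attrs() write the handle through a dma_addr_t pointer, so each call site now allocates into a local dma_addr_t and copies the result into the wider u64 descriptor field afterwards; debug prints additionally get an explicit (u64) cast to match the %llx format. A minimal sketch of the pattern, not driver code; struct mem_desc and alloc_mem() are hypothetical stand-ins for descriptors such as ch->inst_block, f->userd and pmu->ucode:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct mem_desc {
	void *cpuva;
	u64 iova;	/* stays u64 in the driver structs */
	size_t size;
};

static int alloc_mem(struct device *d, struct mem_desc *mem, size_t size)
{
	dma_addr_t iova;	/* matches dma_alloc_coherent()'s handle parameter */

	mem->cpuva = dma_alloc_coherent(d, size, &iova, GFP_KERNEL);
	if (!mem->cpuva)
		return -ENOMEM;

	mem->iova = iova;	/* widen to u64 only after the call */
	mem->size = size;
	return 0;
}

Without the temporary, passing &mem->iova (a u64 *) only type-checks when dma_addr_t happens to be 64 bits wide; on configurations with a 32-bit dma_addr_t the calls fail to build, which is the error the subject line refers to.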

diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.c b/drivers/video/tegra/host/gk20a/channel_gk20a.c
index 849a12d7a586b7df9282462ff5f78581acc590ac..9a5a938bfa864c5b531973d52c358bffd8dadd46 100644
--- a/drivers/video/tegra/host/gk20a/channel_gk20a.c
+++ b/drivers/video/tegra/host/gk20a/channel_gk20a.c
@@ -151,7 +151,7 @@ static int channel_gk20a_commit_userd(struct channel_gk20a *c)
        addr_hi = u64_hi32(c->userd_iova);
 
        nvhost_dbg_info("channel %d : set ramfc userd 0x%16llx",
-               c->hw_chid, c->userd_iova);
+               c->hw_chid, (u64)c->userd_iova);
 
        mem_wr32(inst_ptr, ram_in_ramfc_w() + ram_fc_userd_w(),
                 pbdma_userd_target_vid_mem_f() |
@@ -360,13 +360,14 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
 {
        struct device *d = dev_from_gk20a(g);
        int err = 0;
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
        ch->inst_block.size = ram_in_alloc_size_v();
        ch->inst_block.cpuva = dma_alloc_coherent(d,
                                        ch->inst_block.size,
-                                       &ch->inst_block.iova,
+                                       &iova,
                                        GFP_KERNEL);
        if (!ch->inst_block.cpuva) {
                nvhost_err(d, "%s: memory allocation failed\n", __func__);
@@ -374,6 +375,7 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
                goto clean_up;
        }
 
+       ch->inst_block.iova = iova;
        ch->inst_block.cpu_pa = gk20a_get_phys_from_iova(d,
                                                        ch->inst_block.iova);
        if (!ch->inst_block.cpu_pa) {
@@ -383,7 +385,7 @@ static int channel_gk20a_alloc_inst(struct gk20a *g,
        }
 
        nvhost_dbg_info("channel %d inst block physical addr: 0x%16llx",
-               ch->hw_chid, ch->inst_block.cpu_pa);
+               ch->hw_chid, (u64)ch->inst_block.cpu_pa);
 
        nvhost_dbg_fn("done");
        return 0;
@@ -872,6 +874,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        u32 i = 0, size;
        int err = 0;
        struct sg_table *sgt;
+       dma_addr_t iova;
 
        /* Kernel can insert gpfifos before and after user gpfifos.
           Before user gpfifos, kernel inserts fence_wait, which takes
@@ -886,7 +889,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
                c->gpfifo.entry_num * 2 * 10 * sizeof(u32) / 3);
 
        q->mem.base_cpuva = dma_alloc_coherent(d, size,
-                                       &q->mem.base_iova,
+                                       &iova,
                                        GFP_KERNEL);
        if (!q->mem.base_cpuva) {
                nvhost_err(d, "%s: memory allocation failed\n", __func__);
@@ -894,6 +897,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
                goto clean_up;
        }
 
+       q->mem.base_iova = iova;
        q->mem.size = size;
 
        err = gk20a_get_sgtable(d, &sgt,
@@ -1148,6 +1152,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
        u32 gpfifo_size;
        int err = 0;
        struct sg_table *sgt;
+       dma_addr_t iova;
 
        /* Kernel can insert one extra gpfifo entry before user submitted gpfifos
           and another one after, for internal usage. Triple the requested size. */
@@ -1184,7 +1189,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
        c->gpfifo.size = gpfifo_size * sizeof(struct gpfifo);
        c->gpfifo.cpu_va = (struct gpfifo *)dma_alloc_coherent(d,
                                                c->gpfifo.size,
-                                               &c->gpfifo.iova,
+                                               &iova,
                                                GFP_KERNEL);
        if (!c->gpfifo.cpu_va) {
                nvhost_err(d, "%s: memory allocation failed\n", __func__);
@@ -1192,6 +1197,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
                goto clean_up;
        }
 
+       c->gpfifo.iova = iova;
        c->gpfifo.entry_num = gpfifo_size;
 
        c->gpfifo.get = c->gpfifo.put = 0;
diff --git a/drivers/video/tegra/host/gk20a/fifo_gk20a.c b/drivers/video/tegra/host/gk20a/fifo_gk20a.c
index 691750b2d4f16444b36dd6e72026b0d803a4c261..5810507ab85fb68e8a98a3945b33b2618dcd7d33 100644
--- a/drivers/video/tegra/host/gk20a/fifo_gk20a.c
+++ b/drivers/video/tegra/host/gk20a/fifo_gk20a.c
@@ -318,15 +318,18 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 
        runlist_size  = ram_rl_entry_size_v() * f->num_channels;
        for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
+               dma_addr_t iova;
+
                runlist->mem[i].cpuva =
                        dma_alloc_coherent(d,
                                        runlist_size,
-                                       &runlist->mem[i].iova,
+                                       &iova,
                                        GFP_KERNEL);
                if (!runlist->mem[i].cpuva) {
                        dev_err(d, "memory allocation failed\n");
                        goto clean_up_runlist;
                }
+               runlist->mem[i].iova = iova;
                runlist->mem[i].size = runlist_size;
        }
        mutex_init(&runlist->mutex);
@@ -475,6 +478,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
        struct fifo_gk20a *f = &g->fifo;
        struct device *d = dev_from_gk20a(g);
        int chid, i, err = 0;
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
@@ -499,13 +503,14 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
        f->userd.cpuva = dma_alloc_coherent(d,
                                        f->userd_total_size,
-                                       &f->userd.iova,
+                                       &iova,
                                        GFP_KERNEL);
        if (!f->userd.cpuva) {
                dev_err(d, "memory allocation failed\n");
                goto clean_up;
        }
 
+       f->userd.iova = iova;
        err = gk20a_get_sgtable(d, &f->userd.sgt,
                                f->userd.cpuva, f->userd.iova,
                                f->userd_total_size);
diff --git a/drivers/video/tegra/host/gk20a/gr_gk20a.c b/drivers/video/tegra/host/gk20a/gr_gk20a.c
index 14f49232b6c4d58a78a571b20896ad7c4faccf04..52827337d1599da62d69077008d95f6c6a783f80 100644
--- a/drivers/video/tegra/host/gk20a/gr_gk20a.c
+++ b/drivers/video/tegra/host/gk20a/gr_gk20a.c
@@ -1721,18 +1721,20 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
        u32 pde_addr_lo;
        u32 pde_addr_hi;
        u64 pde_addr;
+       dma_addr_t iova;
 
        /* Alloc mem of inst block */
        p_ucode_info->inst_blk_desc.size = ram_in_alloc_size_v();
        p_ucode_info->inst_blk_desc.cpuva = dma_alloc_coherent(d,
                                        p_ucode_info->inst_blk_desc.size,
-                                       &p_ucode_info->inst_blk_desc.iova,
+                                       &iova,
                                        GFP_KERNEL);
        if (!p_ucode_info->inst_blk_desc.cpuva) {
                nvhost_err(d, "failed to allocate memory\n");
                return -ENOMEM;
        }
 
+       p_ucode_info->inst_blk_desc.iova = iova;
        p_ucode_info->inst_blk_desc.cpu_pa = gk20a_get_phys_from_iova(d,
                                        p_ucode_info->inst_blk_desc.iova);
 
@@ -1816,6 +1818,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
        u8 *p_buf;
        u32 ucode_size;
        int err = 0;
+       dma_addr_t iova;
        DEFINE_DMA_ATTRS(attrs);
 
        fecs_fw = gk20a_request_firmware(g, GK20A_FECS_UCODE_IMAGE);
@@ -1853,7 +1856,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
        dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
        p_ucode_info->surface_desc.cpuva = dma_alloc_attrs(d,
                                        p_ucode_info->surface_desc.size,
-                                       &p_ucode_info->surface_desc.iova,
+                                       &iova,
                                        GFP_KERNEL,
                                        &attrs);
        if (!p_ucode_info->surface_desc.cpuva) {
@@ -1862,6 +1865,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
                goto clean_up;
        }
 
+       p_ucode_info->surface_desc.iova = iova;
        err = gk20a_get_sgtable(d, &p_ucode_info->surface_desc.sgt,
                                p_ucode_info->surface_desc.cpuva,
                                p_ucode_info->surface_desc.iova,
@@ -2430,6 +2434,7 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
        struct sg_table *sgt;
        DEFINE_DMA_ATTRS(attrs);
        int err = 0;
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
@@ -2443,10 +2448,11 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
        gr_ctx->size = gr->ctx_vars.buffer_total_size;
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        gr_ctx->pages = dma_alloc_attrs(d, gr_ctx->size,
-                               &gr_ctx->iova, GFP_KERNEL, &attrs);
+                               &iova, GFP_KERNEL, &attrs);
        if (!gr_ctx->pages)
                return -ENOMEM;
 
+       gr_ctx->iova = iova;
        err = gk20a_get_sgtable_from_pages(d, &sgt, gr_ctx->pages,
                        gr_ctx->iova, gr_ctx->size);
        if (err)
@@ -2501,17 +2507,19 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
        DEFINE_DMA_ATTRS(attrs);
        struct sg_table *sgt;
        int err = 0;
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
        patch_ctx->size = 128 * sizeof(u32);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        patch_ctx->pages = dma_alloc_attrs(d, patch_ctx->size,
-                               &patch_ctx->iova, GFP_KERNEL,
+                               &iova, GFP_KERNEL,
                                &attrs);
        if (!patch_ctx->pages)
                return -ENOMEM;
 
+       patch_ctx->iova = iova;
        err = gk20a_get_sgtable_from_pages(d, &sgt, patch_ctx->pages,
                        patch_ctx->iova, patch_ctx->size);
        if (err)
@@ -2978,20 +2986,25 @@ clean_up:
 static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
        struct device *d = dev_from_gk20a(g);
+       dma_addr_t iova;
 
        gr->mmu_wr_mem_size = gr->mmu_rd_mem_size = 0x1000;
 
        gr->mmu_wr_mem.size = gr->mmu_wr_mem_size;
        gr->mmu_wr_mem.cpuva = dma_zalloc_coherent(d, gr->mmu_wr_mem_size,
-                                       &gr->mmu_wr_mem.iova, GFP_KERNEL);
+                                       &iova, GFP_KERNEL);
        if (!gr->mmu_wr_mem.cpuva)
                goto err;
 
+       gr->mmu_wr_mem.iova = iova;
+
        gr->mmu_rd_mem.size = gr->mmu_rd_mem_size;
        gr->mmu_rd_mem.cpuva = dma_zalloc_coherent(d, gr->mmu_rd_mem_size,
-                                       &gr->mmu_rd_mem.iova, GFP_KERNEL);
+                                       &iova, GFP_KERNEL);
        if (!gr->mmu_rd_mem.cpuva)
                goto err_free_wr_mem;
+
+       gr->mmu_rd_mem.iova = iova;
        return 0;
 
  err_free_wr_mem:
index 98a00eee2903929e4774476560ec6e372d5346f3..40d87e8b3d1b17e5e29018a7e0731cb9157a89ac 100644 (file)
@@ -1755,19 +1755,21 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
        struct gk20a *g = mm->g;
        u32 pgsz = gmmu_page_sizes[pgsz_idx];
        u32 i;
+       dma_addr_t iova;
 
        /* allocate the zero page if the va does not already have one */
        if (!vm->zero_page_cpuva) {
                int err = 0;
                vm->zero_page_cpuva = dma_alloc_coherent(&g->dev->dev,
                                                         mm->big_page_size,
-                                                        &vm->zero_page_iova,
+                                                        &iova,
                                                         GFP_KERNEL);
                if (!vm->zero_page_cpuva) {
                        dev_err(&g->dev->dev, "failed to allocate zero page\n");
                        return -ENOMEM;
                }
 
+               vm->zero_page_iova = iova;
                err = gk20a_get_sgtable(&g->dev->dev, &vm->zero_page_sgt,
                                        vm->zero_page_cpuva, vm->zero_page_iova,
                                        mm->big_page_size);
@@ -2316,6 +2318,7 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        u64 pde_addr;
        u32 pde_addr_lo;
        u32 pde_addr_hi;
+       dma_addr_t iova;
 
        vm->mm = mm;
 
@@ -2380,13 +2383,14 @@ int gk20a_init_bar1_vm(struct mm_gk20a *mm)
        /* allocate instance mem for bar1 */
        inst_block->size = ram_in_alloc_size_v();
        inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
-                               &inst_block->iova, GFP_KERNEL);
+                               &iova, GFP_KERNEL);
        if (!inst_block->cpuva) {
                nvhost_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
 
+       inst_block->iova = iova;
        inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
        if (!inst_block->cpu_pa) {
                nvhost_err(d, "%s: failed to get phys address\n", __func__);
@@ -2458,6 +2462,7 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        u64 pde_addr;
        u32 pde_addr_lo;
        u32 pde_addr_hi;
+       dma_addr_t iova;
 
        vm->mm = mm;
 
@@ -2520,13 +2525,14 @@ int gk20a_init_pmu_vm(struct mm_gk20a *mm)
        /* allocate instance mem for pmu */
        inst_block->size = GK20A_PMU_INST_SIZE;
        inst_block->cpuva = dma_alloc_coherent(d, inst_block->size,
-                               &inst_block->iova, GFP_KERNEL);
+                               &iova, GFP_KERNEL);
        if (!inst_block->cpuva) {
                nvhost_err(d, "%s: memory allocation failed\n", __func__);
                err = -ENOMEM;
                goto clean_up;
        }
 
+       inst_block->iova = iova;
        inst_block->cpu_pa = gk20a_get_phys_from_iova(d, inst_block->iova);
        if (!inst_block->cpu_pa) {
                nvhost_err(d, "%s: failed to get phys address\n", __func__);
diff --git a/drivers/video/tegra/host/gk20a/pmu_gk20a.c b/drivers/video/tegra/host/gk20a/pmu_gk20a.c
index 8ac138fc9bc6f4a7bc2209b2e82aace5b69ba5ca..8938fc98a340bfceabea5d89057aff10fd39979d 100644
--- a/drivers/video/tegra/host/gk20a/pmu_gk20a.c
+++ b/drivers/video/tegra/host/gk20a/pmu_gk20a.c
@@ -976,6 +976,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        struct sg_table *sgt_pmu_ucode;
        struct sg_table *sgt_seq_buf;
        DEFINE_DMA_ATTRS(attrs);
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
@@ -1039,7 +1040,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
        dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
        pmu->ucode.cpuva = dma_alloc_attrs(d, GK20A_PMU_UCODE_SIZE_MAX,
-                                       &pmu->ucode.iova,
+                                       &iova,
                                        GFP_KERNEL,
                                        &attrs);
        if (!pmu->ucode.cpuva) {
@@ -1048,8 +1049,9 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                goto err_release_fw;
        }
 
+       pmu->ucode.iova = iova;
        pmu->seq_buf.cpuva = dma_alloc_coherent(d, GK20A_PMU_SEQ_BUF_SIZE,
-                                       &pmu->seq_buf.iova,
+                                       &iova,
                                        GFP_KERNEL);
        if (!pmu->seq_buf.cpuva) {
                nvhost_err(d, "failed to allocate memory\n");
@@ -1057,6 +1059,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
                goto err_free_pmu_ucode;
        }
 
+       pmu->seq_buf.iova = iova;
        init_waitqueue_head(&pmu->pg_wq);
 
        err = gk20a_get_sgtable(d, &sgt_pmu_ucode,
@@ -1241,6 +1244,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
        bool status;
        u32 size;
        struct sg_table *sgt_pg_buf;
+       dma_addr_t iova;
 
        nvhost_dbg_fn("");
 
@@ -1257,7 +1261,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
 
        if (!pmu->sw_ready) {
                pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
-                                               &pmu->pg_buf.iova,
+                                               &iova,
                                                GFP_KERNEL);
                if (!pmu->pg_buf.cpuva) {
                        nvhost_err(d, "failed to allocate memory\n");
@@ -1265,6 +1269,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                        goto err;
                }
 
+               pmu->pg_buf.iova = iova;
                pmu->pg_buf.size = size;
 
                err = gk20a_get_sgtable(d, &sgt_pg_buf,