gpu: nvgpu: Use common allocator for context
author Terje Bergstrom <tbergstrom@nvidia.com>
Fri, 20 Mar 2015 19:59:09 +0000 (12:59 -0700)
committer Terje Bergstrom <tbergstrom@nvidia.com>
Thu, 2 Apr 2015 20:38:27 +0000 (13:38 -0700)
Reduce the amount of duplicated code around memory allocation by using
common helpers, and a common data structure for storing the results of
allocations.
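
In short (a sketch of the pattern, not a verbatim hunk; error handling
is elided and helper names are the ones this change introduces):

    /* Before: each call site open-coded the allocation and kept its
     * own pages/iova/size/attrs fields in the descriptor. */
    desc->pages = dma_alloc_attrs(dev, size, &iova, GFP_KERNEL, &attrs);
    err = gk20a_get_sgtable_from_pages(dev, &desc->sgt, desc->pages,
                                       iova, size);

    /* After: one helper fills the shared struct mem_desc, and the
     * free path is the symmetric call. */
    err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
                                size, &desc->mem);
    gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem);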

Bug 1605769

Change-Id: I10c226e2377aa867a5cf11be61d08a9d67206b1d
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/720507

drivers/gpu/nvgpu/gk20a/gr_gk20a.c
drivers/gpu/nvgpu/gk20a/mm_gk20a.c
drivers/gpu/nvgpu/gk20a/mm_gk20a.h
drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
drivers/gpu/nvgpu/gm20b/gr_gm20b.c
drivers/gpu/nvgpu/vgpu/gr_vgpu.c

diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index b08cf5f85332fc262140b38c7c1410741bfd8e88..fa7067a3cfcda3b0c337412780c8ed0d93367bce 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -713,8 +713,8 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c,
 
        gk20a_dbg_fn("");
 
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr)
                return -ENOMEM;
@@ -857,7 +857,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
                (u64_hi32(ch_ctx->global_ctx_buffer_va[PAGEPOOL_VA]) <<
                 (32 - gr_scc_pagepool_base_addr_39_8_align_bits_v()));
 
-       size = gr->global_ctx_buffer[PAGEPOOL].size /
+       size = gr->global_ctx_buffer[PAGEPOOL].mem.size /
                gr_scc_pagepool_total_pages_byte_granularity_v();
 
        if (size == g->ops.gr.pagepool_default_size(g))
@@ -1490,14 +1490,14 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
        if (err)
                goto clean_up;
 
-       gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].pages,
-                       PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].size) >>
+       gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].mem.pages,
+                       PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].mem.size) >>
                        PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!gold_ptr)
                goto clean_up;
 
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr)
                goto clean_up;
@@ -1536,7 +1536,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
                                gk20a_mem_rd32(gold_ptr, i);
        }
 
-       gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+       gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
 
        gr->ctx_vars.golden_image_initialized = true;
 
@@ -1570,8 +1570,8 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
           Flush and invalidate before cpu update. */
        g->ops.mm.l2_flush(g, true);
 
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr)
                return -ENOMEM;
@@ -1610,8 +1610,8 @@ int gr_gk20a_load_golden_ctx_image(struct gk20a *g,
           Flush and invalidate before cpu update. */
        g->ops.mm.l2_flush(g, true);
 
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr)
                return -ENOMEM;
@@ -2207,50 +2207,26 @@ int gr_gk20a_init_ctx_state(struct gk20a *g)
        return 0;
 }
 
-static void gk20a_gr_destroy_ctx_buffer(struct platform_device *pdev,
+static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
                                        struct gr_ctx_buffer_desc *desc)
 {
-       struct device *dev = &pdev->dev;
        if (!desc)
                return;
-       if (desc->sgt) {
-               gk20a_free_sgtable(&desc->sgt);
-               desc->sgt = NULL;
-       }
-       if (desc->pages) {
-               dma_free_attrs(dev, desc->size, desc->pages,
-                      desc->iova, &desc->attrs);
-               desc->pages = NULL;
-       }
+       gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &desc->mem);
 }
 
-static int gk20a_gr_alloc_ctx_buffer(struct platform_device *pdev,
+static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
                                     struct gr_ctx_buffer_desc *desc,
                                     size_t size)
 {
-       struct device *dev = &pdev->dev;
-       DEFINE_DMA_ATTRS(attrs);
-       dma_addr_t iova;
        int err = 0;
 
-       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-
-       desc->pages = dma_alloc_attrs(&pdev->dev, size, &iova,
-                                     GFP_KERNEL, &attrs);
-       if (!desc->pages)
-               return -ENOMEM;
+       err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+                                   size, &desc->mem);
+       if (err)
+               return err;
 
-       desc->iova = iova;
-       desc->size = size;
-       desc->attrs = attrs;
        desc->destroy = gk20a_gr_destroy_ctx_buffer;
-       err = gk20a_get_sgtable_from_pages(&pdev->dev, &desc->sgt, desc->pages,
-                                          desc->iova, desc->size);
-       if (err) {
-               dma_free_attrs(dev, desc->size, desc->pages,
-                              desc->iova, &desc->attrs);
-               memset(desc, 0, sizeof(*desc));
-       }
 
        return err;
 }
@@ -2274,7 +2250,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
        gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
-       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR],
+       err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[CIRCULAR],
                                        cb_buffer_size);
        if (err)
                goto clean_up;
@@ -2286,7 +2262,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
        gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
 
-       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL],
+       err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[PAGEPOOL],
                                        pagepool_buffer_size);
        if (err)
                goto clean_up;
@@ -2298,7 +2274,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
        gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
 
-       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE],
+       err = gk20a_gr_alloc_ctx_buffer(g, &gr->global_ctx_buffer[ATTRIBUTE],
                                        attr_buffer_size);
        if (err)
                goto clean_up;
@@ -2314,7 +2290,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        gk20a_dbg_info("golden_image_size : %d",
                   gr->ctx_vars.golden_image_size);
 
-       err = gk20a_gr_alloc_ctx_buffer(pdev,
+       err = gk20a_gr_alloc_ctx_buffer(g,
                                        &gr->global_ctx_buffer[GOLDEN_CTX],
                                        gr->ctx_vars.golden_image_size);
        if (err)
@@ -2323,7 +2299,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        gk20a_dbg_info("priv_access_map_size : %d",
                   gr->ctx_vars.priv_access_map_size);
 
-       err = gk20a_gr_alloc_ctx_buffer(pdev,
+       err = gk20a_gr_alloc_ctx_buffer(g,
                                        &gr->global_ctx_buffer[PRIV_ACCESS_MAP],
                                        gr->ctx_vars.priv_access_map_size);
 
@@ -2337,7 +2313,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        gk20a_err(dev_from_gk20a(g), "fail");
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
                if (gr->global_ctx_buffer[i].destroy) {
-                       gr->global_ctx_buffer[i].destroy(pdev,
+                       gr->global_ctx_buffer[i].destroy(g,
                                        &gr->global_ctx_buffer[i]);
                }
        }
@@ -2346,7 +2322,6 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
 {
-       struct platform_device *pdev = g->dev;
        struct gr_gk20a *gr = &g->gr;
        DEFINE_DMA_ATTRS(attrs);
        u32 i;
@@ -2354,7 +2329,7 @@ static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
 
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
-               gr->global_ctx_buffer[i].destroy(pdev,
+               gr->global_ctx_buffer[i].destroy(g,
                                &gr->global_ctx_buffer[i]);
        }
 
@@ -2375,12 +2350,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        gk20a_dbg_fn("");
 
        /* Circular Buffer */
-       if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) {
-               sgt = gr->global_ctx_buffer[CIRCULAR].sgt;
-               size = gr->global_ctx_buffer[CIRCULAR].size;
+       if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[CIRCULAR].mem.sgt;
+               size = gr->global_ctx_buffer[CIRCULAR].mem.size;
        } else {
-               sgt = gr->global_ctx_buffer[CIRCULAR_VPR].sgt;
-               size = gr->global_ctx_buffer[CIRCULAR_VPR].size;
+               sgt = gr->global_ctx_buffer[CIRCULAR_VPR].mem.sgt;
+               size = gr->global_ctx_buffer[CIRCULAR_VPR].mem.size;
        }
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2392,12 +2367,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        g_bfr_size[CIRCULAR_VA] = size;
 
        /* Attribute Buffer */
-       if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt == NULL)) {
-               sgt = gr->global_ctx_buffer[ATTRIBUTE].sgt;
-               size = gr->global_ctx_buffer[ATTRIBUTE].size;
+       if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[ATTRIBUTE].mem.sgt;
+               size = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
        } else {
-               sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt;
-               size = gr->global_ctx_buffer[ATTRIBUTE_VPR].size;
+               sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.sgt;
+               size = gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.size;
        }
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2409,12 +2384,12 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        g_bfr_size[ATTRIBUTE_VA] = size;
 
        /* Page Pool */
-       if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].sgt == NULL)) {
-               sgt = gr->global_ctx_buffer[PAGEPOOL].sgt;
-               size = gr->global_ctx_buffer[PAGEPOOL].size;
+       if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[PAGEPOOL].mem.sgt;
+               size = gr->global_ctx_buffer[PAGEPOOL].mem.size;
        } else {
-               sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].sgt;
-               size = gr->global_ctx_buffer[PAGEPOOL_VPR].size;
+               sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.sgt;
+               size = gr->global_ctx_buffer[PAGEPOOL_VPR].mem.size;
        }
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
@@ -2426,8 +2401,8 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        g_bfr_size[PAGEPOOL_VA] = size;
 
        /* Golden Image */
-       sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt;
-       size = gr->global_ctx_buffer[GOLDEN_CTX].size;
+       sgt = gr->global_ctx_buffer[GOLDEN_CTX].mem.sgt;
+       size = gr->global_ctx_buffer[GOLDEN_CTX].mem.size;
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
                                gk20a_mem_flag_none);
        if (!gpu_va)
@@ -2436,8 +2411,8 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        g_bfr_size[GOLDEN_CTX_VA] = size;
 
        /* Priv register Access Map */
-       sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].sgt;
-       size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+       sgt = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.sgt;
+       size = gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
                                gk20a_mem_flag_none);
        if (!gpu_va)
@@ -2452,7 +2427,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                if (g_bfr_va[i]) {
                        gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
-                                        gr->global_ctx_buffer[i].size,
+                                        gr->global_ctx_buffer[i].mem.size,
                                         gk20a_mem_flag_none);
                        g_bfr_va[i] = 0;
                }
@@ -2488,11 +2463,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 {
        struct gr_ctx_desc *gr_ctx = NULL;
        struct gr_gk20a *gr = &g->gr;
-       struct device *d = dev_from_gk20a(g);
-       struct sg_table *sgt;
-       DEFINE_DMA_ATTRS(attrs);
        int err = 0;
-       dma_addr_t iova;
 
        gk20a_dbg_fn("");
 
@@ -2507,40 +2478,24 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
        if (!gr_ctx)
                return -ENOMEM;
 
-       gr_ctx->size = gr->ctx_vars.buffer_total_size;
-       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-       gr_ctx->pages = dma_alloc_attrs(d, gr_ctx->size,
-                               &iova, GFP_KERNEL, &attrs);
-       if (!gr_ctx->pages) {
-               err = -ENOMEM;
-               goto err_free_ctx;
-       }
-
-       gr_ctx->iova = iova;
-       err = gk20a_get_sgtable_from_pages(d, &sgt, gr_ctx->pages,
-                       gr_ctx->iova, gr_ctx->size);
+       err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+                                       gr->ctx_vars.buffer_total_size,
+                                       &gr_ctx->mem);
        if (err)
-               goto err_free;
-
-       gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size,
-                               NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                               gk20a_mem_flag_none);
-       if (!gr_ctx->gpu_va)
-               goto err_free_sgt;
+               goto err_free_ctx;
 
-       gk20a_free_sgtable(&sgt);
+       gr_ctx->mem.gpu_va = gk20a_gmmu_map(vm, &gr_ctx->mem.sgt, gr_ctx->mem.size,
+                                       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                                       gk20a_mem_flag_none);
+       if (!gr_ctx->mem.gpu_va)
+               goto err_free_mem;
 
        *__gr_ctx = gr_ctx;
 
        return 0;
 
- err_free_sgt:
-       gk20a_free_sgtable(&sgt);
- err_free:
-       dma_free_attrs(d, gr_ctx->size,
-               gr_ctx->pages, gr_ctx->iova, &attrs);
-       gr_ctx->pages = NULL;
-       gr_ctx->iova = 0;
+ err_free_mem:
+       gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
  err_free_ctx:
        kfree(gr_ctx);
        gr_ctx = NULL;
@@ -2582,21 +2537,14 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
 void gr_gk20a_free_gr_ctx(struct gk20a *g,
                          struct vm_gk20a *vm, struct gr_ctx_desc *gr_ctx)
 {
-       struct device *d = dev_from_gk20a(g);
-       DEFINE_DMA_ATTRS(attrs);
-
        gk20a_dbg_fn("");
 
-       if (!gr_ctx || !gr_ctx->gpu_va)
+       if (!gr_ctx || !gr_ctx->mem.gpu_va)
                return;
 
-       gk20a_gmmu_unmap(vm, gr_ctx->gpu_va,
-                       gr_ctx->size, gk20a_mem_flag_none);
-       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
-       dma_free_attrs(d, gr_ctx->size,
-               gr_ctx->pages, gr_ctx->iova, &attrs);
-       gr_ctx->pages = NULL;
-       gr_ctx->iova = 0;
+       gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
+               gr_ctx->mem.size, gk20a_mem_flag_none);
+       gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &gr_ctx->mem);
        kfree(gr_ctx);
 }
 
@@ -2801,7 +2749,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a  *c,
        }
 
        /* commit gr ctx buffer */
-       err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+       err = gr_gk20a_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
        if (err) {
                gk20a_err(dev_from_gk20a(g),
                        "fail to commit gr ctx buffer");
@@ -4449,8 +4397,8 @@ static int gr_gk20a_init_access_map(struct gk20a *g)
                DIV_ROUND_UP(gr->ctx_vars.priv_access_map_size,
                             PAGE_SIZE);
 
-       data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].pages,
-                   PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].size) >>
+       data = vmap(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.pages,
+                   PAGE_ALIGN(gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size) >>
                    PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!data) {
                gk20a_err(dev_from_gk20a(g),
@@ -6851,8 +6799,8 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
        /* would have been a variant of gr_gk20a_apply_instmem_overrides */
        /* recoded in-place instead.*/
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr) {
                err = -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2862a0b5c347485713e3160e62841b454e0697f4..81ccbc01c22fcf7264bdf157fcc7f367c6835e73 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1581,7 +1581,7 @@ void gk20a_gmmu_free_attr(struct gk20a *g, enum dma_attr attr,
 {
        struct device *d = dev_from_gk20a(g);
 
-       if (mem->cpu_va) {
+       if (mem->cpu_va || mem->pages) {
                if (attr) {
                        DEFINE_DMA_ATTRS(attrs);
                        dma_set_attr(attr, &attrs);
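
An aside on the widened check above (assuming standard ARM DMA-mapping
behaviour; the hunk itself does not show this): with
DMA_ATTR_NO_KERNEL_MAPPING, dma_alloc_attrs() returns an opaque handle to
the page array instead of a kernel virtual address, so such an allocation
leaves mem->cpu_va NULL and is identified by mem->pages; testing both
fields lets the single free path cover both flavours.

    /* Sketch of the two allocation flavours a mem_desc can carry: */
    mem->cpu_va = dma_alloc_coherent(d, size, &iova, GFP_KERNEL);
    /* ...or, with no kernel mapping, a page-array handle instead: */
    mem->pages = (struct page **)dma_alloc_attrs(d, size, &iova,
                                                 GFP_KERNEL, &attrs);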
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index b510b472d2575a61e4362192cd9ab87de9a0aa87..9165953710f3071d61e8d62341b3f5262ad180f3 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -82,15 +82,10 @@ struct zcull_ctx_desc {
        u32 ctx_sw_mode;
 };
 
-struct gr_ctx_buffer_desc;
-struct platform_device;
+struct gk20a;
 struct gr_ctx_buffer_desc {
-       void (*destroy)(struct platform_device *, struct gr_ctx_buffer_desc *);
-       struct sg_table *sgt;
-       struct page **pages;
-       size_t size;
-       u64 iova;
-       struct dma_attrs attrs;
+       void (*destroy)(struct gk20a *, struct gr_ctx_buffer_desc *);
+       struct mem_desc mem;
        void *priv;
 };
 
@@ -99,10 +94,8 @@ struct gr_ctx_buffer_desc {
 #endif
 
 struct gr_ctx_desc {
-       struct page **pages;
-       u64 iova;
-       size_t size;
-       u64 gpu_va;
+       struct mem_desc mem;
+
        int preempt_mode;
 #ifdef CONFIG_ARCH_TEGRA_18x_SOC
        struct gr_ctx_desc_t18x t18x;
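
struct mem_desc itself is not part of this diff; judging from the fields
the commit touches (cpu_va, pages, sgt, size, gpu_va), it plausibly looks
like the following reconstruction (not the verbatim header, which lives
in mm_gk20a.h):

    struct mem_desc {
            void *cpu_va;         /* kernel VA of a mapped allocation */
            struct page **pages;  /* page array when NO_KERNEL_MAPPING */
            struct sg_table *sgt; /* scatterlist handed to the GMMU */
            size_t size;          /* allocation size in bytes */
            u64 gpu_va;           /* GMMU virtual address once mapped */
            u64 iova;             /* DMA address (assumed; the helper's
                                   * dma_free_attrs() call needs it) */
    };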
diff --git a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
index fea2c774a93a5c6fdd8db430fa59bf7d87c2e6d0..126f963336b58743e72248d0d1bcaa3f5327aead 100644
--- a/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/gk20a/platform_gk20a_tegra.c
@@ -80,19 +80,18 @@ static int gk20a_tegra_secure_page_alloc(struct platform_device *pdev)
        return 0;
 }
 
-static void gk20a_tegra_secure_destroy(struct platform_device *pdev,
+static void gk20a_tegra_secure_destroy(struct gk20a *g,
                                       struct gr_ctx_buffer_desc *desc)
 {
-       if (desc->sgt) {
-               gk20a_free_sgtable(&desc->sgt);
-               desc->sgt = NULL;
-       }
+       DEFINE_DMA_ATTRS(attrs);
 
-       if (desc->iova) {
-               dma_free_attrs(&tegra_vpr_dev, desc->size,
-                       (void *)(uintptr_t)desc->iova,
-                       desc->iova, &desc->attrs);
-               desc->iova = 0;
+       if (desc->mem.sgt) {
+               phys_addr_t pa = sg_phys(desc->mem.sgt->sgl);
+               dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
+                       (void *)(uintptr_t)pa,
+                       pa, &attrs);
+               gk20a_free_sgtable(&desc->mem.sgt);
+               desc->mem.sgt = NULL;
        }
 }
 
@@ -116,9 +115,7 @@ static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
        if (dma_mapping_error(&tegra_vpr_dev, iova))
                return -ENOMEM;
 
-       desc->iova = iova;
-       desc->size = size;
-       desc->attrs = attrs;
+       desc->mem.size = size;
        desc->destroy = gk20a_tegra_secure_destroy;
 
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
@@ -136,16 +133,15 @@ static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
        /* This bypasses SMMU for VPR during gmmu_map. */
        sg_dma_address(sgt->sgl) = 0;
 
-       desc->sgt = sgt;
+       desc->mem.sgt = sgt;
 
        return err;
 
 fail_sgt:
        kfree(sgt);
 fail:
-       dma_free_attrs(&tegra_vpr_dev, desc->size,
-                       (void *)(uintptr_t)&desc->iova,
-                       desc->iova, &desc->attrs);
+       dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
+                       (void *)(uintptr_t)iova, iova, &attrs);
        return err;
 }
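
A note on the secure (VPR) destroy path above: the old descriptor cached
the iova, but for these buffers the shared mem_desc only keeps size and
sgt, so the hook recovers the address from the scatterlist at free time.
This relies on the VPR allocation being physically contiguous (a single
sgl entry), which is an inference from the alloc path rather than
something this hunk states:

    /* Sketch: for a single-entry scatterlist, sg_phys() of the first
     * entry recovers the address dma_alloc_attrs() handed back. */
    phys_addr_t pa = sg_phys(desc->mem.sgt->sgl);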
 
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 771b9f42d2829480c8ef828cc4bce3c31b321605..9dd4e67e5e8b467267ffef2d74647a46fc212e92 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -978,8 +978,8 @@ static int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
        if (!ch_ctx || !ch_ctx->gr_ctx || c->vpr)
                return -EINVAL;
 
-       ctx_ptr = vmap(ch_ctx->gr_ctx->pages,
-                       PAGE_ALIGN(ch_ctx->gr_ctx->size) >> PAGE_SHIFT,
+       ctx_ptr = vmap(ch_ctx->gr_ctx->mem.pages,
+                       PAGE_ALIGN(ch_ctx->gr_ctx->mem.size) >> PAGE_SHIFT,
                        0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!ctx_ptr)
                return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 60880f6d8c000add4967624d971cf2334d2098b8..fd8bb81bf9ce4d743cc930f85951cf257f0bef67 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -107,17 +107,17 @@ static int vgpu_gr_alloc_global_ctx_buffers(struct gk20a *g)
        attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
 
        gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
-       gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
+       gr->global_ctx_buffer[CIRCULAR].mem.size = cb_buffer_size;
 
        gk20a_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
-       gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
+       gr->global_ctx_buffer[PAGEPOOL].mem.size = pagepool_buffer_size;
 
        gk20a_dbg_info("attr_buffer_size : %d", attr_buffer_size);
-       gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
+       gr->global_ctx_buffer[ATTRIBUTE].mem.size = attr_buffer_size;
 
        gk20a_dbg_info("priv access map size : %d",
                gr->ctx_vars.priv_access_map_size);
-       gr->global_ctx_buffer[PRIV_ACCESS_MAP].size =
+       gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size =
                gr->ctx_vars.priv_access_map_size;
 
        return 0;
@@ -143,38 +143,38 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 
        /* Circular Buffer */
        gpu_va = gk20a_vm_alloc_va(ch_vm,
-                               gr->global_ctx_buffer[CIRCULAR].size, 0);
+                               gr->global_ctx_buffer[CIRCULAR].mem.size, 0);
 
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[CIRCULAR_VA] = gpu_va;
-       g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].size;
+       g_bfr_size[CIRCULAR_VA] = gr->global_ctx_buffer[CIRCULAR].mem.size;
 
        /* Attribute Buffer */
        gpu_va = gk20a_vm_alloc_va(ch_vm,
-                               gr->global_ctx_buffer[ATTRIBUTE].size, 0);
+                               gr->global_ctx_buffer[ATTRIBUTE].mem.size, 0);
 
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[ATTRIBUTE_VA] = gpu_va;
-       g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].size;
+       g_bfr_size[ATTRIBUTE_VA] = gr->global_ctx_buffer[ATTRIBUTE].mem.size;
 
        /* Page Pool */
        gpu_va = gk20a_vm_alloc_va(ch_vm,
-                               gr->global_ctx_buffer[PAGEPOOL].size, 0);
+                       gr->global_ctx_buffer[PAGEPOOL].mem.size, 0);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[PAGEPOOL_VA] = gpu_va;
-       g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].size;
+       g_bfr_size[PAGEPOOL_VA] = gr->global_ctx_buffer[PAGEPOOL].mem.size;
 
        /* Priv register Access Map */
        gpu_va = gk20a_vm_alloc_va(ch_vm,
-                               gr->global_ctx_buffer[PRIV_ACCESS_MAP].size, 0);
+                       gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size, 0);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
        g_bfr_size[PRIV_ACCESS_MAP_VA] =
-               gr->global_ctx_buffer[PRIV_ACCESS_MAP].size;
+               gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size;
 
        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_MAP_GR_GLOBAL_CTX;
        msg.handle = platform->virt_handle;
@@ -257,10 +257,10 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
        if (!gr_ctx)
                return -ENOMEM;
 
-       gr_ctx->size = gr->ctx_vars.buffer_total_size;
-       gr_ctx->gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->size, 0);
+       gr_ctx->mem.size = gr->ctx_vars.buffer_total_size;
+       gr_ctx->mem.gpu_va = gk20a_vm_alloc_va(ch_vm, gr_ctx->mem.size, 0);
 
-       if (!gr_ctx->gpu_va) {
+       if (!gr_ctx->mem.gpu_va) {
                kfree(gr_ctx);
                return -ENOMEM;
        }
@@ -268,13 +268,14 @@ static int vgpu_gr_alloc_channel_gr_ctx(struct gk20a *g,
        msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_GR_CTX;
        msg.handle = platform->virt_handle;
        p->handle = c->virt_ctx;
-       p->gr_ctx_va = gr_ctx->gpu_va;
+       p->gr_ctx_va = gr_ctx->mem.gpu_va;
        p->class_num = c->obj_class;
        err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 
        if (err || msg.ret) {
                kfree(gr_ctx);
-               gk20a_vm_free_va(ch_vm, gr_ctx->gpu_va, gr_ctx->size, 0);
+               gk20a_vm_free_va(ch_vm, gr_ctx->mem.gpu_va,
+                                gr_ctx->mem.size, 0);
                err = -ENOMEM;
        } else
                c->ch_ctx.gr_ctx = gr_ctx;
@@ -290,7 +291,7 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
 
        gk20a_dbg_fn("");
 
-       if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->gpu_va) {
+       if (ch_ctx->gr_ctx && ch_ctx->gr_ctx->mem.gpu_va) {
                struct tegra_vgpu_cmd_msg msg;
                struct tegra_vgpu_gr_ctx_params *p = &msg.params.gr_ctx;
                int err;
@@ -301,9 +302,9 @@ static void vgpu_gr_free_channel_gr_ctx(struct channel_gk20a *c)
                err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
                WARN_ON(err || msg.ret);
 
-               gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->gpu_va,
-                               ch_ctx->gr_ctx->size, 0);
-               ch_ctx->gr_ctx->gpu_va = 0;
+               gk20a_vm_free_va(ch_vm, ch_ctx->gr_ctx->mem.gpu_va,
+                               ch_ctx->gr_ctx->mem.size, 0);
+               ch_ctx->gr_ctx->mem.gpu_va = 0;
                kfree(ch_ctx->gr_ctx);
        }
 }
@@ -429,7 +430,7 @@ static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a  *c,
        }
 
        /* commit gr ctx buffer */
-       err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->gpu_va);
+       err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
        if (err) {
                gk20a_err(dev_from_gk20a(g),
                        "fail to commit gr ctx buffer");