rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: gk20a: GR uses dma_mapping
author Arto Merilainen <amerilainen@nvidia.com>
Fri, 21 Feb 2014 15:18:13 +0000 (17:18 +0200)
committer Terje Bergstrom <tbergstrom@nvidia.com>
Thu, 27 Feb 2014 06:36:40 +0000 (22:36 -0800)
This patch reworks gr_gk20a.c to use the DMA mapping API for all of its
allocations. VPR allocations are abstracted behind a platform-specific
.secure_alloc() callback. If this callback is not specified, the kernel
falls back to non-secure allocations, as before.
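
In short, the allocation path only attempts a VPR copy when the platform
provides the callback, and the mapping path falls back to the non-secure
buffer when no VPR copy exists. A condensed sketch of that pattern (not the
literal driver code; see gr_gk20a_alloc_global_ctx_buffers() and
gr_gk20a_map_global_ctx_buffers() in the diff below):

	/* Allocation: the VPR copy is optional and platform-provided. */
	if (platform->secure_alloc)
		platform->secure_alloc(pdev,
				       &gr->global_ctx_buffer[CIRCULAR_VPR],
				       cb_buffer_size);

	/* Mapping: use the VPR copy only if the channel asked for VPR and
	 * the platform actually managed to allocate one. */
	if (!c->vpr || !gr->global_ctx_buffer[CIRCULAR_VPR].sgt) {
		sgt  = gr->global_ctx_buffer[CIRCULAR].sgt;
		size = gr->global_ctx_buffer[CIRCULAR].size;
	} else {
		sgt  = gr->global_ctx_buffer[CIRCULAR_VPR].sgt;
		size = gr->global_ctx_buffer[CIRCULAR_VPR].size;
	}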

Bug 1450489

Change-Id: I8109fa454239d2f15c10c4c6a1bf732a42b0e668
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/372964
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/gr_gk20a.h
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.h
drivers/video/tegra/host/gk20a/platform_gk20a.h
drivers/video/tegra/host/gk20a/platform_gk20a_tegra.c

drivers/video/tegra/host/gk20a/gr_gk20a.c
index 4631492d2b40c634104e37fb81d70a949dd4f1fe..9d828675f7e97deac06747770467cfb9321f9db1 100644
@@ -1542,7 +1542,9 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
        if (err)
                goto clean_up;
 
-       gold_ptr = dma_buf_vmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
+       gold_ptr = vmap(gr->global_ctx_buffer[GOLDEN_CTX].pages,
+                       PAGE_ALIGN(gr->global_ctx_buffer[GOLDEN_CTX].size) >>
+                       PAGE_SHIFT, 0, pgprot_dmacoherent(PAGE_KERNEL));
        if (!gold_ptr)
                goto clean_up;
 
@@ -1605,8 +1607,7 @@ clean_up:
                nvhost_dbg_fn("done");
 
        if (gold_ptr)
-               dma_buf_vunmap(gr->global_ctx_buffer[GOLDEN_CTX].ref,
-                              gold_ptr);
+               vunmap(gold_ptr);
        if (ctx_ptr)
                vunmap(ctx_ptr);
 
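
Because the golden context buffer is now allocated with
DMA_ATTR_NO_KERNEL_MAPPING (see gk20a_gr_alloc_ctx_buffer() below), it has no
permanent kernel mapping; CPU access goes through a temporary vmap() of the
backing pages, exactly as the hunk above does. The general pattern, sketched
for a hypothetical descriptor desc (on this ARM kernel the dma_alloc_attrs()
cookie is the struct page ** array stored in desc->pages):

	void *cpu_va;

	/* Map the pages into kernel virtual space only while the CPU
	 * needs to touch the buffer. */
	cpu_va = vmap(desc->pages,
		      PAGE_ALIGN(desc->size) >> PAGE_SHIFT,
		      0, pgprot_dmacoherent(PAGE_KERNEL));
	if (!cpu_va)
		return -ENOMEM;

	/* ... read or write the buffer through cpu_va ... */

	vunmap(cpu_va);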
@@ -2180,11 +2181,52 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
        return 0;
 }
 
+static void gk20a_gr_destroy_ctx_buffer(struct platform_device *pdev,
+                                       struct gr_ctx_buffer_desc *desc)
+{
+       struct device *dev = &pdev->dev;
+       gk20a_free_sgtable(&desc->sgt);
+       dma_free_attrs(dev, desc->size, desc->pages,
+                      desc->iova, &desc->attrs);
+}
+
+static int gk20a_gr_alloc_ctx_buffer(struct platform_device *pdev,
+                                    struct gr_ctx_buffer_desc *desc,
+                                    size_t size)
+{
+       struct device *dev = &pdev->dev;
+       DEFINE_DMA_ATTRS(attrs);
+       dma_addr_t iova;
+       int err = 0;
+
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+       desc->pages = dma_alloc_attrs(&pdev->dev, size, &iova,
+                                     GFP_KERNEL, &attrs);
+       if (!desc->pages)
+               return -ENOMEM;
+
+       desc->iova = iova;
+       desc->size = size;
+       desc->attrs = attrs;
+       desc->destroy = gk20a_gr_destroy_ctx_buffer;
+       err = gk20a_get_sgtable_from_pages(&pdev->dev, &desc->sgt, desc->pages,
+                                          desc->iova, desc->size);
+       if (err) {
+               dma_free_attrs(dev, desc->size, desc->pages,
+                              desc->iova, &desc->attrs);
+               memset(desc, 0, sizeof(*desc));
+       }
+
+       return err;
+}
+
 static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 {
+       struct gk20a_platform *platform = platform_get_drvdata(g->dev);
        struct gr_gk20a *gr = &g->gr;
-       struct dma_buf *dmabuf;
-       int i, attr_buffer_size;
+       int i, attr_buffer_size, err;
+       struct platform_device *pdev = g->dev;
 
        u32 cb_buffer_size = gr->bundle_cb_default_size *
                gr_scc_bundle_cb_size_div_256b_byte_granularity_v();
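
The gk20a_gr_alloc_ctx_buffer()/gk20a_gr_destroy_ctx_buffer() pair above ties
allocation and teardown together: every gr_ctx_buffer_desc records its own
destroy callback, so callers (including the error path at the end of this
function) can free a buffer without knowing whether it came from
dma_alloc_attrs() or from platform->secure_alloc(). A minimal usage sketch:

	struct gr_ctx_buffer_desc desc = {0};
	int err;

	err = gk20a_gr_alloc_ctx_buffer(pdev, &desc, SZ_64K);
	if (err)
		return err;

	/* ... hand desc.sgt / desc.iova to the GPU MMU code ... */

	/* Teardown goes through the recorded callback. */
	if (desc.destroy)
		desc.destroy(pdev, &desc);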
@@ -2198,98 +2240,58 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
        nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(cb_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   0);
-       if (IS_ERR(dmabuf))
+       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[CIRCULAR],
+                                       cb_buffer_size);
+       if (err)
                goto clean_up;
 
-       gr->global_ctx_buffer[CIRCULAR].ref = dmabuf;
-       gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
-
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(cb_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(dmabuf)) {
-               gr->global_ctx_buffer[CIRCULAR_VPR].ref = dmabuf;
-               gr->global_ctx_buffer[CIRCULAR_VPR].size = cb_buffer_size;
-       }
+       if (platform->secure_alloc)
+               platform->secure_alloc(pdev,
+                                      &gr->global_ctx_buffer[CIRCULAR_VPR],
+                                      cb_buffer_size);
 
        nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
 
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(pagepool_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   0);
-       if (IS_ERR(dmabuf))
+       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[PAGEPOOL],
+                                       pagepool_buffer_size);
+       if (err)
                goto clean_up;
 
-       gr->global_ctx_buffer[PAGEPOOL].ref = dmabuf;
-       gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
-
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(pagepool_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(dmabuf)) {
-               gr->global_ctx_buffer[PAGEPOOL_VPR].ref = dmabuf;
-               gr->global_ctx_buffer[PAGEPOOL_VPR].size = pagepool_buffer_size;
-       }
+       if (platform->secure_alloc)
+               platform->secure_alloc(pdev,
+                                      &gr->global_ctx_buffer[PAGEPOOL_VPR],
+                                      pagepool_buffer_size);
 
        nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
 
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(attr_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   0);
-       if (IS_ERR(dmabuf))
+       err = gk20a_gr_alloc_ctx_buffer(pdev, &gr->global_ctx_buffer[ATTRIBUTE],
+                                       attr_buffer_size);
+       if (err)
                goto clean_up;
 
-       gr->global_ctx_buffer[ATTRIBUTE].ref = dmabuf;
-       gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
-
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(attr_buffer_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(dmabuf)) {
-               gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = dmabuf;
-               gr->global_ctx_buffer[ATTRIBUTE_VPR].size = attr_buffer_size;
-       }
+       if (platform->secure_alloc)
+               platform->secure_alloc(pdev,
+                                      &gr->global_ctx_buffer[ATTRIBUTE_VPR],
+                                      attr_buffer_size);
 
        nvhost_dbg_info("golden_image_size : %d",
                   gr->ctx_vars.golden_image_size);
 
-       dmabuf = (struct dma_buf *)
-               nvhost_memmgr_alloc(gr->ctx_vars.golden_image_size,
-                                   DEFAULT_ALLOC_ALIGNMENT,
-                                   DEFAULT_ALLOC_FLAGS,
-                                   0);
-       if (IS_ERR(dmabuf))
+       err = gk20a_gr_alloc_ctx_buffer(pdev,
+                                       &gr->global_ctx_buffer[GOLDEN_CTX],
+                                       gr->ctx_vars.golden_image_size);
+       if (err)
                goto clean_up;
 
-       gr->global_ctx_buffer[GOLDEN_CTX].ref = dmabuf;
-       gr->global_ctx_buffer[GOLDEN_CTX].size =
-               gr->ctx_vars.golden_image_size;
-
        nvhost_dbg_fn("done");
        return 0;
 
  clean_up:
        nvhost_err(dev_from_gk20a(g), "fail");
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
-               if (gr->global_ctx_buffer[i].ref) {
-                       dma_buf_put(gr->global_ctx_buffer[i].ref);
-                       memset(&gr->global_ctx_buffer[i],
-                               0, sizeof(struct mem_desc));
+               if (gr->global_ctx_buffer[i].destroy) {
+                       gr->global_ctx_buffer[i].destroy(pdev,
+                                       &gr->global_ctx_buffer[i]);
                }
        }
        return -ENOMEM;
@@ -2297,12 +2299,16 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
 {
+       struct platform_device *pdev = g->dev;
        struct gr_gk20a *gr = &g->gr;
+       DEFINE_DMA_ATTRS(attrs);
        u32 i;
 
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
-               dma_buf_put(gr->global_ctx_buffer[i].ref);
-               memset(&gr->global_ctx_buffer[i], 0, sizeof(struct mem_desc));
+               gr->global_ctx_buffer[i].destroy(pdev,
+                               &gr->global_ctx_buffer[i]);
        }
 
        nvhost_dbg_fn("done");
@@ -2312,63 +2318,67 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
                                        struct channel_gk20a *c)
 {
        struct vm_gk20a *ch_vm = c->vm;
-       struct dma_buf *handle_ref;
        u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
        struct gr_gk20a *gr = &g->gr;
+       struct sg_table *sgt;
+       u64 size;
        u64 gpu_va;
        u32 i;
        nvhost_dbg_fn("");
 
        /* Circular Buffer */
-       if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].ref == NULL))
-               handle_ref = gr->global_ctx_buffer[CIRCULAR].ref;
-       else
-               handle_ref = gr->global_ctx_buffer[CIRCULAR_VPR].ref;
+       if (!c->vpr || (gr->global_ctx_buffer[CIRCULAR_VPR].sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[CIRCULAR].sgt;
+               size = gr->global_ctx_buffer[CIRCULAR].size;
+       } else {
+               sgt = gr->global_ctx_buffer[CIRCULAR_VPR].sgt;
+               size = gr->global_ctx_buffer[CIRCULAR_VPR].size;
+       }
 
-       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
-                             /*offset_align, flags, kind*/
-                             0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                             gmmu_pte_kind_pitch_v(), NULL, false,
-                             mem_flag_none);
+       gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+                               NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                               mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[CIRCULAR_VA] = gpu_va;
 
        /* Attribute Buffer */
-       if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].ref == NULL))
-               handle_ref = gr->global_ctx_buffer[ATTRIBUTE].ref;
-       else
-               handle_ref = gr->global_ctx_buffer[ATTRIBUTE_VPR].ref;
+       if (!c->vpr || (gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[ATTRIBUTE].sgt;
+               size = gr->global_ctx_buffer[ATTRIBUTE].size;
+       } else {
+               sgt = gr->global_ctx_buffer[ATTRIBUTE_VPR].sgt;
+               size = gr->global_ctx_buffer[ATTRIBUTE_VPR].size;
+       }
 
-       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
-                             /*offset_align, flags, kind*/
-                             0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                             gmmu_pte_kind_pitch_v(), NULL, false,
-                             mem_flag_none);
+       gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+                               NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                               mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[ATTRIBUTE_VA] = gpu_va;
 
        /* Page Pool */
-       if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].ref == NULL))
-               handle_ref = gr->global_ctx_buffer[PAGEPOOL].ref;
-       else
-               handle_ref = gr->global_ctx_buffer[PAGEPOOL_VPR].ref;
+       if (!c->vpr || (gr->global_ctx_buffer[PAGEPOOL_VPR].sgt == NULL)) {
+               sgt = gr->global_ctx_buffer[PAGEPOOL].sgt;
+               size = gr->global_ctx_buffer[PAGEPOOL].size;
+       } else {
+               sgt = gr->global_ctx_buffer[PAGEPOOL_VPR].sgt;
+               size = gr->global_ctx_buffer[PAGEPOOL_VPR].size;
+       }
 
-       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
-                             /*offset_align, flags, kind*/
-                             0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                             gmmu_pte_kind_pitch_v(), NULL, false,
-                             mem_flag_none);
+       gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
+                               NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+                               mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[PAGEPOOL_VA] = gpu_va;
 
        /* Golden Image */
-       gpu_va = gk20a_vm_map(ch_vm, gr->global_ctx_buffer[GOLDEN_CTX].ref,
-                             /*offset_align, flags, kind*/
-                             0, 0, gmmu_pte_kind_pitch_v(), NULL, false,
-                             mem_flag_none);
+       sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt;
+       size = gr->global_ctx_buffer[GOLDEN_CTX].size;
+       gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
+                               mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
@@ -2379,7 +2389,9 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
  clean_up:
        for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                if (g_bfr_va[i]) {
-                       gk20a_vm_unmap(ch_vm, g_bfr_va[i]);
+                       gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
+                                        gr->global_ctx_buffer[i].size,
+                                        mem_flag_none);
                        g_bfr_va[i] = 0;
                }
        }
@@ -2389,6 +2401,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
 {
        struct vm_gk20a *ch_vm = c->vm;
+       struct gr_gk20a *gr = &c->g->gr;
        u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
        u32 i;
 
@@ -2396,7 +2409,9 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
 
        for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
                if (g_bfr_va[i]) {
-                       gk20a_vm_unmap(ch_vm, g_bfr_va[i]);
+                       gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
+                                        gr->global_ctx_buffer[i].size,
+                                        mem_flag_none);
                        g_bfr_va[i] = 0;
                }
        }
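
With the dma-buf references gone, global context buffers are now mapped into
the channel VM directly from their scatter-gather tables, and unmapping has
to pass back the size that was mapped. A condensed sketch of the map/unmap
pairing used above (signatures as declared in mm_gk20a.h below):

	u64 gpu_va;

	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
				NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
				mem_flag_none);
	if (!gpu_va)
		return -ENOMEM;

	/* ... the GPU uses the buffer at gpu_va ... */

	gk20a_gmmu_unmap(ch_vm, gpu_va, size, mem_flag_none);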
drivers/video/tegra/host/gk20a/gr_gk20a.h
index cb67dd0a71b94a67becf232583091818df0715d0..397c921cfe8dad098de7fdb3ee56642882d2080e 100644
@@ -231,7 +231,7 @@ struct gr_gk20a {
        u32 alpha_cb_size;
        u32 timeslice_mode;
 
-       struct mem_desc global_ctx_buffer[NR_GLOBAL_CTX_BUF];
+       struct gr_ctx_buffer_desc global_ctx_buffer[NR_GLOBAL_CTX_BUF];
 
        struct mmu_desc mmu_wr_mem;
        u32 mmu_wr_mem_size;
drivers/video/tegra/host/gk20a/mm_gk20a.c
index 5ca82f9658a4c55b021a843bd13656da61beda0c..c5be4c2e5ae8d199f4509c06e5f8f37322ac0188 100644
@@ -156,8 +156,7 @@ static void gk20a_mm_delete_priv(void *_priv)
        kfree(priv);
 }
 
-static struct sg_table *gk20a_mm_pin(struct device *dev,
-                                    struct dma_buf *dmabuf)
+struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
 {
        struct gk20a_dmabuf_priv *priv;
        static DEFINE_MUTEX(priv_lock);
@@ -205,8 +204,8 @@ priv_exist_or_err:
        return priv->sgt;
 }
 
-static void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
-                          struct sg_table *sgt)
+void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
+                   struct sg_table *sgt)
 {
        struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
        dma_addr_t dma_addr;
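
gk20a_mm_pin() and gk20a_mm_unpin() lose their static qualifier here so the
Tegra platform code can pin a dma-buf (the VPR carveout allocation) and obtain
an sg_table for it. A short usage sketch, mirroring what
gk20a_tegra_secure_alloc()/gk20a_tegra_secure_destroy() do further down (the
error check is defensive; the failure convention is not visible in this hunk):

	struct sg_table *sgt;

	sgt = gk20a_mm_pin(&pdev->dev, dmabuf);
	if (IS_ERR_OR_NULL(sgt))
		return -ENOMEM;

	/* ... program the GPU with sg_dma_address(sgt->sgl) ... */

	gk20a_mm_unpin(&pdev->dev, dmabuf, sgt);
	dma_buf_put(dmabuf);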
drivers/video/tegra/host/gk20a/mm_gk20a.h
index 59aa1a89ce3c3ac1529db9a91f54dfa2e126a288..b4b2f6ae99fe106f42a2ff8a9d98c43653adfc5d 100644
@@ -22,6 +22,7 @@
 #define __MM_GK20A_H__
 
 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <asm/dma-iommu.h>
 #include "../nvhost_allocator.h"
@@ -135,6 +136,17 @@ struct pm_ctx_desc {
        u32 ctx_sw_mode;
 };
 
+struct gr_ctx_buffer_desc;
+struct gr_ctx_buffer_desc {
+       void (*destroy)(struct platform_device *, struct gr_ctx_buffer_desc *);
+       struct sg_table *sgt;
+       struct page **pages;
+       size_t size;
+       u64 iova;
+       struct dma_attrs attrs;
+       void *priv;
+};
+
 struct gr_ctx_desc {
        struct page **pages;
        u64 iova;
@@ -363,6 +375,10 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
                u64 size,
                int rw_flag);
 
+struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf);
+void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
+                   struct sg_table *sgt);
+
 u64 gk20a_vm_map(struct vm_gk20a *vm,
                struct dma_buf *dmabuf,
                u64 offset_align,
drivers/video/tegra/host/gk20a/platform_gk20a.h
index 29e8b0fb75968271cce5ee68d394364b870394fe..27505bf9773721b4dbe1cb40c85cbcf7efeadcad 100644
@@ -25,6 +25,7 @@
 
 struct gk20a;
 struct channel_gk20a;
+struct gr_ctx_buffer_desc;
 
 struct gk20a_platform {
 #ifdef CONFIG_TEGRA_GK20A
@@ -62,6 +63,14 @@ struct gk20a_platform {
         * depends on. The platform implementation must count refs to this
         * call. */
        void (*channel_idle)(struct platform_device *dev);
+
+       /* This function is called to allocate secure memory (memory that the
+        * CPU cannot see). The function should fill the context buffer
+        * descriptor (especially fields destroy, sgt, size).
+        */
+       int (*secure_alloc)(struct platform_device *dev,
+                           struct gr_ctx_buffer_desc *desc,
+                           size_t size);
 };
 
 static inline struct gk20a_platform *gk20a_get_platform(
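
The comment added above spells out the contract: the hook receives a size,
allocates memory the CPU cannot see, and must fill at least the destroy, sgt
and size fields of the descriptor before returning. The Tegra implementation
in the next file satisfies this via nvmap; a skeletal, hypothetical shape for
some other platform (my_carveout_alloc/my_carveout_free are placeholders, not
real APIs):

	/* Hypothetical carveout helpers -- stand-ins for a platform's own API. */
	struct sg_table *my_carveout_alloc(size_t size);
	void my_carveout_free(struct sg_table *sgt);

	static void my_platform_secure_destroy(struct platform_device *pdev,
					       struct gr_ctx_buffer_desc *desc)
	{
		my_carveout_free(desc->sgt);
	}

	static int my_platform_secure_alloc(struct platform_device *pdev,
					    struct gr_ctx_buffer_desc *desc,
					    size_t size)
	{
		struct sg_table *sgt = my_carveout_alloc(size);

		if (!sgt)
			return -ENOMEM;

		desc->sgt = sgt;
		desc->size = size;
		desc->destroy = my_platform_secure_destroy;
		return 0;
	}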
drivers/video/tegra/host/gk20a/platform_gk20a_tegra.c
index 7b8842ebc0d257b3d8721984deda1b2a0235ec20..4190d47eb6911bb26c8b8d9f4a61223eaad5961b 100644
@@ -25,6 +25,8 @@
 #include "../../../../../arch/arm/mach-tegra/iomap.h"
 #include <linux/tegra-powergate.h>
 #include <linux/nvhost_ioctl.h>
+#include <linux/dma-buf.h>
+#include <linux/nvmap.h>
 #include <mach/irqs.h>
 
 #include "gk20a.h"
@@ -68,6 +70,37 @@ static void gk20a_tegra_channel_idle(struct platform_device *dev)
                nvhost_module_idle(nvhost_get_parent(dev));
 }
 
+static void gk20a_tegra_secure_destroy(struct platform_device *pdev,
+                                      struct gr_ctx_buffer_desc *desc)
+{
+       struct dma_buf *dmabuf = desc->priv;
+
+       gk20a_mm_unpin(&pdev->dev, dmabuf, desc->sgt);
+       dma_buf_put(dmabuf);
+}
+
+static int gk20a_tegra_secure_alloc(struct platform_device *pdev,
+                                   struct gr_ctx_buffer_desc *desc,
+                                   size_t size)
+{
+#ifdef CONFIG_TEGRA_NVMAP
+       struct dma_buf *dmabuf;
+
+       dmabuf = nvmap_alloc_dmabuf(size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   NVMAP_HANDLE_UNCACHEABLE,
+                                   NVMAP_HEAP_CARVEOUT_VPR);
+       desc->sgt = gk20a_mm_pin(&pdev->dev, dmabuf);
+       desc->size = size;
+       desc->destroy = gk20a_tegra_secure_destroy;
+       desc->priv = dmabuf;
+
+       return 0;
+#else
+       return -ENOSYS;
+#endif
+}
+
 static int gk20a_tegra_probe(struct platform_device *dev)
 {
        int err;
@@ -169,6 +202,7 @@ struct gk20a_platform t132_gk20a_tegra_platform = {
        .probe = gk20a_tegra_probe,
        .channel_busy = gk20a_tegra_channel_busy,
        .channel_idle = gk20a_tegra_channel_idle,
+       .secure_alloc = gk20a_tegra_secure_alloc,
 };
 
 struct gk20a_platform gk20a_tegra_platform = {
@@ -202,6 +236,7 @@ struct gk20a_platform gk20a_tegra_platform = {
        .probe = gk20a_tegra_probe,
        .channel_busy = gk20a_tegra_channel_busy,
        .channel_idle = gk20a_tegra_channel_idle,
+       .secure_alloc = gk20a_tegra_secure_alloc,
 };
 
 struct platform_device tegra_gk20a_device = {