video: tegra: host: gk20a: Use dmabuf directly
author    Arto Merilainen <amerilainen@nvidia.com>  Mon, 17 Feb 2014 10:25:59 +0000 (12:25 +0200)
committer Terje Bergstrom <tbergstrom@nvidia.com>   Thu, 27 Feb 2014 06:36:25 +0000 (22:36 -0800)
Currently, gk20a memory management operations go through the
nvhost_memmgr abstraction. This patch modifies the gk20a driver
to use the dma-buf API directly. At the same time, it removes
unnecessary calls that take a reference on the memory manager itself.

In addition, this patch reworks memmgr allocation so that it no
longer takes a memmgr as a parameter.

Bug 1450489

Change-Id: If46b7442f2d03c41b90a0e81f697f4ceb63c3dae
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/368447
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
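
The change boils down to one substitution: a dma-buf fd carries its own
reference count, so there is no manager object to look up, reference, or
release around each handle. The sketch below condenses the new pattern
using only the stock linux-3.10 dma-buf API; example_touch_dmabuf() is a
hypothetical illustration, not a function added by this patch.

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Map the buffer behind a dma-buf fd into kernel space, clear its
     * first page, then drop the mapping and the reference. This is the
     * pattern that replaces nvhost_memmgr_get()/mmap()/munmap()/put(). */
    static int example_touch_dmabuf(int fd)
    {
            struct dma_buf *dmabuf;
            void *va;

            dmabuf = dma_buf_get(fd);       /* takes a buffer reference */
            if (IS_ERR(dmabuf))
                    return PTR_ERR(dmabuf);

            va = dma_buf_vmap(dmabuf);      /* kernel virtual mapping */
            if (!va) {
                    dma_buf_put(dmabuf);
                    return -ENOMEM;
            }

            memset(va, 0, PAGE_SIZE);

            dma_buf_vunmap(dmabuf, va);
            dma_buf_put(dmabuf);            /* drops the reference */
            return 0;
    }

This is exactly the substitution applied throughout channel_gk20a.c and
gr_gk20a.c below.
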
13 files changed:
drivers/video/tegra/host/dmabuf.c
drivers/video/tegra/host/dmabuf.h
drivers/video/tegra/host/gk20a/as_gk20a.c
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/channel_gk20a.h
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.h
drivers/video/tegra/host/nvhost_hwctx.h
drivers/video/tegra/host/nvhost_memmgr.c
drivers/video/tegra/host/nvhost_memmgr.h
drivers/video/tegra/host/nvmap.c
drivers/video/tegra/host/nvmap.h

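The one nvhost_memmgr facility with no direct dma-buf replacement is
pin/unpin, so the mm_gk20a.c hunks below add gk20a_mm_pin()/gk20a_mm_unpin():
per-buffer private data caches one dma_buf_attachment and sg_table and
counts pins, attaching on the first pin and tearing down on the last unpin.
(The private data is stored via dma_buf_set_drvdata()/dma_buf_get_drvdata(),
which are Tegra-tree extensions rather than mainline dma-buf API.) Below is
a condensed sketch of that refcounting, with the drvdata lookup factored out
and the patch's WARN_ON checks dropped; struct pin_cache stands in for the
patch's gk20a_dmabuf_priv.

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/mutex.h>

    struct pin_cache {                  /* stand-in for gk20a_dmabuf_priv */
            struct mutex lock;
            struct dma_buf_attachment *attach;
            struct sg_table *sgt;
            int pin_count;              /* attach/map on 0->1, undo on 1->0 */
    };

    static struct sg_table *pin_cached(struct device *dev,
                                       struct dma_buf *dmabuf,
                                       struct pin_cache *priv)
    {
            struct sg_table *sgt;

            mutex_lock(&priv->lock);
            if (priv->pin_count == 0) {
                    /* first user: attach and DMA-map once */
                    priv->attach = dma_buf_attach(dmabuf, dev);
                    if (IS_ERR(priv->attach)) {
                            sgt = (struct sg_table *)priv->attach;
                            goto out;
                    }
                    priv->sgt = dma_buf_map_attachment(priv->attach,
                                                       DMA_BIDIRECTIONAL);
                    if (IS_ERR(priv->sgt)) {
                            dma_buf_detach(dmabuf, priv->attach);
                            sgt = priv->sgt;
                            goto out;
                    }
            }
            priv->pin_count++;
            sgt = priv->sgt;            /* later users reuse the cached table */
    out:
            mutex_unlock(&priv->lock);
            return sgt;
    }

    static void unpin_cached(struct device *dev, struct dma_buf *dmabuf,
                             struct pin_cache *priv)
    {
            mutex_lock(&priv->lock);
            if (--priv->pin_count == 0) {
                    /* last user: undo the mapping and the attachment */
                    dma_buf_unmap_attachment(priv->attach, priv->sgt,
                                             DMA_BIDIRECTIONAL);
                    dma_buf_detach(dmabuf, priv->attach);
            }
            mutex_unlock(&priv->lock);
    }

The count means repeated pins of the same buffer against the same device
reuse the cached sg_table instead of re-attaching.
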
diff --git a/drivers/video/tegra/host/dmabuf.c b/drivers/video/tegra/host/dmabuf.c
index f86f8c7e5ccc7f9dfb097079d7231bea4f934082..85a2028345fd7dda2821fa87cbc114de150f0bbc 100644
@@ -110,7 +110,7 @@ struct mem_handle *nvhost_dmabuf_get(ulong id, struct platform_device *dev)
        return (struct mem_handle *) ((uintptr_t)h | mem_mgr_type_dmabuf);
 }
 
-int nvhost_dmabuf_get_param(struct mem_mgr *memmgr, struct mem_handle *handle,
+int nvhost_dmabuf_get_param(struct mem_handle *handle,
                            u32 param, u64 *result)
 {
        /* TBD: find a way to associate size, kind, etc */
diff --git a/drivers/video/tegra/host/dmabuf.h b/drivers/video/tegra/host/dmabuf.h
index 34d34f051077ddd0e7856a043bee8e24f6c2d372..2b9170e28c3808596167c32ba549aff0219b133b 100644
@@ -30,8 +30,7 @@ struct mem_mgr *nvhost_dmabuf_alloc_mgr(void);
 void nvhost_dmabuf_put_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_dmabuf_get_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_dmabuf_get_mgr_file(int fd);
-struct mem_handle *nvhost_dmabuf_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags);
+struct mem_handle *nvhost_dmabuf_alloc(size_t size, size_t align, int flags);
 void nvhost_dmabuf_put(struct mem_handle *handle);
 struct sg_table *nvhost_dmabuf_pin(struct mem_handle *handle);
 void nvhost_dmabuf_unpin(struct mem_handle *handle, struct sg_table *sgt);
@@ -41,7 +40,7 @@ void *nvhost_dmabuf_kmap(struct mem_handle *handle, unsigned int pagenum);
 void nvhost_dmabuf_kunmap(struct mem_handle *handle, unsigned int pagenum,
                void *addr);
 struct mem_handle *nvhost_dmabuf_get(ulong id, struct platform_device *dev);
-int nvhost_dmabuf_get_param(struct mem_mgr *memmgr, struct mem_handle *handle,
+int nvhost_dmabuf_get_param(struct mem_handle *handle,
                           u32 param, u64 *result);
 size_t nvhost_dmabuf_size(struct mem_handle *handle);
 #endif
diff --git a/drivers/video/tegra/host/gk20a/as_gk20a.c b/drivers/video/tegra/host/gk20a/as_gk20a.c
index 7cc6ea5a7850425de41c7f46d491437aef39d8b6..4f2ef0746474402b0362f9bbe93c86b93b181c8f 100644
@@ -141,7 +141,7 @@ static int gk20a_as_ioctl_map_buffer_ex(
                if (args->padding[i])
                        return -EINVAL;
 
-       return gk20a_vm_map_buffer(as_share, 0, args->dmabuf_fd,
+       return gk20a_vm_map_buffer(as_share, args->dmabuf_fd,
                                   &args->offset, args->flags,
                                   args->kind);
 }
@@ -151,8 +151,8 @@ static int gk20a_as_ioctl_map_buffer(
                struct nvhost_as_map_buffer_args *args)
 {
        nvhost_dbg_fn("");
-       return gk20a_vm_map_buffer(as_share, args->nvmap_fd,
-                                  args->nvmap_handle, &args->o_a.align,
+       return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
+                                  &args->o_a.align,
                                   args->flags, NV_KIND_DEFAULT);
        /* args->o_a.offset will be set if !err */
 }
diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.c b/drivers/video/tegra/host/gk20a/channel_gk20a.c
index 4f3861bf369bdd6a93bf29d547e8a1828d1af14b..91da8dffe0cd477fcdc809dff7d0fea2136e5229 100644
 #include <linux/scatterlist.h>
 #include <linux/file.h>
 #include <linux/anon_inodes.h>
+#include <linux/dma-buf.h>
 
 #include "dev.h"
 #include "debug.h"
-#include "nvhost_memmgr.h"
 #include "nvhost_sync.h"
 
 #include "gk20a.h"
 #include "dbg_gpu_gk20a.h"
 
+#include "nvhost_memmgr.h"
 #include "hw_ram_gk20a.h"
 #include "hw_fifo_gk20a.h"
 #include "hw_pbdma_gk20a.h"
@@ -480,14 +481,12 @@ void gk20a_disable_channel(struct channel_gk20a *ch,
 
 static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 {
-       struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
        /* disable existing cyclestats buffer */
        mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex);
        if (ch->cyclestate.cyclestate_buffer_handler) {
-               nvhost_memmgr_munmap(ch->cyclestate.cyclestate_buffer_handler,
+               dma_buf_vunmap(ch->cyclestate.cyclestate_buffer_handler,
                                ch->cyclestate.cyclestate_buffer);
-               nvhost_memmgr_put(memmgr,
-                               ch->cyclestate.cyclestate_buffer_handler);
+               dma_buf_put(ch->cyclestate.cyclestate_buffer_handler);
                ch->cyclestate.cyclestate_buffer_handler = NULL;
                ch->cyclestate.cyclestate_buffer = NULL;
                ch->cyclestate.cyclestate_buffer_size = 0;
@@ -498,30 +497,22 @@ static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
 static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
                       struct nvhost_cycle_stats_args *args)
 {
-       struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
-       struct mem_handle *handle_ref;
+       struct dma_buf *dmabuf;
        void *virtual_address;
-       u64 cyclestate_buffer_size;
-       struct platform_device *dev = ch->ch->dev;
 
        if (args->nvmap_handle && !ch->cyclestate.cyclestate_buffer_handler) {
 
                /* set up new cyclestats buffer */
-               handle_ref = nvhost_memmgr_get(memmgr,
-                               args->nvmap_handle, dev);
-               if (IS_ERR(handle_ref))
-                       return PTR_ERR(handle_ref);
-               virtual_address = nvhost_memmgr_mmap(handle_ref);
+               dmabuf = dma_buf_get(args->nvmap_handle);
+               if (IS_ERR(dmabuf))
+                       return PTR_ERR(dmabuf);
+               virtual_address = dma_buf_vmap(dmabuf);
                if (!virtual_address)
                        return -ENOMEM;
 
-               nvhost_memmgr_get_param(memmgr, handle_ref,
-                                       NVMAP_HANDLE_PARAM_SIZE,
-                                       &cyclestate_buffer_size);
-
-               ch->cyclestate.cyclestate_buffer_handler = handle_ref;
+               ch->cyclestate.cyclestate_buffer_handler = dmabuf;
                ch->cyclestate.cyclestate_buffer = virtual_address;
-               ch->cyclestate.cyclestate_buffer_size = cyclestate_buffer_size;
+               ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
                return 0;
 
        } else if (!args->nvmap_handle &&
@@ -543,37 +534,34 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 
 static int gk20a_init_error_notifier(struct channel_gk20a *ch,
                struct nvhost_set_error_notifier *args) {
-       struct platform_device *dev = ch->g->dev;
        void *va;
 
-       struct mem_mgr *memmgr;
-       struct mem_handle *handle_ref;
+       struct dma_buf *dmabuf;
 
        if (!args->mem) {
                pr_err("gk20a_init_error_notifier: invalid memory handle\n");
                return -EINVAL;
        }
 
-       memmgr = gk20a_channel_mem_mgr(ch);
-       handle_ref = nvhost_memmgr_get(memmgr, args->mem, dev);
+       dmabuf = dma_buf_get(args->mem);
 
        if (ch->error_notifier_ref)
                gk20a_free_error_notifiers(ch);
 
-       if (IS_ERR(handle_ref)) {
+       if (IS_ERR(dmabuf)) {
                pr_err("Invalid handle: %d\n", args->mem);
                return -EINVAL;
        }
        /* map handle */
-       va = nvhost_memmgr_mmap(handle_ref);
+       va = dma_buf_vmap(dmabuf);
        if (!va) {
-               nvhost_memmgr_put(memmgr, handle_ref);
+               dma_buf_put(dmabuf);
                pr_err("Cannot map notifier handle\n");
                return -ENOMEM;
        }
 
        /* set channel notifiers pointer */
-       ch->error_notifier_ref = handle_ref;
+       ch->error_notifier_ref = dmabuf;
        ch->error_notifier = va + args->offset;
        ch->error_notifier_va = va;
        memset(ch->error_notifier, 0, sizeof(struct nvhost_notification));
@@ -602,10 +590,8 @@ void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error)
 static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
 {
        if (ch->error_notifier_ref) {
-               struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
-               nvhost_memmgr_munmap(ch->error_notifier_ref,
-                               ch->error_notifier_va);
-               nvhost_memmgr_put(memmgr, ch->error_notifier_ref);
+               dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
+               dma_buf_put(ch->error_notifier_ref);
                ch->error_notifier_ref = 0;
                ch->error_notifier = 0;
                ch->error_notifier_va = 0;
@@ -702,7 +688,6 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
 {
        struct channel_gk20a *ch = (struct channel_gk20a *)filp->private_data;
        struct gk20a *g = ch->g;
-       struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
 
        trace_nvhost_channel_release(dev_name(&g->dev->dev));
 
@@ -711,8 +696,6 @@ int gk20a_channel_release(struct inode *inode, struct file *filp)
        gk20a_channel_idle(ch->g->dev);
 
        gk20a_put_client(g);
-       if (memmgr)
-               nvhost_memmgr_put_mgr(memmgr);
        filp->private_data = NULL;
        return 0;
 }
@@ -1418,19 +1401,17 @@ static void trace_write_pushbuffer(struct channel_gk20a *c, struct gpfifo *g)
        void *mem = NULL;
        unsigned int words;
        u64 offset;
-       struct mem_handle *r = NULL;
+       struct dma_buf *dmabuf = NULL;
 
        if (nvhost_debug_trace_cmdbuf) {
                u64 gpu_va = (u64)g->entry0 |
                        (u64)((u64)pbdma_gp_entry1_get_hi_v(g->entry1) << 32);
-               struct mem_mgr *memmgr = NULL;
                int err;
 
                words = pbdma_gp_entry1_length_v(g->entry1);
-               err = gk20a_vm_find_buffer(c->vm, gpu_va, &memmgr, &r,
-                                          &offset);
+               err = gk20a_vm_find_buffer(c->vm, gpu_va, &dmabuf, &offset);
                if (!err)
-                       mem = nvhost_memmgr_mmap(r);
+                       mem = dma_buf_vmap(dmabuf);
        }
 
        if (mem) {
@@ -1447,7 +1428,7 @@ static void trace_write_pushbuffer(struct channel_gk20a *c, struct gpfifo *g)
                                offset + i * sizeof(u32),
                                mem);
                }
-               nvhost_memmgr_munmap(r, mem);
+               dma_buf_vunmap(dmabuf, mem);
        }
 }
 
@@ -1881,8 +1862,7 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
                                        u32 payload, long timeout)
 {
        struct platform_device *pdev = ch->ch->dev;
-       struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
-       struct mem_handle *handle_ref;
+       struct dma_buf *dmabuf;
        void *data;
        u32 *semaphore;
        int ret = 0;
@@ -1892,14 +1872,14 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
        if (ch->has_timedout)
                return -ETIMEDOUT;
 
-       handle_ref = nvhost_memmgr_get(memmgr, id, pdev);
-       if (IS_ERR(handle_ref)) {
+       dmabuf = dma_buf_get(id);
+       if (IS_ERR(dmabuf)) {
                nvhost_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
                           id);
                return -EINVAL;
        }
 
-       data = nvhost_memmgr_kmap(handle_ref, offset >> PAGE_SHIFT);
+       data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
        if (!data) {
                nvhost_err(&pdev->dev, "failed to map notifier memory");
                ret = -EINVAL;
@@ -1918,9 +1898,9 @@ static int gk20a_channel_wait_semaphore(struct channel_gk20a *ch,
        else if (remain < 0)
                ret = remain;
 
-       nvhost_memmgr_kunmap(handle_ref, offset >> PAGE_SHIFT, data);
+       dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
 cleanup_put:
-       nvhost_memmgr_put(memmgr, handle_ref);
+       dma_buf_put(dmabuf);
        return ret;
 }
 
@@ -1928,9 +1908,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
                              struct nvhost_wait_args *args)
 {
        struct device *d = dev_from_gk20a(ch->g);
-       struct platform_device *dev = ch->ch->dev;
-       struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
-       struct mem_handle *handle_ref;
+       struct dma_buf *dmabuf;
        struct notification *notif;
        struct timespec tv;
        u64 jiffies;
@@ -1954,14 +1932,14 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
                id = args->condition.notifier.nvmap_handle;
                offset = args->condition.notifier.offset;
 
-               handle_ref = nvhost_memmgr_get(memmgr, id, dev);
-               if (IS_ERR(handle_ref)) {
+               dmabuf = dma_buf_get(id);
+               if (IS_ERR(dmabuf)) {
                        nvhost_err(d, "invalid notifier nvmap handle 0x%lx",
                                   id);
                        return -EINVAL;
                }
 
-               notif = nvhost_memmgr_mmap(handle_ref);
+               notif = dma_buf_vmap(dmabuf);
                if (!notif) {
                        nvhost_err(d, "failed to map notifier memory");
                        return -ENOMEM;
@@ -1993,7 +1971,7 @@ static int gk20a_channel_wait(struct channel_gk20a *ch,
                notif->info16 = ch->hw_chid; /* should be method offset */
 
 notif_clean_up:
-               nvhost_memmgr_munmap(handle_ref, notif);
+               dma_buf_vunmap(dmabuf, notif);
                return ret;
 
        case NVHOST_WAIT_TYPE_SEMAPHORE:
@@ -2273,19 +2251,7 @@ long gk20a_channel_ioctl(struct file *filp,
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
-       {
-               int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
-               struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
-
-               if (IS_ERR(new_client)) {
-                       err = PTR_ERR(new_client);
-                       break;
-               }
-               if (ch->memmgr)
-                       nvhost_memmgr_put_mgr(ch->memmgr);
-               ch->memmgr = new_client;
                break;
-       }
        case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
                gk20a_channel_busy(dev);
                err = gk20a_alloc_obj_ctx(ch,
diff --git a/drivers/video/tegra/host/gk20a/channel_gk20a.h b/drivers/video/tegra/host/gk20a/channel_gk20a.h
index 253f5bb02cff67b1d2a97352227b246146bdb8bc..2740b3f799d66a99caaf0544a11c63bcedf2cb1f 100644
@@ -28,8 +28,6 @@
 #include <linux/nvhost_ioctl.h>
 struct gk20a;
 struct gr_gk20a;
-struct mem_mgr;
-struct mem_handle;
 struct dbg_session_gk20a;
 
 #include "nvhost_channel.h"
@@ -85,7 +83,6 @@ struct channel_gk20a {
        bool vpr;
        pid_t pid;
 
-       struct mem_mgr *memmgr;
        struct nvhost_channel *ch;
 
        struct list_head jobs;
@@ -129,7 +126,7 @@ struct channel_gk20a {
        struct {
        void *cyclestate_buffer;
        u32 cyclestate_buffer_size;
-       struct mem_handle *cyclestate_buffer_handler;
+       struct dma_buf *cyclestate_buffer_handler;
        struct mutex cyclestate_buffer_mutex;
        } cyclestate;
 #endif
@@ -140,7 +137,7 @@ struct channel_gk20a {
        u32 timeout_ms_max;
        bool timeout_debug_dump;
 
-       struct mem_handle *error_notifier_ref;
+       struct dma_buf *error_notifier_ref;
        struct nvhost_notification *error_notifier;
        void *error_notifier_va;
 };
@@ -167,11 +164,6 @@ void gk20a_channel_semaphore_wakeup(struct gk20a *g);
 int gk20a_channel_suspend(struct gk20a *g);
 int gk20a_channel_resume(struct gk20a *g);
 
-static inline
-struct mem_mgr *gk20a_channel_mem_mgr(struct channel_gk20a *ch)
-{
-       return ch->memmgr;
-}
 /* Channel file operations */
 int gk20a_channel_open(struct inode *inode, struct file *filp);
 long gk20a_channel_ioctl(struct file *filp,
diff --git a/drivers/video/tegra/host/gk20a/gr_gk20a.c b/drivers/video/tegra/host/gk20a/gr_gk20a.c
index 6ac592cb63604c292b2764ca959d5912557bf494..4631492d2b40c634104e37fb81d70a949dd4f1fe 100644
@@ -1542,7 +1542,7 @@ static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
        if (err)
                goto clean_up;
 
-       gold_ptr = nvhost_memmgr_mmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
+       gold_ptr = dma_buf_vmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
        if (!gold_ptr)
                goto clean_up;
 
@@ -1605,8 +1605,8 @@ clean_up:
                nvhost_dbg_fn("done");
 
        if (gold_ptr)
-               nvhost_memmgr_munmap(gr->global_ctx_buffer[GOLDEN_CTX].ref,
-                                    gold_ptr);
+               dma_buf_vunmap(gr->global_ctx_buffer[GOLDEN_CTX].ref,
+                              gold_ptr);
        if (ctx_ptr)
                vunmap(ctx_ptr);
 
@@ -2183,8 +2183,7 @@ static int gr_gk20a_init_ctx_state(struct gk20a *g, struct gr_gk20a *gr)
 static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 {
        struct gr_gk20a *gr = &g->gr;
-       struct mem_mgr *memmgr = mem_mgr_from_g(g);
-       struct mem_handle *mem;
+       struct dma_buf *dmabuf;
        int i, attr_buffer_size;
 
        u32 cb_buffer_size = gr->bundle_cb_default_size *
@@ -2199,78 +2198,85 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
        nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
-       mem = nvhost_memmgr_alloc(memmgr, cb_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 0);
-       if (IS_ERR(mem))
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(cb_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   0);
+       if (IS_ERR(dmabuf))
                goto clean_up;
 
-       gr->global_ctx_buffer[CIRCULAR].ref = mem;
+       gr->global_ctx_buffer[CIRCULAR].ref = dmabuf;
        gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
 
-       mem = nvhost_memmgr_alloc(memmgr, cb_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(mem)) {
-               gr->global_ctx_buffer[CIRCULAR_VPR].ref = mem;
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(cb_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   NVMAP_HEAP_CARVEOUT_VPR);
+       if (!IS_ERR(dmabuf)) {
+               gr->global_ctx_buffer[CIRCULAR_VPR].ref = dmabuf;
                gr->global_ctx_buffer[CIRCULAR_VPR].size = cb_buffer_size;
        }
 
        nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
 
-       mem = nvhost_memmgr_alloc(memmgr, pagepool_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 0);
-       if (IS_ERR(mem))
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(pagepool_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   0);
+       if (IS_ERR(dmabuf))
                goto clean_up;
 
-       gr->global_ctx_buffer[PAGEPOOL].ref = mem;
+       gr->global_ctx_buffer[PAGEPOOL].ref = dmabuf;
        gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
 
-       mem = nvhost_memmgr_alloc(memmgr, pagepool_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(mem)) {
-               gr->global_ctx_buffer[PAGEPOOL_VPR].ref = mem;
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(pagepool_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   NVMAP_HEAP_CARVEOUT_VPR);
+       if (!IS_ERR(dmabuf)) {
+               gr->global_ctx_buffer[PAGEPOOL_VPR].ref = dmabuf;
                gr->global_ctx_buffer[PAGEPOOL_VPR].size = pagepool_buffer_size;
        }
 
        nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
 
-       mem = nvhost_memmgr_alloc(memmgr, attr_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 0);
-       if (IS_ERR(mem))
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(attr_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   0);
+       if (IS_ERR(dmabuf))
                goto clean_up;
 
-       gr->global_ctx_buffer[ATTRIBUTE].ref = mem;
+       gr->global_ctx_buffer[ATTRIBUTE].ref = dmabuf;
        gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
 
-       mem = nvhost_memmgr_alloc(memmgr, attr_buffer_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 NVMAP_HEAP_CARVEOUT_VPR);
-       if (!IS_ERR(mem)) {
-               gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = mem;
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(attr_buffer_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   NVMAP_HEAP_CARVEOUT_VPR);
+       if (!IS_ERR(dmabuf)) {
+               gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = dmabuf;
                gr->global_ctx_buffer[ATTRIBUTE_VPR].size = attr_buffer_size;
        }
 
        nvhost_dbg_info("golden_image_size : %d",
                   gr->ctx_vars.golden_image_size);
 
-       mem = nvhost_memmgr_alloc(memmgr, gr->ctx_vars.golden_image_size,
-                                 DEFAULT_ALLOC_ALIGNMENT,
-                                 DEFAULT_ALLOC_FLAGS,
-                                 0);
-       if (IS_ERR(mem))
+       dmabuf = (struct dma_buf *)
+               nvhost_memmgr_alloc(gr->ctx_vars.golden_image_size,
+                                   DEFAULT_ALLOC_ALIGNMENT,
+                                   DEFAULT_ALLOC_FLAGS,
+                                   0);
+       if (IS_ERR(dmabuf))
                goto clean_up;
 
-       gr->global_ctx_buffer[GOLDEN_CTX].ref = mem;
+       gr->global_ctx_buffer[GOLDEN_CTX].ref = dmabuf;
        gr->global_ctx_buffer[GOLDEN_CTX].size =
                gr->ctx_vars.golden_image_size;
 
@@ -2281,8 +2287,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
        nvhost_err(dev_from_gk20a(g), "fail");
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
                if (gr->global_ctx_buffer[i].ref) {
-                       nvhost_memmgr_put(memmgr,
-                                         gr->global_ctx_buffer[i].ref);
+                       dma_buf_put(gr->global_ctx_buffer[i].ref);
                        memset(&gr->global_ctx_buffer[i],
                                0, sizeof(struct mem_desc));
                }
@@ -2293,11 +2298,10 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
 {
        struct gr_gk20a *gr = &g->gr;
-       struct mem_mgr *memmgr = mem_mgr_from_g(g);
        u32 i;
 
        for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
-               nvhost_memmgr_put(memmgr, gr->global_ctx_buffer[i].ref);
+               dma_buf_put(gr->global_ctx_buffer[i].ref);
                memset(&gr->global_ctx_buffer[i], 0, sizeof(struct mem_desc));
        }
 
@@ -2308,8 +2312,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
                                        struct channel_gk20a *c)
 {
        struct vm_gk20a *ch_vm = c->vm;
-       struct mem_mgr *memmgr = mem_mgr_from_g(g);
-       struct mem_handle *handle_ref;
+       struct dma_buf *handle_ref;
        u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
        struct gr_gk20a *gr = &g->gr;
        u64 gpu_va;
@@ -2322,7 +2325,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        else
                handle_ref = gr->global_ctx_buffer[CIRCULAR_VPR].ref;
 
-       gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
                              /*offset_align, flags, kind*/
                              0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                              gmmu_pte_kind_pitch_v(), NULL, false,
@@ -2337,7 +2340,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        else
                handle_ref = gr->global_ctx_buffer[ATTRIBUTE_VPR].ref;
 
-       gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
                              /*offset_align, flags, kind*/
                              0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                              gmmu_pte_kind_pitch_v(), NULL, false,
@@ -2352,7 +2355,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        else
                handle_ref = gr->global_ctx_buffer[PAGEPOOL_VPR].ref;
 
-       gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+       gpu_va = gk20a_vm_map(ch_vm, handle_ref,
                              /*offset_align, flags, kind*/
                              0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
                              gmmu_pte_kind_pitch_v(), NULL, false,
@@ -2362,8 +2365,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        g_bfr_va[PAGEPOOL_VA] = gpu_va;
 
        /* Golden Image */
-       gpu_va = gk20a_vm_map(ch_vm, memmgr,
-                             gr->global_ctx_buffer[GOLDEN_CTX].ref,
+       gpu_va = gk20a_vm_map(ch_vm, gr->global_ctx_buffer[GOLDEN_CTX].ref,
                              /*offset_align, flags, kind*/
                              0, 0, gmmu_pte_kind_pitch_v(), NULL, false,
                              mem_flag_none);
diff --git a/drivers/video/tegra/host/gk20a/mm_gk20a.c b/drivers/video/tegra/host/gk20a/mm_gk20a.c
index 42664f4251ff74beee754ebb745041153337e31f..5ca82f9658a4c55b021a843bd13656da61beda0c 100644
@@ -104,7 +104,7 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer);
 static struct mapped_buffer_node *find_mapped_buffer_locked(
                                        struct rb_root *root, u64 addr);
 static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
-                               struct rb_root *root, struct mem_handle *r,
+                               struct rb_root *root, struct dma_buf *dmabuf,
                                u32 kind);
 static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                                   enum gmmu_pgsz_gk20a pgsz_idx,
@@ -123,6 +123,157 @@ static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
                                                                0x1ffffLL };
 static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
 
+struct gk20a_comptags {
+       u32 offset;
+       u32 lines;
+};
+
+struct gk20a_dmabuf_priv {
+       struct mutex lock;
+
+       struct nvhost_allocator *comptag_allocator;
+       struct gk20a_comptags comptags;
+
+       struct dma_buf_attachment *attach;
+       struct sg_table *sgt;
+
+       int pin_count;
+};
+
+static void gk20a_mm_delete_priv(void *_priv)
+{
+       struct gk20a_dmabuf_priv *priv = _priv;
+       if (!priv)
+               return;
+
+       if (priv->comptags.lines) {
+               BUG_ON(!priv->comptag_allocator);
+               priv->comptag_allocator->free(priv->comptag_allocator,
+                                             priv->comptags.offset,
+                                             priv->comptags.lines);
+       }
+
+       kfree(priv);
+}
+
+static struct sg_table *gk20a_mm_pin(struct device *dev,
+                                    struct dma_buf *dmabuf)
+{
+       struct gk20a_dmabuf_priv *priv;
+       static DEFINE_MUTEX(priv_lock);
+
+       /* create the nvhost priv if needed */
+       priv = dma_buf_get_drvdata(dmabuf, dev);
+       if (!priv) {
+               mutex_lock(&priv_lock);
+               priv = dma_buf_get_drvdata(dmabuf, dev);
+               if (priv)
+                       goto priv_exist_or_err;
+               priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+               if (!priv) {
+                       priv = ERR_PTR(-ENOMEM);
+                       goto priv_exist_or_err;
+               }
+               mutex_init(&priv->lock);
+               dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
+priv_exist_or_err:
+               mutex_unlock(&priv_lock);
+       }
+       if (IS_ERR(priv))
+               return (struct sg_table *)priv;
+
+       mutex_lock(&priv->lock);
+
+       if (priv->pin_count == 0) {
+               priv->attach = dma_buf_attach(dmabuf, dev);
+               if (IS_ERR(priv->attach)) {
+                       mutex_unlock(&priv->lock);
+                       return (struct sg_table *)priv->attach;
+               }
+
+               priv->sgt = dma_buf_map_attachment(priv->attach,
+                                                  DMA_BIDIRECTIONAL);
+               if (IS_ERR(priv->sgt)) {
+                       dma_buf_detach(dmabuf, priv->attach);
+                       mutex_unlock(&priv->lock);
+                       return priv->sgt;
+               }
+       }
+
+       priv->pin_count++;
+       mutex_unlock(&priv->lock);
+       return priv->sgt;
+}
+
+static void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
+                          struct sg_table *sgt)
+{
+       struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+       dma_addr_t dma_addr;
+
+       if (IS_ERR(priv) || !priv)
+               return;
+
+       mutex_lock(&priv->lock);
+       WARN_ON(priv->sgt != sgt);
+       priv->pin_count--;
+       WARN_ON(priv->pin_count < 0);
+       dma_addr = sg_dma_address(priv->sgt->sgl);
+       if (priv->pin_count == 0) {
+               dma_buf_unmap_attachment(priv->attach, priv->sgt,
+                                        DMA_BIDIRECTIONAL);
+               dma_buf_detach(dmabuf, priv->attach);
+       }
+       mutex_unlock(&priv->lock);
+}
+
+
+static void gk20a_get_comptags(struct device *dev,
+                              struct dma_buf *dmabuf,
+                              struct gk20a_comptags *comptags)
+{
+       struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+
+       if (!comptags)
+               return;
+
+       if (!priv) {
+               comptags->lines = 0;
+               comptags->offset = 0;
+               return;
+       }
+
+       *comptags = priv->comptags;
+}
+
+static int gk20a_alloc_comptags(struct device *dev,
+                               struct dma_buf *dmabuf,
+                               struct nvhost_allocator *allocator,
+                               int lines)
+{
+       struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+       u32 offset = 0;
+       int err;
+
+       if (!priv)
+               return -ENOSYS;
+
+       if (!lines)
+               return -EINVAL;
+
+       /* store the allocator so we can use it when we free the ctags */
+       priv->comptag_allocator = allocator;
+       err = allocator->alloc(allocator, &offset, lines);
+       if (!err) {
+               priv->comptags.lines = lines;
+               priv->comptags.offset = offset;
+       }
+       return err;
+}
+
+
+
+
 static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 {
        nvhost_dbg_fn("");
@@ -760,14 +911,14 @@ static int insert_mapped_buffer(struct rb_root *root,
 }
 
 static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
-                               struct rb_root *root, struct mem_handle *r,
+                               struct rb_root *root, struct dma_buf *dmabuf,
                                u32 kind)
 {
        struct rb_node *node = rb_first(root);
        while (node) {
                struct mapped_buffer_node *mapped_buffer =
                        container_of(node, struct mapped_buffer_node, node);
-               if (mapped_buffer->handle_ref == r &&
+               if (mapped_buffer->dmabuf == dmabuf &&
                    kind == mapped_buffer->kind)
                        return mapped_buffer;
                node = rb_next(&mapped_buffer->node);
@@ -1023,8 +1174,7 @@ static void __locked_gmmu_unmap(struct vm_gk20a *vm,
 }
 
 static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
-                                        struct mem_mgr *memmgr,
-                                        struct mem_handle *r,
+                                        struct dma_buf *dmabuf,
                                         u64 offset_align,
                                         u32 flags,
                                         int kind,
@@ -1037,13 +1187,15 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
        /* fall-back to default kind if no kind is provided */
        if (kind < 0) {
                u64 nvmap_param;
-               nvhost_memmgr_get_param(memmgr, r, NVMAP_HANDLE_PARAM_KIND,
+               nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
+                                       NVMAP_HANDLE_PARAM_KIND,
                                        &nvmap_param);
                kind = nvmap_param;
        }
 
-       mapped_buffer = find_mapped_buffer_reverse_locked(
-                                               &vm->mapped_buffers, r, kind);
+       mapped_buffer =
+               find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
+                                                 dmabuf, kind);
        if (!mapped_buffer)
                return 0;
 
@@ -1054,7 +1206,6 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
            mapped_buffer->addr != offset_align)
                return 0;
 
-       WARN_ON(mapped_buffer->memmgr != memmgr);
        BUG_ON(mapped_buffer->vm != vm);
 
        /* mark the buffer as used */
@@ -1068,14 +1219,10 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
                 * existing mapping here, we need to give back those
                 * refs once in order not to leak.
                 */
-               if (mapped_buffer->own_mem_ref) {
-                       nvhost_memmgr_put(mapped_buffer->memmgr,
-                                         mapped_buffer->handle_ref);
-                       nvhost_memmgr_put_mgr(mapped_buffer->memmgr);
-               } else
+               if (mapped_buffer->own_mem_ref)
+                       dma_buf_put(mapped_buffer->dmabuf);
+               else
                        mapped_buffer->own_mem_ref = true;
-
-               mapped_buffer->memmgr = memmgr;
        }
        kref_get(&mapped_buffer->ref);
 
@@ -1100,8 +1247,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
 }
 
 u64 gk20a_vm_map(struct vm_gk20a *vm,
-                       struct mem_mgr *memmgr,
-                       struct mem_handle *r,
+                       struct dma_buf *dmabuf,
                        u64 offset_align,
                        u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
                        int kind,
@@ -1118,12 +1264,12 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        u64 map_offset = 0;
        int err = 0;
        struct buffer_attrs bfr = {0};
-       struct nvhost_comptags comptags;
+       struct gk20a_comptags comptags;
 
        mutex_lock(&vm->update_gmmu_lock);
 
        /* check if this buffer is already mapped */
-       map_offset = gk20a_vm_map_duplicate_locked(vm, memmgr, r, offset_align,
+       map_offset = gk20a_vm_map_duplicate_locked(vm, dmabuf, offset_align,
                                                   flags, kind, sgt,
                                                   user_mapped, rw_flag);
        if (map_offset) {
@@ -1132,7 +1278,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        }
 
        /* pin buffer to get phys/iovmm addr */
-       bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, rw_flag);
+       bfr.sgt = gk20a_mm_pin(d, dmabuf);
        if (IS_ERR(bfr.sgt)) {
                /* Falling back to physical is actually possible
                 * here in many cases if we use 4K phys pages in the
@@ -1151,7 +1297,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
        if (kind < 0) {
                u64 value;
-               err = nvhost_memmgr_get_param(memmgr, r,
+               err = nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
                                              NVMAP_HANDLE_PARAM_KIND,
                                              &value);
                if (err) {
@@ -1163,7 +1309,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        }
 
        bfr.kind_v = kind;
-       bfr.size = nvhost_memmgr_size(r);
+       bfr.size = dmabuf->size;
        bfr.align = 1 << __ffs((u64)sg_dma_address(bfr.sgt->sgl));
        bfr.pgsz_idx = -1;
 
@@ -1214,19 +1360,19 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        if (!vm->enable_ctag)
                bfr.ctag_lines = 0;
 
-       nvhost_memmgr_get_comptags(d, r, &comptags);
+       gk20a_get_comptags(d, dmabuf, &comptags);
 
        if (bfr.ctag_lines && !comptags.lines) {
                /* allocate compression resources if needed */
-               err = nvhost_memmgr_alloc_comptags(d, r,
-                               ctag_allocator, bfr.ctag_lines);
+               err = gk20a_alloc_comptags(d, dmabuf, ctag_allocator,
+                                          bfr.ctag_lines);
                if (err) {
                        /* ok to fall back here if we ran out */
                        /* TBD: we can partially alloc ctags as well... */
                        bfr.ctag_lines = bfr.ctag_offset = 0;
                        bfr.kind_v = bfr.uc_kind_v;
                } else {
-                       nvhost_memmgr_get_comptags(d, r, &comptags);
+                       gk20a_get_comptags(d, dmabuf, &comptags);
 
                        /* init/clear the ctag buffer */
                        g->ops.ltc.clear_comptags(g,
@@ -1285,8 +1431,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
                nvhost_warn(d, "oom allocating tracking buffer");
                goto clean_up;
        }
-       mapped_buffer->memmgr      = memmgr;
-       mapped_buffer->handle_ref  = r;
+       mapped_buffer->dmabuf      = dmabuf;
        mapped_buffer->sgt         = bfr.sgt;
        mapped_buffer->addr        = map_offset;
        mapped_buffer->size        = bfr.size;
@@ -1342,7 +1487,7 @@ clean_up:
        if (va_allocated)
                gk20a_vm_free_va(vm, map_offset, bfr.size, bfr.pgsz_idx);
        if (!IS_ERR(bfr.sgt))
-               nvhost_memmgr_unpin(memmgr, r, d, bfr.sgt);
+               gk20a_mm_unpin(d, dmabuf, bfr.sgt);
 
        mutex_unlock(&vm->update_gmmu_lock);
        nvhost_dbg_info("err=%d\n", err);
@@ -1823,10 +1968,8 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
                   hi32(mapped_buffer->addr), lo32(mapped_buffer->addr),
                   mapped_buffer->own_mem_ref);
 
-       nvhost_memmgr_unpin(mapped_buffer->memmgr,
-                           mapped_buffer->handle_ref,
-                           dev_from_vm(vm),
-                           mapped_buffer->sgt);
+       gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
+                      mapped_buffer->sgt);
 
        /* remove from mapped buffer tree and remove list, free */
        rb_erase(&mapped_buffer->node, &vm->mapped_buffers);
@@ -1837,11 +1980,8 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
        if (mapped_buffer->user_mapped)
                vm->num_user_mapped_buffers--;
 
-       if (mapped_buffer->own_mem_ref) {
-               nvhost_memmgr_put(mapped_buffer->memmgr,
-                                 mapped_buffer->handle_ref);
-               nvhost_memmgr_put_mgr(mapped_buffer->memmgr);
-       }
+       if (mapped_buffer->own_mem_ref)
+               dma_buf_put(mapped_buffer->dmabuf);
 
        kfree(mapped_buffer);
 
@@ -2223,40 +2363,29 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 }
 
 int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
-                       int memmgr_fd,
-                       ulong mem_id,
+                       int dmabuf_fd,
                        u64 *offset_align,
                        u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
                        int kind)
 {
        int err = 0;
        struct vm_gk20a *vm = as_share->vm;
-       struct gk20a *g = gk20a_from_vm(vm);
-       struct mem_mgr *memmgr;
-       struct mem_handle *r;
+       struct dma_buf *dmabuf;
        u64 ret_va;
 
        nvhost_dbg_fn("");
 
-       /* get ref to the memmgr (released on unmap_locked) */
-       memmgr = nvhost_memmgr_get_mgr_file(memmgr_fd);
-       if (IS_ERR(memmgr))
-               return 0;
-
        /* get ref to the mem handle (released on unmap_locked) */
-       r = nvhost_memmgr_get(memmgr, mem_id, g->dev);
-       if (!r) {
-               nvhost_memmgr_put_mgr(memmgr);
+       dmabuf = dma_buf_get(dmabuf_fd);
+       if (!dmabuf)
                return 0;
-       }
 
-       ret_va = gk20a_vm_map(vm, memmgr, r, *offset_align,
+       ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
                        flags, kind, NULL, true,
                        mem_flag_none);
        *offset_align = ret_va;
        if (!ret_va) {
-               nvhost_memmgr_put(memmgr, r);
-               nvhost_memmgr_put_mgr(memmgr);
+               dma_buf_put(dmabuf);
                err = -EINVAL;
        }
 
@@ -2709,7 +2838,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 
 
 int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
-                        struct mem_mgr **mgr, struct mem_handle **r,
+                        struct dma_buf **dmabuf,
                         u64 *offset)
 {
        struct mapped_buffer_node *mapped_buffer;
@@ -2725,8 +2854,7 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
                return -EINVAL;
        }
 
-       *mgr = mapped_buffer->memmgr;
-       *r = mapped_buffer->handle_ref;
+       *dmabuf = mapped_buffer->dmabuf;
        *offset = gpu_va - mapped_buffer->addr;
 
        mutex_unlock(&vm->update_gmmu_lock);
diff --git a/drivers/video/tegra/host/gk20a/mm_gk20a.h b/drivers/video/tegra/host/gk20a/mm_gk20a.h
index add15121a6a9df74e350f263fc99c1b72b6c698c..59aa1a89ce3c3ac1529db9a91f54dfa2e126a288 100644
@@ -41,7 +41,7 @@
 #define NV_GMMU_VA_IS_UPPER(x) ((x) >= ((u64)0x1 << (NV_GMMU_VA_RANGE-1)))
 
 struct mem_desc {
-       struct mem_handle *ref;
+       struct dma_buf *ref;
        struct sg_table *sgt;
        u32 size;
 };
@@ -184,8 +184,7 @@ struct mapped_buffer_node {
        struct vm_reserved_va_node *va_node;
        u64 addr;
        u64 size;
-       struct mem_mgr *memmgr;
-       struct mem_handle *handle_ref;
+       struct dma_buf *dmabuf;
        struct sg_table *sgt;
        struct kref ref;
        u32 user_mapped;
@@ -295,8 +294,6 @@ int gk20a_mm_init(struct mm_gk20a *mm);
 #define gk20a_from_mm(mm) ((mm)->g)
 #define gk20a_from_vm(vm) ((vm)->mm->g)
 
-#define mem_mgr_from_mm(mm) (gk20a_from_mm(mm)->host->memmgr)
-#define mem_mgr_from_vm(vm) (gk20a_from_vm(vm)->host->memmgr)
 #define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)
 
 #define DEFAULT_ALLOC_FLAGS (mem_mgr_flag_uncacheable)
@@ -367,14 +364,13 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
                int rw_flag);
 
 u64 gk20a_vm_map(struct vm_gk20a *vm,
-                struct mem_mgr *memmgr,
-                struct mem_handle *r,
-                u64 offset_align,
-                u32 flags /*NVHOST_MAP_BUFFER_FLAGS_*/,
-                int kind,
-                struct sg_table **sgt,
-                bool user_mapped,
-                int rw_flag);
+               struct dma_buf *dmabuf,
+               u64 offset_align,
+               u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
+               int kind,
+               struct sg_table **sgt,
+               bool user_mapped,
+               int rw_flag);
 
 /* unmap handle from kernel */
 void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset);
@@ -394,7 +390,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm);
 
 /* find buffer corresponding to va */
 int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
-                        struct mem_mgr **memmgr, struct mem_handle **r,
+                        struct dma_buf **dmabuf,
                         u64 *offset);
 
 void gk20a_vm_get(struct vm_gk20a *vm);
@@ -412,10 +408,9 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
                          struct channel_gk20a *ch);
 int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
-                       int memmgr_fd,
-                       ulong mem_id,
+                       int dmabuf_fd,
                        u64 *offset_align,
-                       u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
+                       u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
                        int kind);
 int gk20a_vm_unmap_buffer(struct gk20a_as_share *, u64 offset);
 
diff --git a/drivers/video/tegra/host/nvhost_hwctx.h b/drivers/video/tegra/host/nvhost_hwctx.h
index 6b3d0bda2519b66e8f3bab70217411edd4d31464..f29ad51cead313054610ec00dc08943665b6d125 100644
@@ -27,6 +27,7 @@
 struct nvhost_channel;
 struct nvhost_cdma;
 struct mem_mgr;
+struct dma_buf;
 struct nvhost_dbg_session;
 
 struct nvhost_hwctx {
diff --git a/drivers/video/tegra/host/nvhost_memmgr.c b/drivers/video/tegra/host/nvhost_memmgr.c
index 0a7a1fd911c3342c2d9868bdd8d5b74128b07e65..be490b8270fc6af8696a6bf366c002b62cf6f4dc 100644
@@ -87,15 +87,15 @@ struct mem_mgr *nvhost_memmgr_get_mgr_file(int fd)
        return mgr;
 }
 
-struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *mgr,
-       size_t size, size_t align, int flags, unsigned int heap_mask)
+struct mem_handle *nvhost_memmgr_alloc(size_t size, size_t align,
+                                      int flags, unsigned int heap_mask)
 {
        struct mem_handle *h = NULL;
 #ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
-       h = nvhost_nvmap_alloc(mgr, size, align, flags, heap_mask);
+       h = nvhost_nvmap_alloc(size, align, flags, heap_mask);
 #else
 #ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
-       h = nvhost_dmabuf_alloc(mgr, size, align, flags);
+       h = nvhost_dmabuf_alloc(size, align, flags);
 #endif
 #endif
 
@@ -220,8 +220,7 @@ void nvhost_memmgr_munmap(struct mem_handle *handle, void *addr)
        }
 }
 
-int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
-                           struct mem_handle *mem_handle,
+int nvhost_memmgr_get_param(struct mem_handle *mem_handle,
                            u32 param, u64 *result)
 {
 #ifndef CONFIG_ARM64
@@ -231,13 +230,13 @@ int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
 #endif
 #ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
        case mem_mgr_type_nvmap:
-               return nvhost_nvmap_get_param(mem_mgr, mem_handle,
+               return nvhost_nvmap_get_param(mem_handle,
                                              param, result);
                break;
 #endif
 #ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
        case mem_mgr_type_dmabuf:
-               return nvhost_dmabuf_get_param(mem_mgr, mem_handle,
+               return nvhost_dmabuf_get_param(mem_handle,
                                               param, result);
                break;
 #endif
diff --git a/drivers/video/tegra/host/nvhost_memmgr.h b/drivers/video/tegra/host/nvhost_memmgr.h
index 7a4d329404d8285856e89d2b422908a615d24815..764694198321f497a5fe81cd847275a455ba9ee5 100644
@@ -60,9 +60,8 @@ struct mem_mgr *nvhost_memmgr_alloc_mgr(void);
 void nvhost_memmgr_put_mgr(struct mem_mgr *);
 struct mem_mgr *nvhost_memmgr_get_mgr(struct mem_mgr *);
 struct mem_mgr *nvhost_memmgr_get_mgr_file(int fd);
-struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *,
-               size_t size, size_t align,
-               int flags, unsigned int heap_mask);
+struct mem_handle *nvhost_memmgr_alloc(size_t size, size_t align,
+                                      int flags, unsigned int heap_mask);
 struct mem_handle *nvhost_memmgr_get(struct mem_mgr *,
                ulong id, struct platform_device *dev);
 void nvhost_memmgr_put(struct mem_mgr *mgr, struct mem_handle *handle);
@@ -85,8 +84,7 @@ void nvhost_memmgr_free_sg_table(struct mem_mgr *mgr,
 static inline int nvhost_memmgr_type(ulong id) { return id & MEMMGR_TYPE_MASK; }
 static inline int nvhost_memmgr_id(ulong id) { return id & MEMMGR_ID_MASK; }
 
-int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
-                           struct mem_handle *mem_handle,
+int nvhost_memmgr_get_param(struct mem_handle *mem_handle,
                            u32 param, u64 *result);
 
 void nvhost_memmgr_get_comptags(struct device *dev,
diff --git a/drivers/video/tegra/host/nvmap.c b/drivers/video/tegra/host/nvmap.c
index 6f9b2611dde2b34fa05fbf70b4efa456fed6469b..118379b648decd72a162a1b1449a9b793c982f36 100644
@@ -67,8 +67,8 @@ struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd)
        return (struct mem_mgr *)0x1;
 }
 
-struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags, unsigned int heap_mask)
+struct mem_handle *nvhost_nvmap_alloc(size_t size, size_t align,
+                                     int flags, unsigned int heap_mask)
 {
        return (struct mem_handle *)nvmap_alloc_dmabuf(
                        size, align, flags, heap_mask);
@@ -266,8 +266,8 @@ struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr,
        return (struct mem_handle *)dma_buf_get(id);
 }
 
-int nvhost_nvmap_get_param(struct mem_mgr *mgr, struct mem_handle *handle,
-               u32 param, u64 *result)
+int nvhost_nvmap_get_param(struct mem_handle *handle,
+                          u32 param, u64 *result)
 {
        return nvmap_get_dmabuf_param(
                        (struct dma_buf *)handle,
diff --git a/drivers/video/tegra/host/nvmap.h b/drivers/video/tegra/host/nvmap.h
index a11e7f24e7307a997ec483ee405d842fb3ec4374..f96643e21dd5df478f124785e85c84ad29c17ad4 100644
@@ -31,8 +31,8 @@ struct mem_mgr *nvhost_nvmap_alloc_mgr(void);
 void nvhost_nvmap_put_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_nvmap_get_mgr(struct mem_mgr *mgr);
 struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd);
-struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags, unsigned int heap_flags);
+struct mem_handle *nvhost_nvmap_alloc(size_t size, size_t align,
+                                     int flags, unsigned int heap_flags);
 void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle);
 struct sg_table *nvhost_nvmap_pin(struct mem_mgr *mgr,
                struct mem_handle *handle, struct device *dev, int rw_flag);
@@ -45,7 +45,7 @@ void nvhost_nvmap_kunmap(struct mem_handle *handle, unsigned int pagenum,
                void *addr);
 struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr,
                ulong id, struct platform_device *dev);
-int nvhost_nvmap_get_param(struct mem_mgr *mgr, struct mem_handle *handle,
+int nvhost_nvmap_get_param(struct mem_handle *handle,
                           u32 param, u64 *result);
 phys_addr_t nvhost_nvmap_get_addr_from_id(ulong id);