rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: use dma_buf for nvhost_job
author: Deepak Nibade <dnibade@nvidia.com>
Thu, 6 Feb 2014 08:16:24 +0000 (13:46 +0530)
committer: Terje Bergstrom <tbergstrom@nvidia.com>
Tue, 11 Feb 2014 08:57:52 +0000 (00:57 -0800)
Bug 1450489

Change-Id: Id414830e418a577e913d313bd6cd7ee7b7a4a1a6
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/364303
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
drivers/video/tegra/host/bus_client.c
drivers/video/tegra/host/host1x/host1x_channel.c
drivers/video/tegra/host/host1x/host1x_debug.c
drivers/video/tegra/host/nvhost_job.c
drivers/video/tegra/host/nvhost_job.h
drivers/video/tegra/host/nvhost_memmgr.c
drivers/video/tegra/host/nvhost_memmgr.h

index c82135523ba44b4d2b8e708ef6494f9e440f8903..15c7b79c11d2daeb88562b3b69f929fed942e71b 100644 (file)
@@ -303,8 +303,7 @@ static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
                        num_cmdbufs,
                        num_relocs,
                        num_waitchks,
-                       num_syncpt_incrs,
-                       ctx->memmgr);
+                       num_syncpt_incrs);
        if (!job)
                return -ENOMEM;
 
index c0cae18e62c0ce049151a6817716dcb8fff39c57..ebcacbd843f4865962e219b3287c6ede8d455f3d 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra Graphics Host Channel
  *
- * Copyright (c) 2010-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2010-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -233,6 +233,7 @@ static void submit_gathers(struct nvhost_job *job)
 {
        u32 class_id = 0;
        int i;
+       void *cpuva;
 
        /* push user gathers */
        for (i = 0 ; i < job->num_gathers; i++) {
@@ -259,11 +260,14 @@ static void submit_gathers(struct nvhost_job *job)
                else
                        op1 = nvhost_opcode_gather(g->words);
                op2 = job->gathers[i].mem_base + g->offset;
-               nvhost_cdma_push_gather(&job->ch->cdma,
-                               job->memmgr,
-                               g->ref,
+
+               cpuva = dma_buf_vmap(g->buf);
+               _nvhost_cdma_push_gather(&job->ch->cdma,
+                               cpuva,
+                               job->gathers[i].mem_base,
                                g->offset,
                                op1, op2);
+               dma_buf_vunmap(g->buf, cpuva);
        }
 }
 
@@ -410,8 +414,7 @@ static int host1x_save_context(struct nvhost_channel *ch)
                goto done;
        }
 
-       job = nvhost_job_alloc(ch, hwctx_to_save, 0, 0, 0, 1,
-                       nvhost_get_host(ch->dev)->memmgr);
+       job = nvhost_job_alloc(ch, hwctx_to_save, 0, 0, 0, 1);
        if (!job) {
                err = -ENOMEM;
                mutex_unlock(&ch->submitlock);
index b1719988b8560998b23c3594af5df698d1db57e4..44139d7b2d66b552aa9bdaf3792f739474ba8791 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2010 Google, Inc.
  * Author: Erik Gilling <konkers@android.com>
  *
- * Copyright (c) 2011-2013, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA Corporation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -93,7 +93,7 @@ static void show_channel_gathers(struct output *o, struct nvhost_cdma *cdma)
 
        for (i = 0; i < job->num_gathers; i++) {
                struct nvhost_job_gather *g = &job->gathers[i];
-               u32 *mapped = nvhost_memmgr_mmap(g->ref);
+               u32 *mapped = dma_buf_vmap(g->buf);
                if (!mapped) {
                        nvhost_debug_output(o, "[could not mmap]\n");
                        continue;
@@ -105,7 +105,7 @@ static void show_channel_gathers(struct output *o, struct nvhost_cdma *cdma)
 
                do_show_channel_gather(o, g->mem_base + g->offset,
                                g->words, cdma, g->mem_base, mapped);
-               nvhost_memmgr_munmap(g->ref, mapped);
+               dma_buf_vunmap(g->buf, mapped);
        }
 }
 
index e5215ea768d94d6af3d4fbba6d9da6a018dfac47..4616b07c419194bb8da5ba5c26ee5873983ad76f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra Graphics Host Job
  *
- * Copyright (c) 2010-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2010-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -22,6 +22,7 @@
 #include <linux/kref.h>
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/sort.h>
 #include <linux/scatterlist.h>
 #include <trace/events/nvhost.h>
 #include "nvhost_channel.h"
@@ -29,7 +30,6 @@
 #include "nvhost_hwctx.h"
 #include "nvhost_syncpt.h"
 #include "dev.h"
-#include "nvhost_memmgr.h"
 #include "chip_support.h"
 
 /* Magic to use to fill freed handle slots */
@@ -48,7 +48,7 @@ static size_t job_size(u32 num_cmdbufs, u32 num_relocs, u32 num_waitchks,
                        + num_waitchks * sizeof(struct nvhost_waitchk)
                        + num_cmdbufs * sizeof(struct nvhost_job_gather)
                        + num_unpins * sizeof(dma_addr_t)
-                       + num_unpins * sizeof(struct nvhost_memmgr_pinid)
+                       + num_unpins * sizeof(struct nvhost_pinid)
                        + num_syncpts * sizeof(struct nvhost_job_syncpt);
 
        if(total > ULONG_MAX)
@@ -85,7 +85,7 @@ static void init_fields(struct nvhost_job *job,
        job->addr_phys = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(dma_addr_t);
        job->pin_ids = num_unpins ? mem : NULL;
-       mem += num_unpins * sizeof(struct nvhost_memmgr_pinid);
+       mem += num_unpins * sizeof(struct nvhost_pinid);
        job->sp = num_syncpts ? mem : NULL;
 
        job->reloc_addr_phys = job->addr_phys;
@@ -95,7 +95,7 @@ static void init_fields(struct nvhost_job *job,
 struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
                struct nvhost_hwctx *hwctx,
                int num_cmdbufs, int num_relocs, int num_waitchks,
-               int num_syncpts, struct mem_mgr *memmgr)
+               int num_syncpts)
 {
        struct nvhost_job *job = NULL;
        size_t size =
@@ -112,7 +112,6 @@ struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
        job->hwctx = hwctx;
        if (hwctx)
                hwctx->h->get(hwctx);
-       job->memmgr = memmgr ? nvhost_memmgr_get_mgr(memmgr) : NULL;
 
        init_fields(job, num_cmdbufs, num_relocs, num_waitchks, num_syncpts);
 
@@ -132,8 +131,6 @@ static void job_free(struct kref *ref)
                job->hwctxref->h->put(job->hwctxref);
        if (job->hwctx)
                job->hwctx->h->put(job->hwctx);
-       if (job->memmgr)
-               nvhost_memmgr_put_mgr(job->memmgr);
        vfree(job);
 }
 
@@ -174,7 +171,7 @@ void nvhost_job_add_gather(struct nvhost_job *job,
  * avoid a wrap condition in the HW).
  */
 static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
-               u32 patch_mem, struct mem_handle *h)
+               u32 patch_mem, struct dma_buf *buf)
 {
        int i;
 
@@ -213,13 +210,13 @@ static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
                            nvhost_syncpt_read_min(sp, wait->syncpt_id));
 
                        /* patch the wait */
-                       patch_addr = nvhost_memmgr_kmap(h,
+                       patch_addr = dma_buf_kmap(buf,
                                        wait->offset >> PAGE_SHIFT);
                        if (patch_addr) {
                                nvhost_syncpt_patch_wait(sp,
                                        (patch_addr +
                                         (wait->offset & ~PAGE_MASK)));
-                               nvhost_memmgr_kunmap(h,
+                               dma_buf_kunmap(buf,
                                                wait->offset >> PAGE_SHIFT,
                                                patch_addr);
                        } else {
@@ -232,6 +229,65 @@ static int do_waitchks(struct nvhost_job *job, struct nvhost_syncpt *sp,
        return 0;
 }
 
+static int id_cmp(const void *_id1, const void *_id2)
+{
+       u32 id1 = ((struct nvhost_pinid *)_id1)->id;
+       u32 id2 = ((struct nvhost_pinid *)_id2)->id;
+
+       if (id1 < id2)
+               return -1;
+       if (id1 > id2)
+               return 1;
+
+       return 0;
+}
+
+static int pin_array_ids(struct platform_device *dev,
+               struct nvhost_pinid *ids,
+               dma_addr_t *phys_addr,
+               u32 count,
+               struct nvhost_job_unpin *unpin_data)
+{
+       int i, pin_count = 0;
+       struct sg_table *sgt;
+       struct dma_buf *buf;
+       struct dma_buf_attachment *attach;
+       u32 prev_id = 0;
+       dma_addr_t prev_addr = 0;
+
+       for (i = 0; i < count; i++)
+               ids[i].index = i;
+
+       sort(ids, count, sizeof(*ids), id_cmp, NULL);
+
+       for (i = 0; i < count; i++) {
+               if (ids[i].id == prev_id) {
+                       phys_addr[ids[i].index] = prev_addr;
+                       continue;
+               }
+
+               buf = dma_buf_get(ids[i].id);
+               if (IS_ERR(buf))
+                       return -EINVAL;
+
+               attach = dma_buf_attach(buf, &dev->dev);
+               if (IS_ERR(attach))
+                       return PTR_ERR(attach);
+
+               sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+               if (IS_ERR(sgt))
+                       return PTR_ERR(sgt);
+
+               phys_addr[ids[i].index] = sg_dma_address(sgt->sgl);
+               unpin_data[pin_count].buf = buf;
+               unpin_data[pin_count].attach = attach;
+               unpin_data[pin_count++].sgt = sgt;
+
+               prev_id = ids[i].id;
+               prev_addr = phys_addr[ids[i].index];
+       }
+       return pin_count;
+}
 
 static int pin_job_mem(struct nvhost_job *job)
 {
@@ -252,7 +308,7 @@ static int pin_job_mem(struct nvhost_job *job)
        }
 
        /* validate array and pin unique ids, get refs for unpinning */
-       result = nvhost_memmgr_pin_array_ids(job->memmgr, job->ch->dev,
+       result = pin_array_ids(job->ch->dev,
                job->pin_ids, job->addr_phys,
                count,
                job->unpins);
@@ -264,7 +320,7 @@ static int pin_job_mem(struct nvhost_job *job)
 }
 
 static int do_relocs(struct nvhost_job *job,
-               u32 cmdbuf_mem, struct mem_handle *h)
+               u32 cmdbuf_mem, struct dma_buf *buf)
 {
        int i = 0;
        int last_page = -1;
@@ -283,10 +339,10 @@ static int do_relocs(struct nvhost_job *job,
 
                if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
                        if (cmdbuf_page_addr)
-                               nvhost_memmgr_kunmap(h,
-                                               last_page, cmdbuf_page_addr);
+                               dma_buf_kunmap(buf, last_page,
+                                               cmdbuf_page_addr);
 
-                       cmdbuf_page_addr = nvhost_memmgr_kmap(h,
+                       cmdbuf_page_addr = dma_buf_kmap(buf,
                                        reloc->cmdbuf_offset >> PAGE_SHIFT);
                        last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
 
@@ -322,7 +378,7 @@ static int do_relocs(struct nvhost_job *job,
        }
 
        if (cmdbuf_page_addr)
-               nvhost_memmgr_kunmap(h, last_page, cmdbuf_page_addr);
+               dma_buf_kunmap(buf, last_page, cmdbuf_page_addr);
 
        return 0;
 }
@@ -354,12 +410,11 @@ int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
                struct nvhost_job_gather *g = &job->gathers[i];
 
                /* process each gather mem only once */
-               if (!g->ref) {
-                       g->ref = nvhost_memmgr_get(job->memmgr,
-                               g->mem_id, job->ch->dev);
-                       if (IS_ERR(g->ref)) {
-                               err = PTR_ERR(g->ref);
-                               g->ref = NULL;
+               if (!g->buf) {
+                       g->buf = dma_buf_get(g->mem_id);
+                       if (IS_ERR(g->buf)) {
+                               err = PTR_ERR(g->buf);
+                               g->buf = NULL;
                                break;
                        }
 
@@ -368,16 +423,16 @@ int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
                        for (j = 0; j < job->num_gathers; j++) {
                                struct nvhost_job_gather *tmp =
                                        &job->gathers[j];
-                               if (!tmp->ref && tmp->mem_id == g->mem_id) {
-                                       tmp->ref = g->ref;
+                               if (!tmp->buf && tmp->mem_id == g->mem_id) {
+                                       tmp->buf = g->buf;
                                        tmp->mem_base = g->mem_base;
                                }
                        }
-                       err = do_relocs(job, g->mem_id,  g->ref);
+                       err = do_relocs(job, g->mem_id,  g->buf);
                        if (!err)
                                err = do_waitchks(job, sp,
-                                               g->mem_id, g->ref);
-                       nvhost_memmgr_put(job->memmgr, g->ref);
+                                               g->mem_id, g->buf);
+                       dma_buf_put(g->buf);
                        if (err)
                                break;
                }
@@ -386,18 +441,16 @@ fail:
        return err;
 }
 
-/*
- * Fast unpin, only for nvmap
- */
 void nvhost_job_unpin(struct nvhost_job *job)
 {
        int i;
 
        for (i = 0; i < job->num_unpins; i++) {
                struct nvhost_job_unpin *unpin = &job->unpins[i];
-               nvhost_memmgr_unpin(job->memmgr, unpin->h,
-                               &job->ch->dev->dev, unpin->mem);
-               nvhost_memmgr_put(job->memmgr, unpin->h);
+               dma_buf_unmap_attachment(unpin->attach, unpin->sgt,
+                                               DMA_BIDIRECTIONAL);
+               dma_buf_detach(unpin->buf, unpin->attach);
+               dma_buf_put(unpin->buf);
        }
        job->num_unpins = 0;
 }
index ae490d39352a9b81b656ea1bd4b96070f7d565ee..cb37e78a7485648c653dea2fe66d3c4ddedaa19a 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra Graphics Host Interrupt Management
  *
- * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -23,6 +23,7 @@
 
 #include <linux/nvhost_ioctl.h>
 #include <linux/kref.h>
+#include <linux/dma-buf.h>
 
 struct nvhost_channel;
 struct nvhost_hwctx;
@@ -37,7 +38,7 @@ struct nvhost_job_gather {
        ulong mem_id;
        u32 class_id;
        int offset;
-       struct mem_handle *ref;
+       struct dma_buf *buf;
        int pre_fence;
 };
 
@@ -48,6 +49,17 @@ struct nvhost_job_syncpt {
        u32 waitbase;
 };
 
+struct nvhost_pinid {
+       u32 id;
+       u32 index;
+};
+
+struct nvhost_job_unpin {
+       struct sg_table *sgt;
+       struct dma_buf *buf;
+       struct dma_buf_attachment *attach;
+};
+
 /*
  * Each submit is tracked as a nvhost_job.
  */
@@ -65,9 +77,6 @@ struct nvhost_job {
        struct nvhost_hwctx *hwctx;
        int clientid;
 
-       /* Nvmap to be used for pinning & unpinning memory */
-       struct mem_mgr *memmgr;
-
        /* Gathers and their memory */
        struct nvhost_job_gather *gathers;
        int num_gathers;
@@ -83,7 +92,7 @@ struct nvhost_job {
        struct nvhost_job_unpin *unpins;
        int num_unpins;
 
-       struct nvhost_memmgr_pinid *pin_ids;
+       struct nvhost_pinid *pin_ids;
        dma_addr_t *addr_phys;
        dma_addr_t *gather_addr_phys;
        dma_addr_t *reloc_addr_phys;
@@ -125,7 +134,7 @@ struct nvhost_job {
 struct nvhost_job *nvhost_job_alloc(struct nvhost_channel *ch,
                struct nvhost_hwctx *hwctx,
                int num_cmdbufs, int num_relocs, int num_waitchks,
-               int num_syncpts, struct mem_mgr *memmgr);
+               int num_syncpts);
 
 /*
  * Add a gather to a job.
index 98e09f39d093fee2056ccfc92d44f2396a1080f4..71a8a11aef12ce3ecdb111800f74684262075c80 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra Graphics Host Memory Management Abstraction
  *
- * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,6 @@
 #ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
 #include "dmabuf.h"
 #endif
-#include <linux/sort.h>
 #include "chip_support.h"
 
 struct mem_mgr *nvhost_memmgr_alloc_mgr(void)
@@ -286,61 +285,6 @@ void nvhost_memmgr_kunmap(struct mem_handle *handle, unsigned int pagenum,
        }
 }
 
-static int id_cmp(const void *_id1, const void *_id2)
-{
-       u32 id1 = ((struct nvhost_memmgr_pinid *)_id1)->id;
-       u32 id2 = ((struct nvhost_memmgr_pinid *)_id2)->id;
-
-       if (id1 < id2)
-               return -1;
-       if (id1 > id2)
-               return 1;
-
-       return 0;
-}
-
-int nvhost_memmgr_pin_array_ids(struct mem_mgr *mgr,
-               struct platform_device *dev,
-               struct nvhost_memmgr_pinid *ids,
-               dma_addr_t *phys_addr,
-               u32 count,
-               struct nvhost_job_unpin *unpin_data)
-{
-       int i, pin_count = 0;
-       struct sg_table *sgt;
-       struct mem_handle *h;
-       u32 prev_id = 0;
-       dma_addr_t prev_addr = 0;
-
-       for (i = 0; i < count; i++)
-               ids[i].index = i;
-
-       sort(ids, count, sizeof(*ids), id_cmp, NULL);
-
-       for (i = 0; i < count; i++) {
-               if (ids[i].id == prev_id) {
-                       phys_addr[ids[i].index] = prev_addr;
-                       continue;
-               }
-
-               h = nvhost_memmgr_get(mgr, ids[i].id, dev);
-               if (IS_ERR(h))
-                       return -EINVAL;
-
-               sgt = nvhost_memmgr_pin(mgr, h, &dev->dev, mem_flag_none);
-               if (IS_ERR(sgt))
-                       return PTR_ERR(sgt);
-
-               phys_addr[ids[i].index] = nvhost_memmgr_dma_addr(sgt);
-               unpin_data[pin_count].h = h;
-               unpin_data[pin_count++].mem = sgt;
-
-               prev_id = ids[i].id;
-               prev_addr = phys_addr[ids[i].index];
-       }
-       return pin_count;
-}
-
 struct sg_table *nvhost_memmgr_sg_table(struct mem_mgr *mgr,
                struct mem_handle *handle)
 {
index fee2d650c8c33bd93c07fc8b2094f109db8cece6..03abc6933f6a97ef1dc7370f4a83d671ca6d2a77 100644 (file)
@@ -36,16 +36,6 @@ struct nvhost_comptags {
        u32 lines;
 };
 
-struct nvhost_job_unpin {
-       struct mem_handle *h;
-       struct sg_table *mem;
-};
-
-struct nvhost_memmgr_pinid {
-       u32 id;
-       u32 index;
-};
-
 enum mem_mgr_flag {
        mem_mgr_flag_uncacheable = 0,
        mem_mgr_flag_write_combine = 1,
@@ -95,12 +85,6 @@ void nvhost_memmgr_free_sg_table(struct mem_mgr *mgr,
 static inline int nvhost_memmgr_type(ulong id) { return id & MEMMGR_TYPE_MASK; }
 static inline int nvhost_memmgr_id(ulong id) { return id & MEMMGR_ID_MASK; }
 
-int nvhost_memmgr_pin_array_ids(struct mem_mgr *mgr,
-               struct platform_device *dev,
-               struct nvhost_memmgr_pinid *ids,
-               dma_addr_t *phys_addr,
-               u32 count,
-               struct nvhost_job_unpin *unpin_data);
 int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
                            struct mem_handle *mem_handle,
                            u32 param, u64 *result);