video: tegra: host: Support explicit buffer map
author     Arto Merilainen <amerilainen@nvidia.com>
           Sun, 2 Nov 2014 11:06:53 +0000 (13:06 +0200)
committer  Arto Merilainen <amerilainen@nvidia.com>
           Fri, 14 Nov 2014 07:10:21 +0000 (23:10 -0800)
Currently nvhost supports only implicit mapping: data and command
buffers are mapped at submit time, and command buffers are patched
with data buffer addresses. However, this operation is expensive
because the command buffers need to be mapped into kernel address
space for patching.

This patch adds support for explicit buffer mapping. This allows
using the memory addresses directly inside command buffers, making
relocations themselves unnecessary.

Bug 1573309

Change-Id: I64c0f237d3f37268d2fb9d1b0e4574e4c870742f
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/592375
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
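
For illustration only (not part of this change): a minimal userspace
sketch of how the new ioctls could be driven. The channel fd, the
dma-buf fd, the helper names, and the <linux/nvhost_ioctl.h> include
path are assumptions; the structures and ioctl numbers are taken from
the header hunk below.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nvhost_ioctl.h>

/* Hypothetical helper: map one dma-buf into the channel VM and return
 * the address at which the device sees it. Assumes channel_fd is an
 * open nvhost channel fd and dmabuf_fd a valid dma-buf fd. */
static int map_one_buffer(int channel_fd, int dmabuf_fd, __u64 *address)
{
	struct nvhost_channel_buffer buf;
	struct nvhost_channel_map_buffer_args args;

	memset(&buf, 0, sizeof(buf));	/* reserved fields must be 0 */
	buf.dmabuf_fd = dmabuf_fd;

	memset(&args, 0, sizeof(args));
	args.num_buffers = 1;
	args.table_address = (__u64)(uintptr_t)&buf;

	if (ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_MAP_BUFFER, &args) < 0)
		return -1;

	*address = buf.address;		/* out: device view of the buffer */
	return 0;
}

/* Hypothetical counterpart: drop the mapping made above. Only the
 * dmabuf_fd field is consulted; reserved fields must stay zero. */
static int unmap_one_buffer(int channel_fd, int dmabuf_fd)
{
	struct nvhost_channel_buffer buf;
	struct nvhost_channel_unmap_buffer_args args;

	memset(&buf, 0, sizeof(buf));
	buf.dmabuf_fd = dmabuf_fd;

	memset(&args, 0, sizeof(args));
	args.num_buffers = 1;
	args.table_address = (__u64)(uintptr_t)&buf;

	return ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER, &args);
}

The address written back by the map call is the device view of the
buffer and can be placed directly into command buffers, which is what
makes relocations unnecessary.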

drivers/video/tegra/host/bus_client.c
include/linux/nvhost_ioctl.h

drivers/video/tegra/host/bus_client.c
index 36dcc55e144b5eb9ac4be580b24aafa6feb9f947..e7fe16a3b8061596e56799a9bcc6a22200995f72 100644
@@ -568,6 +568,161 @@ fail:
        return err;
 }
 
+static int nvhost_ioctl_channel_map_buffer(struct nvhost_channel_userctx *ctx,
+                               struct nvhost_channel_map_buffer_args *args)
+{
+       struct nvhost_channel_buffer __user *__buffers =
+               (struct nvhost_channel_buffer __user *)(uintptr_t)args->table_address;
+       struct nvhost_channel_buffer *buffers;
+       int err = 0, i = 0, num_handled_buffers = 0;
+       dma_addr_t addr = 0;
+
+       /* ensure that reserved fields are kept clear */
+       if (args->reserved)
+               return -EINVAL;
+
+       /* allocate room for buffers; kcalloc checks for overflow */
+       buffers = kcalloc(args->num_buffers, sizeof(*buffers), GFP_KERNEL);
+       if (!buffers) {
+               err = -ENOMEM;
+               goto err_alloc_buffers;
+       }
+
+       /* copy the buffers from user space */
+       if (copy_from_user(buffers, __buffers,
+                          sizeof(*__buffers) * args->num_buffers)) {
+               err = -EFAULT;
+               goto err_copy_from_user;
+       }
+
+       /* go through all the buffers */
+       for (i = 0, num_handled_buffers = 0;
+            i < args->num_buffers;
+            i++, num_handled_buffers++) {
+               struct dma_buf *dmabuf;
+
+               /* ensure that reserved fields are kept clear */
+               if (buffers[i].reserved0 ||
+                   buffers[i].reserved1[0] ||
+                   buffers[i].reserved1[1]) {
+                       err = -EINVAL;
+                       goto err_map_buffers;
+               }
+
+               /* validate dmabuf fd */
+               dmabuf = dma_buf_get(buffers[i].dmabuf_fd);
+               if (IS_ERR(dmabuf)) {
+                       err = PTR_ERR(dmabuf);
+                       goto err_map_buffers;
+               }
+
+               /* map it into context vm */
+               err = nvhost_vm_map_dmabuf(ctx->vm, dmabuf,
+                                          &addr);
+               buffers[i].address = (u64)addr;
+
+               /* not needed anymore, vm keeps reference now */
+               dma_buf_put(dmabuf);
+
+               if (err)
+                       goto err_map_buffers;
+       }
+
+       /* finally, copy the addresses back to userspace */
+       if (copy_to_user(__buffers, buffers,
+                        args->num_buffers * sizeof(*buffers))) {
+               err = -EFAULT;
+               goto err_copy_buffers_to_user;
+       }
+
+       kfree(buffers);
+       return err;
+
+err_copy_buffers_to_user:
+err_map_buffers:
+       for (i = 0; i < num_handled_buffers; i++) {
+               struct dma_buf *dmabuf;
+
+               dmabuf = dma_buf_get(buffers[i].dmabuf_fd);
+               if (IS_ERR(dmabuf))
+                       continue;
+               nvhost_vm_unmap_dmabuf(ctx->vm, dmabuf);
+               dma_buf_put(dmabuf);
+       }
+err_copy_from_user:
+       kfree(buffers);
+err_alloc_buffers:
+       return err;
+}
+
+static int nvhost_ioctl_channel_unmap_buffer(struct nvhost_channel_userctx *ctx,
+                               struct nvhost_channel_unmap_buffer_args *args)
+{
+       struct nvhost_channel_buffer __user *__buffers =
+               (struct nvhost_channel_buffer __user *)(uintptr_t)args->table_address;
+       struct nvhost_channel_buffer *buffers;
+       int err = 0, i = 0, num_handled_buffers = 0;
+       struct dma_buf **dmabufs;
+
+       /* ensure that reserved fields are kept clear */
+       if (args->reserved)
+               return -EINVAL;
+
+       /* allocate room for buffers; kcalloc checks for overflow */
+       buffers = kcalloc(args->num_buffers, sizeof(*buffers), GFP_KERNEL);
+       if (!buffers) {
+               err = -ENOMEM;
+               goto err_alloc_buffers;
+       }
+
+       /* allocate room for the dmabuf pointers */
+       dmabufs = kcalloc(args->num_buffers, sizeof(*dmabufs), GFP_KERNEL);
+       if (!dmabufs) {
+               err = -ENOMEM;
+               goto err_alloc_dmabufs;
+       }
+
+       /* copy the buffers from user space */
+       if (copy_from_user(buffers, __buffers,
+                          sizeof(*__buffers) * args->num_buffers)) {
+               err = -EFAULT;
+               goto err_copy_from_user;
+       }
+
+       /* first get all dmabufs... */
+       for (i = 0, num_handled_buffers = 0;
+            i < args->num_buffers;
+            i++, num_handled_buffers++) {
+               /* ensure that reserved fields are kept clear */
+               if (buffers[i].reserved0 ||
+                   buffers[i].reserved1[0] ||
+                   buffers[i].reserved1[1]) {
+                       err = -EINVAL;
+                       goto err_get_dmabufs;
+               }
+
+               dmabufs[i] = dma_buf_get(buffers[i].dmabuf_fd);
+               if (IS_ERR(dmabufs[i])) {
+                       err = PTR_ERR(dmabufs[i]);
+                       goto err_get_dmabufs;
+               }
+       }
+
+       /* ...then unmap */
+       for (i = 0; i < args->num_buffers; i++)
+               nvhost_vm_unmap_dmabuf(ctx->vm, dmabufs[i]);
+
+err_get_dmabufs:
+       for (i = 0; i < num_handled_buffers; i++)
+               dma_buf_put(dmabufs[i]);
+err_copy_from_user:
+       kfree(dmabufs);
+err_alloc_dmabufs:
+       kfree(buffers);
+err_alloc_buffers:
+       return err;
+}
+
 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
 {
        int i;
@@ -1039,6 +1194,12 @@ static long nvhost_channelctl(struct file *filp,
                err = nvhost_init_error_notifier(priv,
                        (struct nvhost_set_error_notifier *)buf);
                break;
+       case NVHOST_IOCTL_CHANNEL_MAP_BUFFER:
+               err = nvhost_ioctl_channel_map_buffer(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER:
+               err = nvhost_ioctl_channel_unmap_buffer(priv, (void *)buf);
+               break;
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
        {
                u32 timeout =
include/linux/nvhost_ioctl.h
index 286980400f1b99d9834bc552c584cd4d33cb8b8c..82c7639ffa7b3b7eaad2a459fbae1cf62a6fd309 100644
@@ -233,6 +233,25 @@ struct nvhost_set_ctxswitch_args {
        __u32 pad;
 };
 
+struct nvhost_channel_buffer {
+       __u32 dmabuf_fd;        /* in */
+       __u32 reserved0;        /* reserved, must be 0 */
+       __u64 reserved1[2];     /* reserved, must be 0 */
+       __u64 address;          /* out, device view of the buffer */
+};
+
+struct nvhost_channel_unmap_buffer_args {
+       __u32 num_buffers;      /* in, number of buffers to unmap */
+       __u32 reserved;         /* reserved, must be 0 */
+       __u64 table_address;    /* in, pointer to the buffer table */
+};
+
+struct nvhost_channel_map_buffer_args {
+       __u32 num_buffers;      /* in, number of buffers to map */
+       __u32 reserved;         /* reserved, must be 0 */
+       __u64 table_address;    /* in, pointer to the buffer table */
+};
+
 #define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS    \
        _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
 #define NVHOST_IOCTL_CHANNEL_GET_WAITBASES     \
@@ -278,6 +297,11 @@ struct nvhost_set_ctxswitch_args {
 #define        NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR     \
        _IOWR(NVHOST_IOCTL_MAGIC, 27, struct nvhost_ctrl_module_regrdwr_args)
 
+#define        NVHOST_IOCTL_CHANNEL_MAP_BUFFER \
+       _IOWR(NVHOST_IOCTL_MAGIC, 28, struct nvhost_channel_map_buffer_args)
+#define        NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER       \
+       _IOWR(NVHOST_IOCTL_MAGIC, 29, struct nvhost_channel_unmap_buffer_args)
+
 #define NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER  \
        _IOWR(NVHOST_IOCTL_MAGIC, 111, struct nvhost_set_error_notifier)
 #define NVHOST_IOCTL_CHANNEL_OPEN      \