staging: apf: Changing dma-buf attachment  (tag: xilinx-v2016.3-trd)
author     Michael Gill <gill@xilinx.com>
           Mon, 19 Dec 2016 21:06:13 +0000 (13:06 -0800)
committer  Michal Simek <michal.simek@xilinx.com>
           Tue, 20 Dec 2016 06:58:02 +0000 (07:58 +0100)
The method used for attaching to buffers through the dma-buf
API resulted in the loss of buffers when those buffers
originated from paged V4L2 devices: the attachment was mapped
once and then cached for the lifetime of the registration.
This patch resolves that by mapping the buffer on every submit
and releasing the mapping when the transfer completes.

Signed-off-by: Michael Gill <gill@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
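
For context, here is a minimal sketch of the importer side of the
kernel dma-buf API that the patch switches to using once per
transfer (the dma_buf_* calls are the real interface; the wrapper
functions and their names are illustrative only):

#include <linux/dma-buf.h>
#include <linux/err.h>

/*
 * Illustrative only: attach and map a dma-buf for a single DMA
 * transfer.  On success *attach holds the attachment and the
 * returned sg_table describes the buffer for this device.
 */
static struct sg_table *example_map_for_dma(struct dma_buf *dbuf,
					    struct device *dev,
					    enum dma_data_direction dir,
					    struct dma_buf_attachment **attach)
{
	struct sg_table *sgt;

	*attach = dma_buf_attach(dbuf, dev);
	if (IS_ERR(*attach))
		return ERR_CAST(*attach);

	sgt = dma_buf_map_attachment(*attach, dir);
	if (IS_ERR(sgt))
		dma_buf_detach(dbuf, *attach);
	return sgt;
}

/* Illustrative only: tear the mapping down once the transfer is done. */
static void example_unmap_after_dma(struct dma_buf *dbuf,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt,
				    enum dma_data_direction dir)
{
	dma_buf_unmap_attachment(attach, sgt, dir);
	dma_buf_detach(dbuf, attach);
}

Before this patch the driver performed the attach/map step only
once, guarded by is_mapped, and reused the cached sg_table; the
hunks below move the map into xdma_submit() and the unmap into
xdma_wait(), so each transfer sees a fresh mapping.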
drivers/staging/apf/xilinx-dma-apf.c
drivers/staging/apf/xilinx-dma-apf.h
drivers/staging/apf/xlnk.c

diff --git a/drivers/staging/apf/xilinx-dma-apf.c b/drivers/staging/apf/xilinx-dma-apf.c
index 25c77aa337bb38ab5fc23c717e16e8de10d54586..5cac19ff62125de052c5f6031a0ee40988cc4c36 100644
--- a/drivers/staging/apf/xilinx-dma-apf.c
+++ b/drivers/staging/apf/xilinx-dma-apf.c
@@ -817,49 +817,48 @@ int xdma_submit(struct xdma_chan *chan,
        dmahead->size = size;
        dmahead->dmadir = chan->direction;
        dmahead->userflag = user_flags;
+       dmahead->dmabuf = dp;
        dmadir = chan->direction;
 
        if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
        if (dp) {
-               if (!dp->is_mapped) {
-                       struct scatterlist *sg;
-                       int cpy_size;
-                       int i;
-                       unsigned int remaining_size = size;
-
-                       dp->dbuf_attach = dma_buf_attach(dp->dbuf, chan->dev);
-                       dp->dbuf_sg_table = dma_buf_map_attachment(
-                               dp->dbuf_attach, chan->direction);
-
-                       if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
-                               pr_err("%s unable to map sg_table for dbuf: %d\n",
-                                       __func__, (int)dp->dbuf_sg_table);
-                               return -EINVAL;
-                       }
-                       cpy_size = dp->dbuf_sg_table->nents *
-                               sizeof(struct scatterlist);
-                       dp->sg_list = kmalloc(cpy_size, GFP_KERNEL);
-                       if (!dp->sg_list)
-                               return -ENOMEM;
-                       dp->sg_list_cnt = 0;
-                       memcpy(dp->sg_list, dp->dbuf_sg_table->sgl, cpy_size);
-                       for_each_sg(dp->sg_list,
-                                   sg,
-                                   dp->dbuf_sg_table->nents,
-                                   i) {
-                               if (remaining_size == 0) {
-                                       sg_dma_len(sg) = 0;
-                               } else if (sg_dma_len(sg) > remaining_size) {
-                                       sg_dma_len(sg) = remaining_size;
-                                       dp->sg_list_cnt++;
-                               } else {
-                                       remaining_size -= sg_dma_len(sg);
-                                       dp->sg_list_cnt++;
-                               }
+               int i;
+               int cpy_size;
+               struct scatterlist *sg;
+               unsigned int remaining_size = size;
+               unsigned int observed_size = 0;
+
+               dp->dbuf_attach = dma_buf_attach(dp->dbuf, chan->dev);
+               dp->dbuf_sg_table = dma_buf_map_attachment(dp->dbuf_attach,
+                                                          chan->direction);
+               if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
+                       pr_err("%s unable to map sg_table for dbuf: %d\n",
+                              __func__, (int)dp->dbuf_sg_table);
+                       return -EINVAL;
+               }
+               cpy_size = dp->dbuf_sg_table->nents *
+                       sizeof(struct scatterlist);
+               dp->sg_list = kmalloc(cpy_size, GFP_KERNEL);
+               if (!dp->sg_list)
+                       return -ENOMEM;
+               dp->sg_list_cnt = 0;
+               memcpy(dp->sg_list, dp->dbuf_sg_table->sgl, cpy_size);
+               for_each_sg(dp->sg_list,
+                           sg,
+                           dp->dbuf_sg_table->nents,
+                           i) {
+                       observed_size += sg_dma_len(sg);
+                       if (remaining_size == 0) {
+                               sg_dma_len(sg) = 0;
+                       } else if (sg_dma_len(sg) > remaining_size) {
+                               sg_dma_len(sg) = remaining_size;
+                               dp->sg_list_cnt++;
+                       } else {
+                               remaining_size -= sg_dma_len(sg);
+                               dp->sg_list_cnt++;
                        }
-                       dp->is_mapped = 1;
                }
 
                sglist_dma = dp->sg_list;
@@ -867,7 +866,6 @@ int xdma_submit(struct xdma_chan *chan,
                sgcnt = dp->sg_list_cnt;
                sgcnt_dma = dp->sg_list_cnt;
                dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
-               dmahead->is_dmabuf = 1;
        } else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
                sglist = chan->scratch_sglist;
                sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
@@ -951,6 +949,7 @@ int xdma_wait(struct xdma_head *dmahead,
 {
        struct xdma_chan *chan = dmahead->chan;
        DEFINE_DMA_ATTRS(attrs);
+
        if (chan->poll_mode) {
                xilinx_chan_desc_cleanup(chan);
                *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
@@ -965,12 +964,17 @@ int xdma_wait(struct xdma_head *dmahead,
                }
        }
 
-       if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
-               dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
-       if (dmahead->is_dmabuf) {
-               dmahead->is_dmabuf = 0;
+       if (dmahead->dmabuf) {
+               dma_buf_unmap_attachment(dmahead->dmabuf->dbuf_attach,
+                                        dmahead->dmabuf->dbuf_sg_table,
+                                        dmahead->dmabuf->dma_direction);
+               kfree(dmahead->dmabuf->sg_list);
+               dma_buf_detach(dmahead->dmabuf->dbuf,
+                              dmahead->dmabuf->dbuf_attach);
        } else {
+               if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+                       dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+
                get_dma_ops(chan->dev)->unmap_sg(chan->dev,
                                                 dmahead->sglist,
                                                 dmahead->sgcnt,
@@ -979,6 +983,7 @@ int xdma_wait(struct xdma_head *dmahead,
                if (!(user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS))
                        unpin_user_pages(dmahead->sglist, dmahead->sgcnt);
        }
+
        return 0;
 }
 EXPORT_SYMBOL(xdma_wait);
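
The rewritten submit path also trims the copied scatterlist so
that the summed DMA length matches the requested transfer size.
A hypothetical standalone version of that loop (the function name
is illustrative; unlike the hunk above, this sketch also clears
the remaining count after a partial entry):

#include <linux/scatterlist.h>

/*
 * Illustrative sketch of the length-trimming loop in xdma_submit():
 * cap the total DMA length of 'sgl' at 'size', zero out entries
 * past the end, and return how many entries still carry data.
 */
static int example_trim_sg_list(struct scatterlist *sgl, int nents,
				unsigned int size)
{
	struct scatterlist *sg;
	unsigned int remaining = size;
	int used = 0;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (remaining == 0) {
			sg_dma_len(sg) = 0;		/* excess entry */
		} else if (sg_dma_len(sg) > remaining) {
			sg_dma_len(sg) = remaining;	/* partial last entry */
			remaining = 0;
			used++;
		} else {
			remaining -= sg_dma_len(sg);
			used++;
		}
	}
	return used;
}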
diff --git a/drivers/staging/apf/xilinx-dma-apf.h b/drivers/staging/apf/xilinx-dma-apf.h
index e677d409e1f155b792a2f2ffe5d160b7d7f266fa..9949fcf2986467ea38deddf82039327beb3f8768 100644
--- a/drivers/staging/apf/xilinx-dma-apf.h
+++ b/drivers/staging/apf/xilinx-dma-apf.h
@@ -90,7 +90,7 @@
 #define XDMA_BD_SF_SW_DONE_MASK                0x00000001
 
 /* driver defines */
-#define XDMA_MAX_BD_CNT                        2048
+#define XDMA_MAX_BD_CNT                        16384
 #define XDMA_MAX_CHANS_PER_DEVICE      2
 #define XDMA_MAX_TRANS_LEN             0x7FF000
 #define XDMA_MAX_APPWORDS              5
@@ -202,7 +202,7 @@ struct xdma_head {
        u32 appwords_o[XDMA_MAX_APPWORDS];
        unsigned int userflag;
        u32 last_bd_index;
-       u32 is_dmabuf;
+       struct xlnk_dmabuf_reg *dmabuf;
 };
 
 struct xdma_chan *xdma_request_channel(char *name);
diff --git a/drivers/staging/apf/xlnk.c b/drivers/staging/apf/xlnk.c
index a14da6db5f93f98ce1d1a2767e5bcdcdd286ae31..cde5cf9361a23c73e303a8331360b04d5927f420 100644
--- a/drivers/staging/apf/xlnk.c
+++ b/drivers/staging/apf/xlnk.c
@@ -881,30 +881,19 @@ static int xlnk_adddmabuf_ioctl(struct file *filp, unsigned int code,
        union xlnk_args temp_args;
        struct xlnk_dmabuf_reg *db;
        int status;
-
        status = copy_from_user(&temp_args, (void __user *)args,
                                sizeof(union xlnk_args));
 
        if (status)
                return -ENOMEM;
 
-       dev_dbg(xlnk_dev, "Registering dmabuf fd %d for virtual address %p\n",
-               temp_args.dmabuf.dmabuf_fd, temp_args.dmabuf.user_addr);
-
        db = kzalloc(sizeof(struct xlnk_dmabuf_reg), GFP_KERNEL);
        if (!db)
                return -ENOMEM;
 
        db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
        db->user_vaddr = temp_args.dmabuf.user_addr;
-
        db->dbuf = dma_buf_get(db->dmabuf_fd);
-       if (IS_ERR_OR_NULL(db->dbuf)) {
-               dev_err(xlnk_dev, "%s Invalid dmabuf fd %d\n",
-                        __func__, db->dmabuf_fd);
-               return -EINVAL;
-       }
-       db->is_mapped = 0;
 
        INIT_LIST_HEAD(&db->list);
        list_add_tail(&db->list, &xlnk_dmabuf_list);
@@ -927,12 +916,6 @@ static int xlnk_cleardmabuf_ioctl(struct file *filp, unsigned int code,
 
        list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
                if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
-                       if (dp->is_mapped) {
-                               dma_buf_unmap_attachment(dp->dbuf_attach,
-                                       dp->dbuf_sg_table, dp->dma_direction);
-                               dma_buf_detach(dp->dbuf, dp->dbuf_attach);
-                               kfree(dp->sg_list);
-                       }
                        dma_buf_put(dp->dbuf);
                        list_del(&dp->list);
                        kfree(dp);
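
With the mapping lifetime now tied to a single transfer, the
unregister path above only has to drop the reference taken by
dma_buf_get().  A hypothetical condensed form of that
lookup-and-release pattern (assuming the driver's
struct xlnk_dmabuf_reg and xlnk_intptr_type from xlnk.h):

#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative only: find a registration by user address and free it. */
static void example_clear_dmabuf(xlnk_intptr_type user_addr,
				 struct list_head *dmabuf_list)
{
	struct xlnk_dmabuf_reg *dp, *dp_temp;

	list_for_each_entry_safe(dp, dp_temp, dmabuf_list, list) {
		if (dp->user_vaddr == user_addr) {
			dma_buf_put(dp->dbuf);	/* drop dma_buf_get() ref */
			list_del(&dp->list);
			kfree(dp);
			return;
		}
	}
}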