rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
dmaengine: vdma: Fix race condition in Non-SG mode
authorKedareswara rao Appana <appana.durga.rao@xilinx.com>
Wed, 6 Apr 2016 05:08:09 +0000 (10:38 +0530)
committerVinod Koul <vinod.koul@intel.com>
Wed, 6 Apr 2016 15:41:15 +0000 (08:41 -0700)
When VDMA is configured in non-SG mode,
users can queue more descriptors than the number of h/w configured frames.

The current driver only allows the user to queue descriptors up to the h/w
configured frame count, which is wrong for the non-SG mode configuration.

This patch fixes this issue.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/xilinx/xilinx_vdma.c

index abe915c732660f021cd61cc5fbb8c81108e098ea..b873d98d756b706827803845989512257aa18f90 100644 (file)
@@ -209,6 +209,7 @@ struct xilinx_vdma_tx_descriptor {
  * @flush_on_fsync: Flush on Frame sync
  * @desc_pendingcount: Descriptor pending count
  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
  */
 struct xilinx_vdma_chan {
        struct xilinx_vdma_device *xdev;
@@ -233,6 +234,7 @@ struct xilinx_vdma_chan {
        bool flush_on_fsync;
        u32 desc_pendingcount;
        bool ext_addr;
+       u32 desc_submitcount;
 };
 
 /**
@@ -716,9 +718,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
                struct xilinx_vdma_tx_segment *segment, *last = NULL;
                int i = 0;
 
-               list_for_each_entry(desc, &chan->pending_list, node) {
-                       segment = list_first_entry(&desc->segments,
-                                          struct xilinx_vdma_tx_segment, node);
+               if (chan->desc_submitcount < chan->num_frms)
+                       i = chan->desc_submitcount;
+
+               list_for_each_entry(segment, &desc->segments, node) {
                        if (chan->ext_addr)
                                vdma_desc_write_64(chan,
                                        XILINX_VDMA_REG_START_ADDRESS_64(i++),
@@ -742,8 +745,17 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
                vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
        }
 
-       list_splice_tail_init(&chan->pending_list, &chan->active_list);
-       chan->desc_pendingcount = 0;
+       if (!chan->has_sg) {
+               list_del(&desc->node);
+               list_add_tail(&desc->node, &chan->active_list);
+               chan->desc_submitcount++;
+               chan->desc_pendingcount--;
+               if (chan->desc_submitcount == chan->num_frms)
+                       chan->desc_submitcount = 0;
+       } else {
+               list_splice_tail_init(&chan->pending_list, &chan->active_list);
+               chan->desc_pendingcount = 0;
+       }
 }
 
 /**
@@ -927,7 +939,8 @@ append:
        list_add_tail(&desc->node, &chan->pending_list);
        chan->desc_pendingcount++;
 
-       if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+       if (chan->has_sg &&
+           unlikely(chan->desc_pendingcount > chan->num_frms)) {
                dev_dbg(chan->dev, "desc pendingcount is too high\n");
                chan->desc_pendingcount = chan->num_frms;
        }