rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
dmaengine: xilinx: dma: Program hardware supported buffer length
author: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Thu, 8 Mar 2018 09:48:34 +0000 (15:18 +0530)
committer: Michal Simek <michal.simek@xilinx.com>
Fri, 16 Mar 2018 07:27:54 +0000 (08:27 +0100)
AXI-DMA IP supports configurable (c_sg_length_width) buffer length
register width, hence read buffer length (xlnx,sg-length-width) DT
property and ensure that driver doesn't program buffer length
exceeding the supported limit. For VDMA and CDMA there is no change.

Signed-off-by: Radhey Shyam Pandey <radheys@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
drivers/dma/xilinx/xilinx_dma.c

index be74db136f44cf704b22ffb2ed8b9d077c9396df..b78d005fbfff3be6031f52f34944796b0980f2e7 100644 (file)
 #define XILINX_DMA_REG_BTT             0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN       GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN   8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX   23
 #define XILINX_DMA_CR_COALESCE_MAX     GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK        BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT   16
@@ -409,6 +410,7 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
        void __iomem *regs;
@@ -428,6 +430,7 @@ struct xilinx_dma_device {
        struct clk *rxs_clk;
        u32 nr_channels;
        u32 chan_id;
+       u32 max_buffer_len;
 };
 
 /* Macros */
@@ -976,7 +979,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                        list_for_each_entry(segment, &desc->segments, node) {
                                hw = &segment->hw;
                                residue += (hw->control - hw->status) &
-                                          XILINX_DMA_MAX_TRANS_LEN;
+                                          chan->xdev->max_buffer_len;
                        }
                }
                spin_unlock_irqrestore(&chan->lock, flags);
@@ -1236,7 +1239,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                               hw->control & XILINX_DMA_MAX_TRANS_LEN);
+                               hw->control & chan->xdev->max_buffer_len);
        }
 
        list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1339,7 +1342,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-                              hw->control & XILINX_DMA_MAX_TRANS_LEN);
+                              hw->control & chan->xdev->max_buffer_len);
        }
 
        list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1700,7 +1703,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
        struct xilinx_cdma_tx_segment *segment;
        struct xilinx_cdma_desc_hw *hw;
 
-       if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+       if (!len || len > chan->xdev->max_buffer_len)
                return NULL;
 
        desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1781,7 +1784,7 @@ static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
         */
        while (true) {
                len = min_t(size_t, src_avail, dst_avail);
-               len = min_t(size_t, len, XILINX_DMA_MAX_TRANS_LEN);
+               len = min_t(size_t, len, chan->xdev->max_buffer_len);
                if (len == 0)
                        goto fetch;
 
@@ -1901,7 +1904,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
                         * making sure it is less than the hw limit
                         */
                        copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-                                    XILINX_DMA_MAX_TRANS_LEN);
+                                    chan->xdev->max_buffer_len);
                        hw = &segment->hw;
 
                        /* Fill in the descriptor */
@@ -2006,7 +2009,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
                         * making sure it is less than the hw limit
                         */
                        copy = min_t(size_t, period_len - sg_used,
-                                    XILINX_DMA_MAX_TRANS_LEN);
+                                    chan->xdev->max_buffer_len);
                        hw = &segment->hw;
                        xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
                                          period_len * i);
@@ -2690,7 +2693,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        struct xilinx_dma_device *xdev;
        struct device_node *child, *np = pdev->dev.of_node;
        struct resource *io;
-       u32 num_frames, addr_width;
+       u32 num_frames, addr_width, len_width;
        int i, err;
 
        /* Allocate and initialize the DMA engine structure */
@@ -2722,8 +2725,22 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
        /* Retrieve the DMA engine properties from the device tree */
        xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+       xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+               if (!of_property_read_u32(node, "xlnx,sg-length-width",
+                                         &len_width)) {
+                       if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+                           len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) {
+                               dev_warn(xdev->dev,
+                                        "invalid xlnx,sg-length-width property value using default width\n");
+                       } else {
+                               xdev->max_buffer_len = GENMASK(len_width - 1,
+                                                              0);
+                       }
+               }
+       }
 
        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
                err = of_property_read_u32(node, "xlnx,num-fstores",