rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
dma: Remove drivers which had DMA_SG in past
authorMichal Simek <michal.simek@xilinx.com>
Thu, 18 Oct 2018 09:06:03 +0000 (11:06 +0200)
committerMichal Simek <michal.simek@xilinx.com>
Wed, 9 Jan 2019 08:16:02 +0000 (09:16 +0100)
The patch
Revert "dmaengine: remove DMA_SG as it is dead code in kernel"
(sha1: d1797ba7285165859c754c6ff22fd77b7c74c0e6)

added back DMA_SG changes to several drivers. There is no reason to keep
these changes in our tree because DMA_SG is needed only for the Xilinx
PCIe driver. That's why this patch removes the changes from the
non-Xilinx drivers, to keep the diff against mainline smaller.

Signed-off-by: Michal Simek <michal.simek@xilinx.com>
drivers/crypto/ccp/ccp-dmaengine.c
drivers/dma/at_hdmac.c
drivers/dma/fsldma.c
drivers/dma/mv_xor.c
drivers/dma/nbpfaxi.c
drivers/dma/ste_dma40.c
drivers/dma/xgene-dma.c

index 010e503e1398b6b47f841391ea58510cd3b176aa..67155cb21636917456941c346457b300116d61f1 100644 (file)
@@ -501,27 +501,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        return &desc->tx_desc;
 }
 
-static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
-       struct dma_chan *dma_chan, struct scatterlist *dst_sg,
-       unsigned int dst_nents, struct scatterlist *src_sg,
-       unsigned int src_nents, unsigned long flags)
-{
-       struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
-                                                dma_chan);
-       struct ccp_dma_desc *desc;
-
-       dev_dbg(chan->ccp->dev,
-               "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
-               __func__, src_sg, src_nents, dst_sg, dst_nents, flags);
-
-       desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
-                              flags);
-       if (!desc)
-               return NULL;
-
-       return &desc->tx_desc;
-}
-
 static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
 {
@@ -703,7 +682,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-       dma_cap_set(DMA_SG, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
 
        /* The DMA channels for this device can be set to public or private,
@@ -739,7 +717,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 
        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
-       dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
index de5ee0ff0448c74157ec20b7a6a351dfc23df15a..75f38d19fcbed5dac47e4fd8b7d8b78cef228e66 100644 (file)
@@ -1204,138 +1204,6 @@ err:
        return NULL;
 }
 
-/**
- * atc_prep_dma_sg - prepare memory to memory scather-gather operation
- * @chan: the channel to prepare operation on
- * @dst_sg: destination scatterlist
- * @dst_nents: number of destination scatterlist entries
- * @src_sg: source scatterlist
- * @src_nents: number of source scatterlist entries
- * @flags: tx descriptor status flags
- */
-static struct dma_async_tx_descriptor *
-atc_prep_dma_sg(struct dma_chan *chan,
-               struct scatterlist *dst_sg, unsigned int dst_nents,
-               struct scatterlist *src_sg, unsigned int src_nents,
-               unsigned long flags)
-{
-       struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct at_desc          *desc = NULL;
-       struct at_desc          *first = NULL;
-       struct at_desc          *prev = NULL;
-       unsigned int            src_width;
-       unsigned int            dst_width;
-       size_t                  xfer_count;
-       u32                     ctrla;
-       u32                     ctrlb;
-       size_t                  dst_len = 0, src_len = 0;
-       dma_addr_t              dst = 0, src = 0;
-       size_t                  len = 0, total_len = 0;
-
-       if (unlikely(dst_nents == 0 || src_nents == 0))
-               return NULL;
-
-       if (unlikely(dst_sg == NULL || src_sg == NULL))
-               return NULL;
-
-       ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
-               | ATC_SRC_ADDR_MODE_INCR
-               | ATC_DST_ADDR_MODE_INCR
-               | ATC_FC_MEM2MEM;
-
-       /*
-        * loop until there is either no more source or no more destination
-        * scatterlist entry
-        */
-       while (true) {
-
-               /* prepare the next transfer */
-               if (dst_len == 0) {
-
-                       /* no more destination scatterlist entries */
-                       if (!dst_sg || !dst_nents)
-                               break;
-
-                       dst = sg_dma_address(dst_sg);
-                       dst_len = sg_dma_len(dst_sg);
-
-                       dst_sg = sg_next(dst_sg);
-                       dst_nents--;
-               }
-
-               if (src_len == 0) {
-
-                       /* no more source scatterlist entries */
-                       if (!src_sg || !src_nents)
-                               break;
-
-                       src = sg_dma_address(src_sg);
-                       src_len = sg_dma_len(src_sg);
-
-                       src_sg = sg_next(src_sg);
-                       src_nents--;
-               }
-
-               len = min_t(size_t, src_len, dst_len);
-               if (len == 0)
-                       continue;
-
-               /* take care for the alignment */
-               src_width = dst_width = atc_get_xfer_width(src, dst, len);
-
-               ctrla = ATC_SRC_WIDTH(src_width) |
-                       ATC_DST_WIDTH(dst_width);
-
-               /*
-                * The number of transfers to set up refer to the source width
-                * that depends on the alignment.
-                */
-               xfer_count = len >> src_width;
-               if (xfer_count > ATC_BTSIZE_MAX) {
-                       xfer_count = ATC_BTSIZE_MAX;
-                       len = ATC_BTSIZE_MAX << src_width;
-               }
-
-               /* create the transfer */
-               desc = atc_desc_get(atchan);
-               if (!desc)
-                       goto err_desc_get;
-
-               desc->lli.saddr = src;
-               desc->lli.daddr = dst;
-               desc->lli.ctrla = ctrla | xfer_count;
-               desc->lli.ctrlb = ctrlb;
-
-               desc->txd.cookie = 0;
-               desc->len = len;
-
-               atc_desc_chain(&first, &prev, desc);
-
-               /* update the lengths and addresses for the next loop cycle */
-               dst_len -= len;
-               src_len -= len;
-               dst += len;
-               src += len;
-
-               total_len += len;
-       }
-
-       /* First descriptor of the chain embedds additional information */
-       first->txd.cookie = -EBUSY;
-       first->total_len = total_len;
-
-       /* set end-of-link to the last link descriptor of list*/
-       set_desc_eol(desc);
-
-       first->txd.flags = flags; /* client is in control of this ack */
-
-       return &first->txd;
-
-err_desc_get:
-       atc_desc_put(atchan, first);
-       return NULL;
-}
-
 /**
  * atc_dma_cyclic_check_values
  * Check for too big/unaligned periods and unaligned DMA buffer
@@ -1935,14 +1803,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        /* setup platform data for each SoC */
        dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
-       dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
        dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
        dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
-       dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
        /* get DMA parameters from controller type */
        plat_dat = at_dma_get_driver_data(pdev);
@@ -2080,16 +1946,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
                atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        }
 
-       if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
-               atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
-
        dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-       dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
+       dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-         dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
          plat_dat->nr_channels);
 
        dma_async_device_register(&atdma->dma_common);
index 6faa849eebdf4cde7bde5dc227a89b7afc1f7888..1117b5123a6fc786f28d68868deef2fa10951d42 100644 (file)
@@ -825,122 +825,6 @@ fail:
        return NULL;
 }
 
-static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
-       struct scatterlist *dst_sg, unsigned int dst_nents,
-       struct scatterlist *src_sg, unsigned int src_nents,
-       unsigned long flags)
-{
-       struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-       struct fsldma_chan *chan = to_fsl_chan(dchan);
-       size_t dst_avail, src_avail;
-       dma_addr_t dst, src;
-       size_t len;
-
-       /* basic sanity checks */
-       if (dst_nents == 0 || src_nents == 0)
-               return NULL;
-
-       if (dst_sg == NULL || src_sg == NULL)
-               return NULL;
-
-       /*
-        * TODO: should we check that both scatterlists have the same
-        * TODO: number of bytes in total? Is that really an error?
-        */
-
-       /* get prepared for the loop */
-       dst_avail = sg_dma_len(dst_sg);
-       src_avail = sg_dma_len(src_sg);
-
-       /* run until we are out of scatterlist entries */
-       while (true) {
-
-               /* create the largest transaction possible */
-               len = min_t(size_t, src_avail, dst_avail);
-               len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
-               if (len == 0)
-                       goto fetch;
-
-               dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-               src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-               /* allocate and populate the descriptor */
-               new = fsl_dma_alloc_descriptor(chan);
-               if (!new) {
-                       chan_err(chan, "%s\n", msg_ld_oom);
-                       goto fail;
-               }
-
-               set_desc_cnt(chan, &new->hw, len);
-               set_desc_src(chan, &new->hw, src);
-               set_desc_dst(chan, &new->hw, dst);
-
-               if (!first)
-                       first = new;
-               else
-                       set_desc_next(chan, &prev->hw, new->async_tx.phys);
-
-               new->async_tx.cookie = 0;
-               async_tx_ack(&new->async_tx);
-               prev = new;
-
-               /* Insert the link descriptor to the LD ring */
-               list_add_tail(&new->node, &first->tx_list);
-
-               /* update metadata */
-               dst_avail -= len;
-               src_avail -= len;
-
-fetch:
-               /* fetch the next dst scatterlist entry */
-               if (dst_avail == 0) {
-
-                       /* no more entries: we're done */
-                       if (dst_nents == 0)
-                               break;
-
-                       /* fetch the next entry: if there are no more: done */
-                       dst_sg = sg_next(dst_sg);
-                       if (dst_sg == NULL)
-                               break;
-
-                       dst_nents--;
-                       dst_avail = sg_dma_len(dst_sg);
-               }
-
-               /* fetch the next src scatterlist entry */
-               if (src_avail == 0) {
-
-                       /* no more entries: we're done */
-                       if (src_nents == 0)
-                               break;
-
-                       /* fetch the next entry: if there are no more: done */
-                       src_sg = sg_next(src_sg);
-                       if (src_sg == NULL)
-                               break;
-
-                       src_nents--;
-                       src_avail = sg_dma_len(src_sg);
-               }
-       }
-
-       new->async_tx.flags = flags; /* client is in control of this ack */
-       new->async_tx.cookie = -EBUSY;
-
-       /* Set End-of-link to the last link descriptor of new list */
-       set_ld_eol(chan, new);
-
-       return &first->async_tx;
-
-fail:
-       if (!first)
-               return NULL;
-
-       fsldma_free_desc_list_reverse(chan, &first->tx_list);
-       return NULL;
-}
-
 static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
        struct fsldma_chan *chan;
@@ -1357,12 +1241,10 @@ static int fsldma_of_probe(struct platform_device *op)
        fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 
        dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
-       dma_cap_set(DMA_SG, fdev->common.cap_mask);
        dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
        fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
        fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
        fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
-       fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
        fdev->common.device_tx_status = fsl_tx_status;
        fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
        fdev->common.device_config = fsl_dma_device_config;
index 11f5978cc1af97b448920e1e44b29313f7664617..969534c1a6c63339b1b5c48ae3568d275f3bb518 100644 (file)
@@ -68,36 +68,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
        hw_desc->byte_count = byte_count;
 }
 
-/* Populate the descriptor */
-static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
-                                    dma_addr_t dma_src, dma_addr_t dma_dst,
-                                    u32 len, struct mv_xor_desc_slot *prev)
-{
-       struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-       hw_desc->status = XOR_DESC_DMA_OWNED;
-       hw_desc->phy_next_desc = 0;
-       /* Configure for XOR with only one src address -> MEMCPY */
-       hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
-       hw_desc->phy_dest_addr = dma_dst;
-       hw_desc->phy_src_addr[0] = dma_src;
-       hw_desc->byte_count = len;
-
-       if (prev) {
-               struct mv_xor_desc *hw_prev = prev->hw_desc;
-
-               hw_prev->phy_next_desc = desc->async_tx.phys;
-       }
-}
-
-static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
-{
-       struct mv_xor_desc *hw_desc = desc->hw_desc;
-
-       /* Enable end-of-descriptor interrupt */
-       hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
-}
-
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
        struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -662,132 +632,6 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
        return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
-/**
- * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
- * @chan: DMA channel
- * @dst_sg: Destination scatter list
- * @dst_sg_len: Number of entries in destination scatter list
- * @src_sg: Source scatter list
- * @src_sg_len: Number of entries in source scatter list
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
-                  unsigned int dst_sg_len, struct scatterlist *src_sg,
-                  unsigned int src_sg_len, unsigned long flags)
-{
-       struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-       struct mv_xor_desc_slot *new;
-       struct mv_xor_desc_slot *first = NULL;
-       struct mv_xor_desc_slot *prev = NULL;
-       size_t len, dst_avail, src_avail;
-       dma_addr_t dma_dst, dma_src;
-       int desc_cnt = 0;
-       int ret;
-
-       dev_dbg(mv_chan_to_devp(mv_chan),
-               "%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
-               __func__, dst_sg_len, src_sg_len, flags);
-
-       dst_avail = sg_dma_len(dst_sg);
-       src_avail = sg_dma_len(src_sg);
-
-       /* Run until we are out of scatterlist entries */
-       while (true) {
-               /* Allocate and populate the descriptor */
-               desc_cnt++;
-               new = mv_chan_alloc_slot(mv_chan);
-               if (!new) {
-                       dev_err(mv_chan_to_devp(mv_chan),
-                               "Out of descriptors (desc_cnt=%d)!\n",
-                               desc_cnt);
-                       goto err;
-               }
-
-               len = min_t(size_t, src_avail, dst_avail);
-               len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
-               if (len == 0)
-                       goto fetch;
-
-               if (len < MV_XOR_MIN_BYTE_COUNT) {
-                       dev_err(mv_chan_to_devp(mv_chan),
-                               "Transfer size of %zu too small!\n", len);
-                       goto err;
-               }
-
-               dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
-                       dst_avail;
-               dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
-                       src_avail;
-
-               /* Check if a new window needs to get added for 'dst' */
-               ret = mv_xor_add_io_win(mv_chan, dma_dst);
-               if (ret)
-                       goto err;
-
-               /* Check if a new window needs to get added for 'src' */
-               ret = mv_xor_add_io_win(mv_chan, dma_src);
-               if (ret)
-                       goto err;
-
-               /* Populate the descriptor */
-               mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
-               prev = new;
-               dst_avail -= len;
-               src_avail -= len;
-
-               if (!first)
-                       first = new;
-               else
-                       list_move_tail(&new->node, &first->sg_tx_list);
-
-fetch:
-               /* Fetch the next dst scatterlist entry */
-               if (dst_avail == 0) {
-                       if (dst_sg_len == 0)
-                               break;
-
-                       /* Fetch the next entry: if there are no more: done */
-                       dst_sg = sg_next(dst_sg);
-                       if (dst_sg == NULL)
-                               break;
-
-                       dst_sg_len--;
-                       dst_avail = sg_dma_len(dst_sg);
-               }
-
-               /* Fetch the next src scatterlist entry */
-               if (src_avail == 0) {
-                       if (src_sg_len == 0)
-                               break;
-
-                       /* Fetch the next entry: if there are no more: done */
-                       src_sg = sg_next(src_sg);
-                       if (src_sg == NULL)
-                               break;
-
-                       src_sg_len--;
-                       src_avail = sg_dma_len(src_sg);
-               }
-       }
-
-       /* Set the EOD flag in the last descriptor */
-       mv_xor_desc_config_eod(new);
-       first->async_tx.flags = flags;
-
-       return &first->async_tx;
-
-err:
-       /* Cleanup: Move all descriptors back into the free list */
-       spin_lock_bh(&mv_chan->lock);
-       mv_desc_clean_slot(first, mv_chan);
-       spin_unlock_bh(&mv_chan->lock);
-
-       return NULL;
-}
-
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1254,8 +1098,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
                dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-       if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
-               dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = 8;
                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1305,11 +1147,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
                        goto err_free_irq;
        }
 
-       dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
+       dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
                 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
-                dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
        dma_async_device_register(dma_dev);
@@ -1552,7 +1393,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 
                        dma_cap_zero(cap_mask);
                        dma_cap_set(DMA_MEMCPY, cap_mask);
-                       dma_cap_set(DMA_SG, cap_mask);
                        dma_cap_set(DMA_XOR, cap_mask);
                        dma_cap_set(DMA_INTERRUPT, cap_mask);
 
index 9d873f360403c9caaa7b87baff03e760b3f6929f..8c7b2e8703dabd06bbef253c3f9d2ef6f7c085cb 100644 (file)
@@ -1006,21 +1006,6 @@ static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
                            DMA_MEM_TO_MEM, flags);
 }
 
-static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
-       struct dma_chan *dchan,
-       struct scatterlist *dst_sg, unsigned int dst_nents,
-       struct scatterlist *src_sg, unsigned int src_nents,
-       unsigned long flags)
-{
-       struct nbpf_channel *chan = nbpf_to_chan(dchan);
-
-       if (dst_nents != src_nents)
-               return NULL;
-
-       return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
-                           DMA_MEM_TO_MEM, flags);
-}
-
 static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags, void *context)
@@ -1417,13 +1402,11 @@ static int nbpf_probe(struct platform_device *pdev)
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
-       dma_cap_set(DMA_SG, dma_dev->cap_mask);
 
        /* Common and MEMCPY operations */
        dma_dev->device_alloc_chan_resources
                = nbpf_alloc_chan_resources;
        dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
-       dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
        dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
        dma_dev->device_tx_status = nbpf_tx_status;
        dma_dev->device_issue_pending = nbpf_issue_pending;
index 35b24ca80d5e6b9fe1e719579511038e406fca2e..f4edfc56f34ef65dc34e50e38fd3c9aa258364fd 100644 (file)
@@ -2486,19 +2486,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
                           DMA_MEM_TO_MEM, dma_flags);
 }
 
-static struct dma_async_tx_descriptor *
-d40_prep_memcpy_sg(struct dma_chan *chan,
-                  struct scatterlist *dst_sg, unsigned int dst_nents,
-                  struct scatterlist *src_sg, unsigned int src_nents,
-                  unsigned long dma_flags)
-{
-       if (dst_nents != src_nents)
-               return NULL;
-
-       return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
-                          DMA_MEM_TO_MEM, dma_flags);
-}
-
 static struct dma_async_tx_descriptor *
 d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                  unsigned int sg_len, enum dma_transfer_direction direction,
@@ -2823,9 +2810,6 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
                dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
        }
 
-       if (dma_has_cap(DMA_SG, dev->cap_mask))
-               dev->device_prep_dma_sg = d40_prep_memcpy_sg;
-
        if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
                dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
 
@@ -2867,7 +2851,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
        dma_cap_zero(base->dma_memcpy.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-       dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
 
        d40_ops_init(base, &base->dma_memcpy);
 
@@ -2885,7 +2868,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
        dma_cap_zero(base->dma_both.cap_mask);
        dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
        dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
-       dma_cap_set(DMA_SG, base->dma_both.cap_mask);
        dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
 
        d40_ops_init(base, &base->dma_both);
index cd224b6eac4b49625cb9d85f8d243204cfeb7715..1d5988849aa690abf349b0eb810277659c6d2061 100644 (file)
@@ -420,48 +420,6 @@ static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
                                XGENE_DMA_DESC_HOENQ_NUM_POS);
 }
 
-static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
-                                   struct xgene_dma_desc_sw *desc_sw,
-                                   dma_addr_t dst, dma_addr_t src,
-                                   size_t len)
-{
-       struct xgene_dma_desc_hw *desc1, *desc2;
-       int i;
-
-       /* Get 1st descriptor */
-       desc1 = &desc_sw->desc1;
-       xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
-
-       /* Set destination address */
-       desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
-       desc1->m3 |= cpu_to_le64(dst);
-
-       /* Set 1st source address */
-       xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
-
-       if (!len)
-               return;
-
-       /*
-        * We need to split this source buffer,
-        * and need to use 2nd descriptor
-        */
-       desc2 = &desc_sw->desc2;
-       desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
-
-       /* Set 2nd to 5th source address */
-       for (i = 0; i < 4 && len; i++)
-               xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
-                                        &len, &src);
-
-       /* Invalidate unused source address field */
-       for (; i < 4; i++)
-               xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
-
-       /* Updated flag that we have prepared 64B descriptor */
-       desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
-}
-
 static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
                                    struct xgene_dma_desc_sw *desc_sw,
                                    dma_addr_t *dst, dma_addr_t *src,
@@ -886,114 +844,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
        chan->desc_pool = NULL;
 }
 
-static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
-       struct dma_chan *dchan, struct scatterlist *dst_sg,
-       u32 dst_nents, struct scatterlist *src_sg,
-       u32 src_nents, unsigned long flags)
-{
-       struct xgene_dma_desc_sw *first = NULL, *new = NULL;
-       struct xgene_dma_chan *chan;
-       size_t dst_avail, src_avail;
-       dma_addr_t dst, src;
-       size_t len;
-
-       if (unlikely(!dchan))
-               return NULL;
-
-       if (unlikely(!dst_nents || !src_nents))
-               return NULL;
-
-       if (unlikely(!dst_sg || !src_sg))
-               return NULL;
-
-       chan = to_dma_chan(dchan);
-
-       /* Get prepared for the loop */
-       dst_avail = sg_dma_len(dst_sg);
-       src_avail = sg_dma_len(src_sg);
-       dst_nents--;
-       src_nents--;
-
-       /* Run until we are out of scatterlist entries */
-       while (true) {
-               /* Create the largest transaction possible */
-               len = min_t(size_t, src_avail, dst_avail);
-               len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
-               if (len == 0)
-                       goto fetch;
-
-               dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-               src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
-
-               /* Allocate the link descriptor from DMA pool */
-               new = xgene_dma_alloc_descriptor(chan);
-               if (!new)
-                       goto fail;
-
-               /* Prepare DMA descriptor */
-               xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
-
-               if (!first)
-                       first = new;
-
-               new->tx.cookie = 0;
-               async_tx_ack(&new->tx);
-
-               /* update metadata */
-               dst_avail -= len;
-               src_avail -= len;
-
-               /* Insert the link descriptor to the LD ring */
-               list_add_tail(&new->node, &first->tx_list);
-
-fetch:
-               /* fetch the next dst scatterlist entry */
-               if (dst_avail == 0) {
-                       /* no more entries: we're done */
-                       if (dst_nents == 0)
-                               break;
-
-                       /* fetch the next entry: if there are no more: done */
-                       dst_sg = sg_next(dst_sg);
-                       if (!dst_sg)
-                               break;
-
-                       dst_nents--;
-                       dst_avail = sg_dma_len(dst_sg);
-               }
-
-               /* fetch the next src scatterlist entry */
-               if (src_avail == 0) {
-                       /* no more entries: we're done */
-                       if (src_nents == 0)
-                               break;
-
-                       /* fetch the next entry: if there are no more: done */
-                       src_sg = sg_next(src_sg);
-                       if (!src_sg)
-                               break;
-
-                       src_nents--;
-                       src_avail = sg_dma_len(src_sg);
-               }
-       }
-
-       if (!new)
-               return NULL;
-
-       new->tx.flags = flags; /* client is in control of this ack */
-       new->tx.cookie = -EBUSY;
-       list_splice(&first->tx_list, &new->tx_list);
-
-       return &new->tx;
-fail:
-       if (!first)
-               return NULL;
-
-       xgene_dma_free_desc_list(chan, &first->tx_list);
-       return NULL;
-}
-
 static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
        struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
        u32 src_cnt, size_t len, unsigned long flags)
@@ -1648,7 +1498,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
        dma_cap_zero(dma_dev->cap_mask);
 
        /* Set DMA device capability */
-       dma_cap_set(DMA_SG, dma_dev->cap_mask);
 
        /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
         * and channel 1 supports XOR, PQ both. First thing here is we have
@@ -1674,7 +1523,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
        dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
        dma_dev->device_issue_pending = xgene_dma_issue_pending;
        dma_dev->device_tx_status = xgene_dma_tx_status;
-       dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
 
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
@@ -1726,8 +1574,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
 
        /* DMA capability info */
        dev_info(pdma->dev,
-                "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
-                dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+                "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
                 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");