- Used by the client drivers to register a callback that will be
called on a regular basis through the DMA controller interrupt
+- DMA_SG
+ - The device supports memory to memory scatter-gather
+ transfers.
+ - Even though a plain memcpy can look like a particular case of a
+ scatter-gather transfer, with a single chunk to transfer, it's a
+ distinct transaction type in the mem2mem transfer case (see the
+ sketch below)
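+ - A provider advertising this capability must implement the
+   device_prep_dma_sg() callback; a client holding such a channel
+   can then build a mem2mem copy from two mapped scatterlists
+   (minimal sketch, all names are placeholders):
+
+     tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
+                                src_sg, src_nents, flags);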
+
- DMA_PRIVATE
- The device only supports slave transfers, and as such isn't
return -EIO;
}
+ if (dma_has_cap(DMA_SG, device->cap_mask) && !device->device_prep_dma_sg) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_SG");
+ return -EIO;
+ }
+
if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+ "Number of scatter gather buffers (default: 1)");
+
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-memset (default: 0)");
+ "dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
align = dev->fill_align;
src_cnt = dst_cnt = 1;
is_memset = true;
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
+ src_cnt = dst_cnt = sg_buffers;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
+ struct scatterlist tx_sg[src_cnt];
+ struct scatterlist rx_sg[src_cnt];
total_tests++;
um->bidi_cnt++;
}
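+ /*
+ * Build one sg entry per already DMA-mapped buffer: tx_sg describes
+ * the destinations, rx_sg the sources.
+ */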
+ sg_init_table(tx_sg, src_cnt);
+ sg_init_table(rx_sg, src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ sg_dma_address(&rx_sg[i]) = srcs[i];
+ sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
dsts[0] + dst_off,
*(thread->srcs[0] + src_off),
len, flags);
+ else if (thread->type == DMA_SG)
+ tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
+ rx_sg, src_cnt, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst_off,
op = "copy";
else if (type == DMA_MEMSET)
op = "set";
+ else if (type == DMA_SG)
+ op = "sg";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
}
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
- if (dmatest == 1) {
+ if (dmatest == 2) {
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
thread_count += cnt > 0 ? cnt : 0;
}
}
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dmatest == 1) {
+ cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
+ request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
return &first->async_tx;
}
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ void *desc = NULL, *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
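+ /*
+ * Worst-case descriptor count: each source entry may need several
+ * hardware descriptors of at most ZYNQMP_DMA_MAX_TRANS_LEN bytes.
+ */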
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+ ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt -= desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
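+ /* Prime the running byte counts from the first entry of each list */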
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
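+ /*
+ * Whichever list exhausts its current entry first simply advances to
+ * its next one, so segment sizes need not match across the two lists.
+ */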
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
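+ /*
+ * Copy the largest chunk both current entries can cover, capped at
+ * the hardware's per-descriptor limit.
+ */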
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+ len, prev);
+ prev = desc;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
+
/**
* zynqmp_dma_chan_remove - Channel remove function
* @chan: ZynqMP DMA channel pointer
INIT_LIST_HEAD(&zdev->common.channels);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_SG, zdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
+ p->device_prep_dma_sg = zynqmp_dma_prep_sg;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_issue_pending = zynqmp_dma_issue_pending;
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
+ DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
len, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
+ return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents, flags);
+}
+
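+/*
+ * Illustrative use (names are placeholders, not part of this API): a
+ * client that has mapped both lists with dma_map_sg() could drive a
+ * mem2mem sg copy as follows:
+ *
+ *   tx = dmaengine_prep_dma_sg(chan, dst_sg, dst_nents,
+ *                              src_sg, src_nents,
+ *                              DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *   if (!tx)
+ *           return -ENOMEM;
+ *   cookie = dmaengine_submit(tx);
+ *   dma_async_issue_pending(chan);
+ */
+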
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers