u8 *buf = thread->srcs[i] + src_off;
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
- DMA_TO_DEVICE);
+ DMA_MEM_TO_DEV);
}
- /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+ /* map with DMA_MEM_TO_MEM to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
- DMA_BIDIRECTIONAL);
+ DMA_MEM_TO_MEM);
}
if (!tx) {
for (i = 0; i < src_cnt; i++)
dma_unmap_single(dev->dev, dma_srcs[i], len,
- DMA_TO_DEVICE);
+ DMA_MEM_TO_DEV);
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(dev->dev, dma_dsts[i],
test_buf_size,
- DMA_BIDIRECTIONAL);
+ DMA_MEM_TO_MEM);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
for (i = 0; i < dst_cnt; i++)
/* streaming unmap keeps enum dma_data_direction */
dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
DMA_BIDIRECTIONAL);
error_count = 0;
/* dma_map_single()/dma_unmap_single() take enum dma_data_direction,
 * not the new transfer-direction constants -- keep the originals. */
dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
DMA_TO_DEVICE);
}
dma_dsts[i] = dma_map_single(rx_dev->dev, thread->dsts[i],
test_buf_size,
DMA_TO_DEVICE);
dma_unmap_single(rx_dev->dev, dma_dsts[i],
test_buf_size,
DMA_TO_DEVICE);
dma_dsts[i] = dma_map_single(rx_dev->dev, thread->dsts[i],
test_buf_size,
DMA_FROM_DEVICE);
}
sg_init_table(tx_sg, bd_cnt);
(unsigned long)&config);
rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, bd_cnt,
- DMA_FROM_DEVICE, flags);
+ DMA_DEV_TO_MEM, flags, NULL);
txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, bd_cnt,
- DMA_TO_DEVICE, flags);
+ DMA_MEM_TO_DEV, flags, NULL);
if (!rxd || !txd) {
for (i = 0; i < src_cnt; i++)
/* streaming unmap keeps enum dma_data_direction */
dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
DMA_TO_DEVICE);
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(rx_dev->dev, dma_dsts[i],
test_buf_size,
DMA_FROM_DEVICE);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
for (i = 0; i < dst_cnt; i++)
/* streaming unmap keeps enum dma_data_direction */
dma_unmap_single(rx_dev->dev, dma_dsts[i], test_buf_size,
DMA_FROM_DEVICE);
error_count = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE | DMA_PRIVATE, mask);
- direction = DMA_TO_DEVICE;
+ direction = DMA_MEM_TO_DEV;
match = (direction & 0xFF) | XILINX_DMA_IP_DMA;
pr_info("match is %x\n", match);
pr_info("Did not find tx device\n");
}
- direction = DMA_FROM_DEVICE;
+ direction = DMA_DEV_TO_MEM;
match = (direction & 0xFF) | XILINX_DMA_IP_DMA;
rx_chan = dma_request_channel(mask, xdma_filter, &match);
u8 *buf = thread->srcs[i] + src_off;
dma_srcs[i] = dma_map_single(dev->dev, buf, len,
- DMA_TO_DEVICE);
+ DMA_MEM_TO_DEV);
}
- /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
+ /* map with DMA_MEM_TO_MEM to force writeback/invalidate */
for (i = 0; i < dst_cnt; i++) {
dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
test_buf_size,
- DMA_BIDIRECTIONAL);
+ DMA_MEM_TO_MEM);
}
if (!tx) {
for (i = 0; i < src_cnt; i++)
dma_unmap_single(dev->dev, dma_srcs[i], len,
- DMA_TO_DEVICE);
+ DMA_MEM_TO_DEV);
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(dev->dev, dma_dsts[i],
test_buf_size,
- DMA_BIDIRECTIONAL);
+ DMA_MEM_TO_MEM);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
for (i = 0; i < dst_cnt; i++)
dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
- DMA_BIDIRECTIONAL);
+ DMA_MEM_TO_MEM);
error_count = 0;
u8 *buf = thread->srcs[i];
/* dma_map_single() takes enum dma_data_direction -- keep DMA_TO_DEVICE */
dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
DMA_TO_DEVICE);
pr_debug("src buf %x dma %x\n", (unsigned int)buf, dma_srcs[i]);
sg_dma_address(&tx_sg[i]) = dma_srcs[i];
sg_dma_len(&tx_sg[i]) = len;
dma_dsts[i] = dma_map_single(rx_dev->dev, thread->dsts[i],
test_buf_size,
DMA_FROM_DEVICE);
pr_debug("dst %x dma %x\n", thread->dsts[i], dma_dsts[i]);
sg_dma_address(&rx_sg[i]) = dma_dsts[i];
sg_dma_len(&rx_sg[i]) = len;
}
/* Set up hardware configuration information */
- config.direction = DMA_TO_DEVICE;
+ config.direction = DMA_MEM_TO_DEV;
config.vsize = vsize;
config.hsize = hsize;
config.stride = hsize;
config.disable_intr = 0;
tx_dev->device_control(tx_chan, DMA_SLAVE_CONFIG, (unsigned long)&config);
- config.direction = DMA_FROM_DEVICE;
+ config.direction = DMA_DEV_TO_MEM;
config.park = 0;
rx_dev->device_control(rx_chan, DMA_SLAVE_CONFIG, (unsigned long)&config);
rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, frm_cnt,
- DMA_FROM_DEVICE, flags);
+ DMA_DEV_TO_MEM, flags, NULL);
txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, frm_cnt,
- DMA_TO_DEVICE, flags);
+ DMA_MEM_TO_DEV, flags, NULL);
if (!rxd || !txd) {
for (i = 0; i < frm_cnt; i++)
/* streaming unmap keeps enum dma_data_direction */
dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
DMA_TO_DEVICE);
for (i = 0; i < frm_cnt; i++)
dma_unmap_single(rx_dev->dev, dma_dsts[i],
test_buf_size,
DMA_FROM_DEVICE);
pr_warning("%s: #%u: prep error with len=0x%x ",
thread_name, total_tests - 1, len);
msleep(100);
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
for (i = 0; i < frm_cnt; i++)
/* streaming unmap keeps enum dma_data_direction */
dma_unmap_single(rx_dev->dev, dma_dsts[i], test_buf_size,
DMA_FROM_DEVICE);
error_count = 0;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE | DMA_PRIVATE, mask);
- direction = DMA_TO_DEVICE;
+ direction = DMA_MEM_TO_DEV;
match = (direction & 0xFF) | XILINX_DMA_IP_VDMA;
pr_info("match is %x\n", match);
pr_info("Did not find tx device\n");
}
- direction = DMA_FROM_DEVICE;
+ direction = DMA_DEV_TO_MEM;
match = (direction & 0xFF) | XILINX_DMA_IP_VDMA;
rx_chan = dma_request_channel(mask, xdma_filter, &match);
struct device *dev; /* The dma device */
int irq; /* Channel IRQ */
int id; /* Channel ID */
- enum dma_data_direction direction;/* Transfer direction */
+ enum dma_transfer_direction direction;/* Transfer direction */
int max_len; /* Maximum data len per transfer */
int is_lite; /* Whether is light build */
int num_frms; /* Number of frames */
DMA_OUT(&chan->regs->cr, reg);
if ((config->park_frm >= 0) && (config->park_frm < chan->num_frms)) {
- if (config->direction == DMA_TO_DEVICE) {
+ if (config->direction == DMA_MEM_TO_DEV) {
chan_base = (char *)chan->regs;
DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
config->park_frm);
*/
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct xilinx_dma_chan *chan;
struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
* If this is not the first descriptor, chain the
* current descriptor after the previous descriptor
*
- * For the first DMA_TO_DEVICE transfer, set SOP
+ * For the first DMA_MEM_TO_DEV transfer, set SOP
*/
if (!first) {
first = new;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
hw->control |= XILINX_DMA_BD_SOP;
#ifdef TEST_DMA_WITH_LOOPBACK
- hw->app_4 = copy;
+ hw->app_4 = total_len;
#endif
}
} else {
/* Link the last BD with the first BD */
hw->next_desc = first->async_tx.phys;
- if (direction == DMA_TO_DEVICE)
+ if (direction == DMA_MEM_TO_DEV)
hw->control |= XILINX_DMA_BD_EOP;
/* All scatter gather list entries has length == 0 */
*/
static struct dma_async_tx_descriptor *xilinx_vdma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
{
struct xilinx_dma_chan *chan;
struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
flush_fsync = be32_to_cpup(value);
if (feature & XILINX_DMA_IP_CDMA) {
- chan->direction = DMA_BIDIRECTIONAL;
+ chan->direction = DMA_MEM_TO_MEM;
chan->start_transfer = xilinx_cdma_start_transfer;
chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
if (of_device_is_compatible(node,
"xlnx,axi-dma-mm2s-channel"))
- chan->direction = DMA_TO_DEVICE;
+ chan->direction = DMA_MEM_TO_DEV;
if (of_device_is_compatible(node,
"xlnx,axi-dma-s2mm-channel"))
- chan->direction = DMA_FROM_DEVICE;
+ chan->direction = DMA_DEV_TO_MEM;
}
if (of_device_is_compatible(node,
"xlnx,axi-vdma-mm2s-channel")) {
- chan->direction = DMA_TO_DEVICE;
+ chan->direction = DMA_MEM_TO_DEV;
if (!chan->has_SG) {
chan->addr_regs = (struct vdma_addr_regs *)
((u32)xdev->regs +
if (of_device_is_compatible(node,
"xlnx,axi-vdma-s2mm-channel")) {
- chan->direction = DMA_FROM_DEVICE;
+ chan->direction = DMA_DEV_TO_MEM;
if (!chan->has_SG) {
chan->addr_regs = (struct vdma_addr_regs *)
((u32)xdev->regs +
chan->regs = (struct xdma_regs *)xdev->regs;
chan->id = 0;
- if (chan->direction == DMA_FROM_DEVICE) {
+ if (chan->direction == DMA_DEV_TO_MEM) {
chan->regs = (struct xdma_regs *)((u32)xdev->regs +
XILINX_DMA_RX_CHANNEL_OFFSET);
chan->id = 1;
return err;
}
-static int xilinx_dma_of_remove(struct platform_device *op)
+static int __devexit xilinx_dma_of_remove(struct platform_device *op)
{
struct xilinx_dma_device *xdev;
int i;
.of_match_table = xilinx_dma_of_ids,
},
.probe = xilinx_dma_of_probe,
- .remove = xilinx_dma_of_remove,
+ .remove = __devexit_p(xilinx_dma_of_remove),
};
/*----------------------------------------------------------------------------*/
if (feature & XILINX_DMA_IP_CDMA) {
- chan->direction = DMA_BIDIRECTIONAL;
+ chan->direction = DMA_MEM_TO_MEM;
chan->start_transfer = xilinx_cdma_start_transfer;
chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
chan->start_transfer = xilinx_dma_start_transfer;
if (!strcmp(channel_config->type, "axi-dma-mm2s-channel"))
- chan->direction = DMA_TO_DEVICE;
+ chan->direction = DMA_MEM_TO_DEV;
if (!strcmp(channel_config->type, "axi-dma-s2mm-channel"))
- chan->direction = DMA_FROM_DEVICE;
+ chan->direction = DMA_DEV_TO_MEM;
}
if (feature & XILINX_DMA_IP_VDMA) {
printk(KERN_INFO, "axi-vdma-mm2s-channel found\n");
- chan->direction = DMA_TO_DEVICE;
+ chan->direction = DMA_MEM_TO_DEV;
if (!chan->has_SG) {
chan->addr_regs = (struct vdma_addr_regs *)
((u32)xdev->regs +
printk(KERN_INFO, "axi-vdma-s2mm-channel found\n");
- chan->direction = DMA_FROM_DEVICE;
+ chan->direction = DMA_DEV_TO_MEM;
if (!chan->has_SG) {
chan->addr_regs = (struct vdma_addr_regs *)
((u32)xdev->regs +
chan->regs = (struct xdma_regs *)xdev->regs;
chan->id = 0;
- if (chan->direction == DMA_FROM_DEVICE) {
+ if (chan->direction == DMA_DEV_TO_MEM) {
chan->regs = (struct xdma_regs *)((u32)xdev->regs +
XILINX_DMA_RX_CHANNEL_OFFSET);
chan->id = 1;
* If used to set interrupt coalescing and delay counter only for
* Xilinx VDMA, hsize must be -1 */
struct xilinx_dma_config {
- enum dma_data_direction direction; /* Channel direction */
+ enum dma_transfer_direction direction; /* Channel direction */
int vsize; /* Vertical size */
int hsize; /* Horizontal size */
int stride; /* Stride */