struct axienet_local *lp = netdev_priv(ndev);
for (i = 0; i < RX_BD_NUM; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
+ if (lp->use_acpport) {
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
dev_kfree_skb((struct sk_buff *)
(lp->rx_bd_v[i].sw_id_offset));
}
if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v,
- lp->rx_bd_p);
+ if (lp->use_acpport) {
+ kfree(lp->rx_bd_v);
+ lp->rx_bd_v = NULL;
+ } else {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
+ }
}
if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v,
- lp->tx_bd_p);
+ if (lp->use_acpport) {
+ kfree(lp->tx_bd_v);
+ lp->tx_bd_v = NULL;
+ } else {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+ }
}
}
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
+ if (lp->use_acpport) {
+ /* kzalloc zeroes the descriptors and keeps the NULL
+  * check below meaningful; the previous kmalloc+memset
+  * wrote through a NULL pointer on allocation failure,
+  * and bare GFP_DMA lacked the GFP_KERNEL reclaim flags.
+  */
+ lp->tx_bd_v = kzalloc(sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ GFP_KERNEL | GFP_DMA);
+ if (lp->tx_bd_v)
+ lp->tx_bd_p = virt_to_phys(lp->tx_bd_v);
+ } else {
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
+ }
+
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
+ if (lp->use_acpport) {
+ /* Same pattern as the Tx ring: kzalloc avoids the
+  * memset-of-NULL crash on allocation failure and
+  * supplies proper GFP_KERNEL | GFP_DMA flags.
+  */
+ lp->rx_bd_v = kzalloc(sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ GFP_KERNEL | GFP_DMA);
+ if (lp->rx_bd_v)
+ lp->rx_bd_p = virt_to_phys(lp->rx_bd_v);
+ } else {
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
+ }
+
if (!lp->rx_bd_v)
goto out;
goto out;
lp->rx_bd_v[i].sw_id_offset = (u32) skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ if (lp->use_acpport)
+ lp->rx_bd_v[i].phys = virt_to_phys(skb->data);
+ else
+ lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
+ if (!lp->use_acpport)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
if (cur_p->app4)
dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
/*cur_p->phys = 0;*/
}
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+
+ if (lp->use_acpport)
+ cur_p->phys = virt_to_phys(skb->data);
+ else
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ if (lp->use_acpport)
+ cur_p->phys = virt_to_phys(skb_frag_address(frag));
+ else
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
cur_p->cntrl = skb_frag_size(frag);
}
skb = (struct sk_buff *) (cur_p->sw_id_offset);
length = cur_p->app4 & 0x0000FFFF;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ if (!lp->use_acpport)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
break;
}
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ if (lp->use_acpport)
+ cur_p->phys = virt_to_phys(new_skb->data);
+ else
+ cur_p->phys = dma_map_single(ndev->dev.parent,
+ new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
for (i = 0; i < TX_BD_NUM; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->phys && !lp->use_acpport)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
+ lp->use_acpport = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,has-acp");
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);