rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
ethernet: xilinx: Add ACP Port support
authorKedareswara rao Appana <appana.durga.rao@xilinx.com>
Wed, 6 May 2015 11:53:56 +0000 (17:23 +0530)
committerMichal Simek <michal.simek@xilinx.com>
Thu, 7 May 2015 13:02:33 +0000 (15:02 +0200)
This patch adds support for ACP
port based designs.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c

index 720a6e50e4f6c8fca292defc291e5d192bf5bb73..725619ae046756809b66b3e94950e12da1cca738 100644 (file)
@@ -422,6 +422,7 @@ struct axidma_bd {
  *               1522 bytes (assuming support for basic VLAN)
  * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
  *                can handle jumbo packets, this entry will be 1, else 0.
+ * @use_acpport:  Tells whether the design contains the acp port or not.
  */
 struct axienet_local {
        struct net_device *ndev;
@@ -468,6 +469,7 @@ struct axienet_local {
 
        u32 coalesce_count_rx;
        u32 coalesce_count_tx;
+       bool use_acpport;
 };
 
 /**
index 54cfe488e656bc35c3df3b99359e26bb9909b45e..142c99ec12fd39f5beb433d71e32950fd158d01b 100644 (file)
@@ -155,23 +155,35 @@ static void axienet_dma_bd_release(struct net_device *ndev)
        struct axienet_local *lp = netdev_priv(ndev);
 
        for (i = 0; i < RX_BD_NUM; i++) {
-               dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
-                                lp->max_frm_size, DMA_FROM_DEVICE);
+               if (lp->use_acpport) {
+                       dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+                                        lp->max_frm_size, DMA_FROM_DEVICE);
+               }
                dev_kfree_skb((struct sk_buff *)
                              (lp->rx_bd_v[i].sw_id_offset));
        }
 
        if (lp->rx_bd_v) {
-               dma_free_coherent(ndev->dev.parent,
-                                 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-                                 lp->rx_bd_v,
-                                 lp->rx_bd_p);
+               if (lp->use_acpport) {
+                       kfree(lp->rx_bd_v);
+                       lp->rx_bd_v = NULL;
+               } else {
+                       dma_free_coherent(ndev->dev.parent,
+                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+                                         lp->rx_bd_v,
+                                         lp->rx_bd_p);
+               }
        }
        if (lp->tx_bd_v) {
-               dma_free_coherent(ndev->dev.parent,
-                                 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-                                 lp->tx_bd_v,
-                                 lp->tx_bd_p);
+               if (lp->use_acpport) {
+                       kfree(lp->tx_bd_v);
+                       lp->tx_bd_v = NULL;
+               } else {
+                       dma_free_coherent(ndev->dev.parent,
+                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+                                         lp->tx_bd_v,
+                                         lp->tx_bd_p);
+               }
        }
 }
 
@@ -198,15 +210,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
        lp->rx_bd_ci = 0;
 
        /* Allocate the Tx and Rx buffer descriptors. */
-       lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-                                         &lp->tx_bd_p, GFP_KERNEL);
+       if (lp->use_acpport) {
+               lp->tx_bd_v = kmalloc(sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+                                     GFP_DMA);
+               lp->tx_bd_p = virt_to_phys(lp->tx_bd_v);
+               memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
+       } else {
+               lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+                                       sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+                                       &lp->tx_bd_p, GFP_KERNEL);
+       }
+
        if (!lp->tx_bd_v)
                goto out;
 
-       lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-                                         &lp->rx_bd_p, GFP_KERNEL);
+       if (lp->use_acpport) {
+               lp->rx_bd_v = kmalloc(sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+                                     GFP_DMA);
+               lp->rx_bd_p = virt_to_phys(lp->rx_bd_v);
+               memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
+       } else {
+               lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+                                       sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+                                       &lp->rx_bd_p, GFP_KERNEL);
+       }
+
        if (!lp->rx_bd_v)
                goto out;
 
@@ -226,10 +254,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
                        goto out;
 
                lp->rx_bd_v[i].sw_id_offset = (u32) skb;
-               lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
-                                                    skb->data,
-                                                    lp->max_frm_size,
-                                                    DMA_FROM_DEVICE);
+               if (lp->use_acpport)
+                       lp->rx_bd_v[i].phys = virt_to_phys(skb->data);
+               else
+                       lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+                                                            skb->data,
+                                                            lp->max_frm_size,
+                                                            DMA_FROM_DEVICE);
                lp->rx_bd_v[i].cntrl = lp->max_frm_size;
        }
 
@@ -590,9 +621,11 @@ static void axienet_start_xmit_done(struct net_device *ndev)
        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        status = cur_p->status;
        while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
-               dma_unmap_single(ndev->dev.parent, cur_p->phys,
-                               (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
-                               DMA_TO_DEVICE);
+               if (!lp->use_acpport)
+                       dma_unmap_single(ndev->dev.parent, cur_p->phys,
+                                        (cur_p->cntrl &
+                                        XAXIDMA_BD_CTRL_LENGTH_MASK),
+                                        DMA_TO_DEVICE);
                if (cur_p->app4)
                        dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
                /*cur_p->phys = 0;*/
@@ -688,7 +721,11 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
 
        cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
-       cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+
+       if (lp->use_acpport)
+               cur_p->phys = virt_to_phys(skb->data);
+       else
+               cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
 
        for (ii = 0; ii < num_frag; ii++) {
@@ -696,10 +733,14 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                lp->tx_bd_tail %= TX_BD_NUM;
                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                frag = &skb_shinfo(skb)->frags[ii];
-               cur_p->phys = dma_map_single(ndev->dev.parent,
-                                            skb_frag_address(frag),
-                                            skb_frag_size(frag),
-                                            DMA_TO_DEVICE);
+               if (lp->use_acpport)
+                       cur_p->phys = virt_to_phys(skb_frag_address(frag));
+               else
+                       cur_p->phys = dma_map_single(ndev->dev.parent,
+                                               skb_frag_address(frag),
+                                               skb_frag_size(frag),
+                                               DMA_TO_DEVICE);
+
                cur_p->cntrl = skb_frag_size(frag);
        }
 
@@ -751,9 +792,10 @@ static int axienet_recv(struct net_device *ndev, int budget)
                skb = (struct sk_buff *) (cur_p->sw_id_offset);
                length = cur_p->app4 & 0x0000FFFF;
 
-               dma_unmap_single(ndev->dev.parent, cur_p->phys,
-                                lp->max_frm_size,
-                                DMA_FROM_DEVICE);
+               if (!lp->use_acpport)
+                       dma_unmap_single(ndev->dev.parent, cur_p->phys,
+                                        lp->max_frm_size,
+                                        DMA_FROM_DEVICE);
 
                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, ndev);
@@ -786,9 +828,14 @@ static int axienet_recv(struct net_device *ndev, int budget)
                        break;
                }
 
-               cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
-                                            lp->max_frm_size,
-                                            DMA_FROM_DEVICE);
+               if (lp->use_acpport)
+                       cur_p->phys = virt_to_phys(new_skb->data);
+               else
+                       cur_p->phys = dma_map_single(ndev->dev.parent,
+                                               new_skb->data,
+                                               lp->max_frm_size,
+                                               DMA_FROM_DEVICE);
+
                cur_p->cntrl = lp->max_frm_size;
                cur_p->status = 0;
                cur_p->sw_id_offset = (u32) new_skb;
@@ -1470,7 +1517,8 @@ static void axienet_dma_err_handler(unsigned long data)
        for (i = 0; i < TX_BD_NUM; i++) {
                cur_p = &lp->tx_bd_v[i];
                if (cur_p->phys)
-                       dma_unmap_single(ndev->dev.parent, cur_p->phys,
+                       if (!lp->use_acpport)
+                               dma_unmap_single(ndev->dev.parent, cur_p->phys,
                                         (cur_p->cntrl &
                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
                                         DMA_TO_DEVICE);
@@ -1664,6 +1712,8 @@ static int axienet_probe(struct platform_device *pdev)
         */
        of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
        of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
+       lp->use_acpport = of_property_read_bool(pdev->dev.of_node,
+                                               "xlnx,has-acp");
 
        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
        np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);