net: macb: Add support for partial store and forward
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index da106e81e092e9e34d6a225ea1873b236542b851..5e44d73b4008de6c375fbd09a8dbc7095b697faa 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -35,6 +35,7 @@
 #include <linux/ip.h>
 #include <linux/udp.h>
 #include <linux/tcp.h>
+#include <linux/crc32.h>
 #include "macb.h"
 
 #define MACB_RX_BUFFER_SIZE    128
@@ -530,7 +531,7 @@ static int macb_mii_probe(struct net_device *dev)
 static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
-       struct device_node *np;
+       struct device_node *np, *mdio_np;
        int err = -ENXIO, i;
 
        /* Enable management port */
@@ -548,49 +549,41 @@ static int macb_mii_init(struct macb *bp)
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                 bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->pdev->dev;
+       bp->mii_bus->parent = &bp->dev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        np = bp->pdev->dev.of_node;
-       if (np) {
-               if (of_phy_is_fixed_link(np)) {
-                       if (of_phy_register_fixed_link(np) < 0) {
-                               dev_err(&bp->pdev->dev,
-                                       "broken fixed-link specification\n");
-                               goto err_out_unregister_bus;
-                       }
-                       bp->phy_node = of_node_get(np);
-
-                       err = mdiobus_register(bp->mii_bus);
-               } else {
-                       /* try dt phy registration */
-                       err = of_mdiobus_register(bp->mii_bus, np);
-
-                       /* fallback to standard phy registration if no phy were
-                        * found during dt phy registration
-                        */
-                       if (!err && !phy_find_first(bp->mii_bus)) {
-                               for (i = 0; i < PHY_MAX_ADDR; i++) {
-                                       struct phy_device *phydev;
-
-                                       phydev = mdiobus_scan(bp->mii_bus, i);
-                                       if (IS_ERR(phydev) &&
-                                           PTR_ERR(phydev) != -ENODEV) {
-                                               err = PTR_ERR(phydev);
-                                               break;
-                                       }
+       mdio_np = of_get_child_by_name(np, "mdio");
+       if (mdio_np) {
+               err = of_mdiobus_register(bp->mii_bus, mdio_np);
+               of_node_put(mdio_np);
+               if (err)
+                       goto err_out_unregister_bus;
+       } else if (np) {
+               /* try DT PHY registration */
+               err = of_mdiobus_register(bp->mii_bus, np);
+
+               /* Fall back to standard PHY registration if no PHY was
+                * found during DT PHY registration.
+                */
+               if (!err && !phy_find_first(bp->mii_bus)) {
+                       for (i = 0; i < PHY_MAX_ADDR; i++) {
+                               struct phy_device *phydev;
+
+                               phydev = mdiobus_scan(bp->mii_bus, i);
+                               if (IS_ERR(phydev) &&
+                                   PTR_ERR(phydev) != -ENODEV) {
+                                       err = PTR_ERR(phydev);
+                                       break;
                                }
-
-                               if (err)
-                                       goto err_out_unregister_bus;
                        }
+
+                       if (err)
+                               goto err_out_unregister_bus;
                }
        } else {
-               for (i = 0; i < PHY_MAX_ADDR; i++)
-                       bp->mii_bus->irq[i] = PHY_POLL;
-
                if (pdata)
                        bp->mii_bus->phy_mask = pdata->phy_mask;
 
@@ -954,6 +947,15 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
         */
 }
 
+static int macb_validate_hw_csum(struct sk_buff *skb)
+{
+       u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
+       u32 csum = ~crc32_le(~0, skb_mac_header(skb),
+                            skb->len + ETH_HLEN - ETH_FCS_LEN);
+
+       return (pkt_csum != csum);
+}
+
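
For reference, macb_validate_hw_csum() above implements the standard IEEE 802.3 FCS check: the reflected CRC-32 (polynomial 0xEDB88320) of the frame minus its trailing 4 bytes, which are the FCS itself in little-endian byte order. A self-contained user-space sketch of the same comparison, assuming a little-endian host just as the driver's direct u32 load does:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Reflected CRC-32 (polynomial 0xEDB88320), bitwise; this matches the
     * kernel's ~crc32_le(~0, data, len) used by macb_validate_hw_csum().
     */
    static uint32_t eth_crc32(const uint8_t *data, size_t len)
    {
            uint32_t crc = ~0u;

            while (len--) {
                    crc ^= *data++;
                    for (int bit = 0; bit < 8; bit++)
                            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
            }
            return ~crc;
    }

    /* Nonzero when the trailing FCS does not match, like the driver helper. */
    static int frame_fcs_bad(const uint8_t *frame, size_t frame_len)
    {
            uint32_t fcs;

            memcpy(&fcs, frame + frame_len - 4, sizeof(fcs));
            return fcs != eth_crc32(frame, frame_len - 4);
    }
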
 static int gem_rx(struct macb *bp, int budget)
 {
        unsigned int            len;
@@ -1007,6 +1009,16 @@ static int gem_rx(struct macb *bp, int budget)
                                 bp->rx_buffer_size, DMA_FROM_DEVICE);
 
                skb->protocol = eth_type_trans(skb, bp->dev);
+
+               /* Validate MAC FCS if RX checksum offload is disabled */
+               if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+                       if (macb_validate_hw_csum(skb)) {
+                               netdev_err(bp->dev, "incorrect FCS\n");
+                               bp->dev->stats.rx_dropped++;
+                               break;
+                       }
+               }
+
                skb_checksum_none_assert(skb);
                if (bp->dev->features & NETIF_F_RXCSUM &&
                    !(bp->dev->flags & IFF_PROMISC) &&
@@ -1101,6 +1113,19 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                        break;
        }
 
+       /* Validate MAC FCS if RX checksum offload is disabled */
+       if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+               if (macb_validate_hw_csum(skb)) {
+                       netdev_err(bp->dev, "incorrect FCS\n");
+                       bp->dev->stats.rx_dropped++;
+
+                       /* Make descriptor updates visible to hardware */
+                       wmb();
+
+                       return 1;
+               }
+       }
+
        /* Make descriptor updates visible to hardware */
        wmb();
 
@@ -1241,6 +1266,62 @@ static int macb_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static void macb_hresp_error_task(unsigned long data)
+{
+       struct macb *bp = (struct macb *)data;
+       struct net_device *dev = bp->dev;
+       struct macb_queue *queue = bp->queues;
+       unsigned int q;
+       u32 ctrl;
+
+       for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+               queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+                                        MACB_TX_INT_FLAGS |
+                                        MACB_BIT(HRESP));
+       }
+       ctrl = macb_readl(bp, NCR);
+       ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+       macb_writel(bp, NCR, ctrl);
+
+       netif_tx_stop_all_queues(dev);
+       netif_carrier_off(dev);
+
+       bp->macbgem_ops.mog_init_rings(bp);
+
+       macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+               macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+#endif
+       for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+               queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+                       queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
+#endif
+               /* We only use the first queue at the moment. Remaining
+                * queues must be tied-off before we enable the receiver.
+                *
+                * See the documentation for receive_q1_ptr for more info.
+                */
+               if (q)
+                       queue_writel(queue, RBQP,
+                                    lower_32_bits(bp->rx_ring_tieoff_dma));
+
+               /* Enable interrupts */
+               queue_writel(queue, IER,
+                            MACB_RX_INT_FLAGS |
+                            MACB_TX_INT_FLAGS |
+                            MACB_BIT(HRESP));
+       }
+
+       ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+       macb_writel(bp, NCR, ctrl);
+
+       netif_carrier_on(dev);
+       netif_tx_start_all_queues(dev);
+}
+
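
The reset sequence above deliberately runs outside hard-IRQ context: macb_interrupt() only schedules it (see the HRESP hunk further down, and the tasklet_init() call added in macb_probe()). A minimal sketch of that deferral pattern, using the old (unsigned long data) tasklet API this kernel provides; all example_ names are illustrative:

    #include <linux/interrupt.h>

    struct example_priv {
            struct tasklet_struct err_tasklet;
    };

    /* Runs later in softirq context, where the heavyweight reset is safe. */
    static void example_err_task(unsigned long data)
    {
            struct example_priv *priv = (struct example_priv *)data;

            /* ... stop queues, re-init rings, re-enable RX/TX, as above ... */
            (void)priv;
    }

    /* Hard-IRQ handler: note the error, defer all real work. */
    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;

            tasklet_schedule(&priv->err_tasklet);
            return IRQ_HANDLED;
    }

    static void example_setup(struct example_priv *priv)
    {
            tasklet_init(&priv->err_tasklet, example_err_task,
                         (unsigned long)priv);
    }
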
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
        struct macb_queue *queue = dev_id;
@@ -1330,10 +1411,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                }
 
                if (status & MACB_BIT(HRESP)) {
-                       /* TODO: Reset the hardware, and maybe move the
-                        * netdev_err to a lower-priority context as well
-                        * (work queue?)
-                        */
+                       tasklet_schedule(&bp->hresp_err_tasklet);
                        netdev_err(dev, "DMA bus error: HRESP not OK\n");
 
                        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1752,6 +1830,12 @@ static void macb_free_consistent(struct macb *bp)
                bp->rx_ring = NULL;
        }
 
+       if (bp->rx_ring_tieoff) {
+               dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+                                 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+               bp->rx_ring_tieoff = NULL;
+       }
+
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                kfree(queue->tx_skb);
                queue->tx_skb = NULL;
@@ -1823,6 +1907,19 @@ static int macb_alloc_consistent(struct macb *bp)
                                         &bp->rx_ring_dma, GFP_KERNEL);
        if (!bp->rx_ring)
                goto out_err;
+
+       /* If we have more than one queue, allocate a tie-off descriptor
+        * that will be used to disable unused RX queues.
+        */
+       if (bp->num_queues > 1) {
+               bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+                                               macb_dma_desc_get_size(bp),
+                                               &bp->rx_ring_tieoff_dma,
+                                               GFP_KERNEL);
+               if (!bp->rx_ring_tieoff)
+                       goto out_err;
+       }
+
        netdev_dbg(bp->dev,
                   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
@@ -1837,6 +1934,19 @@ out_err:
        return -ENOMEM;
 }
 
+static void macb_init_tieoff(struct macb *bp)
+{
+       struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+       if (bp->num_queues > 1) {
+               /* Set up a wrapping descriptor with no free slots
+                * (WRAP and USED) to tie off/disable unused RX queues.
+                */
+               macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+               d->ctrl = 0;
+       }
+}
+
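
A tie-off works because a queue pointed at a single descriptor marked WRAP and USED can never accept a frame: USED tells the controller the slot is already owned by software, and WRAP loops the one-entry ring back onto itself. An illustrative sketch of such a descriptor; the bit positions here are made up, the real layout lives in macb.h:

    #include <stdint.h>

    #define EX_RX_USED      (1u << 0)       /* slot already owned by software */
    #define EX_RX_WRAP      (1u << 1)       /* last descriptor in the ring */

    struct ex_dma_desc {
            uint32_t addr;                  /* buffer address + status bits */
            uint32_t ctrl;
    };

    /* One-entry ring with no free slot: the queue can never accept a frame. */
    static void ex_init_tieoff(struct ex_dma_desc *d)
    {
            d->addr = EX_RX_WRAP | EX_RX_USED;
            d->ctrl = 0;
    }
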
 static void gem_init_rings(struct macb *bp)
 {
        struct macb_queue *queue;
@@ -1859,6 +1969,7 @@ static void gem_init_rings(struct macb *bp)
        bp->rx_prepared_head = 0;
 
        gem_rx_refill(bp);
+       macb_init_tieoff(bp);
 }
 
 static void macb_init_rings(struct macb *bp)
@@ -1876,6 +1987,8 @@ static void macb_init_rings(struct macb *bp)
        bp->queues[0].tx_head = 0;
        bp->queues[0].tx_tail = 0;
        desc->ctrl |= MACB_BIT(TX_WRAP);
+
+       macb_init_tieoff(bp);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1895,6 +2008,10 @@ static void macb_reset_hw(struct macb *bp)
        macb_writel(bp, TSR, -1);
        macb_writel(bp, RSR, -1);
 
+       /* Disable RX partial store and forward and reset watermark value */
+       if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+               gem_writel(bp, PBUFRXCUT, 0xFFF);
+
        /* Disable all interrupts */
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                queue_writel(queue, IDR, -1);
@@ -2024,7 +2141,11 @@ static void macb_init_hw(struct macb *bp)
                config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
        config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
        config |= MACB_BIT(PAE);                /* PAuse Enable */
-       config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
+
+       /* Do not discard Rx FCS if RX checksum offload is disabled */
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
+
        if (bp->caps & MACB_CAPS_JUMBO)
                config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
        else
@@ -2040,13 +2161,24 @@ static void macb_init_hw(struct macb *bp)
        if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
                gem_writel(bp, JML, bp->jumbo_max_len);
        bp->speed = SPEED_10;
-       bp->duplex = DUPLEX_HALF;
+       if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+               bp->duplex = DUPLEX_FULL;
+       else
+               bp->duplex = DUPLEX_HALF;
        bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
        if (bp->caps & MACB_CAPS_JUMBO)
                bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 
        macb_configure_dma(bp);
 
+       /* Enable RX partial store and forward and set watermark */
+       if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+               gem_writel(bp, PBUFRXCUT,
+                          (gem_readl(bp, PBUFRXCUT) &
+                          GEM_BF(WTRMRK, bp->rx_watermark)) |
+                          GEM_BIT(ENCUTTHRU));
+       }
+
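
PBUFRXCUT packs a 12-bit RX watermark together with a cut-through enable bit, written through the driver's GEM_BF()/GEM_BIT() field macros. The read-and-AND above apparently relies on macb_reset_hw() having first written all-ones (0xFFF) to the watermark field, so the AND reduces it to the requested value. A sketch of the offset/width field packing with illustrative positions:

    #include <stdint.h>

    #define EX_WTRMRK_OFFSET        0       /* illustrative field position */
    #define EX_WTRMRK_SIZE          12
    #define EX_ENCUTTHRU            (1u << 31)

    /* Insert value into a field described by bit offset and width. */
    #define EX_BF(off, sz, value) \
            ((((uint32_t)(value)) & ((1u << (sz)) - 1)) << (off))

    static uint32_t ex_pbufrxcut(uint16_t watermark)
    {
            return EX_BF(EX_WTRMRK_OFFSET, EX_WTRMRK_SIZE, watermark) |
                   EX_ENCUTTHRU;
    }
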
        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
@@ -2059,6 +2191,14 @@ static void macb_init_hw(struct macb *bp)
                if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                        queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
+               /* We only use the first queue at the moment. Remaining
+                * queues must be tied-off before we enable the receiver.
+                *
+                * See the documentation for receive_q1_ptr for more info.
+                */
+               if (q)
+                       queue_writel(queue, RBQP,
+                                    lower_32_bits(bp->rx_ring_tieoff_dma));
 
                /* Enable interrupts */
                queue_writel(queue, IER,
@@ -2714,10 +2854,29 @@ static void macb_configure_caps(struct macb *bp,
                                const struct macb_config *dt_conf)
 {
        u32 dcfg;
+       int retval;
 
        if (dt_conf)
                bp->caps = dt_conf->caps;
 
+       /* Partial store and forward mode is enabled by default for zynqmp.
+        * Disable it if no valid rx-watermark is given in the devicetree.
+        */
+       if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+               retval = of_property_read_u16(bp->pdev->dev.of_node,
+                                             "rx-watermark",
+                                             &bp->rx_watermark);
+
+               /* Disable partial store and forward in case of error or
+                * invalid watermark value
+                */
+               if (retval || bp->rx_watermark > 0xFFF) {
+                       dev_info(&bp->pdev->dev,
+                                "Not enabling partial store and forward\n");
+                       bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
+               }
+       }
+
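
A condensed sketch of the parse-and-validate step above, assuming the standard of_property_read_u16() helper (which returns 0 on success); the 0xFFF bound matches the 12-bit watermark field:

    #include <linux/of.h>

    /* Illustrative helper: true when a valid "rx-watermark" is present. */
    static bool example_want_cutthru(struct device_node *np, u16 *wm)
    {
            if (of_property_read_u16(np, "rx-watermark", wm))
                    return false;           /* property missing or malformed */

            return *wm <= 0xFFF;            /* must fit the 12-bit field */
    }
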
        if (hw_is_gem(bp->regs, bp->native_io)) {
                bp->caps |= MACB_CAPS_MACB_IS_GEM;
 
@@ -2910,6 +3069,7 @@ static int macb_init(struct platform_device *pdev)
                        if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                                queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
+                       queue->RBQP = GEM_RBQP(hw_q - 1);
                } else {
                        /* queue0 uses legacy registers */
                        queue->ISR  = MACB_ISR;
@@ -2921,6 +3081,7 @@ static int macb_init(struct platform_device *pdev)
                        if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                                queue->TBQPH = MACB_TBQPH;
 #endif
+                       queue->RBQP = MACB_RBQP;
                }
 
                /* get irq: here we use the linux queue index, not the hardware
@@ -2972,6 +3133,8 @@ static int macb_init(struct platform_device *pdev)
        /* Checksum offload is only available on gem with packet buffer */
        if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
                dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+               dev->hw_features &= ~NETIF_F_RXCSUM;
        if (bp->caps & MACB_CAPS_SG_DISABLED)
                dev->hw_features &= ~NETIF_F_SG;
        dev->features = dev->hw_features;
@@ -3382,7 +3545,8 @@ static const struct macb_config np4_config = {
 
 static const struct macb_config zynqmp_config = {
        .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
-               MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_PCS,
+               MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_PCS |
+               MACB_CAPS_PARTIAL_STORE_FORWARD,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -3588,6 +3752,9 @@ static int macb_probe(struct platform_device *pdev)
 
        netif_carrier_off(dev);
 
+       tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+                    (unsigned long)bp);
+
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);