#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
+#include <linux/crc32.h>
#include "macb.h"
#define MACB_RX_BUFFER_SIZE 128
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
+ struct phy_device *phydev = bp->phy_dev;
unsigned long flags;
int status_change = 0;
}
pdata = dev_get_platdata(&bp->pdev->dev);
- if (pdata) {
- if (gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev,
- pdata->phy_irq_pin, "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
- }
- } else {
- phydev->irq = PHY_POLL;
+ if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+ ret = devm_gpio_request(&bp->pdev->dev,
+ pdata->phy_irq_pin, "phy int");
+ if (!ret) {
+ phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+ phydev->irq = (phy_irq < 0) ?
+ PHY_POLL : phy_irq;
}
}
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
+ bp->phy_dev = phydev;
return 0;
}
static int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
- struct device_node *np;
+ struct device_node *np, *mdio_np;
int err = -ENXIO, i;
/* Enable management port */
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->pdev->dev;
+ bp->mii_bus->parent = &bp->dev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np) {
- if (of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification\n");
- goto err_out_unregister_bus;
- }
- bp->phy_node = of_node_get(np);
-
- err = mdiobus_register(bp->mii_bus);
- } else {
- /* try dt phy registration */
- err = of_mdiobus_register(bp->mii_bus, np);
-
- /* fallback to standard phy registration if no phy were
- * found during dt phy registration
- */
- if (!err && !phy_find_first(bp->mii_bus)) {
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
-
- phydev = mdiobus_scan(bp->mii_bus, i);
- if (IS_ERR(phydev) &&
- PTR_ERR(phydev) != -ENODEV) {
- err = PTR_ERR(phydev);
- break;
- }
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ err = of_mdiobus_register(bp->mii_bus, mdio_np);
+ of_node_put(mdio_np);
+ if (err)
+ goto err_out_unregister_bus;
+ } else if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* Fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration.
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
+ err = PTR_ERR(phydev);
+ break;
}
-
- if (err)
- goto err_out_unregister_bus;
}
+
+ if (err)
+ goto err_out_unregister_bus;
}
} else {
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
if (pdata)
bp->mii_bus->phy_mask = pdata->phy_mask;
}
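+/* Check the Ethernet FCS of a received frame in software: compute
+ * ~crc32_le() over the frame from the MAC header up to, but not
+ * including, the trailing 4-byte FCS, and compare it with the FCS
+ * left in the buffer by the hardware (DRFCS is clear in this mode,
+ * so the FCS is still present). Returns nonzero on mismatch.
+ */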
+static int macb_validate_hw_csum(struct sk_buff *skb)
+{
+ u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
+ u32 csum = ~crc32_le(~0, skb_mac_header(skb),
+ skb->len + ETH_HLEN - ETH_FCS_LEN);
+
+ return (pkt_csum != csum);
+}
+
static int gem_rx(struct macb *bp, int budget)
{
unsigned int len;
bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Validate the MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+ break;
+ }
+ }
+
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM &&
!(bp->dev->flags & IFF_PROMISC) &&
break;
}
+ /* Validate the MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ return 1;
+ }
+ }
+
/* Make descriptor updates visible to hardware */
wmb();
return work_done;
}
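+/* Recover from an HRESP (bus) error outside interrupt context: mask
+ * interrupts, stop the controller, rebuild the descriptor rings,
+ * reprogram the queue pointers and restart RX/TX. Scheduled as a
+ * tasklet from macb_interrupt().
+ */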
+static void macb_hresp_error_task(unsigned long data)
+{
+ struct macb *bp = (struct macb *)data;
+ struct net_device *dev = bp->dev;
+ struct macb_queue *queue = bp->queues;
+ unsigned int q;
+ u32 ctrl;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, ctrl);
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ bp->macbgem_ops.mog_init_rings(bp);
+
+ macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+#endif
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
+#endif
+ /* We only use the first queue at the moment. Remaining
+ * queues must be tied-off before we enable the receiver.
+ *
+ * See the documentation for receive_q1_ptr for more info.
+ */
+ if (q)
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+
+ /* Enable interrupts */
+ queue_writel(queue, IER,
+ MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
+ macb_writel(bp, NCR, ctrl);
+
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
}
if (status & MACB_BIT(HRESP)) {
- /* TODO: Reset the hardware, and maybe move the
- * netdev_err to a lower-priority context as well
- * (work queue?)
- */
+ tasklet_schedule(&bp->hresp_err_tasklet);
netdev_err(dev, "DMA bus error: HRESP not OK\n");
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
bp->rx_ring = NULL;
}
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
goto out_err;
+
+ /* If we have more than one queue, allocate a tie off descriptor
+ * that will be used to disable unused RX queues.
+ */
+ if (bp->num_queues > 1) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
netdev_dbg(bp->dev,
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
return -ENOMEM;
}
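+/* A queue pointed at the tie-off descriptor never sees a free slot,
+ * because the descriptor is permanently marked USED (and WRAP), so
+ * the queue stays quiescent.
+ */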
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+ if (bp->num_queues > 1) {
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ d->ctrl = 0;
+ }
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
bp->rx_prepared_head = 0;
gem_rx_refill(bp);
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
+ /* Disable RX partial store and forward and reset watermark value */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ gem_writel(bp, PBUFRXCUT, 0xFFF);
+
/* Disable all interrupts */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, -1);
config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
- config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
+ /* Do not discard the Rx FCS if RX checksum offload is disabled */
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
else
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ bp->duplex = DUPLEX_FULL;
+ else
+ bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
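+ /* The AND below relies on macb_reset_hw() writing all-ones to
+ * PBUFRXCUT: masking the register with the new WTRMRK field leaves
+ * exactly the requested watermark before cut-through is enabled.
+ */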
+ /* Enable RX partial store and forward and set watermark */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ gem_writel(bp, PBUFRXCUT,
+ (gem_readl(bp, PBUFRXCUT) &
+ GEM_BF(WTRMRK, bp->rx_watermark)) |
+ GEM_BIT(ENCUTTHRU));
+ }
+
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
+ /* We only use the first queue at the moment. Remaining
+ * queues must be tied-off before we enable the receiver.
+ *
+ * See the documentation for receive_q1_ptr for more info.
+ */
+ if (q)
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
/* Enable interrupts */
queue_writel(queue, IER,
MACB_BIT(HRESP));
}
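+ /* For SGMII links, let the PCS handle auto-negotiation */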
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
/* Enable TX and RX */
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
netif_carrier_off(dev);
/* if the phy is not yet register, retry later*/
- if (!dev->phydev)
+ if (!bp->phy_dev)
return -EAGAIN;
/* RX buffers initialization */
macb_init_hw(bp);
/* schedule a link state check */
- phy_start(dev->phydev);
+ phy_start(bp->phy_dev);
netif_tx_start_all_queues(dev);
netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
- if (dev->phydev)
- phy_stop(dev->phydev);
+ if (bp->phy_dev)
+ phy_stop(bp->phy_dev);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
const struct macb_config *dt_conf)
{
u32 dcfg;
+ int retval;
if (dt_conf)
bp->caps = dt_conf->caps;
+ /* Partial store and forward is enabled by default for ZynqMP.
+ * Disable it when the devicetree provides no valid rx-watermark.
+ */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ retval = of_property_read_u16(bp->pdev->dev.of_node,
+ "rx-watermark",
+ &bp->rx_watermark);
+
+ /* Disable partial store and forward in case of error or
+ * invalid watermark value
+ */
+ if (retval || bp->rx_watermark > 0xFFF) {
+ dev_info(&bp->pdev->dev,
+ "Not enabling partial store and forward\n");
+ bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
+ }
+ }
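+ /* Illustrative devicetree fragment (node name is an example; the
+ * property must be 16 bits wide to match of_property_read_u16()):
+ *
+ * &gem0 {
+ * rx-watermark = /bits/ 16 <0x40>;
+ * };
+ */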
+
if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
+#if defined(CONFIG_OF)
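+/* Pulse the PHY reset GPIO described in the devicetree, if any:
+ * phy-reset-gpio - GPIO connected to the PHY reset pin
+ * phy-reset-duration - length of the reset pulse in ms (default 1)
+ * phy-reset-active-low - the reset pin is asserted low
+ */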
+static void macb_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ u32 msec = 1;
+ bool active_low;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return;
+
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ active_low = of_property_read_bool(np, "phy-reset-active-low");
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpio", 0);
+ if (!gpio_is_valid(phy_reset))
+ return;
+
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ active_low ? GPIOF_OUT_INIT_LOW :
+ GPIOF_OUT_INIT_HIGH, "phy-reset");
+ if (err) {
+ dev_err(&pdev->dev, "failed to get phy-reset-gpio: %d\n", err);
+ return;
+ }
+ msleep(msec);
+ gpio_set_value(phy_reset, active_low);
+}
+#else /* CONFIG_OF */
+static void macb_reset_phy(struct platform_device *pdev)
+{
+}
+#endif /* CONFIG_OF */
+
static void macb_probe_queues(void __iomem *mem,
bool native_io,
unsigned int *queue_mask,
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
+ queue->RBQP = GEM_RBQP(hw_q - 1);
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
queue->TBQPH = MACB_TBQPH;
#endif
+ queue->RBQP = MACB_RBQP;
}
/* get irq: here we use the linux queue index, not the hardware
/* Checksum offload is only available on gem with packet buffer */
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
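+ /* In partial store and forward (cut-through) mode a frame starts
+ * being written to memory before it has been fully received, so the
+ * hardware cannot validate its checksum up front and RX checksum
+ * offload is disabled.
+ */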
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ dev->hw_features &= ~NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
macb_writel(bp, NCFGR, val);
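+ /* For SGMII links, let the PCS handle auto-negotiation */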
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
return 0;
}
MACB_BIT(HRESP));
/* schedule a link state check */
- phy_start(dev->phydev);
+ phy_start(lp->phy_dev);
netif_start_queue(dev);
};
static const struct macb_config zynqmp_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_PCS |
+ MACB_CAPS_PARTIAL_STORE_FORWARD,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
macb_get_hwaddr(bp);
/* Power up the PHY if there is a GPIO reset */
- phy_node = of_get_next_available_child(np, NULL);
- if (phy_node) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err < 0) {
+ dev_err(&pdev->dev, "broken fixed-link specification\n");
+ goto failed_phy;
+ }
+ phy_node = of_node_get(np);
+ bp->phy_node = phy_node;
+ } else {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
if (gpio_is_valid(gpio)) {
gpiod_direction_output(bp->reset_gpio, 1);
}
}
- of_node_put(phy_node);
err = of_get_phy_mode(np);
if (err < 0) {
bp->phy_interface = err;
}
+ macb_reset_phy(pdev);
+
/* IP specific init */
err = init(pdev);
if (err)
goto err_out_free_netdev;
- err = macb_mii_init(bp);
- if (err)
- goto err_out_free_netdev;
-
- phydev = dev->phydev;
-
- netif_carrier_off(dev);
-
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_mdio;
+ goto failed_phy;
}
- phy_attached_info(phydev);
+ err = macb_mii_init(bp);
+ if (err)
+ goto err_out_unregister_netdev;
+
+ netif_carrier_off(dev);
+
+ tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
+ (unsigned long)bp);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
- return 0;
+ phydev = bp->phy_dev;
+ phy_attached_info(phydev);
-err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
- mdiobus_unregister(bp->mii_bus);
- mdiobus_free(bp->mii_bus);
+ return 0;
- /* Shutdown the PHY if there is a GPIO reset */
- if (bp->reset_gpio)
- gpiod_set_value(bp->reset_gpio, 0);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+
+failed_phy:
+ of_node_put(phy_node);
+
err_out_free_netdev:
free_netdev(dev);
+
err_disable_clocks:
clk_disable_unprepare(tx_clk);
clk_disable_unprepare(hclk);
if (dev) {
bp = netdev_priv(dev);
- if (dev->phydev)
- phy_disconnect(dev->phydev);
+ if (bp->phy_dev)
+ phy_disconnect(bp->phy_dev);
mdiobus_unregister(bp->mii_bus);
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);