/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pm_runtime.h>
#include <linux/crc32.h>
#include <linux/inetdevice.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512	/* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512	/* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
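/* For example, with the default 512-entry TX ring the queue is woken once
 * no more than 3 * 512 / 4 = 384 descriptors are still in flight.
 */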

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
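/* Assuming the usual 11-bit MACB and 14-bit GEM frame-length fields (the
 * widths come from macb.h), these evaluate to 2047 & ~7 = 2040 bytes and
 * 16383 & ~7 = 16376 bytes respectively; illustrative values only.
 */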

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230	/* in us */
#define MACB_PM_TIMEOUT		100	/* ms */
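/* Sanity check on the halt timeout: one maximum-size (1518-byte) frame at
 * 10 Mbit/s takes 1518 * 8 / 10 = 1214.4 us, so 1230 us covers a full
 * frame time with a small margin.
 */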

/* DMA buffer descriptors can have different sizes, depending on the
 * hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}
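
/* Given the two-words-per-extension layouts above, the four descriptor
 * formats occupy 8, 16, 16 and 24 bytes per ring slot respectively.
 */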

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
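
/* The rings are declared as arrays of 8-byte struct macb_dma_desc, so a
 * logical index is scaled by slots-per-descriptor here: doubled for the
 * 16-byte formats and tripled for the 24-byte 64-bit + timestamp format.
 */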

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	index = macb_rx_ring_wrap(bp, index);
	index = macb_adj_dma_desc_idx(bp, index);
	return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	gem_writel(bp, RXPTPUNI, bottom);
	gem_writel(bp, TXPTPUNI, bottom);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;
	int err;
	ulong timeout;

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		return err;

	timeout = jiffies + msecs_to_jiffies(1000);
	/* wait for end of transfer */
	do {
		if (MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
			break;

		cpu_relax();
	} while (!time_after_eq(jiffies, timeout));

	if (time_after_eq(jiffies, timeout)) {
		netdev_err(bp->dev, "wait for end of transfer timed out\n");
		pm_runtime_mark_last_busy(&bp->pdev->dev);
		pm_runtime_put_autosuspend(&bp->pdev->dev);
		return -ETIMEDOUT;
	}

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	timeout = jiffies + msecs_to_jiffies(1000);
	/* wait for end of transfer */
	do {
		if (MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
			break;

		cpu_relax();
	} while (!time_after_eq(jiffies, timeout));

	if (time_after_eq(jiffies, timeout)) {
		netdev_err(bp->dev, "wait for end of transfer timed out\n");
		pm_runtime_mark_last_busy(&bp->pdev->dev);
		pm_runtime_put_autosuspend(&bp->pdev->dev);
		return -ETIMEDOUT;
	}

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int err;
	ulong timeout;

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		return err;

	timeout = jiffies + msecs_to_jiffies(1000);
	/* wait for end of transfer */
	do {
		if (MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
			break;

		cpu_relax();
	} while (!time_after_eq(jiffies, timeout));

	if (time_after_eq(jiffies, timeout)) {
		netdev_err(bp->dev, "wait for end of transfer timed out\n");
		pm_runtime_mark_last_busy(&bp->pdev->dev);
		pm_runtime_put_autosuspend(&bp->pdev->dev);
		return -ETIMEDOUT;
	}

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	timeout = jiffies + msecs_to_jiffies(1000);
	/* wait for end of transfer */
	do {
		if (MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
			break;

		cpu_relax();
	} while (!time_after_eq(jiffies, timeout));

	if (time_after_eq(jiffies, timeout)) {
		netdev_err(bp->dev, "wait for end of transfer timed out\n");
		pm_runtime_mark_last_busy(&bp->pdev->dev);
		pm_runtime_put_autosuspend(&bp->pdev->dev);
		return -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;
}

/**
 * macb_set_tx_clk - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
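
/* Note on the 50 ppm check: ferr / (rate / 100000) expresses the error in
 * units of 0.001 %, so a threshold of 5 corresponds to 50 ppm; at the
 * 125 MHz gigabit rate, for example, that allows roughly a 6.25 kHz
 * deviation.
 */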

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
			ret = devm_gpio_request(&bp->pdev->dev,
						pdata->phy_irq_pin, "phy int");
			if (!ret) {
				phy_irq = gpio_to_irq(pdata->phy_irq_pin);
				phydev->irq = (phy_irq < 0) ?
					      PHY_POLL : phy_irq;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np, *mdio_np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	mdio_np = of_get_child_by_name(np, "mdio");
	if (mdio_np) {
		of_node_put(mdio_np);
		err = of_mdiobus_register(bp->mii_bus, mdio_np);
		if (err)
			goto err_out_unregister_bus;
	} else if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fallback to standard phy registration if no phy were
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev) &&
				    PTR_ERR(phydev) != -ENODEV) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb *bp)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;
		desc = macb_rx_desc(bp, entry);

		if (!bp->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

static int macb_validate_hw_csum(struct sk_buff *skb)
{
	u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
	u32 csum = ~crc32_le(~0, skb_mac_header(skb),
			     skb->len + ETH_HLEN - ETH_FCS_LEN);

	return (pkt_csum != csum);
}

static int gem_rx(struct macb *bp, int budget)
{
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
		desc = macb_rx_desc(bp, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Validate MAC fcs if RX checksum offload disabled */
		if (!(bp->dev->features & NETIF_F_RXCSUM)) {
			if (macb_validate_hw_csum(skb)) {
				netdev_err(bp->dev, "incorrect FCS\n");
				bp->dev->stats.rx_dropped++;
				break;
			}
		}

		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Validate MAC fcs if RX checksum offload disabled */
	if (!(bp->dev->features & NETIF_F_RXCSUM)) {
		if (macb_validate_hw_csum(skb)) {
			netdev_err(bp->dev, "incorrect FCS\n");
			bp->dev->stats.rx_dropped++;

			/* Make descriptor updates visible to hardware */
			wmb();

			return 1;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb *bp)
{
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(bp, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	bp->rx_tail = 0;
}

static int macb_rx(struct macb *bp, int budget)
{
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(bp);
		macb_writel(bp, RBQP, bp->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
		/* We only use the first queue at the moment. Remaining
		 * queues must be tied-off before we enable the receiver.
		 *
		 * See the documentation for receive_q1_ptr for more info.
		 */
		if (q)
			queue_writel(queue, RBQP,
				     lower_32_bits(bp->rx_ring_tieoff_dma));

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		if (status & MACB_BIT(WOL)) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(WOL));
			break;
		}

		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
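
/* Illustrative case: an LSO skb whose linear area carries the headers plus
 * a 1001-byte payload chunk fails the alignment check (1001 is not a
 * multiple of 8), so MACB_NETIF_LSO is dropped here and the stack falls
 * back to software segmentation for that packet.
 */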

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else {
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);
	}

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}
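
	/* Worked example of the descriptor count: a non-LSO skb with a
	 * 4000-byte linear area and a max_tx_length of 2040 (illustrative
	 * numbers) needs DIV_ROUND_UP(4000, 2040) = 2 descriptors before
	 * any fragments are added.
	 */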

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
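
/* For a standard 1500-byte MTU, for example, macb_open() requests
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + NET_IP_ALIGN (2) = 1520 bytes,
 * which this helper rounds up to a 1536-byte RX buffer on GEM.
 */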

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < bp->rx_ring_size; i++) {
		skb = bp->rx_skbuff[i];

		if (!skb)
			continue;

		desc = macb_rx_desc(bp, i);
		addr = macb_get_addr(bp, desc);

		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	if (bp->rx_ring_tieoff) {
		dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
				  bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
		bp->rx_ring_tieoff = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated %d RX struct sk_buff entries at %p\n",
		   bp->rx_ring_size, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES(bp);
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;

	/* If we have more than one queue, allocate a tie off descriptor
	 * that will be used to disable unused RX queues.
	 */
	if (bp->num_queues > 1) {
		bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
							macb_dma_desc_get_size(bp),
							&bp->rx_ring_tieoff_dma,
							GFP_KERNEL);
		if (!bp->rx_ring_tieoff)
			goto out_err;
	}

	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void macb_init_tieoff(struct macb *bp)
{
	struct macb_dma_desc *d = bp->rx_ring_tieoff;

	if (bp->num_queues > 1) {
		/* Setup a wrapping descriptor with no free slots
		 * (WRAP and USED) to tie off/disable unused RX queues.
		 */
		macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
		d->ctrl = 0;
	}
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;

	gem_rx_refill(bp);
	macb_init_tieoff(bp);
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(bp);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);

	macb_init_tieoff(bp);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable RX partial store and forward and reset watermark value */
	if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
		gem_writel(bp, PBUFRXCUT, 0xFFF);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
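
/* Each threshold/divisor pair above caps MDC at 20 MHz / 8 =
 * 40 MHz / 16 = ... = 2.5 MHz, the IEEE 802.3 maximum MDIO clock;
 * a 100 MHz pclk, for example, selects GEM_CLK_DIV48 for an MDC of
 * roughly 2.08 MHz.
 */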

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* Pause Enable */

	/* Do not discard Rx FCS if RX checksum offload disabled */
	if (bp->dev->features & NETIF_F_RXCSUM)
		config |= MACB_BIT(DRFCS);	/* Discard Rx FCS */

	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Enable RX partial store and forward and set watermark */
	if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
		gem_writel(bp, PBUFRXCUT,
			   (gem_readl(bp, PBUFRXCUT) &
			    GEM_BF(WTRMRK, bp->rx_watermark)) |
			   GEM_BIT(ENCUTTHRU));
	}

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
		/* We only use the first queue at the moment. Remaining
		 * queues must be tied-off before we enable the receiver.
		 *
		 * See the documentation for receive_q1_ptr for more info.
		 */
		if (q)
			queue_writel(queue, RBQP,
				     lower_32_bits(bp->rx_ring_tieoff_dma));

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
	    (bp->caps & MACB_CAPS_PCS))
		gem_writel(bp, PCSCNTRL,
			   gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE) |
		    MACB_BIT(PTPUNI));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
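
/* Worked example (computed by hand, for illustration only): the IPv4
 * all-hosts multicast MAC 01:00:5e:00:00:01 hashes to index
 * 0b100110 = 38, i.e. bit 6 of the upper hash register word (HRT).
 */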
2360 /* Add multicast addresses to the internal multicast-hash table. */
2361 static void macb_sethashtable(struct net_device *dev)
2363 struct netdev_hw_addr *ha;
2364 unsigned long mc_filter[2];
2366 struct macb *bp = netdev_priv(dev);
2371 netdev_for_each_mc_addr(ha, dev) {
2372 bitnr = hash_get_index(ha->addr);
2373 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2376 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2377 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2380 /* Enable/Disable promiscuous and multicast modes. */
2381 static void macb_set_rx_mode(struct net_device *dev)
2384 struct macb *bp = netdev_priv(dev);
2386 cfg = macb_readl(bp, NCFGR);
2388 if (dev->flags & IFF_PROMISC) {
2389 /* Enable promiscuous mode */
2390 cfg |= MACB_BIT(CAF);
2392 /* Disable RX checksum offload */
2393 if (macb_is_gem(bp))
2394 cfg &= ~GEM_BIT(RXCOEN);
2396 /* Disable promiscuous mode */
2397 cfg &= ~MACB_BIT(CAF);
2399 /* Enable RX checksum offload only if requested */
2400 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2401 cfg |= GEM_BIT(RXCOEN);
2404 if (dev->flags & IFF_ALLMULTI) {
2405 /* Enable all multicast mode */
2406 macb_or_gem_writel(bp, HRB, -1);
2407 macb_or_gem_writel(bp, HRT, -1);
2408 cfg |= MACB_BIT(NCFGR_MTI);
2409 } else if (!netdev_mc_empty(dev)) {
2410 /* Enable specific multicasts */
2411 macb_sethashtable(dev);
2412 cfg |= MACB_BIT(NCFGR_MTI);
2413 } else if (dev->flags & (~IFF_ALLMULTI)) {
2414 /* Disable all multicast mode */
2415 macb_or_gem_writel(bp, HRB, 0);
2416 macb_or_gem_writel(bp, HRT, 0);
2417 cfg &= ~MACB_BIT(NCFGR_MTI);
2420 macb_writel(bp, NCFGR, cfg);
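/* Note that CAF and the multicast hash act independently: promiscuous
 * mode accepts every frame regardless of HRB/HRT, while NCFGR_MTI only
 * gates hash-matched multicast reception.
 */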
2423 static int macb_open(struct net_device *dev)
2425 struct macb *bp = netdev_priv(dev);
2426 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2429 netdev_dbg(bp->dev, "open\n");
2431 err = pm_runtime_get_sync(&bp->pdev->dev);
2435 /* carrier starts down */
2436 netif_carrier_off(dev);
2438 /* if the PHY is not yet registered, retry later */
2439 if (!bp->phy_dev)
2440 return -EAGAIN;
2442 /* RX buffers initialization */
2443 macb_init_rx_buffer_size(bp, bufsz);
2445 err = macb_alloc_consistent(bp);
2446 if (err) {
2447 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2448 err);
2452 napi_enable(&bp->napi);
2454 bp->macbgem_ops.mog_init_rings(bp);
2457 /* schedule a link state check */
2458 phy_start(bp->phy_dev);
2460 netif_tx_start_all_queues(dev);
2462 if (bp->ptp_info)
2463 bp->ptp_info->ptp_init(dev);
2468 static int macb_close(struct net_device *dev)
2470 struct macb *bp = netdev_priv(dev);
2471 unsigned long flags;
2473 netif_tx_stop_all_queues(dev);
2474 napi_disable(&bp->napi);
2476 if (bp->phy_dev)
2477 phy_stop(bp->phy_dev);
2479 spin_lock_irqsave(&bp->lock, flags);
2480 macb_reset_hw(bp);
2481 netif_carrier_off(dev);
2482 spin_unlock_irqrestore(&bp->lock, flags);
2484 macb_free_consistent(bp);
2486 if (bp->ptp_info)
2487 bp->ptp_info->ptp_remove(dev);
2489 pm_runtime_put(&bp->pdev->dev);
2491 return 0;
2494 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2496 if (netif_running(dev))
2497 return -EBUSY;
2499 dev->mtu = new_mtu;
2501 return 0;
2504 static void gem_update_stats(struct macb *bp)
2506 unsigned int i;
2507 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2509 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2510 u32 offset = gem_statistics[i].offset;
2511 u64 val = bp->macb_reg_readl(bp, offset);
2513 bp->ethtool_stats[i] += val;
2514 *p += val;
2516 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2517 /* Add GEM_OCTTXH, GEM_OCTRXH */
2518 val = bp->macb_reg_readl(bp, offset + 4);
2519 bp->ethtool_stats[i] += ((u64)val) << 32;
2520 *(++p) += val;
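/* The octet counters are the only 64-bit statistics: their high words
 * live in the adjacent registers at offset + 4 and are folded into
 * both the ethtool and hw_stats accumulators.
 */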
2525 static struct net_device_stats *gem_get_stats(struct macb *bp)
2527 struct gem_stats *hwstat = &bp->hw_stats.gem;
2528 struct net_device_stats *nstat = &bp->dev->stats;
2530 gem_update_stats(bp);
2532 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2533 hwstat->rx_alignment_errors +
2534 hwstat->rx_resource_errors +
2535 hwstat->rx_overruns +
2536 hwstat->rx_oversize_frames +
2537 hwstat->rx_jabbers +
2538 hwstat->rx_undersized_frames +
2539 hwstat->rx_length_field_frame_errors);
2540 nstat->tx_errors = (hwstat->tx_late_collisions +
2541 hwstat->tx_excessive_collisions +
2542 hwstat->tx_underrun +
2543 hwstat->tx_carrier_sense_errors);
2544 nstat->multicast = hwstat->rx_multicast_frames;
2545 nstat->collisions = (hwstat->tx_single_collision_frames +
2546 hwstat->tx_multiple_collision_frames +
2547 hwstat->tx_excessive_collisions);
2548 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2549 hwstat->rx_jabbers +
2550 hwstat->rx_undersized_frames +
2551 hwstat->rx_length_field_frame_errors);
2552 nstat->rx_over_errors = hwstat->rx_resource_errors;
2553 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2554 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2555 nstat->rx_fifo_errors = hwstat->rx_overruns;
2556 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2557 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2558 nstat->tx_fifo_errors = hwstat->tx_underrun;
2560 return nstat;
2563 static void gem_get_ethtool_stats(struct net_device *dev,
2564 struct ethtool_stats *stats, u64 *data)
2568 bp = netdev_priv(dev);
2569 gem_update_stats(bp);
2570 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2573 static int gem_get_sset_count(struct net_device *dev, int sset)
2577 return GEM_STATS_LEN;
2583 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2589 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2590 memcpy(p, gem_statistics[i].stat_string,
2591 ETH_GSTRING_LEN);
2596 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2598 struct macb *bp = netdev_priv(dev);
2599 struct net_device_stats *nstat = &bp->dev->stats;
2600 struct macb_stats *hwstat = &bp->hw_stats.macb;
2602 if (macb_is_gem(bp))
2603 return gem_get_stats(bp);
2605 /* read stats from hardware */
2606 macb_update_stats(bp);
2608 /* Convert HW stats into netdevice stats */
2609 nstat->rx_errors = (hwstat->rx_fcs_errors +
2610 hwstat->rx_align_errors +
2611 hwstat->rx_resource_errors +
2612 hwstat->rx_overruns +
2613 hwstat->rx_oversize_pkts +
2614 hwstat->rx_jabbers +
2615 hwstat->rx_undersize_pkts +
2616 hwstat->rx_length_mismatch);
2617 nstat->tx_errors = (hwstat->tx_late_cols +
2618 hwstat->tx_excessive_cols +
2619 hwstat->tx_underruns +
2620 hwstat->tx_carrier_errors +
2621 hwstat->sqe_test_errors);
2622 nstat->collisions = (hwstat->tx_single_cols +
2623 hwstat->tx_multiple_cols +
2624 hwstat->tx_excessive_cols);
2625 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2626 hwstat->rx_jabbers +
2627 hwstat->rx_undersize_pkts +
2628 hwstat->rx_length_mismatch);
2629 nstat->rx_over_errors = hwstat->rx_resource_errors +
2630 hwstat->rx_overruns;
2631 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2632 nstat->rx_frame_errors = hwstat->rx_align_errors;
2633 nstat->rx_fifo_errors = hwstat->rx_overruns;
2634 /* XXX: What does "missed" mean? */
2635 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2636 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2637 nstat->tx_fifo_errors = hwstat->tx_underruns;
2638 /* Don't know about heartbeat or window errors... */
2640 return nstat;
2643 static int macb_get_regs_len(struct net_device *netdev)
2645 return MACB_GREGS_NBR * sizeof(u32);
2648 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2649 void *p)
2651 struct macb *bp = netdev_priv(dev);
2652 unsigned int tail, head;
2653 u32 *regs_buff = p;
2655 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2656 | MACB_GREGS_VERSION;
2658 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2659 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2661 regs_buff[0] = macb_readl(bp, NCR);
2662 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2663 regs_buff[2] = macb_readl(bp, NSR);
2664 regs_buff[3] = macb_readl(bp, TSR);
2665 regs_buff[4] = macb_readl(bp, RBQP);
2666 regs_buff[5] = macb_readl(bp, TBQP);
2667 regs_buff[6] = macb_readl(bp, RSR);
2668 regs_buff[7] = macb_readl(bp, IMR);
2670 regs_buff[8] = tail;
2671 regs_buff[9] = head;
2672 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2673 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2675 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2676 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2677 if (macb_is_gem(bp))
2678 regs_buff[13] = gem_readl(bp, DMACFG);
2682 static void macb_get_ringparam(struct net_device *netdev,
2683 struct ethtool_ringparam *ring)
2685 struct macb *bp = netdev_priv(netdev);
2687 ring->rx_max_pending = MAX_RX_RING_SIZE;
2688 ring->tx_max_pending = MAX_TX_RING_SIZE;
2690 ring->rx_pending = bp->rx_ring_size;
2691 ring->tx_pending = bp->tx_ring_size;
2694 static int macb_set_ringparam(struct net_device *netdev,
2695 struct ethtool_ringparam *ring)
2697 struct macb *bp = netdev_priv(netdev);
2698 u32 new_rx_size, new_tx_size;
2699 unsigned int reset = 0;
2701 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2702 return -EINVAL;
2704 new_rx_size = clamp_t(u32, ring->rx_pending,
2705 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2706 new_rx_size = roundup_pow_of_two(new_rx_size);
2708 new_tx_size = clamp_t(u32, ring->tx_pending,
2709 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2710 new_tx_size = roundup_pow_of_two(new_tx_size);
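/* Illustrative example: a request for 600 TX descriptors is clamped
 * to [MIN_TX_RING_SIZE, MAX_TX_RING_SIZE] and then rounded up to the
 * next power of two, yielding a 1024-entry ring.
 */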
2712 if ((new_tx_size == bp->tx_ring_size) &&
2713 (new_rx_size == bp->rx_ring_size)) {
2714 /* nothing to do */
2715 return 0;
2716 }
2718 if (netif_running(bp->dev)) {
2719 reset = 1;
2720 macb_close(bp->dev);
2721 }
2723 bp->rx_ring_size = new_rx_size;
2724 bp->tx_ring_size = new_tx_size;
2726 if (reset)
2727 macb_open(bp->dev);
2729 return 0;
2732 #ifdef CONFIG_MACB_USE_HWSTAMP
2733 static unsigned int gem_get_tsu_rate(struct macb *bp)
2735 struct clk *tsu_clk;
2736 unsigned int tsu_rate;
2738 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2739 if (!IS_ERR(tsu_clk))
2740 tsu_rate = clk_get_rate(tsu_clk);
2741 /* try pclk instead */
2742 else if (!IS_ERR(bp->pclk)) {
2743 tsu_clk = bp->pclk;
2744 tsu_rate = clk_get_rate(tsu_clk);
2745 } else
2746 return -ENOTSUPP;
2748 return tsu_rate;
2750 static s32 gem_get_ptp_max_adj(void)
2752 return 64000000;
2755 static int gem_get_ts_info(struct net_device *dev,
2756 struct ethtool_ts_info *info)
2758 struct macb *bp = netdev_priv(dev);
2760 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2761 ethtool_op_get_ts_info(dev, info);
2762 return 0;
2763 }
2765 info->so_timestamping =
2766 SOF_TIMESTAMPING_TX_SOFTWARE |
2767 SOF_TIMESTAMPING_RX_SOFTWARE |
2768 SOF_TIMESTAMPING_SOFTWARE |
2769 SOF_TIMESTAMPING_TX_HARDWARE |
2770 SOF_TIMESTAMPING_RX_HARDWARE |
2771 SOF_TIMESTAMPING_RAW_HARDWARE;
2772 info->tx_types =
2773 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2774 (1 << HWTSTAMP_TX_OFF) |
2775 (1 << HWTSTAMP_TX_ON);
2776 info->rx_filters =
2777 (1 << HWTSTAMP_FILTER_NONE) |
2778 (1 << HWTSTAMP_FILTER_ALL);
2780 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2782 return 0;
2785 static struct macb_ptp_info gem_ptp_info = {
2786 .ptp_init = gem_ptp_init,
2787 .ptp_remove = gem_ptp_remove,
2788 .get_ptp_max_adj = gem_get_ptp_max_adj,
2789 .get_tsu_rate = gem_get_tsu_rate,
2790 .get_ts_info = gem_get_ts_info,
2791 .get_hwtst = gem_get_hwtst,
2792 .set_hwtst = gem_set_hwtst,
2793 };
2794 #endif
2796 static int macb_get_ts_info(struct net_device *netdev,
2797 struct ethtool_ts_info *info)
2799 struct macb *bp = netdev_priv(netdev);
2801 if (bp->ptp_info)
2802 return bp->ptp_info->get_ts_info(netdev, info);
2804 return ethtool_op_get_ts_info(netdev, info);
2807 static const struct ethtool_ops macb_ethtool_ops = {
2808 .get_regs_len = macb_get_regs_len,
2809 .get_regs = macb_get_regs,
2810 .get_link = ethtool_op_get_link,
2811 .get_ts_info = ethtool_op_get_ts_info,
2812 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2813 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2814 .get_ringparam = macb_get_ringparam,
2815 .set_ringparam = macb_set_ringparam,
2816 };
2818 static const struct ethtool_ops gem_ethtool_ops = {
2819 .get_regs_len = macb_get_regs_len,
2820 .get_regs = macb_get_regs,
2821 .get_link = ethtool_op_get_link,
2822 .get_ts_info = macb_get_ts_info,
2823 .get_ethtool_stats = gem_get_ethtool_stats,
2824 .get_strings = gem_get_ethtool_strings,
2825 .get_sset_count = gem_get_sset_count,
2826 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2827 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2828 .get_ringparam = macb_get_ringparam,
2829 .set_ringparam = macb_set_ringparam,
2830 };
2832 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2834 struct phy_device *phydev = dev->phydev;
2835 struct macb *bp = netdev_priv(dev);
2837 if (!netif_running(dev))
2838 return -EINVAL;
2840 if (!phydev)
2841 return -ENODEV;
2843 if (!bp->ptp_info)
2844 return phy_mii_ioctl(phydev, rq, cmd);
2846 switch (cmd) {
2847 case SIOCSHWTSTAMP:
2848 return bp->ptp_info->set_hwtst(dev, rq, cmd);
2849 case SIOCGHWTSTAMP:
2850 return bp->ptp_info->get_hwtst(dev, rq);
2851 default:
2852 return phy_mii_ioctl(phydev, rq, cmd);
2853 }
2856 static int macb_set_features(struct net_device *netdev,
2857 netdev_features_t features)
2859 struct macb *bp = netdev_priv(netdev);
2860 netdev_features_t changed = features ^ netdev->features;
2862 /* TX checksum offload */
2863 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
2864 u32 dmacfg;
2866 dmacfg = gem_readl(bp, DMACFG);
2867 if (features & NETIF_F_HW_CSUM)
2868 dmacfg |= GEM_BIT(TXCOEN);
2869 else
2870 dmacfg &= ~GEM_BIT(TXCOEN);
2871 gem_writel(bp, DMACFG, dmacfg);
2874 /* RX checksum offload */
2875 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
2876 u32 netcfg;
2878 netcfg = gem_readl(bp, NCFGR);
2879 if (features & NETIF_F_RXCSUM &&
2880 !(netdev->flags & IFF_PROMISC))
2881 netcfg |= GEM_BIT(RXCOEN);
2882 else
2883 netcfg &= ~GEM_BIT(RXCOEN);
2884 gem_writel(bp, NCFGR, netcfg);
2887 return 0;
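/* Illustrative usage: `ethtool -K ethX rx off` reaches this handler
 * with NETIF_F_RXCSUM cleared and disables RXCOEN; note that
 * macb_set_rx_mode() likewise keeps RXCOEN off in promiscuous mode.
 */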
2890 static const struct net_device_ops macb_netdev_ops = {
2891 .ndo_open = macb_open,
2892 .ndo_stop = macb_close,
2893 .ndo_start_xmit = macb_start_xmit,
2894 .ndo_set_rx_mode = macb_set_rx_mode,
2895 .ndo_get_stats = macb_get_stats,
2896 .ndo_do_ioctl = macb_ioctl,
2897 .ndo_validate_addr = eth_validate_addr,
2898 .ndo_change_mtu = macb_change_mtu,
2899 .ndo_set_mac_address = eth_mac_addr,
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2901 .ndo_poll_controller = macb_poll_controller,
2902 #endif
2903 .ndo_set_features = macb_set_features,
2904 .ndo_features_check = macb_features_check,
2905 };
2907 /* Configure peripheral capabilities according to device tree
2908 * and integration options used
2910 static void macb_configure_caps(struct macb *bp,
2911 const struct macb_config *dt_conf)
2916 if (dt_conf)
2917 bp->caps = dt_conf->caps;
2919 /* By default we enable partial store and forward on ZynqMP;
2920 * disable it if no watermark is set in the devicetree.
2921 */
2922 if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
2923 retval = of_property_read_u16(bp->pdev->dev.of_node,
2927 /* Disable partial store and forward in case of error or
2928 * invalid watermark value
2930 if (retval || bp->rx_watermark > 0xFFF) {
2931 dev_info(&bp->pdev->dev,
2932 "Not enabling partial store and forward\n");
2933 bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
2937 if (hw_is_gem(bp->regs, bp->native_io)) {
2938 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2940 dcfg = gem_readl(bp, DCFG1);
2941 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
2942 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
2943 dcfg = gem_readl(bp, DCFG2);
2944 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
2945 bp->caps |= MACB_CAPS_FIFO_MODE;
2946 #ifdef CONFIG_MACB_USE_HWSTAMP
2947 if (gem_has_ptp(bp)) {
2948 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
2949 pr_err("GEM doesn't support hardware ptp.\n");
2950 else {
2951 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
2952 bp->ptp_info = &gem_ptp_info;
2958 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2961 #if defined(CONFIG_OF)
2962 static void macb_reset_phy(struct platform_device *pdev)
2964 int err, phy_reset, msec = 1;
2965 bool active_low;
2966 struct device_node *np = pdev->dev.of_node;
2971 of_property_read_u32(np, "phy-reset-duration", &msec);
2972 active_low = of_property_read_bool(np, "phy-reset-active-low");
2974 phy_reset = of_get_named_gpio(np, "phy-reset-gpio", 0);
2975 if (!gpio_is_valid(phy_reset))
2978 err = devm_gpio_request_one(&pdev->dev, phy_reset,
2979 active_low ? GPIOF_OUT_INIT_LOW :
2980 GPIOF_OUT_INIT_HIGH, "phy-reset");
2981 if (err) {
2982 dev_err(&pdev->dev, "failed to get phy-reset-gpio: %d\n", err);
2983 return;
2984 }
2985 msleep(msec);
2986 gpio_set_value(phy_reset, active_low);
2988 #else /* CONFIG_OF */
2989 static void macb_reset_phy(struct platform_device *pdev)
2992 #endif /* CONFIG_OF */
2994 static void macb_probe_queues(void __iomem *mem,
2996 unsigned int *queue_mask,
2997 unsigned int *num_queues)
2999 unsigned int hw_q;
3001 *queue_mask = 0x1;
3002 *num_queues = 1;
3004 /* is it macb or gem ?
3006 * We need to read directly from the hardware here because
3007 * we are early in the probe process and don't have the
3008 * MACB_CAPS_MACB_IS_GEM flag set yet.
3009 */
3010 if (!hw_is_gem(mem, native_io))
3011 return;
3013 /* bit 0 is never set but queue 0 always exists */
3014 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3016 *queue_mask |= 0x1;
3018 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3019 if (*queue_mask & (1 << hw_q))
3020 (*num_queues)++;
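/* Illustrative example: a GEM whose DCFG6 reports 0x06 ends up with
 * queue_mask 0x07 (queue 0 is always present) and num_queues = 3.
 */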
3023 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3024 struct clk **hclk, struct clk **tx_clk,
3025 struct clk **rx_clk, struct clk **tsu_clk)
3029 *pclk = devm_clk_get(&pdev->dev, "pclk");
3030 if (IS_ERR(*pclk)) {
3031 err = PTR_ERR(*pclk);
3032 dev_err(&pdev->dev, "failed to get pclk (%d)\n", err);
3036 *hclk = devm_clk_get(&pdev->dev, "hclk");
3037 if (IS_ERR(*hclk)) {
3038 err = PTR_ERR(*hclk);
3039 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3043 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3044 if (IS_ERR(*tx_clk))
3047 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3048 if (IS_ERR(*rx_clk))
3051 *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
3052 if (IS_ERR(*tsu_clk))
3055 err = clk_prepare_enable(*pclk);
3057 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3061 err = clk_prepare_enable(*hclk);
3063 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3064 goto err_disable_pclk;
3067 err = clk_prepare_enable(*tx_clk);
3069 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3070 goto err_disable_hclk;
3073 err = clk_prepare_enable(*rx_clk);
3075 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3076 goto err_disable_txclk;
3079 err = clk_prepare_enable(*tsu_clk);
3081 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
3082 goto err_disable_rxclk;
3087 err_disable_rxclk:
3088 clk_disable_unprepare(*rx_clk);
3090 err_disable_txclk:
3091 clk_disable_unprepare(*tx_clk);
3093 err_disable_hclk:
3094 clk_disable_unprepare(*hclk);
3096 err_disable_pclk:
3097 clk_disable_unprepare(*pclk);
3099 return err;
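/* The labels above unwind in reverse of the enable order, so only the
 * clocks that were successfully enabled before the failure get
 * disabled again.
 */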
3102 static int macb_init(struct platform_device *pdev)
3104 struct net_device *dev = platform_get_drvdata(pdev);
3105 unsigned int hw_q, q;
3106 struct macb *bp = netdev_priv(dev);
3107 struct macb_queue *queue;
3108 int err;
3109 u32 val;
3111 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3112 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3114 /* set the queue register mapping once for all: queue0 has a special
3115 * register mapping but we don't want to test the queue index then
3116 * compute the corresponding register offset at run time.
3117 */
3118 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3119 if (!(bp->queue_mask & (1 << hw_q)))
3120 continue;
3122 queue = &bp->queues[q];
3123 queue->bp = bp;
3124 if (hw_q) {
3125 queue->ISR = GEM_ISR(hw_q - 1);
3126 queue->IER = GEM_IER(hw_q - 1);
3127 queue->IDR = GEM_IDR(hw_q - 1);
3128 queue->IMR = GEM_IMR(hw_q - 1);
3129 queue->TBQP = GEM_TBQP(hw_q - 1);
3130 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3131 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
3132 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3134 queue->RBQP = GEM_RBQP(hw_q - 1);
3135 } else {
3136 /* queue0 uses legacy registers */
3137 queue->ISR = MACB_ISR;
3138 queue->IER = MACB_IER;
3139 queue->IDR = MACB_IDR;
3140 queue->IMR = MACB_IMR;
3141 queue->TBQP = MACB_TBQP;
3142 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3143 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
3144 queue->TBQPH = MACB_TBQPH;
3146 queue->RBQP = MACB_RBQP;
3149 /* get irq: here we use the linux queue index, not the hardware
3150 * queue index. the queue irq definitions in the device tree
3151 * must remove the optional gaps that could exist in the
3152 * hardware queue mask.
3153 */
3154 queue->irq = platform_get_irq(pdev, q);
3155 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3156 IRQF_SHARED, dev->name, queue);
3159 "Unable to request IRQ %d (error %d)\n",
3164 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3168 dev->netdev_ops = &macb_netdev_ops;
3169 netif_napi_add(dev, &bp->napi, macb_poll, 64);
3171 /* set up the appropriate routines according to the adapter type */
3172 if (macb_is_gem(bp)) {
3173 bp->max_tx_length = GEM_MAX_TX_LEN;
3174 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3175 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3176 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3177 bp->macbgem_ops.mog_rx = gem_rx;
3178 dev->ethtool_ops = &gem_ethtool_ops;
3180 bp->max_tx_length = MACB_MAX_TX_LEN;
3181 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3182 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3183 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3184 bp->macbgem_ops.mog_rx = macb_rx;
3185 dev->ethtool_ops = &macb_ethtool_ops;
3189 dev->hw_features = NETIF_F_SG;
3191 /* Check LSO capability */
3192 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3193 dev->hw_features |= MACB_NETIF_LSO;
3195 /* Checksum offload is only available on gem with packet buffer */
3196 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3197 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3198 if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
3199 dev->hw_features &= ~NETIF_F_RXCSUM;
3200 if (bp->caps & MACB_CAPS_SG_DISABLED)
3201 dev->hw_features &= ~NETIF_F_SG;
3202 dev->features = dev->hw_features;
3204 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3205 val = 0;
3206 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3207 val = GEM_BIT(RGMII);
3208 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3209 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3210 val = MACB_BIT(RMII);
3211 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3212 val = MACB_BIT(MII);
3214 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3215 val |= MACB_BIT(CLKEN);
3217 macb_or_gem_writel(bp, USRIO, val);
3220 /* Set MII management clock divider */
3221 val = macb_mdc_clk_div(bp);
3222 val |= macb_dbw(bp);
3223 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3224 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3225 macb_writel(bp, NCFGR, val);
3227 if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
3228 (bp->caps & MACB_CAPS_PCS))
3229 gem_writel(bp, PCSCNTRL,
3230 gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
3232 return 0;
3235 #if defined(CONFIG_OF)
3236 /* 1518 rounded up */
3237 #define AT91ETHER_MAX_RBUFF_SZ 0x600
3238 /* max number of receive buffers */
3239 #define AT91ETHER_MAX_RX_DESCR 9
3241 /* Initialize and start the Receiver and Transmit subsystems */
3242 static int at91ether_start(struct net_device *dev)
3244 struct macb *lp = netdev_priv(dev);
3245 struct macb_dma_desc *desc;
3246 dma_addr_t addr;
3247 u32 ctl;
3248 int i;
3250 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3251 (AT91ETHER_MAX_RX_DESCR *
3252 macb_dma_desc_get_size(lp)),
3253 &lp->rx_ring_dma, GFP_KERNEL);
3254 if (!lp->rx_ring)
3255 return -ENOMEM;
3257 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3258 AT91ETHER_MAX_RX_DESCR *
3259 AT91ETHER_MAX_RBUFF_SZ,
3260 &lp->rx_buffers_dma, GFP_KERNEL);
3261 if (!lp->rx_buffers) {
3262 dma_free_coherent(&lp->pdev->dev,
3263 AT91ETHER_MAX_RX_DESCR *
3264 macb_dma_desc_get_size(lp),
3265 lp->rx_ring, lp->rx_ring_dma);
3266 lp->rx_ring = NULL;
3267 return -ENOMEM;
3268 }
3270 addr = lp->rx_buffers_dma;
3271 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3272 desc = macb_rx_desc(lp, i);
3273 macb_set_addr(lp, desc, addr);
3274 desc->ctrl = 0;
3275 addr += AT91ETHER_MAX_RBUFF_SZ;
3278 /* Set the Wrap bit on the last descriptor */
3279 desc->addr |= MACB_BIT(RX_WRAP);
3281 /* Reset buffer index */
3282 lp->rx_tail = 0;
3284 /* Program address of descriptor list in Rx Buffer Queue register */
3285 macb_writel(lp, RBQP, lp->rx_ring_dma);
3287 /* Enable Receive and Transmit */
3288 ctl = macb_readl(lp, NCR);
3289 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3291 return 0;
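/* At this point the receiver owns one coherent block of
 * AT91ETHER_MAX_RX_DESCR buffers of AT91ETHER_MAX_RBUFF_SZ bytes each;
 * the wrap bit set above makes the controller cycle back to
 * descriptor 0 after the last one.
 */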
3294 /* Open the ethernet interface */
3295 static int at91ether_open(struct net_device *dev)
3297 struct macb *lp = netdev_priv(dev);
3298 u32 ctl;
3299 int ret;
3301 /* Clear internal statistics */
3302 ctl = macb_readl(lp, NCR);
3303 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3305 macb_set_hwaddr(lp);
3307 ret = at91ether_start(dev);
3308 if (ret)
3309 return ret;
3311 /* Enable MAC interrupts */
3312 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3313 MACB_BIT(RXUBR) |
3314 MACB_BIT(ISR_TUND) |
3315 MACB_BIT(ISR_RLE) |
3316 MACB_BIT(TCOMP) |
3317 MACB_BIT(ISR_ROVR) |
3318 MACB_BIT(HRESP));
3320 /* schedule a link state check */
3321 phy_start(lp->phy_dev);
3323 netif_start_queue(dev);
3325 return 0;
3328 /* Close the interface */
3329 static int at91ether_close(struct net_device *dev)
3331 struct macb *lp = netdev_priv(dev);
3332 u32 ctl;
3334 /* Disable Receiver and Transmitter */
3335 ctl = macb_readl(lp, NCR);
3336 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3338 /* Disable MAC interrupts */
3339 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3340 MACB_BIT(RXUBR) |
3341 MACB_BIT(ISR_TUND) |
3342 MACB_BIT(ISR_RLE) |
3343 MACB_BIT(TCOMP) |
3344 MACB_BIT(ISR_ROVR) |
3345 MACB_BIT(HRESP));
3347 netif_stop_queue(dev);
3349 dma_free_coherent(&lp->pdev->dev,
3350 AT91ETHER_MAX_RX_DESCR *
3351 macb_dma_desc_get_size(lp),
3352 lp->rx_ring, lp->rx_ring_dma);
3353 lp->rx_ring = NULL;
3355 dma_free_coherent(&lp->pdev->dev,
3356 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3357 lp->rx_buffers, lp->rx_buffers_dma);
3358 lp->rx_buffers = NULL;
3360 return 0;
3363 /* Transmit packet */
3364 static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3366 struct macb *lp = netdev_priv(dev);
3368 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3369 netif_stop_queue(dev);
3371 /* Store packet information (to free when Tx completed) */
3372 lp->skb = skb;
3373 lp->skb_length = skb->len;
3374 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
3375 skb->len, DMA_TO_DEVICE);
3376 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
3377 dev_kfree_skb_any(skb);
3378 dev->stats.tx_dropped++;
3379 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3380 return NETDEV_TX_OK;
3383 /* Set address of the data in the Transmit Address register */
3384 macb_writel(lp, TAR, lp->skb_physaddr);
3385 /* Set length of the packet in the Transmit Control register */
3386 macb_writel(lp, TCR, skb->len);
3388 } else {
3389 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3390 return NETDEV_TX_BUSY;
3393 return NETDEV_TX_OK;
3396 /* Extract received frames from the buffer descriptors and send them to
3397 * the upper layers. (Called from interrupt context.)
3398 */
3399 static void at91ether_rx(struct net_device *dev)
3401 struct macb *lp = netdev_priv(dev);
3402 struct macb_dma_desc *desc;
3403 unsigned char *p_recv;
3404 struct sk_buff *skb;
3405 unsigned int pktlen;
3407 desc = macb_rx_desc(lp, lp->rx_tail);
3408 while (desc->addr & MACB_BIT(RX_USED)) {
3409 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3410 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3411 skb = netdev_alloc_skb(dev, pktlen + 2);
3412 if (skb) {
3413 skb_reserve(skb, 2);
3414 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
3416 skb->protocol = eth_type_trans(skb, dev);
3417 dev->stats.rx_packets++;
3418 dev->stats.rx_bytes += pktlen;
3419 netif_rx(skb);
3420 } else {
3421 dev->stats.rx_dropped++;
3422 }
3424 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
3425 dev->stats.multicast++;
3427 /* reset ownership bit */
3428 desc->addr &= ~MACB_BIT(RX_USED);
3430 /* wrap after last buffer */
3431 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3432 lp->rx_tail = 0;
3433 else
3434 lp->rx_tail++;
3436 desc = macb_rx_desc(lp, lp->rx_tail);
3440 /* MAC interrupt handler */
3441 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3443 struct net_device *dev = dev_id;
3444 struct macb *lp = netdev_priv(dev);
3445 u32 intstatus, ctl;
3447 /* MAC Interrupt Status register indicates what interrupts are pending.
3448 * It is automatically cleared once read.
3449 */
3450 intstatus = macb_readl(lp, ISR);
3452 /* Receive complete */
3453 if (intstatus & MACB_BIT(RCOMP))
3454 at91ether_rx(dev);
3456 /* Transmit complete */
3457 if (intstatus & MACB_BIT(TCOMP)) {
3458 /* The TCOMP bit is set even if the transmission failed */
3459 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3460 dev->stats.tx_errors++;
3462 if (lp->skb) {
3463 dev_kfree_skb_irq(lp->skb);
3464 lp->skb = NULL;
3465 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
3466 lp->skb_length, DMA_TO_DEVICE);
3467 dev->stats.tx_packets++;
3468 dev->stats.tx_bytes += lp->skb_length;
3470 netif_wake_queue(dev);
3473 /* Work-around for EMAC Errata section 41.3.1 */
3474 if (intstatus & MACB_BIT(RXUBR)) {
3475 ctl = macb_readl(lp, NCR);
3476 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3477 wmb();
3478 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3481 if (intstatus & MACB_BIT(ISR_ROVR))
3482 netdev_err(dev, "ROVR error\n");
3484 return IRQ_HANDLED;
3487 #ifdef CONFIG_NET_POLL_CONTROLLER
3488 static void at91ether_poll_controller(struct net_device *dev)
3490 unsigned long flags;
3492 local_irq_save(flags);
3493 at91ether_interrupt(dev->irq, dev);
3494 local_irq_restore(flags);
3495 }
3496 #endif
3498 static const struct net_device_ops at91ether_netdev_ops = {
3499 .ndo_open = at91ether_open,
3500 .ndo_stop = at91ether_close,
3501 .ndo_start_xmit = at91ether_start_xmit,
3502 .ndo_get_stats = macb_get_stats,
3503 .ndo_set_rx_mode = macb_set_rx_mode,
3504 .ndo_set_mac_address = eth_mac_addr,
3505 .ndo_do_ioctl = macb_ioctl,
3506 .ndo_validate_addr = eth_validate_addr,
3507 .ndo_change_mtu = eth_change_mtu,
3508 #ifdef CONFIG_NET_POLL_CONTROLLER
3509 .ndo_poll_controller = at91ether_poll_controller,
3510 #endif
3511 };
3513 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3514 struct clk **hclk, struct clk **tx_clk,
3515 struct clk **rx_clk, struct clk **tsu_clk)
3517 int err;
3519 *hclk = NULL;
3520 *tx_clk = NULL;
3521 *rx_clk = NULL;
3522 *tsu_clk = NULL;
3524 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3525 if (IS_ERR(*pclk))
3526 return PTR_ERR(*pclk);
3528 err = clk_prepare_enable(*pclk);
3529 if (err) {
3530 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3531 return err;
3532 }
3534 return 0;
3537 static int at91ether_init(struct platform_device *pdev)
3539 struct net_device *dev = platform_get_drvdata(pdev);
3540 struct macb *bp = netdev_priv(dev);
3541 u32 reg;
3542 int err;
3544 dev->netdev_ops = &at91ether_netdev_ops;
3545 dev->ethtool_ops = &macb_ethtool_ops;
3547 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3548 0, dev->name, dev);
3549 if (err)
3550 return err;
3552 macb_writel(bp, NCR, 0);
3554 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3555 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3556 reg |= MACB_BIT(RM9200_RMII);
3558 macb_writel(bp, NCFGR, reg);
3560 return 0;
3563 static const struct macb_config at91sam9260_config = {
3564 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3565 .clk_init = macb_clk_init,
3569 static const struct macb_config pc302gem_config = {
3570 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3571 .dma_burst_length = 16,
3572 .clk_init = macb_clk_init,
3576 static const struct macb_config sama5d2_config = {
3577 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3578 .dma_burst_length = 16,
3579 .clk_init = macb_clk_init,
3583 static const struct macb_config sama5d3_config = {
3584 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
3585 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3586 .dma_burst_length = 16,
3587 .clk_init = macb_clk_init,
3591 static const struct macb_config sama5d4_config = {
3592 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3593 .dma_burst_length = 4,
3594 .clk_init = macb_clk_init,
3598 static const struct macb_config emac_config = {
3599 .clk_init = at91ether_clk_init,
3600 .init = at91ether_init,
3603 static const struct macb_config np4_config = {
3604 .caps = MACB_CAPS_USRIO_DISABLED,
3605 .clk_init = macb_clk_init,
3609 static const struct macb_config zynqmp_config = {
3610 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
3611 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_PCS |
3612 MACB_CAPS_PARTIAL_STORE_FORWARD | MACB_CAPS_WOL,
3613 .dma_burst_length = 16,
3614 .clk_init = macb_clk_init,
3616 .jumbo_max_len = 10240,
3619 static const struct macb_config zynq_config = {
3620 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
3621 .dma_burst_length = 16,
3622 .clk_init = macb_clk_init,
3626 static const struct of_device_id macb_dt_ids[] = {
3627 { .compatible = "cdns,at32ap7000-macb" },
3628 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3629 { .compatible = "cdns,macb" },
3630 { .compatible = "cdns,np4-macb", .data = &np4_config },
3631 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3632 { .compatible = "cdns,gem", .data = &pc302gem_config },
3633 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3634 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3635 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3636 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3637 { .compatible = "cdns,emac", .data = &emac_config },
3638 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
3639 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
3640 { /* sentinel */ }
3641 };
3642 MODULE_DEVICE_TABLE(of, macb_dt_ids);
3643 #endif /* CONFIG_OF */
3645 static int macb_probe(struct platform_device *pdev)
3647 int (*clk_init)(struct platform_device *, struct clk **,
3648 struct clk **, struct clk **, struct clk **,
3649 struct clk **) = macb_clk_init;
3650 int (*init)(struct platform_device *) = macb_init;
3651 struct device_node *np = pdev->dev.of_node;
3652 struct device_node *phy_node;
3653 const struct macb_config *macb_config = NULL;
3654 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3655 struct clk *tsu_clk = NULL;
3656 unsigned int queue_mask, num_queues;
3657 struct macb_platform_data *pdata;
3658 bool native_io;
3659 struct phy_device *phydev;
3660 struct net_device *dev;
3661 struct resource *regs;
3662 void __iomem *mem;
3663 const char *mac;
3664 struct macb *bp;
3665 int err;
3667 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3668 mem = devm_ioremap_resource(&pdev->dev, regs);
3669 if (IS_ERR(mem))
3670 return PTR_ERR(mem);
3672 if (np) {
3673 const struct of_device_id *match;
3675 match = of_match_node(macb_dt_ids, np);
3676 if (match && match->data) {
3677 macb_config = match->data;
3678 clk_init = macb_config->clk_init;
3679 init = macb_config->init;
3683 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
3684 if (err)
3685 return err;
3687 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
3688 pm_runtime_use_autosuspend(&pdev->dev);
3689 pm_runtime_get_noresume(&pdev->dev);
3690 pm_runtime_set_active(&pdev->dev);
3691 pm_runtime_enable(&pdev->dev);
3692 native_io = hw_is_native_io(mem);
3694 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
3695 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
3696 if (!dev) {
3697 err = -ENOMEM;
3698 goto err_disable_clocks;
3699 }
3701 dev->base_addr = regs->start;
3703 SET_NETDEV_DEV(dev, &pdev->dev);
3705 bp = netdev_priv(dev);
3706 bp->pdev = pdev;
3707 bp->dev = dev;
3708 bp->regs = mem;
3709 bp->native_io = native_io;
3710 if (bp->native_io) {
3711 bp->macb_reg_readl = hw_readl_native;
3712 bp->macb_reg_writel = hw_writel_native;
3713 } else {
3714 bp->macb_reg_readl = hw_readl;
3715 bp->macb_reg_writel = hw_writel;
3716 }
3717 bp->num_queues = num_queues;
3718 bp->queue_mask = queue_mask;
3719 if (macb_config)
3720 bp->dma_burst_length = macb_config->dma_burst_length;
3721 bp->pclk = pclk;
3722 bp->hclk = hclk;
3723 bp->tx_clk = tx_clk;
3724 bp->rx_clk = rx_clk;
3725 bp->tsu_clk = tsu_clk;
3727 bp->tsu_rate = clk_get_rate(tsu_clk);
3729 if (macb_config)
3730 bp->jumbo_max_len = macb_config->jumbo_max_len;
3732 spin_lock_init(&bp->lock);
3734 /* setup capabilities */
3735 macb_configure_caps(bp, macb_config);
3737 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3738 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3739 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3740 bp->hw_dma_cap |= HW_DMA_CAP_64B;
3741 }
3742 #endif
3743 platform_set_drvdata(pdev, dev);
3745 dev->irq = platform_get_irq(pdev, 0);
3746 if (dev->irq < 0) {
3747 err = dev->irq;
3748 goto err_out_free_netdev;
3749 }
3751 /* MTU range: 68 - 1500 or 10240 */
3752 dev->min_mtu = GEM_MTU_MIN_SIZE;
3753 if (bp->caps & MACB_CAPS_JUMBO)
3754 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
3755 else
3756 dev->max_mtu = ETH_DATA_LEN;
3758 mac = of_get_mac_address(np);
3759 if (mac)
3760 ether_addr_copy(bp->dev->dev_addr, mac);
3761 else
3762 macb_get_hwaddr(bp);
3764 /* Power up the PHY if there is a GPIO reset */
3765 phy_node = of_parse_phandle(np, "phy-handle", 0);
3766 if (!phy_node && of_phy_is_fixed_link(np)) {
3767 err = of_phy_register_fixed_link(np);
3768 if (err) {
3769 dev_err(&pdev->dev, "broken fixed-link specification");
3770 goto err_out_free_netdev;
3771 }
3772 phy_node = of_node_get(np);
3773 bp->phy_node = phy_node;
3774 } else {
3775 int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
3776 if (gpio_is_valid(gpio)) {
3777 bp->reset_gpio = gpio_to_desc(gpio);
3778 gpiod_direction_output(bp->reset_gpio, 1);
3782 err = of_get_phy_mode(np);
3783 if (err < 0) {
3784 pdata = dev_get_platdata(&pdev->dev);
3785 if (pdata && pdata->is_rmii)
3786 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
3788 bp->phy_interface = PHY_INTERFACE_MODE_MII;
3789 } else {
3790 bp->phy_interface = err;
3791 }
3793 macb_reset_phy(pdev);
3795 /* IP specific init */
3796 err = init(pdev);
3797 if (err)
3798 goto err_out_free_netdev;
3800 err = register_netdev(dev);
3801 if (err) {
3802 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3803 goto err_out_free_netdev;
3804 }
3806 err = macb_mii_init(bp);
3808 goto err_out_unregister_netdev;
3810 netif_carrier_off(dev);
3812 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
3813 (unsigned long)bp);
3815 if (bp->caps & MACB_CAPS_WOL)
3816 device_set_wakeup_capable(&bp->dev->dev, 1);
3818 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
3819 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
3820 dev->base_addr, dev->irq, dev->dev_addr);
3822 phydev = bp->phy_dev;
3823 phy_attached_info(phydev);
3824 pm_runtime_mark_last_busy(&bp->pdev->dev);
3825 pm_runtime_put_autosuspend(&bp->pdev->dev);
3827 return 0;
3829 err_out_unregister_netdev:
3830 unregister_netdev(dev);
3832 err_out_free_netdev:
3833 free_netdev(dev);
3836 of_node_put(phy_node);
3838 err_disable_clocks:
3839 clk_disable_unprepare(tx_clk);
3840 clk_disable_unprepare(hclk);
3841 clk_disable_unprepare(pclk);
3842 clk_disable_unprepare(rx_clk);
3843 clk_disable_unprepare(tsu_clk);
3844 pm_runtime_disable(&pdev->dev);
3845 pm_runtime_set_suspended(&pdev->dev);
3846 pm_runtime_dont_use_autosuspend(&pdev->dev);
3848 return err;
3851 static int macb_remove(struct platform_device *pdev)
3853 struct net_device *dev;
3856 dev = platform_get_drvdata(pdev);
3859 bp = netdev_priv(dev);
3860 if (bp->phy_dev)
3861 phy_disconnect(bp->phy_dev);
3862 mdiobus_unregister(bp->mii_bus);
3864 mdiobus_free(bp->mii_bus);
3866 /* Shutdown the PHY if there is a GPIO reset */
3867 if (bp->reset_gpio)
3868 gpiod_set_value(bp->reset_gpio, 0);
3870 unregister_netdev(dev);
3871 pm_runtime_disable(&pdev->dev);
3872 pm_runtime_dont_use_autosuspend(&pdev->dev);
3873 if (!pm_runtime_suspended(&pdev->dev)) {
3874 clk_disable_unprepare(bp->tx_clk);
3875 clk_disable_unprepare(bp->hclk);
3876 clk_disable_unprepare(bp->pclk);
3877 clk_disable_unprepare(bp->rx_clk);
3878 clk_disable_unprepare(bp->tsu_clk);
3879 pm_runtime_set_suspended(&pdev->dev);
3881 of_node_put(bp->phy_node);
3882 free_netdev(dev);
3885 return 0;
3888 static int __maybe_unused macb_suspend(struct device *dev)
3890 struct platform_device *pdev = to_platform_device(dev);
3891 struct net_device *netdev = platform_get_drvdata(pdev);
3892 struct macb *bp = netdev_priv(netdev);
3893 struct macb_queue *queue = bp->queues;
3894 unsigned long flags;
3895 unsigned int q;
3896 u32 ctrl, arpipmask;
3898 if (!netif_running(netdev))
3899 return 0;
3901 if (device_may_wakeup(&bp->dev->dev)) {
3902 spin_lock_irqsave(&bp->lock, flags);
3903 ctrl = macb_readl(bp, NCR);
3904 ctrl &= ~(MACB_BIT(TE) | MACB_BIT(RE));
3905 macb_writel(bp, NCR, ctrl);
3906 /* Tie off RXQ0 as well */
3907 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_tieoff_dma));
3908 ctrl = macb_readl(bp, NCR);
3909 ctrl |= MACB_BIT(RE);
3910 macb_writel(bp, NCR, ctrl);
3911 gem_writel(bp, NCFGR, gem_readl(bp, NCFGR) & ~MACB_BIT(NBC));
3912 macb_writel(bp, TSR, -1);
3913 macb_writel(bp, RSR, -1);
3914 macb_readl(bp, ISR);
3915 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
3916 macb_writel(bp, ISR, -1);
3918 /* Enable WOL (Q0 only) and disable all other interrupts */
3919 macb_writel(bp, IER, MACB_BIT(WOL));
3920 for (q = 1, queue = bp->queues; q < bp->num_queues;
3921 ++q, ++queue)
3922 queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
3923 MACB_TX_INT_FLAGS |
3924 MACB_BIT(HRESP));
3927 arpipmask = cpu_to_be32p(&bp->dev->ip_ptr->ifa_list->ifa_local)
3928 & 0xFFFF;
3929 gem_writel(bp, WOL, MACB_BIT(ARP) | arpipmask);
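/* The GEM WOL register pairs the ARP enable bit with the least
 * significant 16 bits of the local IPv4 address, so only ARP requests
 * for that address will wake the system.
 */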
3930 spin_unlock_irqrestore(&bp->lock, flags);
3931 enable_irq_wake(bp->queues[0].irq);
3932 netif_device_detach(netdev);
3933 napi_disable(&bp->napi);
3934 } else {
3935 netif_device_detach(netdev);
3936 napi_disable(&bp->napi);
3937 phy_stop(bp->phy_dev);
3938 phy_suspend(bp->phy_dev);
3939 spin_lock_irqsave(&bp->lock, flags);
3941 spin_unlock_irqrestore(&bp->lock, flags);
3944 netif_carrier_off(netdev);
3945 if (bp->ptp_info)
3946 bp->ptp_info->ptp_remove(netdev);
3947 pm_runtime_force_suspend(dev);
3949 return 0;
3952 static int __maybe_unused macb_resume(struct device *dev)
3954 struct platform_device *pdev = to_platform_device(dev);
3955 struct net_device *netdev = platform_get_drvdata(pdev);
3956 struct macb *bp = netdev_priv(netdev);
3957 unsigned long flags;
3959 if (!netif_running(netdev))
3960 return 0;
3962 pm_runtime_force_resume(dev);
3964 if (device_may_wakeup(&bp->dev->dev)) {
3965 spin_lock_irqsave(&bp->lock, flags);
3966 macb_writel(bp, IDR, MACB_BIT(WOL));
3967 gem_writel(bp, WOL, 0);
3968 /* Clear Q0 ISR as WOL was enabled on Q0 */
3969 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
3970 macb_writel(bp, ISR, -1);
3971 disable_irq_wake(bp->queues[0].irq);
3972 spin_unlock_irqrestore(&bp->lock, flags);
3973 macb_writel(bp, NCR, MACB_BIT(MPE));
3974 napi_enable(&bp->napi);
3975 netif_carrier_on(netdev);
3976 } else {
3977 macb_writel(bp, NCR, MACB_BIT(MPE));
3978 napi_enable(&bp->napi);
3979 netif_carrier_on(netdev);
3980 phy_resume(bp->phy_dev);
3981 phy_start(bp->phy_dev);
3984 bp->macbgem_ops.mog_init_rings(bp);
3986 macb_set_rx_mode(netdev);
3987 netif_device_attach(netdev);
3988 if (bp->ptp_info)
3989 bp->ptp_info->ptp_init(netdev);
3991 return 0;
3994 static int __maybe_unused macb_runtime_suspend(struct device *dev)
3996 struct platform_device *pdev = to_platform_device(dev);
3997 struct net_device *netdev = platform_get_drvdata(pdev);
3998 struct macb *bp = netdev_priv(netdev);
4000 if (!(device_may_wakeup(&bp->dev->dev))) {
4001 clk_disable_unprepare(bp->tx_clk);
4002 clk_disable_unprepare(bp->hclk);
4003 clk_disable_unprepare(bp->pclk);
4004 clk_disable_unprepare(bp->rx_clk);
4006 clk_disable_unprepare(bp->tsu_clk);
4008 return 0;
4011 static int __maybe_unused macb_runtime_resume(struct device *dev)
4013 struct platform_device *pdev = to_platform_device(dev);
4014 struct net_device *netdev = platform_get_drvdata(pdev);
4015 struct macb *bp = netdev_priv(netdev);
4017 if (!(device_may_wakeup(&bp->dev->dev))) {
4018 clk_prepare_enable(bp->pclk);
4019 clk_prepare_enable(bp->hclk);
4020 clk_prepare_enable(bp->tx_clk);
4021 clk_prepare_enable(bp->rx_clk);
4023 clk_prepare_enable(bp->tsu_clk);
4025 return 0;
4028 static const struct dev_pm_ops macb_pm_ops = {
4029 SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
4030 SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
4031 };
4033 static struct platform_driver macb_driver = {
4034 .probe = macb_probe,
4035 .remove = macb_remove,
4036 .driver = {
4037 .name = "macb",
4038 .of_match_table = of_match_ptr(macb_dt_ids),
4039 .pm = &macb_pm_ops,
4040 },
4041 };
4043 module_platform_driver(macb_driver);
4045 MODULE_LICENSE("GPL");
4046 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
4047 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4048 MODULE_ALIAS("platform:macb");