bp->rx_ring = NULL;
}
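+ /* Free the tie-off descriptor used to disable unused RX queues */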
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, sizeof(bp->rx_ring_tieoff[0]),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
goto out_err;
+
+ /* If we have more than one queue, allocate a tie-off descriptor
+ * that will be used to disable unused RX queues.
+ */
+ if (bp->num_queues > 1) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ sizeof(bp->rx_ring_tieoff[0]),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
netdev_dbg(bp->dev,
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+ if (bp->num_queues > 1) {
+ /* Set up a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ d->addr = MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED);
+ d->ctrl = 0;
+ }
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
bp->rx_prepared_head = 0;
gem_rx_refill(bp);
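+ /* Initialize the descriptor that ties off unused RX queues */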
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
bp->rx_tail = 0;
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
#endif
+ /* We only use the first queue at the moment. Remaining
+ * queues must be tied off before we enable the receiver.
+ *
+ * See the documentation for receive_q1_ptr for more info.
+ */
+ if (q)
+ queue_writel(queue, RBQP, bp->rx_ring_tieoff_dma);
+
/* Enable interrupts */
queue_writel(queue, IER,