rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
net: macb: Tie-off unused RX queues
author: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Sat, 12 Mar 2016 04:45:13 +0000 (10:15 +0530)
committer: Michal Simek <monstr@monstr.eu>
Tue, 3 Oct 2017 15:26:09 +0000 (17:26 +0200)
Currently, we only use the first receive queue and leave the
remaining DMA descriptor pointers pointing at 0.

Disable unused queues by connecting them to a looped descriptor
chain without free slots.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Acked-by: Harini Katakam <harinik@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h

index 92f93667027a4751890832a544e399bebc61ac89..a86c5c579dd770a9c9c384fe646de240527fc472 100644 (file)
@@ -1584,6 +1584,12 @@ static void macb_free_consistent(struct macb *bp)
                bp->rx_ring = NULL;
        }
 
+       if (bp->rx_ring_tieoff) {
+               dma_free_coherent(&bp->pdev->dev, sizeof(bp->rx_ring_tieoff[0]),
+                                 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+               bp->rx_ring_tieoff = NULL;
+       }
+
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                kfree(queue->tx_skb);
                queue->tx_skb = NULL;
@@ -1655,6 +1661,19 @@ static int macb_alloc_consistent(struct macb *bp)
                                         &bp->rx_ring_dma, GFP_KERNEL);
        if (!bp->rx_ring)
                goto out_err;
+
+       /* If we have more than one queue, allocate a tie off descriptor
+        * that will be used to disable unused RX queues.
+        */
+       if (bp->num_queues > 1) {
+               bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+                                               sizeof(bp->rx_ring_tieoff[0]),
+                                               &bp->rx_ring_tieoff_dma,
+                                               GFP_KERNEL);
+               if (!bp->rx_ring_tieoff)
+                       goto out_err;
+       }
+
        netdev_dbg(bp->dev,
                   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
                   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
@@ -1669,6 +1688,19 @@ out_err:
        return -ENOMEM;
 }
 
+static void macb_init_tieoff(struct macb *bp)
+{
+       struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+       if (bp->num_queues > 1) {
+               /* Setup a wrapping descriptor with no free slots
+                * (WRAP and USED) to tie off/disable unused RX queues.
+                */
+               d->addr = MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED);
+               d->ctrl = 0;
+       }
+}
+
 static void gem_init_rings(struct macb *bp)
 {
        struct macb_queue *queue;
@@ -1689,6 +1721,7 @@ static void gem_init_rings(struct macb *bp)
        bp->rx_prepared_head = 0;
 
        gem_rx_refill(bp);
+       macb_init_tieoff(bp);
 }
 
 static void macb_init_rings(struct macb *bp)
@@ -1706,6 +1739,7 @@ static void macb_init_rings(struct macb *bp)
        bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
 
        bp->rx_tail = 0;
+       macb_init_tieoff(bp);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -2088,6 +2122,13 @@ static void macb_init_hw(struct macb *bp)
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
 #endif
+               /* We only use the first queue at the moment. Remaining
+                * queues must be tied-off before we enable the receiver.
+                *
+                * See the documentation for receive_q1_ptr for more info.
+                */
+               if (q)
+                       queue_writel(queue, RBQP, bp->rx_ring_tieoff_dma);
 
                /* Enable interrupts */
                queue_writel(queue, IER,
index 12e360f1c20d95c1940becc9292dce61aa495115..4e0ea27b2697afd07d4baaf83016b039b80c8c67 100644 (file)
@@ -872,6 +872,7 @@ struct macb {
        unsigned int            rx_tail;
        unsigned int            rx_prepared_head;
        struct macb_dma_desc    *rx_ring;
+       struct macb_dma_desc    *rx_ring_tieoff;
        struct sk_buff          **rx_skbuff;
        void                    *rx_buffers;
        size_t                  rx_buffer_size;
@@ -895,6 +896,7 @@ struct macb {
        }                       hw_stats;
 
        dma_addr_t              rx_ring_dma;
+       dma_addr_t              rx_ring_tieoff_dma;
        dma_addr_t              rx_buffers_dma;
 
        struct macb_or_gem_ops  macbgem_ops;