#define XXVENET_TS_HEADER_LEN 4
#define NS_PER_SEC 1000000000ULL /* Nanoseconds per second */
+/* Number of Axi DMA queues configured on this device instance */
+#define XAE_NUM_QUEUES(lp) ((lp)->num_queues)
+/* Iterate @var over every DMA queue index of @lp */
+#define for_each_dma_queue(lp, var) \
+ for ((var) = 0; (var) < XAE_NUM_QUEUES(lp); (var)++)
+
/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
/* Turn on jumbo packet support for both Rx and Tx */
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
- * @lp: Pointer to axienet local structure
+ * @q: Pointer to DMA queue structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
+ * The access goes through in_be32(), i.e. a big-endian read from the
+ * queue's own register window (q->dma_regs).
 */
-static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
+static inline u32 axienet_dma_in32(struct axienet_dma_q *q, off_t reg)
{
- return in_be32(lp->dma_regs + reg);
+ return in_be32(q->dma_regs + reg);
}
/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
+ * @q: Pointer to DMA queue structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
+ * The access goes through out_be32(), i.e. a big-endian write into the
+ * queue's own register window (q->dma_regs).
 */
-static inline void axienet_dma_out32(struct axienet_local *lp,
+static inline void axienet_dma_out32(struct axienet_dma_q *q,
 off_t reg, u32 value)
{
- out_be32((lp->dma_regs + reg), value);
+ out_be32((q->dma_regs + reg), value);
}
/**
 * axienet_dma_bdout - Memory mapped Axi DMA register Buffer Descriptor write.
- * @lp: Pointer to axienet local structure
+ * @q: Pointer to DMA queue structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
+ * When CONFIG_PHYS_ADDR_T_64BIT is set a descriptor address needs a 64-bit
+ * writeq(); otherwise dma_addr_t is 32 bits wide and a single writel()
+ * suffices.
 */
-static inline void axienet_dma_bdout(struct axienet_local *lp,
+static inline void axienet_dma_bdout(struct axienet_dma_q *q,
 off_t reg, dma_addr_t value)
{
#if defined(CONFIG_PHYS_ADDR_T_64BIT)
- writeq(value, (lp->dma_regs + reg));
+ writeq(value, (q->dma_regs + reg));
#else
- writel(value, (lp->dma_regs + reg));
+ writel(value, (q->dma_regs + reg));
#endif
}
/**
- * axienet_dma_bd_release - Release buffer descriptor rings
+ * axienet_bd_free - Release buffer descriptor rings for individual dma queue
 * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
 *
- * This function is used to release the descriptors allocated in
- * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
- * driver stop api is called.
+ * This function is helper function to axienet_dma_bd_release.
+ * It unmaps and frees the Rx skbs, then releases the Rx and Tx descriptor
+ * rings of @q. Each ring is only touched if it was actually allocated, so
+ * it is safe to call on a partially initialized queue.
 */
-static void axienet_dma_bd_release(struct net_device *ndev)
+
+static void axienet_bd_free(struct net_device *ndev, struct axienet_dma_q *q)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);
-	for (i = 0; i < RX_BD_NUM; i++) {
-		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
-				 lp->max_frm_size, DMA_FROM_DEVICE);
-		dev_kfree_skb((struct sk_buff *)
-			      (lp->rx_bd_v[i].sw_id_offset));
-	}
-	if (lp->rx_bd_v) {
+	if (q->rx_bd_v) {
+		/* Unmap and free the Rx skbs before releasing the ring that
+		 * holds the references to them. The loop must sit inside the
+		 * ring-pointer check: dereferencing q->rx_bd_v[i] when the Rx
+		 * ring allocation failed would be a NULL pointer dereference.
+		 */
+		for (i = 0; i < RX_BD_NUM; i++) {
+			dma_unmap_single(ndev->dev.parent, q->rx_bd_v[i].phys,
+					 lp->max_frm_size, DMA_FROM_DEVICE);
+			dev_kfree_skb((struct sk_buff *)
+				      (q->rx_bd_v[i].sw_id_offset));
+		}
		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-				  lp->rx_bd_v,
-				  lp->rx_bd_p);
+				  sizeof(*q->rx_bd_v) * RX_BD_NUM,
+				  q->rx_bd_v,
+				  q->rx_bd_p);
	}
-	if (lp->tx_bd_v) {
+	if (q->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
-				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-				  lp->tx_bd_v,
-				  lp->tx_bd_p);
+				  sizeof(*q->tx_bd_v) * TX_BD_NUM,
+				  q->tx_bd_v,
+				  q->tx_bd_p);
	}
}
/**
- * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
+ * axienet_dma_bd_release - Release buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
+ * driver stop api is called.
+ */
+static void axienet_dma_bd_release(struct net_device *ndev)
+{
+	int i;
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	/* Tear down every per-queue ring; axienet_bd_free() does the actual
+	 * unmapping and freeing for one queue.
+	 */
+	for_each_dma_queue(lp, i)
+		axienet_bd_free(ndev, lp->dq[i]);
+}
+
+
+/**
+ * axienet_dma_q_init - Setup buffer descriptor rings for individual Axi DMA
* @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
*
* Return: 0, on success -ENOMEM, on failure
*
- * This function is called to initialize the Rx and Tx DMA descriptor
- * rings. This initializes the descriptors with required default values
- * and is called when Axi Ethernet driver reset is called.
+ * This function is helper function to axienet_dma_bd_init
*/
-static int axienet_dma_bd_init(struct net_device *ndev)
+static int axienet_dma_q_init(struct net_device *ndev, struct axienet_dma_q *q)
{
u32 cr;
int i;
struct axienet_local *lp = netdev_priv(ndev);
/* Reset the indexes which are used for accessing the BDs */
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v)
+ q->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * TX_BD_NUM,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v)
+ q->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * RX_BD_NUM,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rx_bd_v)
goto out;
for (i = 0; i < TX_BD_NUM; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
+ q->tx_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->tx_bd_v) *
((i + 1) % TX_BD_NUM);
}
- if (!lp->eth_hasdre) {
- lp->tx_bufs = dma_zalloc_coherent(ndev->dev.parent,
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_zalloc_coherent(ndev->dev.parent,
XAE_MAX_PKT_LEN * TX_BD_NUM,
- &lp->tx_bufs_dma,
+ &q->tx_bufs_dma,
GFP_KERNEL);
- if (!lp->tx_bufs)
+ if (!q->tx_bufs)
goto out;
for (i = 0; i < TX_BD_NUM; i++)
- lp->tx_buf[i] = &lp->tx_bufs[i * XAE_MAX_PKT_LEN];
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
}
for (i = 0; i < RX_BD_NUM; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
+ q->rx_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rx_bd_v) *
((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb(ndev, lp->max_frm_size);
*/
wmb();
- lp->rx_bd_v[i].sw_id_offset = (phys_addr_t) skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ q->rx_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
- lp->rx_bd_v[i].cntrl = lp->max_frm_size;
+ q->rx_bd_v[i].cntrl = lp->max_frm_size;
}
/* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
/* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_bdout(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_bdout(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting.
*/
- axienet_dma_bdout(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
return 0;
return -ENOMEM;
}
+/**
+ * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
+ * @ndev: Pointer to the net_device structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is called to initialize the Rx and Tx DMA descriptor
+ * rings. This initializes the descriptors with required default values
+ * and is called when Axi Ethernet driver reset is called.
+ */
+static int axienet_dma_bd_init(struct net_device *ndev)
+{
+	int i, ret = 0;
+	struct axienet_local *lp = netdev_priv(ndev);
+
+	/* ret is pre-initialized: with zero configured queues the loop body
+	 * never runs and the function must report success, not return an
+	 * indeterminate value.
+	 */
+	for_each_dma_queue(lp, i) {
+		ret = axienet_dma_q_init(ndev, lp->dq[i]);
+		if (ret != 0) {
+			netdev_err(ndev, "%s: DMA queue %d init failed\n",
+				   __func__, i);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+
/**
* axienet_set_mac_address - Write the MAC address
* @ndev: Pointer to the net_device structure
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void __axienet_device_reset(struct axienet_dma_q *q, off_t offset)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* commands/transfers will be flushed or completed during this
* reset process.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(q, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ while (axienet_dma_in32(q, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ netdev_err(q->lp->ndev, "%s: DMA reset timeout!\n",
__func__);
break;
}
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
u32 err, val;
+ struct axienet_dma_q *q;
+ u32 i;
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ for_each_dma_queue(lp, i) {
+ q = lp->dq[i];
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
+ }
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
*
* This function is invoked from the Axi DMA Tx isr to notify the completion
* of transmit operation. It clears fields in the corresponding Tx BDs and
* buffer. It finally invokes "netif_wake_queue" to restart transmission if
* required.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static void axienet_start_xmit_done(struct net_device *ndev,
+ struct axienet_dma_q *q)
{
u32 size = 0;
u32 packets = 0;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct axienet_local *lp = netdev_priv(ndev);
+#endif
struct axidma_bd *cur_p;
unsigned int status = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ ++q->tx_bd_ci;
+ q->tx_bd_ci %= TX_BD_NUM;
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
}
/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
+ * @q: Pointer to DMA queue structure
* @num_frag: The number of BDs to check for
*
* Return: 0, on success
* transmission. If the BD or any of the BDs are not free the function
* returns a busy status. This is invoked from axienet_start_xmit.
*/
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+static inline int axienet_check_tx_bd_space(struct axienet_dma_q *q,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+ cur_p = &q->tx_bd_v[(q->tx_bd_tail + num_frag) % TX_BD_NUM];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
return 0;
#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
* axienet_create_tsheader - Create timestamp header for tx
- * @lp: Pointer to axienet local structure
+ * @q: Pointer to DMA queue structure
* @buf: Pointer to the buf to copy timestamp header
* @msg_type: PTP message type
*
* Return: None.
*/
-static void axienet_create_tsheader(struct axienet_local *lp, u8 *buf,
- u8 msg_type)
+static void axienet_create_tsheader(u8 *buf, u8 msg_type,
+ struct axienet_dma_q *q)
{
+ struct axienet_local *lp = q->lp;
struct axidma_bd *cur_p;
u64 val;
u32 tmp;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
if (msg_type == TX_TS_OP_ONESTEP) {
buf[0] = TX_TS_OP_ONESTEP;
struct axidma_bd *cur_p;
unsigned long flags;
u32 pad = 0;
+ struct axienet_dma_q *q;
+ u16 map = 0; /* Single dma queue default*/
num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- spin_lock_irqsave(&lp->tx_lock, flags);
- if (axienet_check_tx_bd_space(lp, num_frag)) {
+ q = lp->dq[map];
+
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+
+ spin_lock_irqsave(&q->tx_lock, flags);
+ if (axienet_check_tx_bd_space(q, num_frag)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
- spin_unlock_irqrestore(&lp->tx_lock, flags);
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_BUSY;
}
dev_err(&ndev->dev, "failed "
"to allocate new socket buffer\n");
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&lp->tx_lock, flags);
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_OK;
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
if (lp->tstamp_config.tx_type ==
HWTSTAMP_TX_ONESTEP_SYNC) {
- axienet_create_tsheader(lp, tmp,
- TX_TS_OP_ONESTEP);
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_ONESTEP, q);
} else {
- axienet_create_tsheader(lp, tmp,
- TX_TS_OP_TWOSTEP);
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_TWOSTEP, q);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
cur_p->ptp_tx_skb = (unsigned long)skb_get(skb);
}
cur_p->ptp_tx_ts_tag = (prandom_u32() &
~XAXIFIFO_TXTS_TAG_MASK) + 1;
if (lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
- axienet_create_tsheader(lp, lp->tx_ptpheader,
- TX_TS_OP_ONESTEP);
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_ONESTEP, q);
} else {
- axienet_create_tsheader(lp, lp->tx_ptpheader,
- TX_TS_OP_TWOSTEP);
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_TWOSTEP, q);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
cur_p->ptp_tx_skb = (phys_addr_t)skb_get(skb);
}
}
cur_p->cntrl = (skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK) + pad;
- if (!lp->eth_hasdre &&
+ if (!q->eth_hasdre &&
(((phys_addr_t)skb->data & 0x3) || (num_frag > 0))) {
- skb_copy_and_csum_dev(skb, lp->tx_buf[lp->tx_bd_tail]);
+ skb_copy_and_csum_dev(skb, q->tx_buf[q->tx_bd_tail]);
- cur_p->phys = lp->tx_bufs_dma +
- (lp->tx_buf[lp->tx_bd_tail] - lp->tx_bufs);
+ cur_p->phys = q->tx_bufs_dma +
+ (q->tx_buf[q->tx_bd_tail] - q->tx_bufs);
if (num_frag > 0) {
pad = skb_pagelen(skb) - skb_headlen(skb);
u32 len;
skb_frag_t *frag;
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ ++q->tx_bd_tail;
+ q->tx_bd_tail %= TX_BD_NUM;
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
len = skb_frag_size(frag);
cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, len,
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
cur_p->tx_skb = (phys_addr_t)skb;
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
/* Ensure BD write before starting transfer */
wmb();
/* Start the transfer */
- axienet_dma_bdout(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ ++q->tx_bd_tail;
+ q->tx_bd_tail %= TX_BD_NUM;
- spin_unlock_irqrestore(&lp->tx_lock, flags);
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_OK;
}
* BD processing.
* @ndev: Pointer to net_device structure.
* @budget: NAPI budget
+ * @q: Pointer to axienet DMA queue structure
*
* This function is invoked from the Axi DMA Rx isr(poll) to process the Rx BDs
* It does minimal processing and invokes "netif_receive_skb" to complete
* further processing.
* Return: Number of BD's processed.
*/
-static int axienet_recv(struct net_device *ndev, int budget)
+static int axienet_recv(struct net_device *ndev, int budget,
+ struct axienet_dma_q *q)
{
u32 length;
u32 csumstatus;
/* Get relevat BD status value */
rmb();
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
while ((numbdfree < budget) &&
(cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
+ tail_p = q->rx_bd_p + sizeof(*q->rx_bd_v) * q->rx_bd_ci;
+ skb = (struct sk_buff *)(cur_p->sw_id_offset);
if (lp->eth_hasnobuf ||
(lp->axienet_config->mactype != XAXIENET_1G))
cur_p->status = 0;
cur_p->sw_id_offset = (phys_addr_t) new_skb;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
+ ++q->rx_bd_ci;
+ q->rx_bd_ci %= RX_BD_NUM;
/* Get relevat BD status value */
rmb();
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
numbdfree++;
}
ndev->stats.rx_bytes += size;
if (tail_p)
- axienet_dma_bdout(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, tail_p);
return numbdfree;
}
*/
static int xaxienet_rx_poll(struct napi_struct *napi, int quota)
{
- struct axienet_local *lp = container_of(napi,
- struct axienet_local, napi);
+ struct net_device *ndev = napi->dev;
+ struct axienet_local *lp = netdev_priv(ndev);
int work_done = 0;
unsigned int status, cr;
- spin_lock(&lp->rx_lock);
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ int map = napi - lp->napi;
+
+ struct axienet_dma_q *q = lp->dq[map];
+
+ spin_lock(&q->rx_lock);
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
while ((status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) &&
(work_done < quota)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(lp->dev, "Rx error 0x%x\n\r", status);
break;
}
- work_done += axienet_recv(lp->ndev, quota - work_done);
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
}
- spin_unlock(&lp->rx_lock);
+ spin_unlock(&q->rx_lock);
if (work_done < quota) {
napi_complete(napi);
/* Enable the interrupts again */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
}
return work_done;
return IRQ_HANDLED;
}
+/**
+ * map_dma_q_irq - Map dma q based on interrupt number.
+ * @irq: irq number
+ * @lp: axienet local structure
+ *
+ * Return: queue index on success, -ENODEV if @irq matches no queue.
+ *
+ * This returns the DMA number on which interrupt has occurred.
+ */
+static int map_dma_q_irq(int irq, struct axienet_local *lp)
+{
+ int i;
+
+ for_each_dma_queue(lp, i) {
+ if (irq == lp->dq[i]->tx_irq || irq == lp->dq[i]->rx_irq)
+ return i;
+ }
+ /* The shared handler received an IRQ that belongs to no queue */
+ pr_err("Error mapping DMA irq\n");
+ return -ENODEV;
+}
+
+
/**
* axienet_tx_irq - Tx Done Isr.
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED or IRQ_NONE.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
- status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ axienet_start_xmit_done(lp->ndev, q);
goto out;
}
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
+ (q->tx_bd_v[q->tx_bd_ci]).phys);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
}
out:
return IRQ_HANDLED;
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED or IRQ_NONE.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- napi_schedule(&lp->napi);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+ napi_schedule(&lp->napi[i]);
}
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
+ (q->rx_bd_v[q->rx_bd_ci]).phys);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
/* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
/* Disable coalesce, delay timer and error interrupts */
cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
}
return IRQ_HANDLED;
*/
static int axienet_open(struct net_device *ndev)
{
- int ret = 0;
+ int ret = 0, i;
struct axienet_local *lp = netdev_priv(ndev);
struct phy_device *phydev = NULL;
+ struct axienet_dma_q *q;
dev_dbg(&ndev->dev, "axienet_open()\n");
}
/* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ for_each_dma_queue(lp, i) {
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_dma_err_handler,
+ (unsigned long)lp->dq[i]);
/* Enable NAPI scheduling before enabling Axi DMA Rx IRQ, or you
* might run into a race condition; the RX ISR disables IRQ processing
* If NAPI scheduling is (still) disabled at that time, no more RX IRQs
* will be processed as only the NAPI function re-enables them!
*/
- napi_enable(&lp->napi);
-
- /* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
- if (ret)
- goto err_tx_irq;
- /* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
- if (ret)
- goto err_rx_irq;
+ napi_enable(&lp->napi[i]);
+ }
+ for_each_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(q->tx_irq, axienet_tx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(q->rx_irq, axienet_rx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+ }
if (!lp->eth_hasnobuf && (lp->axienet_config->mactype == XAXIENET_1G)) {
/* Enable interrupts for Axi Ethernet */
return 0;
err_eth_irq:
-	free_irq(lp->rx_irq, ndev);
+	/* eth_irq failed: every queue acquired both of its IRQs */
+	i = lp->num_queues;
err_rx_irq:
-	free_irq(lp->tx_irq, ndev);
+	/* When the rx_irq request of queue i failed, that queue's tx_irq
+	 * was already acquired and must be released too (i == num_queues
+	 * on the eth_irq path, so the guard skips this there).
+	 */
+	if (i < lp->num_queues)
+		free_irq(lp->dq[i]->tx_irq, ndev);
err_tx_irq:
+	/* Queues 0..i-1 hold both their Tx and Rx IRQs; release them all
+	 * on every error path so no IRQ is leaked.
+	 */
+	while (i--) {
+		q = lp->dq[i];
+		free_irq(q->rx_irq, ndev);
+		free_irq(q->tx_irq, ndev);
+	}
-	napi_disable(&lp->napi);
+	for_each_dma_queue(lp, i)
+		napi_disable(&lp->napi[i]);
	if (phydev)
		phy_disconnect(phydev);
	phydev = NULL;
-	tasklet_kill(&lp->dma_err_tasklet);
+	for_each_dma_queue(lp, i)
+		tasklet_kill(&lp->dma_err_tasklet[i]);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
static int axienet_stop(struct net_device *ndev)
{
u32 cr;
+ u32 i;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- lp->axienet_config->setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
-
- napi_disable(&lp->napi);
- tasklet_kill(&lp->dma_err_tasklet);
-
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
+ for_each_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi[i]);
+ tasklet_kill(&lp->dma_err_tasklet[i]);
+ free_irq(q->tx_irq, ndev);
+ free_irq(q->rx_irq, ndev);
+ }
if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
free_irq(lp->eth_irq, ndev);
{
u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ int i;
+
+ for_each_dma_queue(lp, i) {
+ q = lp->dq[i];
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+ regval = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ regval = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
return 0;
}
u32 axienet_status;
u32 cr, i;
int mdio_mcreg = 0;
- struct axienet_local *lp = (struct axienet_local *) data;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
~XAE_MDIO_MC_MDIOEN_MASK));
}
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
}
for (i = 0; i < TX_BD_NUM; i++) {
- cur_p = &lp->tx_bd_v[i];
+ cur_p = &q->tx_bd_v[i];
if (cur_p->phys)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl &
}
for (i = 0; i < RX_BD_NUM; i++) {
- cur_p = &lp->rx_bd_v[i];
+ cur_p = &q->rx_bd_v[i];
cur_p->status = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app4 = 0;
}
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
/* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
(XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
/* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
/* Update the interrupt coalesce count */
cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
(XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
/* Enable coalesce, delay timer and error interrupts */
cr |= XAXIDMA_IRQ_ALL_MASK;
/* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
- axienet_dma_bdout(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_bdout(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
* tail pointer register that the Tx channel will start transmitting
*/
- axienet_dma_bdout(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
lp->axienet_config->setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
lp->axienet_config->setoptions(ndev, lp->options);
}
+/**
+ * axienet_dma_probe - Probe and set up the per-queue AXI DMA resources
+ * @pdev: Pointer to the platform device structure
+ * @ndev: Pointer to the net_device structure
+ *
+ * Allocates one axienet_dma_q per configured queue, maps each queue's
+ * DMA register space from the i-th "axistream-connected" phandle,
+ * decodes the per-queue "dma%d_tx"/"dma%d_rx" IRQs, initializes the
+ * Tx/Rx spinlocks and registers one NAPI context per queue.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int axienet_dma_probe(struct platform_device *pdev,
+			     struct net_device *ndev)
+{
+	int i, ret;
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q;
+	struct device_node *np;
+	struct resource dmares;
+	char dma_name[16];
+
+	for_each_dma_queue(lp, i) {
+		/* kzalloc so every queue field starts out zeroed */
+		q = kzalloc(sizeof(*q), GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+		/* parent */
+		q->lp = lp;
+
+		lp->dq[i] = q;
+	}
+
+	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+	for_each_dma_queue(lp, i) {
+		q = lp->dq[i];
+
+		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+				      i);
+		if (!np) {
+			dev_err(&pdev->dev,
+				"missing axistream-connected phandle %d\n", i);
+			return -EINVAL;
+		}
+
+		ret = of_address_to_resource(np, 0, &dmares);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"unable to get DMA resource for queue %d\n", i);
+			of_node_put(np);
+			return -ENODEV;
+		}
+
+		q->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+		if (IS_ERR(q->dma_regs)) {
+			of_node_put(np);
+			return PTR_ERR(q->dma_regs);
+		}
+
+		q->eth_hasdre = of_property_read_bool(np, "xlnx,include-dre");
+		/* Drop the reference taken by of_parse_phandle() each pass */
+		of_node_put(np);
+	}
+
+	for_each_dma_queue(lp, i) {
+		snprintf(dma_name, sizeof(dma_name), "dma%d_tx", i);
+		lp->dq[i]->tx_irq = platform_get_irq_byname(pdev, dma_name);
+		snprintf(dma_name, sizeof(dma_name), "dma%d_rx", i);
+		lp->dq[i]->rx_irq = platform_get_irq_byname(pdev, dma_name);
+		if (lp->dq[i]->tx_irq < 0 || lp->dq[i]->rx_irq < 0) {
+			dev_err(&pdev->dev,
+				"could not determine irqs for queue %d\n", i);
+			return -ENODEV;
+		}
+
+		dev_dbg(&pdev->dev, "lp->dq[%d]->tx_irq %d\n", i,
+			lp->dq[i]->tx_irq);
+		dev_dbg(&pdev->dev, "lp->dq[%d]->rx_irq %d\n", i,
+			lp->dq[i]->rx_irq);
+	}
+
+	for_each_dma_queue(lp, i) {
+		q = lp->dq[i];
+
+		spin_lock_init(&q->tx_lock);
+		spin_lock_init(&q->rx_lock);
+	}
+
+	for_each_dma_queue(lp, i) {
+		netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+			       XAXIENET_NAPI_WEIGHT);
+	}
+
+	return 0;
+}
+
static const struct axienet_config axienet_1g_config = {
.mactype = XAXIENET_1G,
.setoptions = axienet_setoptions,
static int axienet_probe(struct platform_device *pdev)
{
int ret = 0;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct device_node *np;
+#endif
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
u32 value;
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = alloc_etherdev_mq(sizeof(*lp), XAE_MAX_QUEUES);
if (!ndev)
return -ENOMEM;
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->num_queues = XAE_MAX_QUEUES;
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
of_node_put(np);
#endif
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
+ ret = axienet_dma_probe(pdev, ndev);
if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ pr_err("Getting DMA resource failed\n");
goto free_netdev;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
- if (IS_ERR(lp->dma_regs)) {
- ret = PTR_ERR(lp->dma_regs);
- goto free_netdev;
- }
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto free_netdev;
- }
- lp->eth_hasdre = of_property_read_bool(np, "xlnx,include-dre");
-
- spin_lock_init(&lp->tx_lock);
- spin_lock_init(&lp->rx_lock);
lp->dma_clk = devm_clk_get(&pdev->dev, "dma_clk");
if (IS_ERR(lp->dma_clk)) {
dev_warn(&pdev->dev, "error registering MDIO bus\n");
}
- netif_napi_add(ndev, &lp->napi, xaxienet_rx_poll, XAXIENET_NAPI_WEIGHT);
-
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
axienet_mdio_teardown(lp);
- netif_napi_del(&lp->napi);
+ for_each_dma_queue(lp, i)
+ netif_napi_del(&lp->napi[i]);
unregister_netdev(ndev);
clk_disable_unprepare(lp->eth_clk);
clk_disable_unprepare(lp->dma_clk);