1 /* =========================================================================
2 * The Synopsys DWC ETHER QOS Software Driver and documentation (hereinafter
3 * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
4 * otherwise expressly agreed to in writing between Synopsys and you.
6 * The Software IS NOT an item of Licensed Software or Licensed Product under
7 * any End User Software License Agreement or Agreement for Licensed Product
8 * with Synopsys or any supplement thereto. Permission is hereby granted,
9 * free of charge, to any person obtaining a copy of this software annotated
10 * with this license and the Software, to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject
14 * to the following conditions:
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
19 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
23 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
30 * ========================================================================= */
32 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
34 * This program is free software; you can redistribute it and/or modify it
35 * under the terms and conditions of the GNU General Public License,
36 * version 2, as published by the Free Software Foundation.
38 * This program is distributed in the hope it will be useful, but WITHOUT
39 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
40 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
44 * @brief: Driver functions.
47 #include <linux/gpio.h>
52 extern ULONG eqos_base_addr;
55 #include <linux/inet_lro.h>
56 #include <soc/tegra/chip-id.h>
/* Last status/error code recorded by the interrupt handlers (S_*/E_* codes). */
58 static INT eqos_status;
59 static int handle_txrx_completions(struct eqos_prv_data *pdata, int qinx);
61 /* SA(Source Address) operations on TX */
62 unsigned char mac_addr0[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
63 unsigned char mac_addr1[6] = { 0x00, 0x66, 0x77, 0x88, 0x99, 0xaa };
65 /* module parameters for configuring the queue modes
66 * set default mode as GENERIC
68 /* Value of "2" enables mtl tx q */
69 static int q_op_mode[EQOS_MAX_TX_QUEUE_CNT] = {
80 /* Store the IRQ names to be used by /proc/interrupts */
81 static char irq_names[8][32];
/* q_op_mode is runtime-tunable (root-writable, world-readable in sysfs). */
83 module_param_array(q_op_mode, int, NULL, S_IRUGO | S_IWUSR);
84 MODULE_PARM_DESC(q_op_mode,
85 "MTL queue operation mode [0-DISABLED, 1-AVB, 2-DCB, 3-GENERIC]");
/* Halt TX DMA on every TX channel (0..EQOS_TX_QUEUE_CNT-1) via
 * hw_if->stop_dma_tx(). */
87 void eqos_stop_all_ch_tx_dma(struct eqos_prv_data *pdata)
89 struct hw_if_struct *hw_if = &(pdata->hw_if);
92 pr_debug("-->eqos_stop_all_ch_tx_dma\n");
94 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++)
95 hw_if->stop_dma_tx(pdata, qinx);
97 pr_debug("<--eqos_stop_all_ch_tx_dma\n");
/* Test whether the 6-byte MAC in @addr matches either of the two
 * well-known PTP destination addresses (PTP1_MAC0..5 / PTP2_MAC0..5).
 * NOTE(review): the return statements are elided in this excerpt; the
 * comparisons suggest it returns non-zero on a PTP match. */
100 static int is_ptp_addr(char *addr)
102 if ((addr[0] == PTP1_MAC0) &&
103 (addr[1] == PTP1_MAC1) &&
104 (addr[2] == PTP1_MAC2) &&
105 (addr[3] == PTP1_MAC3) &&
106 (addr[4] == PTP1_MAC4) && (addr[5] == PTP1_MAC5))
108 else if ((addr[0] == PTP2_MAC0) &&
109 (addr[1] == PTP2_MAC1) &&
110 (addr[2] == PTP2_MAC2) &&
111 (addr[3] == PTP2_MAC3) &&
112 (addr[4] == PTP2_MAC4) && (addr[5] == PTP2_MAC5))
118 /*Check if Channel 0 is PTP and has data 0xee
119 Check if Channel 1 is AV and has data 0xbb or 0xcc
120 Check if Channel 2 is AV and has data 0xdd*/
121 #ifdef ENABLE_CHANNEL_DATA_CHECK
/* Debug-only payload sanity check, compiled in only when
 * ENABLE_CHANNEL_DATA_CHECK is defined: inspects the 16-bit field at short
 * offset 6 of the frame (EtherType position) and the payload byte at offset
 * 80, and logs an error when a queue carries an unexpected marker byte. */
122 static void check_channel_data(struct sk_buff *skb, unsigned int qinx,
126 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF788) &&
127 ((*(((char *)skb->data) + 80) & 0xFF) != 0xee)) ||
129 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF022) &&
130 (((*(((char *)skb->data) + 80) & 0xFF) != 0xbb) &&
131 ((*(((char *)skb->data) + 80) & 0xFF) != 0xcc))) ||
133 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF022) &&
134 ((*(((char *)skb->data) + 80) & 0xFF) != 0xdd))) {
136 pr_err("Incorrect %s data 0x%x in Q %d\n",
137 ((is_rx) ? "RX" : "TX"), *(((char *)skb->data) + 80), qinx);
/* Halt RX DMA on every RX channel via hw_if->stop_dma_rx(). */
142 static void eqos_stop_all_ch_rx_dma(struct eqos_prv_data *pdata)
144 struct hw_if_struct *hw_if = &(pdata->hw_if);
147 pr_debug("-->eqos_stop_all_ch_rx_dma\n");
149 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
150 hw_if->stop_dma_rx(qinx);
152 pr_debug("<--eqos_stop_all_ch_rx_dma\n");
/* Start TX DMA on every TX channel via hw_if->start_dma_tx(). */
155 static void eqos_start_all_ch_tx_dma(struct eqos_prv_data *pdata)
157 struct hw_if_struct *hw_if = &(pdata->hw_if);
160 pr_debug("-->eqos_start_all_ch_tx_dma\n");
162 for (i = 0; i < EQOS_TX_QUEUE_CNT; i++)
163 hw_if->start_dma_tx(i);
165 pr_debug("<--eqos_start_all_ch_tx_dma\n");
/* Start RX DMA on every RX channel via hw_if->start_dma_rx(). */
168 static void eqos_start_all_ch_rx_dma(struct eqos_prv_data *pdata)
170 struct hw_if_struct *hw_if = &(pdata->hw_if);
173 pr_debug("-->eqos_start_all_ch_rx_dma\n");
175 for (i = 0; i < EQOS_RX_QUEUE_CNT; i++)
176 hw_if->start_dma_rx(i);
178 pr_debug("<--eqos_start_all_ch_rx_dma\n");
/* Enable the per-RX-queue NAPI context for every RX queue. */
181 static void eqos_napi_enable_mq(struct eqos_prv_data *pdata)
183 struct eqos_rx_queue *rx_queue = NULL;
186 pr_debug("-->eqos_napi_enable_mq\n");
188 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
189 rx_queue = GET_RX_QUEUE_PTR(qinx);
190 napi_enable(&rx_queue->napi);
193 pr_debug("<--eqos_napi_enable_mq\n");
/* Disable the per-RX-queue NAPI context for every RX queue.
 * napi_disable() blocks until any in-flight poll on that queue finishes. */
196 static void eqos_all_ch_napi_disable(struct eqos_prv_data *pdata)
198 struct eqos_rx_queue *rx_queue = NULL;
201 pr_debug("-->eqos_napi_disable\n");
203 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
204 rx_queue = GET_RX_QUEUE_PTR(qinx);
205 napi_disable(&rx_queue->napi);
208 pr_debug("<--eqos_napi_disable\n");
/* Mask RX interrupts on every RX channel via hw_if->disable_rx_interrupt(). */
211 void eqos_disable_all_ch_rx_interrpt(struct eqos_prv_data *pdata)
213 struct hw_if_struct *hw_if = &(pdata->hw_if);
216 pr_debug("-->eqos_disable_all_ch_rx_interrpt\n");
218 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
219 hw_if->disable_rx_interrupt(qinx, pdata);
221 pr_debug("<--eqos_disable_all_ch_rx_interrpt\n");
/* Unmask RX interrupts on every RX channel via hw_if->enable_rx_interrupt(). */
224 void eqos_enable_all_ch_rx_interrpt(struct eqos_prv_data *pdata)
226 struct hw_if_struct *hw_if = &(pdata->hw_if);
229 pr_debug("-->eqos_enable_all_ch_rx_interrpt\n");
231 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
232 hw_if->enable_rx_interrupt(qinx, pdata);
234 pr_debug("<--eqos_enable_all_ch_rx_interrpt\n");
/* Service all per-channel DMA interrupt causes EXCEPT TI (transmit complete)
 * and RI (receive complete) for channel @qinx: reads DMA_SR/DMA_IER, masks
 * out TI/RI, acknowledges the remaining bits, bumps the matching xstats
 * counters, records a status code in eqos_status, and on a fatal bus error
 * (FBE) marks the channel in fbe_chan_mask and schedules fbe_work. */
237 void handle_non_ti_ri_chan_intrs(struct eqos_prv_data *pdata, int qinx)
242 pr_debug("-->%s(), chan=%d\n", __func__, qinx);
244 DMA_SR_RD(qinx, dma_sr);
246 DMA_IER_RD(qinx, dma_ier);
248 pr_debug("DMA_SR[%d] = %#lx, DMA_IER= %#lx\n", qinx, dma_sr, dma_ier);
250 /*on ufpga, update of DMA_IER is really slow, such that interrupt
251 * would happen, but read of IER returns old value. This would
252 * cause driver to return when there really was an interrupt asserted.
253 * so for now, comment this out.
255 /* process only those interrupts which we
/* On real silicon, restrict processing to interrupts that are enabled. */
258 if (!(tegra_platform_is_unit_fpga()))
259 dma_sr = (dma_sr & dma_ier);
261 /* mask off ri and ti */
/* Bit 6 = RI, bit 0 = TI; those are handled by handle_ti_ri_chan_intrs(). */
262 dma_sr &= ~(((0x1) << 6) | 1);
267 /* ack non ti/ri ints */
268 DMA_SR_WR(qinx, dma_sr);
270 if ((GET_VALUE(dma_sr, DMA_SR_RBU_LPOS, DMA_SR_RBU_HPOS) & 1))
271 pdata->xstats.rx_buf_unavailable_irq_n[qinx]++;
/* On unit-FPGA the IER mask is applied late (see comment above). */
273 if (tegra_platform_is_unit_fpga())
274 dma_sr = (dma_sr & dma_ier);
276 if (GET_VALUE(dma_sr, DMA_SR_TPS_LPOS, DMA_SR_TPS_HPOS) & 1) {
277 pdata->xstats.tx_process_stopped_irq_n[qinx]++;
278 eqos_status = -E_DMA_SR_TPS;
280 if (GET_VALUE(dma_sr, DMA_SR_TBU_LPOS, DMA_SR_TBU_HPOS) & 1) {
281 pdata->xstats.tx_buf_unavailable_irq_n[qinx]++;
282 eqos_status = -E_DMA_SR_TBU;
284 if (GET_VALUE(dma_sr, DMA_SR_RPS_LPOS, DMA_SR_RPS_HPOS) & 1) {
285 pdata->xstats.rx_process_stopped_irq_n[qinx]++;
286 eqos_status = -E_DMA_SR_RPS;
288 if (GET_VALUE(dma_sr, DMA_SR_RWT_LPOS, DMA_SR_RWT_HPOS) & 1) {
289 pdata->xstats.rx_watchdog_irq_n++;
290 eqos_status = S_DMA_SR_RWT;
292 if (GET_VALUE(dma_sr, DMA_SR_FBE_LPOS, DMA_SR_FBE_HPOS) & 1) {
293 pdata->xstats.fatal_bus_error_irq_n++;
/* Remember which channel faulted so fbe_work can recover it. */
294 pdata->fbe_chan_mask |= (1 << qinx);
295 eqos_status = -E_DMA_SR_FBE;
296 schedule_work(&pdata->fbe_work);
299 pr_debug("<--%s()\n", __func__);
/* Service the TI (transmit complete) and RI (receive complete) interrupts for
 * channel @qinx: acknowledges them in DMA_SR and the virtualized per-channel
 * status register, bumps tx/rx normal-irq stats, then disables the channel's
 * interrupts and schedules the queue's NAPI poll.
 * NOTE(review): *pnapi_sched usage is not visible in this excerpt. */
302 void handle_ti_ri_chan_intrs(struct eqos_prv_data *pdata,
303 int qinx, int *pnapi_sched)
309 struct hw_if_struct *hw_if = &(pdata->hw_if);
311 struct eqos_rx_queue *rx_queue = NULL;
313 pr_debug("-->%s(), chan=%d\n", __func__, qinx);
315 rx_queue = GET_RX_QUEUE_PTR(qinx);
317 DMA_SR_RD(qinx, dma_sr);
319 DMA_IER_RD(qinx, dma_ier);
320 VIRT_INTR_CH_STAT_RD(qinx, ch_stat_reg);
321 VIRT_INTR_CH_CRTL_RD(qinx, ch_crtl_reg);
323 pr_debug("DMA_SR[%d] = %#lx, DMA_IER= %#lx\n", qinx, dma_sr, dma_ier);
325 pr_debug("VIRT_INTR_CH_STAT[%d] = %#x, VIRT_INTR_CH_CRTL= %#x\n",
326 qinx, ch_stat_reg, ch_crtl_reg);
328 /*on ufpga, update of DMA_IER is really slow, such that interrupt
329 * would happen, but read of IER returns old value. This would
330 * cause driver to return when there really was an interrupt asserted.
331 * so for now, comment this out.
333 /* process only those interrupts which we
336 if (!(tegra_platform_is_unit_fpga()))
337 ch_stat_reg &= ch_crtl_reg;
/* Nothing pending for this channel. */
339 if (ch_stat_reg == 0)
342 if (ch_stat_reg & VIRT_INTR_CH_CRTL_RX_WR_MASK) {
/* Ack RI (bit 6) together with NIS (bit 15) in DMA_SR. */
343 DMA_SR_WR(qinx, ((0x1) << 6) | ((0x1) << 15));
344 VIRT_INTR_CH_STAT_WR(qinx, VIRT_INTR_CH_CRTL_RX_WR_MASK);
345 pdata->xstats.rx_normal_irq_n[qinx]++;
348 if (tegra_platform_is_unit_fpga())
349 ch_stat_reg &= ch_crtl_reg;
351 if (ch_stat_reg & VIRT_INTR_CH_CRTL_TX_WR_MASK) {
/* Ack TI (bit 0) together with NIS (bit 15) in DMA_SR. */
352 DMA_SR_WR(qinx, ((0x1) << 0) | ((0x1) << 15));
353 VIRT_INTR_CH_STAT_WR(qinx, VIRT_INTR_CH_CRTL_TX_WR_MASK);
354 pdata->xstats.tx_normal_irq_n[qinx]++;
/* Completion work is deferred to NAPI; mask channel irqs until poll ends. */
357 if (likely(napi_schedule_prep(&rx_queue->napi))) {
358 hw_if->disable_chan_interrupts(qinx, pdata);
359 __napi_schedule(&rx_queue->napi);
361 /* Do nothing here. */
362 pr_alert("Ethernet Interrupt while in poll!\n");
364 pr_debug("<--%s()\n", __func__);
/* Service MAC-level interrupt causes reported through DMA_ISR: PMT wake
 * events, RGMII/SMII link-status changes (speed/duplex resync from MAC_PCS),
 * PCS link-status and auto-negotiation-complete events, and LPI/EEE.
 * Only MAC interrupts that are enabled in the MAC IMR are processed. */
367 void handle_mac_intrs(struct eqos_prv_data *pdata, ULONG dma_isr)
374 struct net_device *dev = pdata->dev;
376 pr_debug("-->%s()\n", __func__);
380 /* Handle MAC interrupts */
381 if (GET_VALUE(dma_isr, DMA_ISR_MACIS_LPOS, DMA_ISR_MACIS_HPOS) & 1) {
382 /* handle only those MAC interrupts which are enabled */
384 mac_isr = (mac_isr & mac_imr);
387 * RemoteWake and MagicPacket events will be received by PHY supporting
388 * these features on silicon and can be used to wake up Tegra.
389 * Still let the below code be here in case we ever get this interrupt.
391 if (GET_VALUE(mac_isr, MAC_ISR_PMTIS_LPOS, MAC_ISR_PMTIS_HPOS) &
393 pdata->xstats.pmt_irq_n++;
394 eqos_status = S_MAC_ISR_PMTIS;
395 MAC_PMTCSR_RD(mac_pmtcsr);
396 pr_debug("commonisr: PMTCSR : %#lx\n", mac_pmtcsr);
/* If we were powered down via PMT, bring the interface back up. */
397 if (pdata->power_down)
398 eqos_powerup(pdata->dev, EQOS_IOCTL_CONTEXT);
401 /* RGMII/SMII interrupt */
403 (mac_isr, MAC_ISR_RGSMIIS_LPOS, MAC_ISR_RGSMIIS_HPOS) & 1) {
405 pr_debug("RGMII/SMII interrupt: MAC_PCS = %#lx\n",
407 #ifdef HWA_NV_1637630
410 /* Comment out this block of code(1637630)
411 * as it was preventing 10mb to work.
/* MAC_PCS bit 19 = link up; bit 16 = full duplex; bits 17:18 = speed. */
413 if ((mac_pcs & 0x80000) == 0x80000) {
415 netif_carrier_on(dev);
416 if ((mac_pcs & 0x10000) == 0x10000) {
417 pdata->pcs_duplex = 1;
418 hw_if->set_full_duplex();
420 pdata->pcs_duplex = 0;
421 hw_if->set_half_duplex();
424 if ((mac_pcs & 0x60000) == 0x0) {
425 pdata->pcs_speed = SPEED_10;
426 hw_if->set_mii_speed_10();
427 } else if ((mac_pcs & 0x60000) == 0x20000) {
428 pdata->pcs_speed = SPEED_100;
429 hw_if->set_mii_speed_100();
430 } else if ((mac_pcs & 0x60000) == 0x30000) {
431 pdata->pcs_speed = SPEED_1000;
432 hw_if->set_gmii_speed();
434 pr_err("Link is UP:%dMbps & %s duplex\n",
436 pdata->pcs_duplex ? "Full" : "Half");
438 pr_err("Link is Down\n");
440 netif_carrier_off(dev);
445 /* PCS Link Status interrupt */
447 (mac_isr, MAC_ISR_PCSLCHGIS_LPOS,
448 MAC_ISR_PCSLCHGIS_HPOS) & 1) {
449 pr_err("PCS Link Status interrupt\n");
451 if (GET_VALUE(mac_ans, MAC_ANS_LS_LPOS, MAC_ANS_LS_HPOS)
453 pr_err("Link: Up\n");
454 netif_carrier_on(dev);
457 pr_err("Link: Down\n");
458 netif_carrier_off(dev);
463 /* PCS Auto-Negotiation Complete interrupt */
465 (mac_isr, MAC_ISR_PCSANCIS_LPOS,
466 MAC_ISR_PCSANCIS_HPOS) & 1) {
467 pr_err("PCS Auto-Negotiation Complete interrupt\n");
472 if (GET_VALUE(mac_isr, MAC_ISR_LPI_LPOS, MAC_ISR_LPI_HPOS) & 1) {
473 eqos_handle_eee_interrupt(pdata);
477 pr_debug("<--%s()\n", __func__);
482 * Only used when multi irq is enabled
/* Common (non-channel) ISR: walks all channels for non-TI/RI DMA causes,
 * then dispatches MAC-level causes from DMA_ISR to handle_mac_intrs(). */
485 irqreturn_t eqos_common_isr(int irq, void *device_id)
488 struct eqos_prv_data *pdata = (struct eqos_prv_data *)device_id;
491 pr_debug("-->%s()\n", __func__);
497 pr_debug("DMA_ISR = %#lx\n", dma_isr);
500 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++)
501 handle_non_ti_ri_chan_intrs(pdata, qinx);
503 handle_mac_intrs(pdata, dma_isr);
505 pr_debug("<--%s()\n", __func__);
511 /* Only used when multi irq is enabled.
512 * Will only handle tx/rx for one channel.
/* Per-channel ISR: maps the firing IRQ number back to its channel index by
 * comparing against rx_irqs[]/tx_irqs[], then services that channel's TI/RI
 * causes under the channel's irq_lock. */
514 irqreturn_t eqos_ch_isr(int irq, void *device_id)
516 struct eqos_prv_data *pdata = (struct eqos_prv_data *)device_id;
521 i = smp_processor_id();
523 if ((irq == pdata->rx_irqs[0]) || (irq == pdata->tx_irqs[0]))
525 else if ((irq == pdata->rx_irqs[1]) || (irq == pdata->tx_irqs[1]))
527 else if ((irq == pdata->rx_irqs[2]) || (irq == pdata->tx_irqs[2]))
529 else if ((irq == pdata->rx_irqs[3]) || (irq == pdata->tx_irqs[3]))
532 pr_debug("-->%s(): cpu=%d, chan=%d\n", __func__, i, qinx);
535 handle_ti_ri_chan_intrs(pdata, qinx, &napi_sched);
537 pr_debug("%s(): irq %d not handled\n", __func__, irq);
541 spin_lock(&pdata->chinfo[qinx].irq_lock);
542 handle_ti_ri_chan_intrs(pdata, qinx, &napi_sched);
543 spin_unlock(&pdata->chinfo[qinx].irq_lock);
545 pr_debug("<--%s()\n", __func__);
552 * \brief API to get all hw features.
554 * \details This function is used to check what are all the different
555 * features the device supports.
557 * \param[in] pdata - pointer to driver private structure
562 void eqos_get_all_hw_features(struct eqos_prv_data *pdata)
564 unsigned int mac_hfr0;
565 unsigned int mac_hfr1;
566 unsigned int mac_hfr2;
568 pr_debug("-->eqos_get_all_hw_features\n");
/* Read the three MAC hardware-feature registers and decode each bitfield
 * into pdata->hw_feat via (reg >> shift) & mask. */
570 MAC_HFR0_RD(mac_hfr0);
571 MAC_HFR1_RD(mac_hfr1);
572 MAC_HFR2_RD(mac_hfr2);
574 memset(&pdata->hw_feat, 0, sizeof(pdata->hw_feat));
575 pdata->hw_feat.mii_sel = ((mac_hfr0 >> 0) & MAC_HFR0_MIISEL_MASK);
576 pdata->hw_feat.gmii_sel = ((mac_hfr0 >> 1) & MAC_HFR0_GMIISEL_MASK);
577 pdata->hw_feat.hd_sel = ((mac_hfr0 >> 2) & MAC_HFR0_HDSEL_MASK);
578 pdata->hw_feat.pcs_sel = ((mac_hfr0 >> 3) & MAC_HFR0_PCSSEL_MASK);
/* VLAN hash filtering is force-disabled regardless of what HW reports. */
579 pdata->hw_feat.vlan_hash_en = 0;
580 pdata->hw_feat.sma_sel = ((mac_hfr0 >> 5) & MAC_HFR0_SMASEL_MASK);
581 pdata->hw_feat.rwk_sel = ((mac_hfr0 >> 6) & MAC_HFR0_RWKSEL_MASK);
582 pdata->hw_feat.mgk_sel = ((mac_hfr0 >> 7) & MAC_HFR0_MGKSEL_MASK);
583 pdata->hw_feat.mmc_sel = ((mac_hfr0 >> 8) & MAC_HFR0_MMCSEL_MASK);
584 pdata->hw_feat.arp_offld_en =
585 ((mac_hfr0 >> 9) & MAC_HFR0_ARPOFFLDEN_MASK);
586 pdata->hw_feat.ts_sel = ((mac_hfr0 >> 12) & MAC_HFR0_TSSSEL_MASK);
587 pdata->hw_feat.eee_sel = ((mac_hfr0 >> 13) & MAC_HFR0_EEESEL_MASK);
588 pdata->hw_feat.tx_coe_sel = ((mac_hfr0 >> 14) & MAC_HFR0_TXCOESEL_MASK);
589 pdata->hw_feat.rx_coe_sel = ((mac_hfr0 >> 16) & MAC_HFR0_RXCOE_MASK);
590 pdata->hw_feat.mac_addr16_sel =
591 ((mac_hfr0 >> 18) & MAC_HFR0_ADDMACADRSEL_MASK);
592 pdata->hw_feat.mac_addr32_sel =
593 ((mac_hfr0 >> 23) & MAC_HFR0_MACADR32SEL_MASK);
594 pdata->hw_feat.mac_addr64_sel =
595 ((mac_hfr0 >> 24) & MAC_HFR0_MACADR64SEL_MASK);
596 pdata->hw_feat.tsstssel = ((mac_hfr0 >> 25) & MAC_HFR0_TSINTSEL_MASK);
597 pdata->hw_feat.sa_vlan_ins =
598 ((mac_hfr0 >> 27) & MAC_HFR0_SAVLANINS_MASK);
599 pdata->hw_feat.act_phy_sel =
600 ((mac_hfr0 >> 28) & MAC_HFR0_ACTPHYSEL_MASK);
602 pdata->hw_feat.rx_fifo_size =
603 ((mac_hfr1 >> 0) & MAC_HFR1_RXFIFOSIZE_MASK);
604 pdata->hw_feat.tx_fifo_size =
605 ((mac_hfr1 >> 6) & MAC_HFR1_TXFIFOSIZE_MASK);
606 pdata->hw_feat.adv_ts_hword =
607 ((mac_hfr1 >> 13) & MAC_HFR1_ADVTHWORD_MASK);
608 pdata->hw_feat.dcb_en = ((mac_hfr1 >> 16) & MAC_HFR1_DCBEN_MASK);
609 pdata->hw_feat.sph_en = ((mac_hfr1 >> 17) & MAC_HFR1_SPHEN_MASK);
610 pdata->hw_feat.tso_en = ((mac_hfr1 >> 18) & MAC_HFR1_TSOEN_MASK);
611 pdata->hw_feat.dma_debug_gen =
612 ((mac_hfr1 >> 19) & MAC_HFR1_DMADEBUGEN_MASK);
613 pdata->hw_feat.av_sel = ((mac_hfr1 >> 20) & MAC_HFR1_AVSEL_MASK);
614 pdata->hw_feat.lp_mode_en = ((mac_hfr1 >> 23) & MAC_HFR1_LPMODEEN_MASK);
/* With perfect L2 filtering the hash table is unused, so report size 0. */
615 #ifdef ENABLE_PERFECT_L2_FILTER
616 pdata->hw_feat.hash_tbl_sz = 0;
618 pdata->hw_feat.hash_tbl_sz =
619 ((mac_hfr1 >> 24) & MAC_HFR1_HASHTBLSZ_MASK);
621 pdata->hw_feat.l3l4_filter_num =
622 ((mac_hfr1 >> 27) & MAC_HFR1_L3L4FILTERNUM_MASK);
624 pdata->hw_feat.rx_q_cnt = ((mac_hfr2 >> 0) & MAC_HFR2_RXQCNT_MASK);
625 pdata->hw_feat.tx_q_cnt = ((mac_hfr2 >> 6) & MAC_HFR2_TXQCNT_MASK);
626 pdata->hw_feat.rx_ch_cnt = ((mac_hfr2 >> 12) & MAC_HFR2_RXCHCNT_MASK);
627 pdata->hw_feat.tx_ch_cnt = ((mac_hfr2 >> 18) & MAC_HFR2_TXCHCNT_MASK);
628 pdata->hw_feat.pps_out_num =
629 ((mac_hfr2 >> 24) & MAC_HFR2_PPSOUTNUM_MASK);
630 pdata->hw_feat.aux_snap_num =
631 ((mac_hfr2 >> 28) & MAC_HFR2_AUXSNAPNUM_MASK);
/* Derive the number of usable MAC address registers from the widest
 * address-register bank the hardware reports. */
633 if (pdata->hw_feat.mac_addr64_sel)
634 pdata->max_addr_reg_cnt = 128;
635 else if (pdata->hw_feat.mac_addr32_sel)
636 pdata->max_addr_reg_cnt = 64;
637 else if (pdata->hw_feat.mac_addr16_sel)
638 pdata->max_addr_reg_cnt = 32;
640 pdata->max_addr_reg_cnt = 1;
/* Map the encoded hash-table-size field to the size in bits. */
642 switch (pdata->hw_feat.hash_tbl_sz) {
644 pdata->max_hash_table_size = 0;
647 pdata->max_hash_table_size = 64;
650 pdata->max_hash_table_size = 128;
653 pdata->max_hash_table_size = 256;
657 pr_debug("<--eqos_get_all_hw_features\n");
661 * \brief API to print all hw features.
663 * \details This function is used to print all the device feature.
665 * \param[in] pdata - pointer to driver private structure
/* Dump every decoded pdata->hw_feat field to the kernel log (pr_err is used
 * for visibility at default loglevels, not to signal errors). Also mirrors
 * hw_feat.vlan_hash_en into pdata->vlan_hash_filtering as a side effect. */
670 void eqos_print_all_hw_features(struct eqos_prv_data *pdata)
674 pr_debug("-->eqos_print_all_hw_features\n");
677 pr_err("=====================================================/\n");
679 pr_err("10/100 Mbps Support : %s\n",
680 pdata->hw_feat.mii_sel ? "YES" : "NO");
681 pr_err("1000 Mbps Support : %s\n",
682 pdata->hw_feat.gmii_sel ? "YES" : "NO");
683 pr_err("Half-duplex Support : %s\n",
684 pdata->hw_feat.hd_sel ? "YES" : "NO");
685 pr_err("PCS Registers(TBI/SGMII/RTBI PHY interface) : %s\n",
686 pdata->hw_feat.pcs_sel ? "YES" : "NO");
687 pr_err("VLAN Hash Filter Selected : %s\n",
688 pdata->hw_feat.vlan_hash_en ? "YES" : "NO");
689 pdata->vlan_hash_filtering = pdata->hw_feat.vlan_hash_en;
690 pr_err("SMA (MDIO) Interface : %s\n",
691 pdata->hw_feat.sma_sel ? "YES" : "NO");
692 pr_err("PMT Remote Wake-up Packet Enable : %s\n",
693 pdata->hw_feat.rwk_sel ? "YES" : "NO");
694 pr_err("PMT Magic Packet Enable : %s\n",
695 pdata->hw_feat.mgk_sel ? "YES" : "NO");
696 pr_err("RMON/MMC Module Enable : %s\n",
697 pdata->hw_feat.mmc_sel ? "YES" : "NO");
698 pr_err("ARP Offload Enabled : %s\n",
699 pdata->hw_feat.arp_offld_en ? "YES" : "NO");
700 pr_err("IEEE 1588-2008 Timestamp Enabled : %s\n",
701 pdata->hw_feat.ts_sel ? "YES" : "NO");
702 pr_err("Energy Efficient Ethernet Enabled : %s\n",
703 pdata->hw_feat.eee_sel ? "YES" : "NO");
704 pr_err("Transmit Checksum Offload Enabled : %s\n",
705 pdata->hw_feat.tx_coe_sel ? "YES" : "NO");
706 pr_err("Receive Checksum Offload Enabled : %s\n",
707 pdata->hw_feat.rx_coe_sel ? "YES" : "NO");
708 pr_err("MAC Addresses 16–31 Selected : %s\n",
709 pdata->hw_feat.mac_addr16_sel ? "YES" : "NO");
710 pr_err("MAC Addresses 32–63 Selected : %s\n",
711 pdata->hw_feat.mac_addr32_sel ? "YES" : "NO");
712 pr_err("MAC Addresses 64–127 Selected : %s\n",
713 pdata->hw_feat.mac_addr64_sel ? "YES" : "NO");
/* Each switch below maps an encoded hw_feat field to a description string. */
715 switch (pdata->hw_feat.tsstssel) {
729 pr_err("Timestamp System Time Source : %s\n", str);
730 pr_err("Source Address or VLAN Insertion Enable : %s\n",
731 pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
733 switch (pdata->hw_feat.act_phy_sel) {
761 pr_err("Active PHY Selected : %s\n", str);
763 switch (pdata->hw_feat.rx_fifo_size) {
803 pr_err("MTL Receive FIFO Size : %s\n", str);
805 switch (pdata->hw_feat.tx_fifo_size) {
845 pr_err("MTL Transmit FIFO Size : %s\n", str);
846 pr_err("IEEE 1588 High Word Register Enable : %s\n",
847 pdata->hw_feat.adv_ts_hword ? "YES" : "NO");
848 pr_err("DCB Feature Enable : %s\n",
849 pdata->hw_feat.dcb_en ? "YES" : "NO");
850 pr_err("Split Header Feature Enable : %s\n",
851 pdata->hw_feat.sph_en ? "YES" : "NO");
852 pr_err("TCP Segmentation Offload Enable : %s\n",
853 pdata->hw_feat.tso_en ? "YES" : "NO");
854 pr_err("DMA Debug Registers Enabled : %s\n",
855 pdata->hw_feat.dma_debug_gen ? "YES" : "NO");
856 pr_err("AV Feature Enabled : %s\n",
857 pdata->hw_feat.av_sel ? "YES" : "NO");
858 pr_err("Low Power Mode Enabled : %s\n",
859 pdata->hw_feat.lp_mode_en ? "YES" : "NO");
861 switch (pdata->hw_feat.hash_tbl_sz) {
863 str = "No hash table selected";
875 pr_err("Hash Table Size : %s\n", str);
877 ("Total number of L3 or L4 Filters : %d L3/L4 Filter\n",
878 pdata->hw_feat.l3l4_filter_num);
879 pr_err("Number of MTL Receive Queues : %d\n",
880 (pdata->hw_feat.rx_q_cnt + 1));
881 pr_err("Number of MTL Transmit Queues : %d\n",
882 (pdata->hw_feat.tx_q_cnt + 1));
883 pr_err("Number of DMA Receive Channels : %d\n",
884 (pdata->hw_feat.rx_ch_cnt + 1));
885 pr_err("Number of DMA Transmit Channels : %d\n",
886 (pdata->hw_feat.tx_ch_cnt + 1));
888 switch (pdata->hw_feat.pps_out_num) {
890 str = "No PPS output";
893 str = "1 PPS output";
896 str = "2 PPS output";
899 str = "3 PPS output";
902 str = "4 PPS output";
907 pr_err("Number of PPS Outputs : %s\n", str);
909 switch (pdata->hw_feat.aux_snap_num) {
911 str = "No auxillary input";
914 str = "1 auxillary input";
917 str = "2 auxillary input";
920 str = "3 auxillary input";
923 str = "4 auxillary input";
928 pr_err("Number of Auxiliary Snapshot Inputs : %s", str);
931 pr_err("=====================================================/\n");
933 pr_debug("<--eqos_print_all_hw_features\n");
937 * \brief allocation of Rx skb's for default rx mode.
939 * \details This function is invoked by other api's for
940 * allocating the Rx skb's with default Rx mode.
942 * \param[in] pdata – pointer to private data structure.
943 * \param[in] buffer – pointer to wrapper receive buffer data structure.
944 * \param[in] gfp – the type of memory allocation.
948 * \retval 0 on success and -ve number on failure.
951 static int eqos_alloc_rx_buf(struct eqos_prv_data *pdata,
952 struct rx_swcx_desc *prx_swcx_desc, gfp_t gfp)
954 struct sk_buff *skb = prx_swcx_desc->skb;
956 pr_debug("-->eqos_alloc_rx_buf\n");
/* If a DMA mapping already exists for this descriptor, it is reused below
 * (the remapping path is skipped). */
960 if (prx_swcx_desc->dma)
/* IP-aligned allocation of rx_buffer_len bytes for this descriptor. */
966 __netdev_alloc_skb_ip_align(pdata->dev, pdata->rx_buffer_len, gfp);
968 prx_swcx_desc->skb = NULL;
969 pr_err("Failed to allocate skb\n");
972 prx_swcx_desc->skb = skb;
973 prx_swcx_desc->len = pdata->rx_buffer_len;
/* Map the skb data area for device DMA; check the mapping before use. */
976 prx_swcx_desc->dma = dma_map_single(&pdata->pdev->dev, skb->data,
977 pdata->rx_buffer_len,
979 if (dma_mapping_error(&pdata->pdev->dev, prx_swcx_desc->dma)) {
980 pr_err("failed to do the RX dma map\n");
985 prx_swcx_desc->mapped_as_page = Y_FALSE;
987 pr_debug("<--eqos_alloc_rx_buf\n");
993 * \brief api to configure Rx function pointer after reset.
995 * \details This function will initialize the receive function pointers
996 * which are used for allocating skb's and receiving the packets based
999 * \param[in] pdata – pointer to private data structure.
1004 static void eqos_configure_rx_fun_ptr(struct eqos_prv_data *pdata)
1006 pr_debug("-->eqos_configure_rx_fun_ptr\n");
/* Default RX path: generic completion processing + default skb allocator. */
1008 pdata->process_rx_completions = process_rx_completions;
1009 pdata->alloc_rx_buf = eqos_alloc_rx_buf;
1011 pr_debug("<--eqos_configure_rx_fun_ptr\n");
1015 * \brief api to initialize default values.
1017 * \details This function is used to initialize differnet parameters to
1018 * default values which are common parameters between Tx and Rx path.
1020 * \param[in] pdata – pointer to private data structure.
1025 static void eqos_default_common_confs(struct eqos_prv_data *pdata)
1027 pr_debug("-->eqos_default_common_confs\n");
1029 pdata->drop_tx_pktburstcnt = 1;
1030 pdata->mac_enable_count = 0;
1031 pdata->incr_incrx = EQOS_INCR_ENABLE;
/* Flow control defaults to enabled in both directions. */
1032 pdata->flow_ctrl = EQOS_FLOW_CTRL_TX_RX;
1033 pdata->oldflow_ctrl = EQOS_FLOW_CTRL_TX_RX;
1034 pdata->power_down = 0;
/* No source-address insertion/replacement by default (desc or register). */
1035 pdata->tx_sa_ctrl_via_desc = EQOS_SA0_NONE;
1036 pdata->tx_sa_ctrl_via_reg = EQOS_SA0_NONE;
1037 pdata->hwts_tx_en = 0;
1038 pdata->hwts_rx_en = 0;
1039 pdata->l3_l4_filter = 0;
/* Use hash L2 filtering only if the HW actually has a hash table. */
1040 pdata->l2_filtering_mode = !!pdata->hw_feat.hash_tbl_sz;
1041 pdata->tx_path_in_lpi_mode = 0;
1042 pdata->use_lpi_tx_automate = true;
1043 pdata->eee_active = 0;
1044 pdata->one_nsec_accuracy = 1;
1046 pr_debug("<--eqos_default_common_confs\n");
1050 * \brief api to initialize Tx parameters.
1052 * \details This function is used to initialize all Tx
1053 * parameters to default values on reset.
1055 * \param[in] pdata – pointer to private data structure.
1056 * \param[in] qinx – DMA channel/queue number to be initialized.
1061 static void eqos_default_tx_confs_single_q(struct eqos_prv_data *pdata,
1064 struct eqos_tx_queue *queue_data = GET_TX_QUEUE_PTR(qinx);
1065 struct tx_ring *ptx_ring =
1066 GET_TX_WRAPPER_DESC(qinx);
1068 pr_debug("-->eqos_default_tx_confs_single_q\n");
/* Per-queue operating mode comes from the q_op_mode module parameter. */
1070 queue_data->q_op_mode = q_op_mode[qinx];
1072 ptx_ring->tx_threshold_val = EQOS_TX_THRESHOLD_32;
1073 ptx_ring->tsf_on = EQOS_TSF_ENABLE;
1074 ptx_ring->osf_on = EQOS_OSF_ENABLE;
1075 ptx_ring->tx_pbl = EQOS_PBL_16;
1076 ptx_ring->tx_vlan_tag_via_reg = Y_FALSE;
1077 ptx_ring->tx_vlan_tag_ctrl = EQOS_TX_VLAN_TAG_INSERT;
1078 ptx_ring->vlan_tag_present = 0;
1079 ptx_ring->context_setup = 0;
1080 ptx_ring->default_mss = 0;
1082 pr_debug("<--eqos_default_tx_confs_single_q\n");
1086 * \brief api to initialize Rx parameters.
1088 * \details This function is used to initialize all Rx
1089 * parameters to default values on reset.
1091 * \param[in] pdata – pointer to private data structure.
1092 * \param[in] qinx – DMA queue/channel number to be initialized.
1097 static void eqos_default_rx_confs_single_q(struct eqos_prv_data *pdata,
1100 struct rx_ring *prx_ring =
1101 GET_RX_WRAPPER_DESC(qinx);
1103 pr_debug("-->eqos_default_rx_confs_single_q\n");
1105 prx_ring->rx_threshold_val = EQOS_RX_THRESHOLD_64;
1106 prx_ring->rsf_on = EQOS_RSF_DISABLE;
1107 prx_ring->rx_pbl = EQOS_PBL_16;
/* Strip both outer and inner VLAN tags on receive by default. */
1108 prx_ring->rx_outer_vlan_strip = EQOS_RX_VLAN_STRIP_ALWAYS;
1109 prx_ring->rx_inner_vlan_strip = EQOS_RX_VLAN_STRIP_ALWAYS;
1111 pr_debug("<--eqos_default_rx_confs_single_q\n");
/* Apply the per-queue TX defaults to every TX queue. */
1114 static void eqos_default_tx_confs(struct eqos_prv_data *pdata)
1118 pr_debug("-->eqos_default_tx_confs\n");
1120 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++) {
1121 eqos_default_tx_confs_single_q(pdata, qinx);
1124 pr_debug("<--eqos_default_tx_confs\n");
/* Apply the per-queue RX defaults to every RX queue. */
1127 static void eqos_default_rx_confs(struct eqos_prv_data *pdata)
1131 pr_debug("-->eqos_default_rx_confs\n");
1133 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
1134 eqos_default_rx_confs_single_q(pdata, qinx);
1137 pr_debug("<--eqos_default_rx_confs\n");
/* Release the common IRQ and every per-channel RX/TX IRQ that was actually
 * requested (tracked via rx_irq_alloc_mask/tx_irq_alloc_mask), clearing each
 * IRQ's CPU affinity hint before freeing it. */
1140 void free_txrx_irqs(struct eqos_prv_data *pdata)
1144 pr_debug("-->%s()\n", __func__);
1146 free_irq(pdata->common_irq, pdata);
1148 for (i = 0; i < pdata->num_chans; i++) {
1149 if (pdata->rx_irq_alloc_mask & (1 << i)) {
1150 irq_set_affinity_hint(pdata->rx_irqs[i], NULL);
1151 free_irq(pdata->rx_irqs[i], pdata);
1153 if (pdata->tx_irq_alloc_mask & (1 << i)) {
1154 irq_set_affinity_hint(pdata->tx_irqs[i], NULL);
1155 free_irq(pdata->tx_irqs[i], pdata);
1159 pr_debug("<--%s()\n", __func__);
/* Request the shared common IRQ plus per-channel RX/TX IRQs, naming each in
 * irq_names[] for /proc/interrupts, pinning each channel's IRQs to that
 * channel's CPU and recording successes in rx/tx_irq_alloc_mask.
 * Returns Y_SUCCESS or the request_irq() error after unwinding.
 * NOTE(review): the error path frees common_irq after free_txrx_irqs() has
 * already freed it (see lines 1214-1215) — looks like a double free_irq;
 * confirm against the elided goto labels. */
1162 int request_txrx_irqs(struct eqos_prv_data *pdata)
1164 int ret = Y_SUCCESS;
1166 struct chan_data *pchinfo;
1167 struct platform_device *pdev = pdata->pdev;
1169 pr_debug("-->%s()\n", __func__);
1171 pdata->irq_number = pdata->dev->irq;
1173 ret = request_irq(pdata->common_irq,
1174 eqos_common_isr, IRQF_SHARED, "ether_qos.common_irq", pdata);
1175 if (ret != Y_SUCCESS) {
1176 pr_err("Unable to register %d\n", pdata->common_irq);
1178 goto err_common_irq;
1181 for (i = 0; i < pdata->num_chans; i++) {
1183 snprintf(irq_names[j], 32, "%s.rx%d", dev_name(&pdev->dev), i);
1184 ret = request_irq(pdata->rx_irqs[i],
1185 eqos_ch_isr, 0, irq_names[j++], pdata);
1187 pr_err("Unable to register %d\n", pdata->rx_irqs[i]);
1191 snprintf(irq_names[j], 32, "%s.tx%d", dev_name(&pdev->dev), i);
1192 ret = request_irq(pdata->tx_irqs[i],
1193 eqos_ch_isr, 0, irq_names[j++], pdata);
1195 pr_err("Unable to register %d\n", pdata->tx_irqs[i]);
1199 pchinfo = &pdata->chinfo[i];
/* Pin both of this channel's IRQs to the channel's designated CPU. */
1201 irq_set_affinity_hint(pdata->rx_irqs[i],
1202 cpumask_of(pchinfo->cpu));
1203 pdata->rx_irq_alloc_mask |= (1 << i);
1205 irq_set_affinity_hint(pdata->tx_irqs[i],
1206 cpumask_of(pchinfo->cpu));
1207 pdata->tx_irq_alloc_mask |= (1 << i);
1209 pr_debug("<--%s()\n", __func__);
1214 free_txrx_irqs(pdata);
1215 free_irq(pdata->common_irq, pdata);
1218 pr_debug("<--%s(): error\n", __func__);
1224 * \brief API to open a device for data transmission & reception.
1226 * \details Opens the interface. The interface is opened whenever
1227 * ifconfig activates it. The open method should register any
1228 * system resource it needs like I/O ports, IRQ, DMA, etc,
1229 * turn on the hardware, and perform any other setup your device requires.
1231 * \param[in] dev - pointer to net_device structure
1235 * \retval 0 on success & negative number on failure.
1238 static int eqos_open(struct net_device *dev)
1240 struct eqos_prv_data *pdata = netdev_priv(dev);
1241 int ret = Y_SUCCESS;
1242 struct desc_if_struct *desc_if = &pdata->desc_if;
1244 pr_debug("-->eqos_open\n");
1246 if (!is_valid_ether_addr(dev->dev_addr))
1247 return -EADDRNOTAVAIL;
/* Pulse the PHY reset GPIO (low for ~10us, then high) before attach. */
1250 gpio_set_value(pdata->phy_reset_gpio, 0);
1251 usleep_range(10, 11);
1252 gpio_set_value(pdata->phy_reset_gpio, 1);
1254 /* PHY initialisation */
1255 ret = eqos_init_phy(dev);
1257 dev_err(&dev->dev, "%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
1261 ret = request_txrx_irqs(pdata);
1262 if (ret != Y_SUCCESS)
1265 ret = desc_if->alloc_buff_and_desc(pdata);
1267 dev_err(&pdata->pdev->dev,
1268 "Failed to allocate buffer/descriptor memory\n");
1270 goto err_out_desc_buf_alloc_failed;
/* Start the device under hw_change_lock so suspend/resume cannot race. */
1273 mutex_lock(&pdata->hw_change_lock);
1274 eqos_start_dev(pdata);
1276 pdata->hw_stopped = false;
1277 mutex_unlock(&pdata->hw_change_lock);
1279 pr_debug("<--%s()\n", __func__);
1282 err_out_desc_buf_alloc_failed:
1283 free_txrx_irqs(pdata);
1286 pr_debug("<--%s()\n", __func__);
1291 * \brief API to close a device.
1293 * \details Stops the interface. The interface is stopped when it is brought
1294 * down. This function should reverse operations performed at open time.
1296 * \param[in] dev - pointer to net_device structure
1300 * \retval 0 on success & negative number on failure.
1303 static int eqos_close(struct net_device *dev)
1305 struct eqos_prv_data *pdata = netdev_priv(dev);
1306 struct desc_if_struct *desc_if = &pdata->desc_if;
1308 pr_debug("-->%s\n", __func__);
1310 /* Put PHY in low power mode */
/* low_power_mode is an optional driver hook; guard each deref. */
1311 if (pdata->phydev && pdata->phydev->drv &&
1312 pdata->phydev->drv->low_power_mode)
1313 pdata->phydev->drv->low_power_mode(pdata->phydev, true);
1315 /* Stop and disconnect the PHY */
1316 if (pdata->phydev) {
1317 phy_stop(pdata->phydev);
1318 phy_disconnect(pdata->phydev);
/* Hold the PHY in reset (GPIO low) while the interface is down. */
1319 gpio_set_value(pdata->phy_reset_gpio, 0);
1320 pdata->phydev = NULL;
/* Stop HW and release resources under hw_change_lock (mirrors eqos_open). */
1323 mutex_lock(&pdata->hw_change_lock);
1324 eqos_stop_dev(pdata);
1326 desc_if->free_buff_and_desc(pdata);
1327 free_txrx_irqs(pdata);
1329 pdata->hw_stopped = true;
1330 mutex_unlock(&pdata->hw_change_lock);
1332 /* cancel iso work */
1333 cancel_work_sync(&pdata->iso_work);
1334 /* Cancel FBE handling work */
1335 cancel_work_sync(&pdata->fbe_work);
1337 pr_debug("<--%s\n", __func__);
1342 * \brief API to configure the multicast address in device.
1344 * \details This function collects all the multicast addresses
1345 * and updates the device.
1347 * \param[in] dev - pointer to net_device structure.
1349 * \retval 0 if perfect filtering is selected & 1 if hash
1350 * filtering is selected.
/*
 * eqos_prepare_mc_list - program MAC multicast filtering.
 *
 * In hash mode (l2_filtering_mode set) each multicast address is hashed
 * with CRC32-LE, bit-reversed, and the top 6/7/8 bits (for a 64/128/256
 * entry table) pick a bit in the hash-table registers.  Otherwise each
 * address is written to a perfect-match MAC address register.
 *
 * NOTE(review): excerpt — some statements/braces are elided.
 */
1352 static int eqos_prepare_mc_list(struct net_device *dev)
1354 struct eqos_prv_data *pdata = netdev_priv(dev);
1355 struct hw_if_struct *hw_if = &(pdata->hw_if);
1356 u32 mc_filter[EQOS_HTR_CNT];
1357 struct netdev_hw_addr *ha = NULL;
1361 DBGPR_FILTER("-->eqos_prepare_mc_list\n");
1363 if (pdata->l2_filtering_mode) {
1365 ("select HASH FILTERING for mc addresses: mc_count = %d\n",
1366 netdev_mc_count(dev));
1368 memset(mc_filter, 0, sizeof(mc_filter));
1370 if (pdata->max_hash_table_size == 64) {
1371 netdev_for_each_mc_addr(ha, dev) {
1373 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1374 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1375 ha->addr[3], ha->addr[4], ha->addr[5]);
1376 /* The upper 6 bits of the calculated CRC are used to
1377 * index the content of the Hash Table Reg 0 and 1.
1380 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1382 /* The most significant bit determines the register
1383 * to use (Hash Table Reg X, X = 0 and 1) while the
1384 * other 5(0x1F) bits determines the bit within the
1387 mc_filter[crc32_val >> 5] |=
1388 (1 << (crc32_val & 0x1F));
1390 } else if (pdata->max_hash_table_size == 128) {
1391 netdev_for_each_mc_addr(ha, dev) {
1393 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1394 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1395 ha->addr[3], ha->addr[4], ha->addr[5]);
1396 /* The upper 7 bits of the calculated CRC are used to
1397 * index the content of the Hash Table Reg 0,1,2 and 3.
1400 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1403 pr_err("crc_le = %#x, crc_be = %#x\n",
1404 bitrev32(~crc32_le(~0, ha->addr, 6)),
1405 bitrev32(~crc32_be(~0, ha->addr, 6)));
1407 /* The most significant 2 bits determines the register
1408 * to use (Hash Table Reg X, X = 0,1,2 and 3) while the
1409 * other 5(0x1F) bits determines the bit within the
1412 mc_filter[crc32_val >> 5] |=
1413 (1 << (crc32_val & 0x1F));
1415 } else if (pdata->max_hash_table_size == 256) {
1416 netdev_for_each_mc_addr(ha, dev) {
1418 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1419 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1420 ha->addr[3], ha->addr[4], ha->addr[5]);
1421 /* The upper 8 bits of the calculated CRC are used to
1422 * index the content of the Hash Table Reg 0,1,2,3,4,
1426 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1428 /* The most significant 3 bits determines the register
1429 * to use (Hash Table Reg X, X = 0,1,2,3,4,5,6 and 7) while
1430 * the other 5(0x1F) bits determines the bit within the
1433 mc_filter[crc32_val >> 5] |=
1434 (1 << (crc32_val & 0x1F));
/* Flush the accumulated bitmap into the hash-table registers. */
1438 for (i = 0; i < EQOS_HTR_CNT; i++)
1439 hw_if->update_hash_table_reg(i, mc_filter[i]);
1443 ("select PERFECT FILTERING for mc addresses, mc_count = %d, max_addr_reg_cnt = %d\n",
1444 netdev_mc_count(dev), pdata->max_addr_reg_cnt);
/* Perfect filtering: one MAC address register per multicast address. */
1446 netdev_for_each_mc_addr(ha, dev) {
1447 DBGPR_FILTER("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1449 ha->addr[0], ha->addr[1], ha->addr[2],
1450 ha->addr[3], ha->addr[4], ha->addr[5]);
1452 hw_if->update_mac_addr1_31_low_high_reg(i,
1456 hw_if->update_mac_addr32_127_low_high_reg(i,
/* Optionally steer tagged PTP traffic to a dedicated channel. */
1460 if ((pdata->ptp_cfg.use_tagged_ptp) &&
1461 (is_ptp_addr(ha->addr)))
1462 hw_if->config_ptp_channel(pdata->ptp_cfg.
1469 DBGPR_FILTER("<--eqos_prepare_mc_list\n");
1475 * \brief API to configure the unicast address in device.
1477 * \details This function collects all the unicast addresses
1478 * and updates the device.
1480 * \param[in] dev - pointer to net_device structure.
1482 * \retval 0 if perfect filtering is selected & 1 if hash
1483 * filtering is selected.
/*
 * eqos_prepare_uc_list - program MAC unicast filtering.
 *
 * Mirrors eqos_prepare_mc_list() for unicast addresses: hash filtering
 * (CRC32-LE, bit-reversed, top 6/7/8 bits index a 64/128/256-entry hash
 * table) or perfect filtering via MAC address registers.  In hash mode
 * the interface's own dev_addr is hashed in as well.
 *
 * NOTE(review): excerpt — some statements/braces are elided.
 */
1485 static int eqos_prepare_uc_list(struct net_device *dev)
1487 struct eqos_prv_data *pdata = netdev_priv(dev);
1488 struct hw_if_struct *hw_if = &(pdata->hw_if);
1489 u32 uc_filter[EQOS_HTR_CNT];
1490 struct netdev_hw_addr *ha = NULL;
1494 DBGPR_FILTER("-->eqos_prepare_uc_list\n");
1496 if (pdata->l2_filtering_mode) {
1498 ("select HASH FILTERING for uc addresses: uc_count = %d\n",
1499 netdev_uc_count(dev));
1501 memset(uc_filter, 0, sizeof(uc_filter));
/* 64-entry table: top 6 CRC bits select register (>>5) and bit (&0x1F). */
1503 if (pdata->max_hash_table_size == 64) {
1504 netdev_for_each_uc_addr(ha, dev) {
1506 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1507 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1508 ha->addr[3], ha->addr[4], ha->addr[5]);
1510 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1512 uc_filter[crc32_val >> 5] |=
1513 (1 << (crc32_val & 0x1F));
/* 128-entry table: top 7 CRC bits. */
1515 } else if (pdata->max_hash_table_size == 128) {
1516 netdev_for_each_uc_addr(ha, dev) {
1518 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1519 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1520 ha->addr[3], ha->addr[4], ha->addr[5]);
1522 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1524 uc_filter[crc32_val >> 5] |=
1525 (1 << (crc32_val & 0x1F));
/* 256-entry table: top 8 CRC bits. */
1527 } else if (pdata->max_hash_table_size == 256) {
1528 netdev_for_each_uc_addr(ha, dev) {
1530 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1531 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1532 ha->addr[3], ha->addr[4], ha->addr[5]);
1534 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1536 uc_filter[crc32_val >> 5] |=
1537 (1 << (crc32_val & 0x1F));
1541 /* configure hash value of real/default interface also */
1543 ("real/default dev_addr = %#x:%#x:%#x:%#x:%#x:%#x\n",
1544 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1545 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1547 if (pdata->max_hash_table_size == 64) {
1549 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 26);
1550 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
1551 } else if (pdata->max_hash_table_size == 128) {
1553 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 25);
1554 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
1556 } else if (pdata->max_hash_table_size == 256) {
1558 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 24);
1559 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
/* Flush the accumulated bitmap into the hash-table registers. */
1562 for (i = 0; i < EQOS_HTR_CNT; i++)
1563 hw_if->update_hash_table_reg(i, uc_filter[i]);
1567 ("select PERFECT FILTERING for uc addresses: uc_count = %d\n",
1568 netdev_uc_count(dev));
/* Perfect filtering: one MAC address register per unicast address. */
1570 netdev_for_each_uc_addr(ha, dev) {
1571 DBGPR_FILTER("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1572 i, ha->addr[0], ha->addr[1], ha->addr[2],
1573 ha->addr[3], ha->addr[4], ha->addr[5]);
1575 hw_if->update_mac_addr1_31_low_high_reg(i,
1579 hw_if->update_mac_addr32_127_low_high_reg(i,
1586 DBGPR_FILTER("<--eqos_prepare_uc_list\n");
1592 * \brief API to set the device receive mode
1594 * \details The set_multicast_list function is called when the multicast list
1595 * for the device changes and when the flags change.
1597 * \param[in] dev - pointer to net_device structure.
/*
 * eqos_set_rx_mode - ndo_set_rx_mode callback.
 *
 * Chooses the MAC packet-filter configuration from dev->flags and the
 * current unicast/multicast lists: promiscuous, pass-all-multicast, hash
 * filtering or perfect filtering, then writes the result via
 * config_mac_pkt_filter_reg().  Runs under pdata->lock.
 *
 * NOTE(review): excerpt — the branches that set pr_mode/hmc_mode/huc_mode/
 * pm_mode/hpf_mode are partially elided.
 */
1601 static void eqos_set_rx_mode(struct net_device *dev)
1603 struct eqos_prv_data *pdata = netdev_priv(dev);
1604 struct hw_if_struct *hw_if = &(pdata->hw_if);
1605 unsigned char pr_mode = 0;
1606 unsigned char huc_mode = 0;
1607 unsigned char hmc_mode = 0;
1608 unsigned char pm_mode = 0;
1609 unsigned char hpf_mode = 0;
1612 DBGPR_FILTER("-->eqos_set_rx_mode\n");
1614 spin_lock(&pdata->lock);
1616 if (dev->flags & IFF_PROMISC) {
1618 ("PROMISCUOUS MODE (Accept all packets irrespective of DA)\n");
1620 #ifdef ENABLE_PERFECT_L2_FILTER
1621 } else if ((dev->flags & IFF_ALLMULTI)) {
/* Without perfect-only filtering, also pass-all when the mc list would
 * overflow the hash table. */
1623 } else if ((dev->flags & IFF_ALLMULTI) ||
1624 (netdev_mc_count(dev) > (pdata->max_hash_table_size))) {
1626 DBGPR_FILTER("pass all multicast pkt\n");
/* Accept every multicast frame by setting all hash-table bits. */
1628 if (pdata->max_hash_table_size) {
1629 for (i = 0; i < EQOS_HTR_CNT; i++)
1630 hw_if->update_hash_table_reg(i, 0xffffffff);
1632 } else if (!netdev_mc_empty(dev)) {
1633 DBGPR_FILTER("pass list of multicast pkt\n");
/* No hash table and too many addresses for the perfect-match regs:
 * fall back to promiscuous. */
1634 if ((netdev_mc_count(dev) > (pdata->max_addr_reg_cnt - 1)) &&
1635 (!pdata->max_hash_table_size)) {
1636 /* switch to PROMISCUOUS mode */
1639 mode = eqos_prepare_mc_list(dev);
1641 /* Hash filtering for multicast */
1644 /* Perfect filtering for multicast */
1651 /* Handle multiple unicast addresses */
1652 if ((netdev_uc_count(dev) > (pdata->max_addr_reg_cnt - 1)) &&
1653 (!pdata->max_hash_table_size)) {
1654 /* switch to PROMISCUOUS mode */
1656 } else if (!netdev_uc_empty(dev)) {
1657 mode = eqos_prepare_uc_list(dev);
1659 /* Hash filtering for unicast */
1662 /* Perfect filtering for unicast */
/* Commit the selected filter modes to the MAC packet filter register. */
1668 hw_if->config_mac_pkt_filter_reg(pr_mode, huc_mode,
1669 hmc_mode, pm_mode, hpf_mode);
1671 spin_unlock(&pdata->lock);
1673 pr_debug("<--eqos_set_rx_mode\n");
1678 * \brief API to transmit the packets
1680 * \details The start_xmit function initiates the transmission of a packet.
1681 * The full packet (protocol headers and all) is contained in a socket buffer
1682 * (sk_buff) structure.
1684 * \param[in] skb - pointer to sk_buff structure
1685 * \param[in] dev - pointer to net_device structure
/*
 * eqos_start_xmit - ndo_start_xmit callback: queue one skb for TX.
 *
 * Maps the skb to its TX queue, optionally reclaims completed
 * descriptors, fills s_tx_pkt_features (VLAN, PTP timestamp, TSO,
 * checksum offload), allocates software context entries, and kicks the
 * hardware via hw_if->pre_xmit().  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY when the ring is full.
 *
 * NOTE(review): excerpt — several lines (closing braces, some branches)
 * are elided.
 */
1692 static int eqos_start_xmit(struct sk_buff *skb, struct net_device *dev)
1694 struct eqos_prv_data *pdata = netdev_priv(dev);
1695 UINT qinx = skb_get_queue_mapping(skb);
1697 struct tx_ring *ptx_ring = GET_TX_WRAPPER_DESC(qinx);
1698 struct s_tx_pkt_features *tx_pkt_features = GET_TX_PKT_FEATURES_PTR(qinx);
1701 struct hw_if_struct *hw_if = &pdata->hw_if;
1702 struct desc_if_struct *desc_if = &pdata->desc_if;
1703 INT retval = NETDEV_TX_OK;
1706 pr_debug("-->eqos_start_xmit: skb->len = %d, qinx = %u\n", skb->len, qinx);
/* Opportunistically reclaim completions once a quarter of the ring is
 * outstanding, before taking the per-channel TX lock. */
1708 if (ptx_ring->tx_pkt_queued > (TX_DESC_CNT >> 2))
1709 process_tx_completions(pdata->dev, pdata, qinx);
1711 spin_lock(&pdata->chinfo[qinx].chan_tx_lock);
/* Drop zero-length skbs handed down by the stack. */
1713 if (skb->len <= 0) {
1714 dev_kfree_skb_any(skb);
1715 pr_err("%s : Empty skb received from stack\n", dev->name);
1716 goto tx_netdev_return;
1720 memset(tx_pkt_features, 0, sizeof(struct s_tx_pkt_features));
1722 #ifdef EQOS_ENABLE_VLAN_TAG
1723 ptx_ring->vlan_tag_present = 0;
/* vlan_tx_tag_* was renamed skb_vlan_tag_* in v4.0/4.4 kernels. */
1724 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
1725 if (vlan_tx_tag_present(skb)) {
1726 USHORT vlan_tag = vlan_tx_tag_get(skb);
1728 if (skb_vlan_tag_present(skb)) {
1729 USHORT vlan_tag = skb_vlan_tag_get(skb);
/* Fold skb priority into the PCP bits (15:13) of the VLAN TCI. */
1731 vlan_tag |= (skb->priority << 13);
1732 ptx_ring->vlan_tag_present = 1;
/* Only reprogram VLAN context when the tag changed or a context
 * descriptor setup is pending. */
1733 if (vlan_tag != ptx_ring->vlan_tag_id ||
1734 ptx_ring->context_setup == 1) {
1735 ptx_ring->vlan_tag_id = vlan_tag;
1736 if (Y_TRUE == ptx_ring->tx_vlan_tag_via_reg) {
1737 pr_err("VLAN control info update via reg\n");
1738 hw_if->enable_vlan_reg_control(ptx_ring);
1740 hw_if->enable_vlan_desc_control(pdata);
1741 TX_PKT_FEATURES_PKT_ATTRIBUTES_VLAN_PKT_WR
1742 (tx_pkt_features->pkt_attributes, 1);
1743 TX_PKT_FEATURES_VLAN_TAG_VT_WR
1744 (tx_pkt_features->vlan_tag, vlan_tag);
1747 pdata->xstats.tx_vlan_pkt_n++;
1751 /* check for hw tstamping */
1752 if (pdata->hw_feat.tsstssel && pdata->hwts_tx_en) {
1753 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1754 /* declare that device is doing timestamping */
1755 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1756 TX_PKT_FEATURES_PKT_ATTRIBUTES_PTP_ENABLE_WR
1757 (tx_pkt_features->pkt_attributes, 1);
1759 ("Got PTP pkt to transmit [qinx = %d, cur_tx = %d]\n",
1760 qinx, ptx_ring->cur_tx);
/* TSO segmentation setup; on failure the skb is dropped (not requeued). */
1764 tso = desc_if->handle_tso(dev, skb);
1766 pr_err("Unable to handle TSO\n");
1767 dev_kfree_skb_any(skb);
1768 retval = NETDEV_TX_OK;
1769 goto tx_netdev_return;
1772 pdata->xstats.tx_tso_pkt_n++;
1773 TX_PKT_FEATURES_PKT_ATTRIBUTES_TSO_ENABLE_WR(tx_pkt_features->
1775 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1776 TX_PKT_FEATURES_PKT_ATTRIBUTES_CSUM_ENABLE_WR(tx_pkt_features->
/* Reserve software-context/descriptor slots for this skb. */
1781 cnt = desc_if->tx_swcx_alloc(dev, skb);
/* Ring full: stop this subqueue and ask the stack to retry. */
1784 ptx_ring->queue_stopped = 1;
1785 netif_stop_subqueue(dev, qinx);
1786 pr_debug("%s(): TX ring full for queue %d\n",
1788 retval = NETDEV_TX_BUSY;
1789 goto tx_netdev_return;
1791 dev_kfree_skb_any(skb);
1792 retval = NETDEV_TX_OK;
1793 goto tx_netdev_return;
1796 dev->trans_start = jiffies;
1798 ptx_ring->free_desc_cnt -= cnt;
1799 ptx_ring->tx_pkt_queued += cnt;
1801 #ifdef EQOS_ENABLE_TX_PKT_DUMP
1802 print_pkt(skb, skb->len, 1, (ptx_ring->cur_tx - 1));
1805 #ifdef ENABLE_CHANNEL_DATA_CHECK
1806 check_channel_data(skb, qinx, 0);
/* Leave LPI (EEE) before transmitting unless HW automates it. */
1809 if ((pdata->eee_enabled) && (pdata->tx_path_in_lpi_mode) &&
1810 (!pdata->use_lpi_tx_automate))
1811 eqos_disable_eee_mode(pdata);
1813 /* fallback to software time stamping if core doesn't
1814 * support hardware time stamping */
1815 if ((pdata->hw_feat.tsstssel == 0) || (pdata->hwts_tx_en == 0))
1816 skb_tx_timestamp(skb);
1818 /* configure required descriptor fields for transmission */
1819 hw_if->pre_xmit(pdata, qinx);
1822 spin_unlock(&pdata->chinfo[qinx].chan_tx_lock);
1824 pr_debug("<--eqos_start_xmit\n");
/*
 * eqos_print_rx_tstamp_info - debug-print PTP status of an RX descriptor.
 *
 * Decodes rdes1 (valid only when rdes3.RS1V is set): timestamp dropped
 * (bit 15), timestamp available (bit 14), PTP version (bit 13), transport
 * (bit 12) and message type (bits 11:8), and logs them via DBGPR_PTP.
 * Debug output only — no side effects on driver state.
 */
1829 static void eqos_print_rx_tstamp_info(struct s_rx_desc *rxdesc,
1834 char *tstamp_dropped = NULL;
1835 char *tstamp_available = NULL;
1836 char *ptp_version = NULL;
1837 char *ptp_pkt_type = NULL;
1838 char *ptp_msg_type = NULL;
1840 DBGPR_PTP("-->eqos_print_rx_tstamp_info\n");
1842 /* status in rdes1 is not valid */
1843 if (!(rxdesc->rdes3 & EQOS_RDESC3_RS1V))
1846 ptp_status = rxdesc->rdes1;
1847 tstamp_dropped = ((ptp_status & 0x8000) ? "YES" : "NO");
1848 tstamp_available = ((ptp_status & 0x4000) ? "YES" : "NO");
1850 ((ptp_status & 0x2000) ? "v2 (1588-2008)" : "v1 (1588-2002)");
1852 ((ptp_status & 0x1000) ? "ptp over Eth" : "ptp over IPv4/6");
/* Message type field, rdes1 bits 11:8 (switch cases elided in excerpt). */
1854 pkt_type = ((ptp_status & 0xF00) >> 8);
1857 ptp_msg_type = "NO PTP msg received";
1860 ptp_msg_type = "SYNC";
1863 ptp_msg_type = "Follow_Up";
1866 ptp_msg_type = "Delay_Req";
1869 ptp_msg_type = "Delay_Resp";
1872 ptp_msg_type = "Pdelay_Req";
1875 ptp_msg_type = "Pdelay_Resp";
1878 ptp_msg_type = "Pdelay_Resp_Follow_up";
1881 ptp_msg_type = "Announce";
1884 ptp_msg_type = "Management";
1887 ptp_msg_type = "Signaling";
1893 ptp_msg_type = "Reserved";
1896 ptp_msg_type = "PTP pkr with Reserved Msg Type";
1900 DBGPR_PTP("Rx timestamp detail for queue %d\n"
1901 "tstamp dropped = %s\n"
1902 "tstamp available = %s\n"
1903 "PTP version = %s\n"
1904 "PTP Pkt Type = %s\n"
1905 "PTP Msg Type = %s\n",
1906 qinx, tstamp_dropped, tstamp_available,
1907 ptp_version, ptp_pkt_type, ptp_msg_type);
1909 DBGPR_PTP("<--eqos_print_rx_tstamp_info\n");
1913 * \brief API to get rx time stamp value.
1915 * \details This function will read received packet's timestamp from
1916 * the descriptor and pass it to stack and also perform some sanity checks.
1918 * \param[in] pdata - pointer to private data structure.
1919 * \param[in] skb - pointer to sk_buff structure.
1920 * \param[in] prx_ring - pointer to wrapper receive descriptor structure.
1921 * \param[in] qinx - Queue/Channel number.
1925 * \retval 0 if no context descriptor
1926 * \retval 1 if timestamp is valid
1927 * \retval 2 if time stamp is corrupted
/*
 * eqos_get_rx_hwtstamp - read an RX hardware timestamp from the ring.
 *
 * Advances to the context descriptor that follows the data descriptor,
 * polls (up to 10 tries) for a valid timestamp status, and on success
 * stores the nanosecond value into the skb's shared hwtstamps.  If the
 * device has not produced the context descriptor yet, the ring position
 * is rolled back so the caller can retry later.
 */
1930 static unsigned char eqos_get_rx_hwtstamp(struct eqos_prv_data *pdata,
1931 struct sk_buff *skb,
1933 *prx_ring, unsigned int qinx)
1935 struct s_rx_desc *prx_desc =
1936 GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
1937 struct s_rx_context_desc *rx_context_desc = NULL;
1938 struct hw_if_struct *hw_if = &(pdata->hw_if);
1939 struct skb_shared_hwtstamps *shhwtstamp = NULL;
1943 DBGPR_PTP("-->eqos_get_rx_hwtstamp\n");
1945 eqos_print_rx_tstamp_info(prx_desc, qinx);
/* The timestamp lives in the NEXT (context) descriptor: consume it. */
1947 prx_ring->dirty_rx++;
1948 INCR_RX_DESC_INDEX(prx_ring->cur_rx, 1);
1949 rx_context_desc = GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
1951 DBGPR_PTP("\nRX_CONTEX_DESC[%d %4p %d RECEIVED FROM DEVICE]"
1952 " = %#x:%#x:%#x:%#x",
1953 qinx, rx_context_desc, prx_ring->cur_rx,
1954 rx_context_desc->rdes0, rx_context_desc->rdes1,
1955 rx_context_desc->rdes2, rx_context_desc->rdes3);
1957 /* check rx tstamp status, polling a bounded number of times */
1958 for (retry = 0; retry < 10; retry++) {
1959 ret = hw_if->get_rx_tstamp_status(rx_context_desc);
1961 /* time stamp is valid */
1963 } else if (ret == 0) {
1964 pr_err("Device has not yet updated the context "
1965 "desc to hold Rx time stamp(retry = %d)\n",
1969 ("Error: Rx time stamp is corrupted(retry = %d)\n",
/* All retries exhausted: undo the ring advance so the context
 * descriptor is revisited on a later pass. */
1976 pr_err("Device has not yet updated the context "
1977 "desc to hold Rx time stamp(retry = %d)\n", retry);
1978 prx_ring->dirty_rx--;
1979 DECR_RX_DESC_INDEX(prx_ring->cur_rx);
1983 pdata->xstats.rx_timestamp_captured_n++;
1984 /* get valid tstamp */
1985 ns = hw_if->get_rx_tstamp(rx_context_desc);
/* Hand the nanosecond timestamp to the stack via skb hwtstamps. */
1987 shhwtstamp = skb_hwtstamps(skb);
1988 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
1989 shhwtstamp->hwtstamp = ns_to_ktime(ns);
1991 DBGPR_PTP("<--eqos_get_rx_hwtstamp\n");
1997 * \brief API to get tx time stamp value.
1999 * \details This function will read timestamp from the descriptor
2000 * and pass it to stack and also perform some sanity checks.
2002 * \param[in] pdata - pointer to private data structure.
2003 * \param[in] txdesc - pointer to transmit descriptor structure.
2004 * \param[in] skb - pointer to sk_buff structure.
2008 * \retval 1 if time stamp is taken
2009 * \retval 0 if time stamp in not taken/valid
/*
 * eqos_get_tx_hwtstamp - fetch a TX hardware timestamp and pass it up.
 *
 * Depending on the "drop TX status" mode, reads the timestamp either
 * from the transmit descriptor or from the MAC timestamp registers, then
 * delivers it to the stack with skb_tstamp_tx().
 */
2012 static unsigned int eqos_get_tx_hwtstamp(struct eqos_prv_data *pdata,
2013 struct s_tx_desc *txdesc,
2014 struct sk_buff *skb)
2016 struct hw_if_struct *hw_if = &(pdata->hw_if);
2017 struct skb_shared_hwtstamps shhwtstamp;
2020 DBGPR_PTP("-->eqos_get_tx_hwtstamp\n");
2022 if (hw_if->drop_tx_status_enabled() == 0) {
2023 /* check tx tstamp status */
2024 if (!hw_if->get_tx_tstamp_status(txdesc)) {
2026 ("tx timestamp is not captured for this packet\n");
2030 /* get the valid tstamp from the descriptor */
2031 ns = hw_if->get_tx_tstamp(txdesc);
2033 /* drop tx status mode is enabled, hence read time
2034 * stamp from register instead of descriptor */
2036 /* check tx tstamp status */
2037 if (!hw_if->get_tx_tstamp_status_via_reg()) {
2039 ("tx timestamp is not captured for this packet\n");
2043 /* get the valid tstamp from the MAC register */
2044 ns = hw_if->get_tx_tstamp_via_reg();
2047 pdata->xstats.tx_timestamp_captured_n++;
2048 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2049 shhwtstamp.hwtstamp = ns_to_ktime(ns);
2050 /* pass tstamp to stack */
2051 skb_tstamp_tx(skb, &shhwtstamp);
2053 DBGPR_PTP("<--eqos_get_tx_hwtstamp\n");
2059 * \brief API to update the tx status.
2061 * \details This function is called in isr handler once after getting
2062 * transmit complete interrupt to update the transmited packet status
2063 * and it does some house keeping work like updating the
2064 * private data structure variables.
2066 * \param[in] dev - pointer to net_device structure
2067 * \param[in] pdata - pointer to private data structure.
/*
 * process_tx_completions - reclaim completed TX descriptors on one channel.
 *
 * Walks the ring from dirty_tx while packets are queued, stopping at the
 * first descriptor the hardware still owns.  For each completed last
 * segment it harvests the TX timestamp (if in progress), folds HW error
 * bits into netdev stats, frees the software context, resets the
 * descriptor, and finally rewakes the subqueue / re-arms EEE.
 * Runs under the per-channel chan_tx_lock.
 *
 * NOTE(review): excerpt — some closing braces/branches are elided.
 */
2072 static void process_tx_completions(struct net_device *dev,
2073 struct eqos_prv_data *pdata, UINT qinx)
2075 struct tx_ring *ptx_ring =
2076 GET_TX_WRAPPER_DESC(qinx);
2077 struct s_tx_desc *ptx_desc = NULL;
2078 struct tx_swcx_desc *ptx_swcx_desc = NULL;
2079 struct hw_if_struct *hw_if = &(pdata->hw_if);
2080 struct desc_if_struct *desc_if = &(pdata->desc_if);
2081 int err_incremented;
2082 unsigned int tstamp_taken = 0;
2084 pr_debug("-->%s(): ptx_ring->tx_pkt_queued = %d"
2085 " dirty_tx = %d, qinx = %u\n",
2087 ptx_ring->tx_pkt_queued, ptx_ring->dirty_tx, qinx);
2089 spin_lock(&pdata->chinfo[qinx].chan_tx_lock);
2091 pdata->xstats.tx_clean_n[qinx]++;
2092 while (ptx_ring->tx_pkt_queued > 0) {
2093 ptx_desc = GET_TX_DESC_PTR(qinx, ptx_ring->dirty_tx);
2094 ptx_swcx_desc = GET_TX_BUF_PTR(qinx, ptx_ring->dirty_tx);
/* Stop at the first descriptor the DMA has not completed yet. */
2097 if (!hw_if->tx_complete(ptx_desc))
2100 #ifdef EQOS_ENABLE_TX_DESC_DUMP
2101 dump_tx_desc(pdata, ptx_ring->dirty_tx, ptx_ring->dirty_tx,
2105 /* update the tx error if any by looking at last segment
2106 * for NORMAL descriptors
2108 if ((hw_if->get_tx_desc_ls(ptx_desc)) &&
2109 !(hw_if->get_tx_desc_ctxt(ptx_desc))) {
2110 if (ptx_swcx_desc->skb == NULL) {
2111 dev_err(&pdata->pdev->dev,
2112 "NULL SKB in process_tx_completions()\n");
2114 /* check whether skb support hw tstamp */
2115 if ((pdata->hw_feat.tsstssel) &&
2116 (skb_shinfo(ptx_swcx_desc->skb)->
2117 tx_flags & SKBTX_IN_PROGRESS)) {
2119 eqos_get_tx_hwtstamp(pdata, ptx_desc,
2120 ptx_swcx_desc->skb);
2123 ("passed tx timestamp to stack[qinx = %d, dirty_tx = %d]\n",
2124 qinx, ptx_ring->dirty_tx);
/* Fold optional HW error callbacks into netdev stats; each hook
 * pointer is checked before use. */
2128 err_incremented = 0;
2129 if (hw_if->tx_window_error) {
2130 if (hw_if->tx_window_error(ptx_desc)) {
2131 err_incremented = 1;
2132 dev->stats.tx_window_errors++;
2135 if (hw_if->tx_aborted_error) {
2136 if (hw_if->tx_aborted_error(ptx_desc)) {
2137 err_incremented = 1;
2138 dev->stats.tx_aborted_errors++;
2139 if (hw_if->tx_handle_aborted_error)
2141 tx_handle_aborted_error
2145 if (hw_if->tx_carrier_lost_error) {
2146 if (hw_if->tx_carrier_lost_error(ptx_desc)) {
2147 err_incremented = 1;
2148 dev->stats.tx_carrier_errors++;
2151 if (hw_if->tx_fifo_underrun) {
2152 if (hw_if->tx_fifo_underrun(ptx_desc)) {
2153 err_incremented = 1;
2154 dev->stats.tx_fifo_errors++;
2155 if (hw_if->tx_update_fifo_threshold)
2157 tx_update_fifo_threshold
2161 if (hw_if->tx_get_collision_count)
2162 dev->stats.collisions +=
2163 hw_if->tx_get_collision_count(ptx_desc);
2165 if (err_incremented == 1)
2166 dev->stats.tx_errors++;
2168 pdata->xstats.q_tx_pkt_n[qinx]++;
2169 pdata->xstats.tx_pkt_n++;
2170 dev->stats.tx_packets++;
2172 dev->stats.tx_bytes += ptx_swcx_desc->len;
2173 desc_if->tx_swcx_free(pdata, ptx_swcx_desc);
2175 /* reset the descriptor so that driver/host can reuse it */
2176 hw_if->tx_desc_reset(ptx_ring->dirty_tx, pdata, qinx);
2178 INCR_TX_DESC_INDEX(ptx_ring->dirty_tx, 1);
2179 ptx_ring->free_desc_cnt++;
2180 ptx_ring->tx_pkt_queued--;
/* Restart the subqueue if xmit previously stopped it and space freed. */
2183 if ((ptx_ring->queue_stopped == 1) && (ptx_ring->free_desc_cnt > 0)) {
2184 ptx_ring->queue_stopped = 0;
2185 netif_wake_subqueue(dev, qinx);
/* Re-enter EEE LPI after cleaning, unless HW automates LPI entry. */
2188 if ((pdata->eee_enabled) && (!pdata->tx_path_in_lpi_mode) &&
2189 (!pdata->use_lpi_tx_automate)) {
2190 eqos_enable_eee_mode(pdata);
2191 mod_timer(&pdata->eee_ctrl_timer,
2192 EQOS_LPI_TIMER(EQOS_DEFAULT_LPI_TIMER));
2195 spin_unlock(&pdata->chinfo[qinx].chan_tx_lock);
2197 pr_debug("<--%s(): ptx_ring->tx_pkt_queued = %d\n",
2198 __func__, ptx_ring->tx_pkt_queued);
2201 #ifdef YDEBUG_FILTER
2202 static void eqos_check_rx_filter_status(struct s_rx_desc *prx_desc)
2204 u32 rdes2 = prx_desc->rdes2;
2205 u32 rdes3 = prx_desc->rdes3;
2207 /* Receive Status rdes2 Valid ? */
2208 if ((rdes3 & 0x8000000) == 0x8000000) {
2209 if ((rdes2 & 0x400) == 0x400)
2210 pr_err("ARP pkt received\n");
2211 if ((rdes2 & 0x800) == 0x800)
2212 pr_err("ARP reply not generated\n");
2213 if ((rdes2 & 0x8000) == 0x8000)
2214 pr_err("VLAN pkt passed VLAN filter\n");
2215 if ((rdes2 & 0x10000) == 0x10000)
2216 pr_err("SA Address filter fail\n");
2217 if ((rdes2 & 0x20000) == 0x20000)
2218 pr_err("DA Addess filter fail\n");
2219 if ((rdes2 & 0x40000) == 0x40000)
2221 ("pkt passed the HASH filter in MAC and HASH value = %#x\n",
2222 (rdes2 >> 19) & 0xff);
2223 if ((rdes2 & 0x8000000) == 0x8000000)
2224 pr_err("L3 filter(%d) Match\n", ((rdes2 >> 29) & 0x7));
2225 if ((rdes2 & 0x10000000) == 0x10000000)
2226 pr_err("L4 filter(%d) Match\n", ((rdes2 >> 29) & 0x7));
2229 #endif /* YDEBUG_FILTER */
2231 /* pass skb to upper layer */
/*
 * eqos_receive_skb - deliver a received skb to the network stack.
 *
 * Records the RX queue, resolves the protocol, then hands the skb to
 * GRO, LRO (only for frames with verified checksum) or plain
 * netif_receive_skb(), depending on dev->features.  LRO delivery marks
 * the queue for a later lro_flush.
 */
2232 static void eqos_receive_skb(struct eqos_prv_data *pdata,
2233 struct net_device *dev, struct sk_buff *skb,
2236 struct eqos_rx_queue *rx_queue = GET_RX_QUEUE_PTR(qinx);
2238 skb_record_rx_queue(skb, qinx);
2240 skb->protocol = eth_type_trans(skb, dev);
2242 if (dev->features & NETIF_F_GRO) {
2243 napi_gro_receive(&rx_queue->napi, skb);
/* LRO path requires the checksum to already be verified by HW. */
2244 } else if ((dev->features & NETIF_F_LRO) &&
2245 (skb->ip_summed == CHECKSUM_UNNECESSARY)) {
2246 lro_receive_skb(&rx_queue->lro_mgr, skb, (void *)pdata);
2247 rx_queue->lro_flush_needed = 1;
2249 netif_receive_skb(skb);
2253 /* Receive Checksum Offload configuration */
/*
 * eqos_config_rx_csum - set skb->ip_summed from HW checksum status.
 *
 * Defaults to CHECKSUM_NONE; upgrades to CHECKSUM_UNNECESSARY only when
 * RX checksum offload is enabled, rdes1 status is valid (rdes3.RS1V) and
 * the relevant rdes1 error bits (mask 0xC8) are clear.
 */
2254 static inline void eqos_config_rx_csum(struct eqos_prv_data *pdata,
2255 struct sk_buff *skb,
2256 struct s_rx_desc *prx_desc)
2260 skb->ip_summed = CHECKSUM_NONE;
2262 if ((pdata->dev_state & NETIF_F_RXCSUM) == NETIF_F_RXCSUM) {
2263 /* Receive Status rdes1 Valid ? */
2264 if ((prx_desc->rdes3 & EQOS_RDESC3_RS1V)) {
2265 /* check(rdes1.IPCE bit) whether device has done csum correctly or not */
2266 RX_NORMAL_DESC_RDES1_RD(prx_desc->rdes1, rdes1);
2267 if ((rdes1 & 0xC8) == 0x0)
2268 skb->ip_summed = CHECKSUM_UNNECESSARY; /* csum done by device */
/*
 * eqos_get_rx_vlan - extract a HW-stripped VLAN tag into the skb.
 *
 * When CTAG RX offload is enabled and rdes0 status is valid
 * (rdes3.RS0V), checks the length/type field for a (double) VLAN-tagged
 * frame, pulls the 16-bit TCI from rdes0 and attaches it with
 * __vlan_hwaccel_put_tag().
 */
2273 static inline void eqos_get_rx_vlan(struct eqos_prv_data *pdata,
2274 struct sk_buff *skb,
2275 struct s_rx_desc *prx_desc)
2277 USHORT vlan_tag = 0;
2279 if ((pdata->dev_state & NETIF_F_HW_VLAN_CTAG_RX) ==
2280 NETIF_F_HW_VLAN_CTAG_RX) {
2281 /* Receive Status rdes0 Valid ? */
2282 if ((prx_desc->rdes3 & EQOS_RDESC3_RS0V)) {
2283 /* device received frame with VLAN Tag or
2284 * double VLAN Tag ? */
2285 if (((prx_desc->rdes3 & EQOS_RDESC3_LT) ==
2287 ((prx_desc->rdes3 & EQOS_RDESC3_LT) ==
2289 vlan_tag = prx_desc->rdes0 & 0xffff;
2290 /* insert VLAN tag into skb */
2291 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2293 pdata->xstats.rx_vlan_pkt_n++;
2299 /* This api check for payload type and returns
2300 * 1 if payload load is TCP else returns 0;
2302 static int eqos_check_for_tcp_payload(struct s_rx_desc *rxdesc)
2307 if (rxdesc->rdes3 & EQOS_RDESC3_RS1V) {
2308 pt_type = rxdesc->rdes1 & EQOS_RDESC1_PT;
2309 if (pt_type == EQOS_RDESC1_PT_TCP)
2317 * \brief API to pass the Rx packets to stack if default mode
2320 * \details This function is invoked by main NAPI function in default
2321 * Rx mode. This function checks the
2322 * device descriptor for the packets and passes it to stack if any packets
2323 * are received by device.
2325 * \param[in] pdata - pointer to private data structure.
2326 * \param[in] quota - maximum no. of packets that we are allowed to pass
2327 * to into the kernel.
2328 * \param[in] qinx - DMA channel/queue no. to be checked for packet.
2332 * \retval number of packets received.
/*
 * process_rx_completions - default-mode NAPI RX harvesting for a channel.
 *
 * Walks the RX ring while descriptors are host-owned and quota remains:
 * unmaps the buffer, validates error/last-descriptor bits, applies
 * copybreak for small frames, fills in checksum/VLAN/timestamp metadata
 * and passes good frames up via eqos_receive_skb().  Bad frames bump
 * rx_errors; buffers are recycled and the ring is replenished via
 * realloc_skb() once dirty_rx crosses the threshold.
 * Returns the number of packets delivered.
 *
 * NOTE(review): excerpt — several lines (some braces, else-branches and
 * macro arguments) are elided.
 */
2335 static int process_rx_completions(struct eqos_prv_data *pdata,
2336 int quota, UINT qinx)
2338 struct rx_ring *prx_ring =
2339 GET_RX_WRAPPER_DESC(qinx);
2340 struct net_device *dev = pdata->dev;
2341 struct desc_if_struct *desc_if = &pdata->desc_if;
2342 struct hw_if_struct *hw_if = &(pdata->hw_if);
2343 struct sk_buff *skb = NULL;
2345 struct rx_swcx_desc *prx_swcx_desc = NULL;
2346 struct s_rx_desc *prx_desc = NULL;
2348 UINT err_bits = EQOS_RDESC3_ES_BITS;
2349 u32 sw_cur_rx_desc_addr = 0;
2350 u32 hw_cur_rx_desc_addr = 0;
2354 pr_debug("-->%s(): qinx = %u, quota = %d\n", __func__, qinx, quota);
2356 hw_cur_rx_desc_addr = prx_ring->hw_last_rx_desc_addr;
2357 while (received < quota) {
2358 prx_swcx_desc = GET_RX_BUF_PTR(qinx, prx_ring->cur_rx);
2359 prx_desc = GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
2361 sw_cur_rx_desc_addr =
2362 GET_RX_DESC_DMA_ADDR(qinx, prx_ring->cur_rx);
2364 /* check for data availability: descriptor released by DMA
 * (OWN clear) and an skb attached */
2365 if (!(prx_desc->rdes3 & EQOS_RDESC3_OWN) &&
2366 prx_swcx_desc->skb) {
/* Re-read the HW tail pointer when SW catches up with it, to
 * confirm the descriptor really was handed back. */
2367 if (hw_cur_rx_desc_addr == sw_cur_rx_desc_addr) {
2369 prx_ring->hw_last_rx_desc_addr);
2370 if (prx_ring->hw_last_rx_desc_addr ==
2371 hw_cur_rx_desc_addr)
2373 hw_cur_rx_desc_addr =
2374 prx_ring->hw_last_rx_desc_addr;
2376 #ifdef EQOS_ENABLE_RX_DESC_DUMP
2377 dump_rx_desc(qinx, prx_desc, prx_ring->cur_rx);
2379 /* take ownership of the skb; detach it from the ring slot */
2380 skb = prx_swcx_desc->skb;
2381 prx_swcx_desc->skb = NULL;
2383 dma_unmap_single(&pdata->pdev->dev, prx_swcx_desc->dma,
2384 pdata->rx_buffer_len,
2386 prx_swcx_desc->dma = 0;
2388 /* get the packet length */
2389 pkt_len = (prx_desc->rdes3 & EQOS_RDESC3_PL);
2391 #ifdef EQOS_ENABLE_RX_PKT_DUMP
2392 print_pkt(skb, pkt_len, 0, (prx_ring->cur_rx));
2395 #ifdef ENABLE_CHANNEL_DATA_CHECK
2396 check_channel_data(skb, qinx, 1);
2399 /* check for bad/oversized packet,
2400 * error is valid only for last descriptor
2401 * (OWN + LD bit set).
/* On unit-FPGA platforms only CRC/overflow count as errors. */
2403 if (tegra_platform_is_unit_fpga())
2404 err_bits = EQOS_RDESC3_CRC | EQOS_RDESC3_OF;
2406 if (!(prx_desc->rdes3 & err_bits) &&
2407 (prx_desc->rdes3 & EQOS_RDESC3_LD)) {
2408 /* pkt_len = pkt_len - 4; *//* CRC stripping */
2410 /* code added for copybreak, this should improve
2411 * performance for small pkts with large amount
2412 * of reassembly being done in the stack
2414 if (pkt_len < EQOS_COPYBREAK_DEFAULT) {
2415 struct sk_buff *new_skb =
2416 netdev_alloc_skb_ip_align(dev,
2419 skb_copy_to_linear_data_offset
2420 (new_skb, -NET_IP_ALIGN,
2421 (skb->data - NET_IP_ALIGN),
2422 (pkt_len + NET_IP_ALIGN));
2423 /* recycle actual desc skb */
2424 prx_swcx_desc->skb = skb;
2427 /* just continue the old skb */
2430 skb_put(skb, pkt_len);
2432 eqos_config_rx_csum(pdata, skb, prx_desc);
2434 #ifdef EQOS_ENABLE_VLAN_TAG
2435 eqos_get_rx_vlan(pdata, skb, prx_desc);
2438 #ifdef YDEBUG_FILTER
2439 eqos_check_rx_filter_status(prx_desc);
2442 if (pdata->hw_feat.tsstssel &&
2443 pdata->hwts_rx_en &&
2445 rx_tstamp_available(prx_desc)) {
2446 /* get rx tstamp if available */
2447 ret = eqos_get_rx_hwtstamp(pdata, skb,
2451 /* device has not yet updated
2452 * the CONTEXT desc to hold the
2453 * time stamp, hence delay the
/* Timestamp not ready: re-attach and re-map the skb so
 * this descriptor is revisited later. */
2456 prx_swcx_desc->skb = skb;
2457 prx_swcx_desc->dma =
2458 dma_map_single(&pdata->
2461 pdata->rx_buffer_len,
2464 if (dma_mapping_error
2466 prx_swcx_desc->dma))
2468 ("failed to do the RX dma map\n");
2469 goto rx_tstmp_failed;
2473 if (!(dev->features & NETIF_F_GRO) &&
2474 (dev->features & NETIF_F_LRO)) {
2476 eqos_check_for_tcp_payload
2480 dev->last_rx = jiffies;
2481 /* update the statistics */
2482 dev->stats.rx_packets++;
2483 dev->stats.rx_bytes += skb->len;
2484 eqos_receive_skb(pdata, dev, skb, qinx);
/* Error path: descriptor reported an error or the frame
 * spans descriptors; recycle the skb and count the error. */
2487 dump_rx_desc(qinx, prx_desc,
2489 if (!(prx_desc->rdes3 & EQOS_RDESC3_LD))
2490 pr_debug("Received oversized pkt,"
2491 "spanned across multiple desc\n");
2494 prx_swcx_desc->skb = skb;
2495 dev->stats.rx_errors++;
2496 eqos_update_rx_errors(dev,
/* Replenish ring buffers once enough slots have been consumed. */
2500 prx_ring->dirty_rx++;
2501 if (prx_ring->dirty_rx >=
2502 prx_ring->skb_realloc_threshold)
2503 desc_if->realloc_skb(pdata, qinx);
2505 INCR_RX_DESC_INDEX(prx_ring->cur_rx, 1);
2507 /* no more data to read */
/* Final replenish of any remaining dirty slots before returning. */
2514 if (prx_ring->dirty_rx)
2515 desc_if->realloc_skb(pdata, qinx);
2517 pr_debug("<--%s(): received = %d, qinx=%d\n", __func__, received, qinx);
2523 * \brief API to update the rx status.
2525 * \details This function is called in poll function to update the
2526 * status of received packets.
2528 * \param[in] dev - pointer to net_device structure.
2529 * \param[in] rx_status - value of received packet status.
2534 void eqos_update_rx_errors(struct net_device *dev, unsigned int rx_status)
2536 pr_debug("-->eqos_update_rx_errors\n");
2538 /* received pkt with crc error */
2539 if ((rx_status & 0x1000000))
2540 dev->stats.rx_crc_errors++;
2542 /* received frame alignment */
2543 if ((rx_status & 0x100000))
2544 dev->stats.rx_frame_errors++;
2546 /* receiver fifo overrun */
2547 if ((rx_status & 0x200000))
2548 dev->stats.rx_fifo_errors++;
2550 pr_debug("<--eqos_update_rx_errors\n");
/*
 * handle_txrx_completions - per-channel completion work for one NAPI poll.
 *
 * Reclaims TX completions, then processes up to the channel's NAPI quota
 * of RX packets via pdata->process_rx_completions, updates RX counters
 * and flushes LRO if any packet went through the LRO path.
 * Returns the number of RX packets processed.
 */
2553 static int handle_txrx_completions(struct eqos_prv_data *pdata, int qinx)
2555 struct eqos_rx_queue *rx_queue;
2557 int budget = pdata->dt_cfg.chan_napi_quota[qinx];
2559 pr_debug("-->%s(): chan=%d\n", __func__, qinx);
2561 rx_queue = GET_RX_QUEUE_PTR(qinx);
2563 /* check for tx descriptor status */
2564 process_tx_completions(pdata->dev, pdata, qinx);
2565 rx_queue->lro_flush_needed = 0;
/* RX handler is indirect: pdata->process_rx_completions. */
2567 received = pdata->process_rx_completions(pdata, budget, qinx);
2569 pdata->xstats.rx_pkt_n += received;
2570 pdata->xstats.q_rx_pkt_n[qinx] += received;
2572 if (rx_queue->lro_flush_needed)
2573 lro_flush_all(&rx_queue->lro_mgr);
2575 pr_debug("<--%s():\n", __func__);
/*
 * do_txrx_post_processing - finish a NAPI poll round.
 *
 * If fewer packets than the budget were processed, polling is complete:
 * call napi_complete()/__napi_complete() and re-enable the channel's
 * interrupts.  The non-GRO path completes under pdata->lock.
 */
2580 static void do_txrx_post_processing(struct eqos_prv_data *pdata,
2581 struct napi_struct *napi,
2582 int received, int budget)
2584 struct eqos_rx_queue *rx_queue;
2586 struct hw_if_struct *hw_if = &(pdata->hw_if);
2588 pr_debug("-->%s():\n", __func__);
2590 /* If we processed all pkts, we are done;
2591 * tell the kernel & re-enable interrupt
2593 if (received < budget) {
2594 rx_queue = container_of(napi, struct eqos_rx_queue, napi);
2595 qinx = rx_queue->chan_num;
2596 hw_if = &pdata->hw_if;
2597 if (pdata->dev->features & NETIF_F_GRO) {
2598 /* to turn off polling */
2599 napi_complete(napi);
2601 /* Enable RX interrupt */
2602 hw_if->enable_chan_interrupts(qinx, pdata);
/* Non-GRO path: __napi_complete requires caller-held protection. */
2605 spin_lock(&pdata->lock);
2606 __napi_complete(napi);
2608 /* Enable RX interrupt */
2609 hw_if->enable_chan_interrupts(qinx, pdata);
2611 spin_unlock(&pdata->lock);
2614 pr_debug("<--%s():\n", __func__);
/*
 * eqos_napi_mq - per-channel NAPI poll callback (multi-queue).
 *
 * Resolves the Rx queue/channel from the napi instance, services Tx and Rx
 * completions for that channel, then performs post-processing (NAPI
 * completion and interrupt re-enable) against the per-channel quota.
 *
 * NOTE(review): the kernel-supplied 'budget' argument is not passed to
 * handle_txrx_completions(); the device-tree chan_napi_quota is used for
 * both processing and the completion check instead — confirm this is
 * intentional and that the quota never exceeds 'budget'.
 *
 * NOTE(review): declaration of 'received' and the return statement are on
 * lines elided from this view.
 */
2618 int eqos_napi_mq(struct napi_struct *napi, int budget)
2620 struct eqos_rx_queue *rx_queue =
2621 container_of(napi, struct eqos_rx_queue, napi);
2622 struct eqos_prv_data *pdata = rx_queue->pdata;
2624 int qinx = rx_queue->chan_num;
2627 pr_debug("-->%s(): budget = %d\n", __func__, budget);
2629 pdata->xstats.napi_poll_n++;
2630 received = handle_txrx_completions(pdata, qinx);
2632 do_txrx_post_processing(pdata, napi, received,
2633 pdata->dt_cfg.chan_napi_quota[qinx]);
2635 pr_debug("<--%s()\n", __func__);
2641 * \brief API to return the device/interface status.
2643 * \details The get_stats function is called whenever an application needs to
2644 * get statistics for the interface. For example, this happend when ifconfig
2645  * get statistics for the interface. For example, this happens when ifconfig
 * or netstat -i is run.
2647 * \param[in] dev - pointer to net_device structure.
2649 * \return net_device_stats structure
2651 * \retval net_device_stats - returns pointer to net_device_stats structure.
/* ndo_get_stats hook: returns the interface statistics block.
 * (Function body is elided from this view of the file.) */
2654 static struct net_device_stats *eqos_get_stats(struct net_device *dev)
2662 * \brief User defined parameter setting API
2664 * \details This function is invoked by kernel to update the device
2665 * configuration to new features. This function supports enabling and
2666 * disabling of TX and RX csum features.
2668 * \param[in] dev – pointer to net device structure.
2669 * \param[in] features – device feature to be enabled/disabled.
/*
 * eqos_set_features - ndo_set_features hook.
 *
 * Applies feature-flag transitions for Rx checksum offload and (when
 * EQOS_ENABLE_VLAN_TAG is built in) Rx VLAN stripping and Tx VLAN insertion.
 * pdata->dev_state mirrors the currently-programmed NETIF_F_* bits so that
 * hardware is only touched on an actual state change.
 */
2676 static int eqos_set_features(struct net_device *dev, netdev_features_t features)
2678 struct eqos_prv_data *pdata = netdev_priv(dev);
2679 struct hw_if_struct *hw_if = &(pdata->hw_if);
2680 UINT dev_rxcsum_enable;
2681 #ifdef EQOS_ENABLE_VLAN_TAG
2682 UINT dev_rxvlan_enable, dev_txvlan_enable;
2684 pr_debug("-->eqos_set_features\n");
/* Rx checksum offload is only touched when the hardware supports it */
2686 if (pdata->hw_feat.rx_coe_sel) {
2687 dev_rxcsum_enable = !!(pdata->dev_state & NETIF_F_RXCSUM);
2689 if (((features & NETIF_F_RXCSUM) == NETIF_F_RXCSUM)
2690 && !dev_rxcsum_enable) {
2691 hw_if->enable_rx_csum();
2692 pdata->dev_state |= NETIF_F_RXCSUM;
2693 pr_err("State change - rxcsum enable\n");
2694 } else if (((features & NETIF_F_RXCSUM) == 0)
2695 && dev_rxcsum_enable) {
2696 hw_if->disable_rx_csum();
2697 pdata->dev_state &= ~NETIF_F_RXCSUM;
2698 pr_err("State change - rxcsum disable\n");
2701 #ifdef EQOS_ENABLE_VLAN_TAG
2702 dev_rxvlan_enable = !!(pdata->dev_state & NETIF_F_HW_VLAN_CTAG_RX);
2703 if (((features & NETIF_F_HW_VLAN_CTAG_RX) == NETIF_F_HW_VLAN_CTAG_RX)
2704 && !dev_rxvlan_enable) {
2705 pdata->dev_state |= NETIF_F_HW_VLAN_CTAG_RX;
/* NOTE(review): the 'hw_if->' receiver for this call appears to be on an
 * elided continuation line (compare the disable path below) — confirm. */
2707 config_rx_outer_vlan_stripping(EQOS_RX_VLAN_STRIP_ALWAYS);
2708 pr_err("State change - rxvlan enable\n");
2709 } else if (((features & NETIF_F_HW_VLAN_CTAG_RX) == 0) &&
2710 dev_rxvlan_enable) {
2711 pdata->dev_state &= ~NETIF_F_HW_VLAN_CTAG_RX;
2712 hw_if->config_rx_outer_vlan_stripping(EQOS_RX_NO_VLAN_STRIP);
2713 pr_err("State change - rxvlan disable\n");
/* Tx VLAN insertion only updates driver state; no hw_if call is visible here */
2716 dev_txvlan_enable = !!(pdata->dev_state & NETIF_F_HW_VLAN_CTAG_TX);
2717 if (((features & NETIF_F_HW_VLAN_CTAG_TX) == NETIF_F_HW_VLAN_CTAG_TX)
2718 && !dev_txvlan_enable) {
2719 pdata->dev_state |= NETIF_F_HW_VLAN_CTAG_TX;
2720 pr_err("State change - txvlan enable\n");
2721 } else if (((features & NETIF_F_HW_VLAN_CTAG_TX) == 0) &&
2722 dev_txvlan_enable) {
2723 pdata->dev_state &= ~NETIF_F_HW_VLAN_CTAG_TX;
2724 pr_err("State change - txvlan disable\n");
2726 #endif /* EQOS_ENABLE_VLAN_TAG */
2728 pr_debug("<--eqos_set_features\n");
2734 * \details This function is invoked by ioctl function when user issues
2735 * an ioctl command to enable/disable L3/L4 filtering.
2737 * \param[in] dev – pointer to net device structure.
2738 * \param[in] flags – flag to indicate whether L3/L4 filtering to be
2743 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_l3_l4_filtering - globally enable or disable L3/L4 filtering.
 *
 * 'flags' non-zero enables, zero disables.  Requests that match the current
 * state are rejected (the early-return lines after each pr_err are elided
 * from this view).  pdata->l3_l4_filter caches the programmed state.
 */
2745 static int eqos_config_l3_l4_filtering(struct net_device *dev,
2748 struct eqos_prv_data *pdata = netdev_priv(dev);
2749 struct hw_if_struct *hw_if = &(pdata->hw_if);
2752 DBGPR_FILTER("-->eqos_config_l3_l4_filtering\n");
2754 if (flags && pdata->l3_l4_filter) {
2755 pr_err("L3/L4 filtering is already enabled\n");
2759 if (!flags && !pdata->l3_l4_filter) {
2760 pr_err("L3/L4 filtering is already disabled\n");
/* normalize to 0/1 before caching and programming the MAC */
2764 pdata->l3_l4_filter = !!flags;
2765 hw_if->config_l3_l4_filter_enable(pdata->l3_l4_filter);
2767 DBGPR_FILTER("Succesfully %s L3/L4 filtering\n",
2768 (flags ? "ENABLED" : "DISABLED"));
2770 DBGPR_FILTER("<--eqos_config_l3_l4_filtering\n");
2776 * \details This function is invoked by ioctl function when user issues an
2777 * ioctl command to configure L3(IPv4) filtering. This function does following,
2778 * - enable/disable IPv4 filtering.
2779 * - select source/destination address matching.
2780 * - select perfect/inverse matching.
2781 * - Update the IPv4 address into MAC register.
2783 * \param[in] dev – pointer to net device structure.
2784 * \param[in] req – pointer to IOCTL specific structure.
2788 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_ip4_filters - program one IPv4 (L3) filter from an ioctl request.
 *
 * Copies the filter descriptor from user space, validates the filter index
 * against the hardware capability count, enables the global L3/L4 engine if
 * it was off, then programs match mode and the IPv4 address register
 * (addr0 = source match, addr1 = destination match).
 *
 * Returns EQOS_NO_HW_SUPPORT when the hardware lacks L3/L4 filters or the
 * index is out of range; the copy_from_user failure return is on an elided
 * line.
 */
2790 static int eqos_config_ip4_filters(struct net_device *dev,
2791 struct ifr_data_struct *req)
2793 struct eqos_prv_data *pdata = netdev_priv(dev);
2794 struct hw_if_struct *hw_if = &(pdata->hw_if);
2795 struct eqos_l3_l4_filter *u_l3_filter =
2796 (struct eqos_l3_l4_filter *)req->ptr;
2797 struct eqos_l3_l4_filter l_l3_filter;
2800 DBGPR_FILTER("-->eqos_config_ip4_filters\n");
2802 if (pdata->hw_feat.l3l4_filter_num == 0)
2803 return EQOS_NO_HW_SUPPORT;
2805 if (copy_from_user(&l_l3_filter, u_l3_filter,
2806 sizeof(struct eqos_l3_l4_filter)))
/* filter_no is a 0-based index; reject anything beyond the HW count */
2809 if ((l_l3_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
2810 pr_err("%d filter is not supported in the HW\n",
2811 l_l3_filter.filter_no);
2812 return EQOS_NO_HW_SUPPORT;
/* lazily switch on the global L3/L4 filter engine */
2815 if (!pdata->l3_l4_filter) {
2816 hw_if->config_l3_l4_filter_enable(1);
2817 pdata->l3_l4_filter = 1;
2820 /* configure the L3 filters */
2821 hw_if->config_l3_filters(l_l3_filter.filter_no,
2822 l_l3_filter.filter_enb_dis, 0,
2823 l_l3_filter.src_dst_addr_match,
2824 l_l3_filter.perfect_inverse_match);
/* NOTE(review): an 'else' joining these two updates sits on an elided line;
 * addr0 is used for source match, addr1 for destination match. */
2826 if (!l_l3_filter.src_dst_addr_match)
2827 hw_if->update_ip4_addr0(l_l3_filter.filter_no,
2828 l_l3_filter.ip4_addr);
2830 hw_if->update_ip4_addr1(l_l3_filter.filter_no,
2831 l_l3_filter.ip4_addr);
2834 ("Successfully %s IPv4 %s %s addressing filtering on %d filter\n",
2835 (l_l3_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
2836 (l_l3_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
2837 (l_l3_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
2838 l_l3_filter.filter_no);
2840 DBGPR_FILTER("<--eqos_config_ip4_filters\n");
2846 * \details This function is invoked by ioctl function when user issues an
2847 * ioctl command to configure L3(IPv6) filtering. This function does following,
2848 * - enable/disable IPv6 filtering.
2849 * - select source/destination address matching.
2850 * - select perfect/inverse matching.
2851 * - Update the IPv6 address into MAC register.
2853 * \param[in] dev – pointer to net device structure.
2854 * \param[in] req – pointer to IOCTL specific structure.
2858 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_ip6_filters - program one IPv6 (L3) filter from an ioctl request.
 *
 * Mirrors eqos_config_ip4_filters() but passes ipv6=1 to
 * config_l3_filters() and programs the single IPv6 address register
 * (source/destination selection is done by the match-mode bits, not by
 * separate address registers).
 */
2860 static int eqos_config_ip6_filters(struct net_device *dev,
2861 struct ifr_data_struct *req)
2863 struct eqos_prv_data *pdata = netdev_priv(dev);
2864 struct hw_if_struct *hw_if = &(pdata->hw_if);
2865 struct eqos_l3_l4_filter *u_l3_filter =
2866 (struct eqos_l3_l4_filter *)req->ptr;
2867 struct eqos_l3_l4_filter l_l3_filter;
2870 DBGPR_FILTER("-->eqos_config_ip6_filters\n");
2872 if (pdata->hw_feat.l3l4_filter_num == 0)
2873 return EQOS_NO_HW_SUPPORT;
2875 if (copy_from_user(&l_l3_filter, u_l3_filter,
2876 sizeof(struct eqos_l3_l4_filter)))
/* filter_no is a 0-based index; reject anything beyond the HW count */
2879 if ((l_l3_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
2880 pr_err("%d filter is not supported in the HW\n",
2881 l_l3_filter.filter_no);
2882 return EQOS_NO_HW_SUPPORT;
/* lazily switch on the global L3/L4 filter engine */
2885 if (!pdata->l3_l4_filter) {
2886 hw_if->config_l3_l4_filter_enable(1);
2887 pdata->l3_l4_filter = 1;
2890 /* configure the L3 filters */
2891 hw_if->config_l3_filters(l_l3_filter.filter_no,
2892 l_l3_filter.filter_enb_dis, 1,
2893 l_l3_filter.src_dst_addr_match,
2894 l_l3_filter.perfect_inverse_match);
2896 hw_if->update_ip6_addr(l_l3_filter.filter_no, l_l3_filter.ip6_addr);
2899 ("Successfully %s IPv6 %s %s addressing filtering on %d filter\n",
2900 (l_l3_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
2901 (l_l3_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
2902 (l_l3_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
2903 l_l3_filter.filter_no);
2905 DBGPR_FILTER("<--eqos_config_ip6_filters\n");
2911 * \details This function is invoked by ioctl function when user issues an
2912 * ioctl command to configure L4(TCP/UDP) filtering. This function does following,
2913 * - enable/disable L4 filtering.
2914 * - select TCP/UDP filtering.
2915 * - select source/destination port matching.
2916 * - select perfect/inverse matching.
2917 * - Update the port number into MAC register.
2919 * \param[in] dev – pointer to net device structure.
2920 * \param[in] req – pointer to IOCTL specific structure.
2921 * \param[in] tcp_udp – flag to indicate TCP/UDP filtering.
2925 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_tcp_udp_filters - program one L4 (TCP/UDP port) filter.
 *
 * Copies the filter descriptor from user space, validates the filter index,
 * lazily enables the global L3/L4 engine, then programs the L4 match mode
 * and the source or destination port register.
 *
 * tcp_udp: 0 = TCP filtering, 1 = UDP filtering (passed through to
 * config_l4_filters on an elided argument line).
 */
2927 static int eqos_config_tcp_udp_filters(struct net_device *dev,
2928 struct ifr_data_struct *req, int tcp_udp)
2930 struct eqos_prv_data *pdata = netdev_priv(dev);
2931 struct hw_if_struct *hw_if = &(pdata->hw_if);
2932 struct eqos_l3_l4_filter *u_l4_filter =
2933 (struct eqos_l3_l4_filter *)req->ptr;
2934 struct eqos_l3_l4_filter l_l4_filter;
2937 DBGPR_FILTER("-->eqos_config_tcp_udp_filters\n");
2939 if (pdata->hw_feat.l3l4_filter_num == 0)
2940 return EQOS_NO_HW_SUPPORT;
2942 if (copy_from_user(&l_l4_filter, u_l4_filter,
2943 sizeof(struct eqos_l3_l4_filter)))
/* filter_no is a 0-based index; reject anything beyond the HW count */
2946 if ((l_l4_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
2947 pr_err("%d filter is not supported in the HW\n",
2948 l_l4_filter.filter_no);
2949 return EQOS_NO_HW_SUPPORT;
/* lazily switch on the global L3/L4 filter engine */
2952 if (!pdata->l3_l4_filter) {
2953 hw_if->config_l3_l4_filter_enable(1);
2954 pdata->l3_l4_filter = 1;
2957 /* configure the L4 filters */
2958 hw_if->config_l4_filters(l_l4_filter.filter_no,
2959 l_l4_filter.filter_enb_dis,
2961 l_l4_filter.src_dst_addr_match,
2962 l_l4_filter.perfect_inverse_match);
/* NOTE(review): an 'else' joining these two updates sits on an elided line;
 * destination port when src_dst_addr_match is set, source port otherwise. */
2964 if (l_l4_filter.src_dst_addr_match)
2965 hw_if->update_l4_da_port_no(l_l4_filter.filter_no,
2966 l_l4_filter.port_no);
2968 hw_if->update_l4_sa_port_no(l_l4_filter.filter_no,
2969 l_l4_filter.port_no);
2972 ("Successfully %s %s %s %s Port number filtering on %d filter\n",
2973 (l_l4_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
2974 (tcp_udp ? "UDP" : "TCP"),
2975 (l_l4_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
2976 (l_l4_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
2977 l_l4_filter.filter_no);
2979 DBGPR_FILTER("<--eqos_config_tcp_udp_filters\n");
2985 * \details This function is invoked by ioctl function when user issues an
2986  * ioctl command to configure VLAN filtering. This function does the following,
2987 * - enable/disable VLAN filtering.
2988 * - select perfect/hash filtering.
2990 * \param[in] dev – pointer to net device structure.
2991 * \param[in] req – pointer to IOCTL specific structure.
2995 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_vlan_filter - enable/disable VLAN filtering and select
 * perfect vs. hash matching from an ioctl request.
 *
 * Rejects hash filtering when the hardware lacks a VLAN hash table.
 * pdata->vlan_hash_filtering caches the selected matching mode.
 */
2997 static int eqos_config_vlan_filter(struct net_device *dev,
2998 struct ifr_data_struct *req)
3000 struct eqos_prv_data *pdata = netdev_priv(dev);
3001 struct hw_if_struct *hw_if = &(pdata->hw_if);
3002 struct eqos_vlan_filter *u_vlan_filter =
3003 (struct eqos_vlan_filter *)req->ptr;
3004 struct eqos_vlan_filter l_vlan_filter;
3007 DBGPR_FILTER("-->eqos_config_vlan_filter\n");
3009 if (copy_from_user(&l_vlan_filter, u_vlan_filter,
3010 sizeof(struct eqos_vlan_filter)))
/* hash matching requires the VLAN hash capability bit */
3013 if ((l_vlan_filter.perfect_hash) && (pdata->hw_feat.vlan_hash_en == 0)) {
3014 pr_err("VLAN HASH filtering is not supported\n")
3015 return EQOS_NO_HW_SUPPORT;
3018 /* configure the vlan filter */
3019 hw_if->config_vlan_filtering(l_vlan_filter.filter_enb_dis,
3020 l_vlan_filter.perfect_hash,
3021 l_vlan_filter.perfect_inverse_match);
3022 pdata->vlan_hash_filtering = l_vlan_filter.perfect_hash;
3024 DBGPR_FILTER("Successfully %s VLAN %s filtering and %s matching\n",
3025 (l_vlan_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
3026 (l_vlan_filter.perfect_hash ? "HASH" : "PERFECT"),
3028 perfect_inverse_match ? "INVERSE" : "PERFECT"));
3030 DBGPR_FILTER("<--eqos_config_vlan_filter\n");
3036 * \details This function is invoked by ioctl function when user issues an
3037 * ioctl command to enable/disable ARP offloading feature.
3039 * \param[in] dev – pointer to net device structure.
3040 * \param[in] req – pointer to IOCTL specific structure.
3044 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_arp_offload - enable/disable ARP offload from an ioctl request.
 *
 * req->flags selects enable/disable; the protected IPv4 address comes from
 * the user-supplied eqos_arp_offload struct.  Returns EQOS_NO_HW_SUPPORT
 * when the hardware lacks the ARP offload capability.
 *
 * NOTE(review): entry/exit and status traces use pr_err rather than
 * pr_debug — likely leftover debugging; consider downgrading.
 */
3046 static int eqos_config_arp_offload(struct net_device *dev,
3047 struct ifr_data_struct *req)
3049 struct eqos_prv_data *pdata = netdev_priv(dev);
3050 struct hw_if_struct *hw_if = &(pdata->hw_if);
3051 struct eqos_arp_offload *u_arp_offload =
3052 (struct eqos_arp_offload *)req->ptr;
3053 struct eqos_arp_offload l_arp_offload;
3056 pr_err("-->eqos_config_arp_offload\n");
3058 if (pdata->hw_feat.arp_offld_en == 0)
3059 return EQOS_NO_HW_SUPPORT;
3061 if (copy_from_user(&l_arp_offload, u_arp_offload,
3062 sizeof(struct eqos_arp_offload)))
3065 /* configure the L3 filters */
3066 hw_if->config_arp_offload(req->flags);
3067 hw_if->update_arp_offload_ip_addr(l_arp_offload.ip_addr);
/* cache the programmed enable/disable state */
3068 pdata->arp_offload = req->flags;
3070 pr_err("Successfully %s arp Offload\n",
3071 (req->flags ? "ENABLED" : "DISABLED"));
3073 pr_err("<--eqos_config_arp_offload\n");
3079 * \details This function is invoked by ioctl function when user issues an
3080 * ioctl command to configure L2 destination addressing filtering mode. This
3081  * function does the following,
3082 * - selects perfect/hash filtering.
3083 * - selects perfect/inverse matching.
3085 * \param[in] dev – pointer to net device structure.
3086 * \param[in] req – pointer to IOCTL specific structure.
3090 * \retval zero on success and -ve number on failure.
/*
 * eqos_confing_l2_da_filter - select L2 destination-address filtering mode.
 *
 * Chooses hash vs. perfect filtering (subject to hardware capability:
 * hash table size, or more than one MAC address register) and programs
 * perfect/inverse matching.  pdata->l2_filtering_mode caches the choice
 * (1 = hash, 0 = perfect).
 *
 * NOTE(review): the else-branches pairing each capability check with its
 * EQOS_NO_HW_SUPPORT assignment sit on elided lines — verify structure
 * against the full file.
 */
3092 static int eqos_confing_l2_da_filter(struct net_device *dev,
3093 struct ifr_data_struct *req)
3095 struct eqos_prv_data *pdata = netdev_priv(dev);
3096 struct hw_if_struct *hw_if = &(pdata->hw_if);
3097 struct eqos_l2_da_filter *u_l2_da_filter =
3098 (struct eqos_l2_da_filter *)req->ptr;
3099 struct eqos_l2_da_filter l_l2_da_filter;
3102 DBGPR_FILTER("-->eqos_confing_l2_da_filter\n");
3104 if (copy_from_user(&l_l2_da_filter, u_l2_da_filter,
3105 sizeof(struct eqos_l2_da_filter)))
3108 if (l_l2_da_filter.perfect_hash) {
3109 if (pdata->hw_feat.hash_tbl_sz > 0)
3110 pdata->l2_filtering_mode = 1;
3112 ret = EQOS_NO_HW_SUPPORT;
3114 if (pdata->max_addr_reg_cnt > 1)
3115 pdata->l2_filtering_mode = 0;
3117 ret = EQOS_NO_HW_SUPPORT;
3120 /* configure L2 DA perfect/inverse_matching */
3121 hw_if->config_l2_da_perfect_inverse_match(l_l2_da_filter.
3122 perfect_inverse_match);
3125 ("Successfully selected L2 %s filtering and %s DA matching\n",
3126 (l_l2_da_filter.perfect_hash ? "HASH" : "PERFECT"),
3127 (l_l2_da_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"));
3129 DBGPR_FILTER("<--eqos_confing_l2_da_filter\n");
3135 * \details This function is invoked by ioctl function when user issues
3136 * an ioctl command to enable/disable mac loopback mode.
3138 * \param[in] dev – pointer to net device structure.
3139 * \param[in] flags – flag to indicate whether mac loopback mode to be
3144 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_mac_loopback_mode - enable/disable MAC loopback from ioctl.
 *
 * 'flags' non-zero enables, zero disables.  Requests matching the current
 * state are rejected (the early-return lines after each pr_err are elided
 * from this view).  pdata->mac_loopback_mode caches the programmed state.
 */
3146 int eqos_config_mac_loopback_mode(struct net_device *dev,
3149 struct eqos_prv_data *pdata = netdev_priv(dev);
3150 struct hw_if_struct *hw_if = &(pdata->hw_if);
3153 pr_debug("-->eqos_config_mac_loopback_mode\n");
3155 if (flags && pdata->mac_loopback_mode) {
3156 pr_err("MAC loopback mode is already enabled\n");
3159 if (!flags && !pdata->mac_loopback_mode) {
3160 pr_err("MAC loopback mode is already disabled\n");
/* normalize to 0/1 before caching; hardware gets the raw flags value */
3163 pdata->mac_loopback_mode = !!flags;
3164 hw_if->config_mac_loopback_mode(flags);
3166 pr_err("Succesfully %s MAC loopback mode\n",
3167 (flags ? "enabled" : "disabled"));
3169 pr_debug("<--eqos_config_mac_loopback_mode\n");
3174 static VOID eqos_config_timer_registers(struct eqos_prv_data *pdata)
3176 struct timespec now;
3177 struct hw_if_struct *hw_if = &(pdata->hw_if);
3180 pr_debug("-->eqos_config_timer_registers\n");
3182 /* program Sub Second Increment Reg */
3183 hw_if->config_sub_second_increment(EQOS_SYSCLOCK);
3186 * addend = 2^32/freq_div_ratio;
3188 * where, freq_div_ratio = EQOS_SYSCLOCK/50MHz
3190 * hence, addend = ((2^32) * 50MHz)/EQOS_SYSCLOCK;
3192 * NOTE: EQOS_SYSCLOCK should be >= 50MHz to
3193 * achive 20ns accuracy.
3195 * 2^x * y == (y << x), hence
3196 * 2^32 * 6250000 ==> (6250000 << 32)
3198 temp = (u64) (62500000ULL << 32);
3199 pdata->default_addend = div_u64(temp, 125000000);
3201 hw_if->config_addend(pdata->default_addend);
3203 /* initialize system time */
3204 getnstimeofday(&now);
3205 hw_if->init_systime(now.tv_sec, now.tv_nsec);
3207 pr_debug("-->eqos_config_timer_registers\n");
3211 * \details This function is invoked by ioctl function when user issues
3212 * an ioctl command to configure PTP offloading feature.
3214 * \param[in] pdata - pointer to private data structure.
3215 * \param[in] flags – Each bit in this variable carry some information related
3216 * double vlan processing.
3220 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_ptpoffload - configure the PTP offload engine from an ioctl.
 *
 * Builds the MAC timestamp-control (mac_tcr) and PTP-offload-control
 * (pto_cntrl) register values according to the requested clock role
 * (ordinary/transparent slave/master, peer-to-peer transparent), then
 * programs hardware timestamping, the timer registers, and the offload
 * engine.
 *
 * NOTE(review): the declarations of mac_tcr/pto_cntrl, the closing braces
 * between the role branches, and the disable-path assignments (around
 * original lines 3277-3281) are elided from this view.
 */
3222 static int eqos_config_ptpoffload(struct eqos_prv_data *pdata,
3223 struct eqos_config_ptpoffloading *u_conf_ptp)
3227 struct eqos_config_ptpoffloading l_conf_ptp;
3228 struct hw_if_struct *hw_if = &(pdata->hw_if);
3230 if (copy_from_user(&l_conf_ptp, u_conf_ptp,
3231 sizeof(struct eqos_config_ptpoffloading))) {
3232 pr_err("Failed to fetch Double vlan Struct info from user\n");
3233 return EQOS_CONFIG_FAIL;
3236 pr_err("-->eqos_config_ptpoffload - %d\n", l_conf_ptp.mode);
/* baseline: engine on, timestamps for PTP over Ethernet, v2, fine update */
3238 pto_cntrl = MAC_PTOCR_PTOEN; /* enable ptp offloading */
3239 mac_tcr = MAC_TCR_TSENA | MAC_TCR_TSIPENA | MAC_TCR_TSVER2ENA
3240 | MAC_TCR_TSCFUPDT | MAC_TCR_TSCTRLSSR;
3241 if (l_conf_ptp.mode == EQOS_PTP_ORDINARY_SLAVE) {
3243 mac_tcr |= MAC_TCR_TSEVENTENA;
3244 pdata->ptp_offloading_mode = EQOS_PTP_ORDINARY_SLAVE;
3246 } else if (l_conf_ptp.mode == EQOS_PTP_TRASPARENT_SLAVE) {
3248 pto_cntrl |= MAC_PTOCR_APDREQEN;
3249 mac_tcr |= MAC_TCR_TSEVENTENA;
3250 mac_tcr |= MAC_TCR_SNAPTYPSEL_1;
3251 pdata->ptp_offloading_mode = EQOS_PTP_TRASPARENT_SLAVE;
3253 } else if (l_conf_ptp.mode == EQOS_PTP_ORDINARY_MASTER) {
3255 pto_cntrl |= MAC_PTOCR_ASYNCEN;
3256 mac_tcr |= MAC_TCR_TSEVENTENA;
3257 mac_tcr |= MAC_TCR_TSMASTERENA;
3258 pdata->ptp_offloading_mode = EQOS_PTP_ORDINARY_MASTER;
3260 } else if (l_conf_ptp.mode == EQOS_PTP_TRASPARENT_MASTER) {
3262 pto_cntrl |= MAC_PTOCR_ASYNCEN | MAC_PTOCR_APDREQEN;
3263 mac_tcr |= MAC_TCR_SNAPTYPSEL_1;
3264 mac_tcr |= MAC_TCR_TSEVENTENA;
3265 mac_tcr |= MAC_TCR_TSMASTERENA;
3266 pdata->ptp_offloading_mode = EQOS_PTP_TRASPARENT_MASTER;
3268 } else if (l_conf_ptp.mode == EQOS_PTP_PEER_TO_PEER_TRANSPARENT) {
3270 pto_cntrl |= MAC_PTOCR_APDREQEN;
3271 mac_tcr |= MAC_TCR_SNAPTYPSEL_3;
3272 pdata->ptp_offloading_mode = EQOS_PTP_PEER_TO_PEER_TRANSPARENT;
3275 pdata->ptp_offload = 1;
/* disable path: clears the offload flag (register overrides elided) */
3276 if (l_conf_ptp.en_dis == EQOS_PTP_OFFLOADING_DISABLE) {
3279 pdata->ptp_offload = 0;
/* PTP domain number occupies bits [15:8] of the PTO control register */
3282 pto_cntrl |= (l_conf_ptp.domain_num << 8);
3283 hw_if->config_hw_time_stamping(mac_tcr);
3284 eqos_config_timer_registers(pdata);
3285 hw_if->config_ptpoffload_engine(pto_cntrl, l_conf_ptp.mc_uc);
3287 pr_err("<--eqos_config_ptpoffload\n");
3293 * \details This function is invoked by ioctl function when user issues
3294 * an ioctl command to enable/disable pfc.
3296 * \param[in] dev – pointer to net device structure.
3297 * \param[in] flags – flag to indicate whether pfc to be enabled/disabled.
3301 * \retval zero on success and -ve number on failure.
/*
 * eqos_config_pfc - enable/disable Priority-based Flow Control from ioctl.
 *
 * 'flags' non-zero enables, zero disables.  Requires the DCB capability
 * bit; otherwise returns EQOS_NO_HW_SUPPORT.  (The success return is on
 * an elided line.)
 */
3303 static int eqos_config_pfc(struct net_device *dev, unsigned int flags)
3305 struct eqos_prv_data *pdata = netdev_priv(dev);
3306 struct hw_if_struct *hw_if = &(pdata->hw_if);
3309 pr_debug("-->eqos_config_pfc\n");
3311 if (!pdata->hw_feat.dcb_en) {
3312 pr_err("PFC is not supported\n");
3313 return EQOS_NO_HW_SUPPORT;
3316 hw_if->config_pfc(flags);
3318 pr_err("Succesfully %s PFC(Priority Based Flow Control)\n",
3319 (flags ? "enabled" : "disabled"));
3321 pr_debug("<--eqos_config_pfc\n");
3327 * \brief Driver IOCTL routine
3329 * \details This function is invoked by main ioctl function when
3330 * users request to configure various device features like,
3331 * PMT module, TX and RX PBL, TX and RX FIFO threshold level,
3332 * TX and RX OSF mode, SA insert/replacement, L2/L3/L4 and
3333 * VLAN filtering, AVB/DCB algorithm etc.
3335 * \param[in] pdata – pointer to private data structure.
3336 * \param[in] req – pointer to ioctl structure.
3340 * \retval 0 - success
3341 * \retval negative - failure
/*
 * eqos_handle_prv_ioctl - dispatcher for the driver's private ioctl commands.
 *
 * Validates the requested queue index, then switches on the command code
 * (the 'switch (req->cmd)'-style line itself is elided from this view) to
 * configure power management, FIFO thresholds, store-and-forward/OSF modes,
 * burst settings, PBL values, SA insertion, L2/L3/L4/VLAN filtering,
 * DCB/AVB algorithms, loopback and isolation tests.
 *
 * NOTE(review): many 'break;' statements, some 'case' labels (RSF/TSF/OSF
 * around original lines 3434/3441/3448) and the 'ret' declaration are on
 * elided lines; the bodies below must be read with that in mind.
 *
 * NOTE(review): the qinx validation uses '>' against EQOS_QUEUE_CNT; if
 * queue indices are 0-based, '>=' would be the correct bound — confirm
 * against GET_TX_WRAPPER_DESC()'s indexing.
 */
3344 static int eqos_handle_prv_ioctl(struct eqos_prv_data *pdata,
3345 struct ifr_data_struct *req)
3347 unsigned int qinx = req->qinx;
3348 struct tx_ring *ptx_ring =
3349 GET_TX_WRAPPER_DESC(qinx);
3350 struct rx_ring *prx_ring =
3351 GET_RX_WRAPPER_DESC(qinx);
3352 struct hw_if_struct *hw_if = &(pdata->hw_if);
3353 struct net_device *dev = pdata->dev;
3356 pr_debug("-->eqos_handle_prv_ioctl\n");
3358 if (qinx > EQOS_QUEUE_CNT) {
3359 pr_err("Queue number %d is invalid\n"
3360 "Hardware has only %d Tx/Rx Queues\n",
3361 qinx, EQOS_QUEUE_CNT);
3362 ret = EQOS_NO_HW_SUPPORT;
/* --- power management commands: gated on magic-packet / remote-wake HW --- */
3367 case EQOS_POWERUP_MAGIC_CMD:
3368 if (pdata->hw_feat.mgk_sel) {
3369 ret = eqos_powerup(dev, EQOS_IOCTL_CONTEXT);
3371 ret = EQOS_CONFIG_SUCCESS;
3373 ret = EQOS_CONFIG_FAIL;
3375 ret = EQOS_NO_HW_SUPPORT;
3379 case EQOS_POWERDOWN_MAGIC_CMD:
3380 if (pdata->hw_feat.mgk_sel) {
3384 EQOS_IOCTL_CONTEXT);
3386 ret = EQOS_CONFIG_SUCCESS;
3388 ret = EQOS_CONFIG_FAIL;
3390 ret = EQOS_NO_HW_SUPPORT;
3394 case EQOS_POWERUP_REMOTE_WAKEUP_CMD:
3395 if (pdata->hw_feat.rwk_sel) {
3396 ret = eqos_powerup(dev, EQOS_IOCTL_CONTEXT);
3398 ret = EQOS_CONFIG_SUCCESS;
3400 ret = EQOS_CONFIG_FAIL;
3402 ret = EQOS_NO_HW_SUPPORT;
3406 case EQOS_POWERDOWN_REMOTE_WAKEUP_CMD:
3407 if (pdata->hw_feat.rwk_sel) {
3408 ret = eqos_configure_remotewakeup(dev, req);
3410 ret = EQOS_CONFIG_SUCCESS;
3412 ret = EQOS_CONFIG_FAIL;
3414 ret = EQOS_NO_HW_SUPPORT;
/* --- per-queue FIFO threshold and store-and-forward settings --- */
3418 case EQOS_RX_THRESHOLD_CMD:
3419 prx_ring->rx_threshold_val = req->flags;
3420 hw_if->config_rx_threshold(qinx,
3421 prx_ring->rx_threshold_val);
3422 pr_err("Configured Rx threshold with %d\n",
3423 prx_ring->rx_threshold_val);
3426 case EQOS_TX_THRESHOLD_CMD:
3427 ptx_ring->tx_threshold_val = req->flags;
3428 hw_if->config_tx_threshold(qinx,
3429 ptx_ring->tx_threshold_val);
3430 pr_err("Configured Tx threshold with %d\n",
3431 ptx_ring->tx_threshold_val);
3435 prx_ring->rsf_on = req->flags;
3436 hw_if->config_rsf_mode(qinx, prx_ring->rsf_on);
3437 pr_err("Receive store and forward mode %s\n",
3438 (prx_ring->rsf_on) ? "enabled" : "disabled");
3442 ptx_ring->tsf_on = req->flags;
3443 hw_if->config_tsf_mode(qinx, ptx_ring->tsf_on);
3444 pr_err("Transmit store and forward mode %s\n",
3445 (ptx_ring->tsf_on) ? "enabled" : "disabled");
3449 ptx_ring->osf_on = req->flags;
3450 hw_if->config_osf_mode(qinx, ptx_ring->osf_on);
3451 pr_err("Transmit DMA OSF mode is %s\n",
3452 (ptx_ring->osf_on) ? "enabled" : "disabled");
/* --- DMA burst / PBL configuration --- */
3455 case EQOS_INCR_INCRX_CMD:
3456 pdata->incr_incrx = req->flags;
3457 hw_if->config_incr_incrx_mode(pdata->incr_incrx);
3458 pr_err("%s mode is enabled\n",
3459 (pdata->incr_incrx) ? "INCRX" : "INCR");
3462 case EQOS_RX_PBL_CMD:
3463 prx_ring->rx_pbl = req->flags;
3464 eqos_config_rx_pbl(pdata, prx_ring->rx_pbl, qinx);
3467 case EQOS_TX_PBL_CMD:
3468 ptx_ring->tx_pbl = req->flags;
3469 eqos_config_tx_pbl(pdata, ptx_ring->tx_pbl, qinx);
3472 case EQOS_PTPOFFLOADING_CMD:
3473 if (pdata->hw_feat.tsstssel) {
3474 ret = eqos_config_ptpoffload(pdata, req->ptr);
3476 pr_err("No HW support for PTP\n");
3477 ret = EQOS_NO_HW_SUPPORT;
/* --- source-address insertion: via descriptor or via register, MAC0/MAC1 --- */
3481 case EQOS_SA0_DESC_CMD:
3482 if (pdata->hw_feat.sa_vlan_ins) {
3483 pdata->tx_sa_ctrl_via_desc = req->flags;
3484 pdata->tx_sa_ctrl_via_reg = EQOS_SA0_NONE;
3485 if (req->flags == EQOS_SA0_NONE) {
3486 memcpy(pdata->mac_addr, pdata->dev->dev_addr,
3489 memcpy(pdata->mac_addr, mac_addr0,
3492 hw_if->configure_mac_addr0_reg(pdata->mac_addr);
3493 hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
3495 ("SA will use MAC0 with descriptor for configuration %d\n",
3496 pdata->tx_sa_ctrl_via_desc);
3499 ("Device doesn't supports SA Insertion/Replacement\n");
3500 ret = EQOS_NO_HW_SUPPORT;
3504 case EQOS_SA1_DESC_CMD:
3505 if (pdata->hw_feat.sa_vlan_ins) {
3506 pdata->tx_sa_ctrl_via_desc = req->flags;
3507 pdata->tx_sa_ctrl_via_reg = EQOS_SA1_NONE;
3508 if (req->flags == EQOS_SA1_NONE) {
3509 memcpy(pdata->mac_addr, pdata->dev->dev_addr,
3512 memcpy(pdata->mac_addr, mac_addr1,
3515 hw_if->configure_mac_addr1_reg(pdata->mac_addr);
3516 hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
3518 ("SA will use MAC1 with descriptor for configuration %d\n",
3519 pdata->tx_sa_ctrl_via_desc);
3522 ("Device doesn't supports SA Insertion/Replacement\n");
3523 ret = EQOS_NO_HW_SUPPORT;
3527 case EQOS_SA0_REG_CMD:
3528 if (pdata->hw_feat.sa_vlan_ins) {
3529 pdata->tx_sa_ctrl_via_reg = req->flags;
3530 pdata->tx_sa_ctrl_via_desc = EQOS_SA0_NONE;
3531 if (req->flags == EQOS_SA0_NONE) {
3532 memcpy(pdata->mac_addr, pdata->dev->dev_addr,
3535 memcpy(pdata->mac_addr, mac_addr0,
3538 hw_if->configure_mac_addr0_reg(pdata->mac_addr);
3539 hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
3541 ("SA will use MAC0 with register for configuration %d\n",
3542 pdata->tx_sa_ctrl_via_desc);
3545 ("Device doesn't supports SA Insertion/Replacement\n");
3546 ret = EQOS_NO_HW_SUPPORT;
3550 case EQOS_SA1_REG_CMD:
3551 if (pdata->hw_feat.sa_vlan_ins) {
3552 pdata->tx_sa_ctrl_via_reg = req->flags;
3553 pdata->tx_sa_ctrl_via_desc = EQOS_SA1_NONE;
3554 if (req->flags == EQOS_SA1_NONE) {
3555 memcpy(pdata->mac_addr, pdata->dev->dev_addr,
3558 memcpy(pdata->mac_addr, mac_addr1,
3561 hw_if->configure_mac_addr1_reg(pdata->mac_addr);
3562 hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
3564 ("SA will use MAC1 with register for configuration %d\n",
3565 pdata->tx_sa_ctrl_via_desc);
3568 ("Device doesn't supports SA Insertion/Replacement\n");
3569 ret = EQOS_NO_HW_SUPPORT;
3573 case EQOS_SETUP_CONTEXT_DESCRIPTOR:
3574 if (pdata->hw_feat.sa_vlan_ins) {
3575 ptx_ring->context_setup = req->context_setup;
3576 if (ptx_ring->context_setup == 1) {
3577 pr_err("Context descriptor will be transmitted"
3578 " with every normal descriptor on %d DMA Channel\n",
3581 pr_err("Context descriptor will be setup"
3582 " only if VLAN id changes %d\n", qinx);
3585 pr_err("Device doesn't support VLAN operations\n");
3586 ret = EQOS_NO_HW_SUPPORT;
/* --- query commands: write the answer back into the request struct --- */
3590 case EQOS_GET_RX_QCNT:
3591 req->qinx = EQOS_RX_QUEUE_CNT;
3594 case EQOS_GET_TX_QCNT:
3595 req->qinx = EQOS_TX_QUEUE_CNT;
3598 case EQOS_GET_CONNECTED_SPEED:
3599 req->connected_speed = pdata->speed;
3602 case EQOS_DCB_ALGORITHM:
3603 eqos_program_dcb_algorithm(pdata, req);
3606 case EQOS_AVB_ALGORITHM:
3607 eqos_program_avb_algorithm(pdata, req);
/* --- filtering commands delegate to the helpers defined above --- */
3610 case EQOS_L3_L4_FILTER_CMD:
3611 if (pdata->hw_feat.l3l4_filter_num > 0) {
3612 ret = eqos_config_l3_l4_filtering(dev, req->flags);
3614 ret = EQOS_CONFIG_SUCCESS;
3616 ret = EQOS_CONFIG_FAIL;
3618 ret = EQOS_NO_HW_SUPPORT;
3621 case EQOS_IPV4_FILTERING_CMD:
3622 ret = eqos_config_ip4_filters(dev, req);
3624 case EQOS_IPV6_FILTERING_CMD:
3625 ret = eqos_config_ip6_filters(dev, req);
3627 case EQOS_UDP_FILTERING_CMD:
3628 ret = eqos_config_tcp_udp_filters(dev, req, 1);
3630 case EQOS_TCP_FILTERING_CMD:
3631 ret = eqos_config_tcp_udp_filters(dev, req, 0);
3633 case EQOS_VLAN_FILTERING_CMD:
3634 ret = eqos_config_vlan_filter(dev, req);
3636 case EQOS_L2_DA_FILTERING_CMD:
3637 ret = eqos_confing_l2_da_filter(dev, req);
3639 case EQOS_ARP_OFFLOAD_CMD:
3640 ret = eqos_config_arp_offload(dev, req);
/* --- AXI bus tuning --- */
3642 case EQOS_AXI_PBL_CMD:
3643 pdata->axi_pbl = req->flags;
3644 hw_if->config_axi_pbl_val(pdata->axi_pbl);
3645 pr_err("AXI PBL value: %d\n", pdata->axi_pbl);
3647 case EQOS_AXI_WORL_CMD:
3648 pdata->axi_worl = req->flags;
3649 hw_if->config_axi_worl_val(pdata->axi_worl);
3650 pr_err("AXI WORL value: %d\n", pdata->axi_worl);
3652 case EQOS_AXI_RORL_CMD:
3653 pdata->axi_rorl = req->flags;
3654 hw_if->config_axi_rorl_val(pdata->axi_rorl);
3655 pr_err("AXI RORL value: %d\n", pdata->axi_rorl);
/* --- loopback / diagnostics --- */
3657 case EQOS_MAC_LOOPBACK_MODE_CMD:
3658 ret = eqos_config_mac_loopback_mode(dev, req->flags);
3660 ret = EQOS_CONFIG_SUCCESS;
3662 ret = EQOS_CONFIG_FAIL;
3665 ret = eqos_config_pfc(dev, req->flags);
3667 case EQOS_PHY_LOOPBACK:
3668 ret = eqos_handle_phy_loopback(pdata, (void *)req);
3670 case EQOS_MEM_ISO_TEST:
3671 ret = eqos_handle_mem_iso_ioctl(pdata, (void *)req);
3673 case EQOS_CSR_ISO_TEST:
3674 ret = eqos_handle_csr_iso_ioctl(pdata, (void *)req);
3678 pr_err("Unsupported command call\n");
3681 pr_debug("<--eqos_handle_prv_ioctl\n");
3687 * \brief control hw timestamping.
3689 * \details This function is used to configure the MAC to enable/disable both
3690 * outgoing(Tx) and incoming(Rx) packets time stamping based on user input.
3692 * \param[in] pdata – pointer to private data structure.
3693 * \param[in] ifr – pointer to IOCTL specific structure.
3697 * \retval 0 - success
3698 * \retval negative - failure
/* Process the SIOCSHWTSTAMP ioctl: copy the requested hwtstamp_config
 * from user space, translate tx_type/rx_filter into MAC_TCR control
 * bits, program the timestamp control, sub-second-increment and addend
 * registers, seed the PTP system time, and hand the effective
 * configuration back to user space.
 * Returns 0 on success, negative errno on failure.
 */
static int eqos_handle_hwtstamp_ioctl(struct eqos_prv_data *pdata,
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct hwtstamp_config config;
	/* per-filter MAC_TCR bit values, OR-ed into mac_tcr below */
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 av_8021asm_en = 0;
	struct timespec now;

	DBGPR_PTP("-->eqos_handle_hwtstamp_ioctl\n");

	/* reject the request if the core was synthesized without
	 * timestamp support */
	if (!pdata->hw_feat.tsstssel) {
		pr_err("No hw timestamping is available in this core\n");

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))

	DBGPR_PTP("config.flags = %#x, tx_type = %#x, rx_filter = %#x\n",
		  config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		pdata->hwts_tx_en = 0;
	case HWTSTAMP_TX_ON:
		pdata->hwts_tx_en = 1;

	/* translate the requested RX filter into MAC_TCR enable bits */
	switch (config.rx_filter) {
		/* time stamp no incoming packet at all */
	case HWTSTAMP_FILTER_NONE:
		config.rx_filter = HWTSTAMP_FILTER_NONE;

		/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* take time stamp for all event messages */
		snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		/* take time stamp for SYNC messages only */
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		/* take time stamp for Delay_Req messages only */
		ts_master_en = MAC_TCR_TSMASTERENA;
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for all event messages */
		snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for SYNC messages only */
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for Delay_Req messages only */
		ts_master_en = MAC_TCR_TSMASTERENA;
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;

		/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for all event messages */
		snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
		ptp_over_ethernet = MAC_TCR_TSIPENA;
		/* for VLAN tagged PTP, AV8021ASMEN bit should not be set */
#ifdef DWC_1588_VLAN_UNTAGGED
		av_8021asm_en = MAC_TCR_AV8021ASMEN;

		/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for SYNC messages only */
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
		ptp_over_ethernet = MAC_TCR_TSIPENA;
		av_8021asm_en = MAC_TCR_AV8021ASMEN;

		/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		ptp_v2 = MAC_TCR_TSVER2ENA;
		/* take time stamp for Delay_Req messages only */
		ts_master_en = MAC_TCR_TSMASTERENA;
		ts_event_en = MAC_TCR_TSEVENTENA;
		ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
		ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
		ptp_over_ethernet = MAC_TCR_TSIPENA;
		av_8021asm_en = MAC_TCR_AV8021ASMEN;

		/* time stamp any incoming packet */
	case HWTSTAMP_FILTER_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_all = MAC_TCR_TSENALL;

	/* RX timestamping is enabled for every filter except NONE */
	    ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);

	if (!pdata->hwts_tx_en && !pdata->hwts_rx_en) {
		/* disable hw time stamping */
		hw_if->config_hw_time_stamping(mac_tcr);

		    (MAC_TCR_TSENA | MAC_TCR_TSCFUPDT | MAC_TCR_TSCTRLSSR |
		     tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp
		     | ptp_over_ipv4_udp | ts_event_en | ts_master_en |
		     snap_type_sel | av_8021asm_en);

		/* drop digital (0..10^9-1 ns) rollover mode if the core
		 * lacks 1ns sub-second accuracy */
		if (!pdata->one_nsec_accuracy)
			mac_tcr &= ~MAC_TCR_TSCTRLSSR;

		hw_if->config_hw_time_stamping(mac_tcr);

		/* program Sub Second Increment Reg */
		hw_if->config_sub_second_increment(EQOS_SYSCLOCK);

		/*
		 * formula: addend = 2^32 / freq_div_ratio
		 *
		 * where freq_div_ratio = input_clock / ptp_ref_clock,
		 * hence addend = (2^32 * ptp_ref_clock) / input_clock.
		 *
		 * The constants below use ptp_ref_clock = 62.5 MHz and
		 * input_clock = 125 MHz, i.e. addend = 2^31.  The input
		 * clock must be fast enough to achieve the desired
		 * timestamp resolution.
		 *
		 * 2^x * y == (y << x), hence
		 * 2^32 * 62500000 ==> (62500000 << 32)
		 */
		temp = (u64) (62500000ULL << 32);
		pdata->default_addend = div_u64(temp, 125000000);

		hw_if->config_addend(pdata->default_addend);

		/* initialize system time */
		getnstimeofday(&now);
		hw_if->init_systime(now.tv_sec, now.tv_nsec);

	DBGPR_PTP("config.flags = %#x, tx_type = %#x, rx_filter = %#x\n",
		  config.flags, config.tx_type, config.rx_filter);

	DBGPR_PTP("<--eqos_handle_hwtstamp_ioctl\n");

	/* return the (possibly adjusted) config to user space */
	return (copy_to_user(ifr->ifr_data, &config,
			     sizeof(struct hwtstamp_config))) ? -EFAULT : 0;
3925 * \brief Driver IOCTL routine
 * \details This function is invoked by the kernel when a user requests an
 * ioctl which can't be handled by the generic interface code. The following
 * operations are performed in this function.
3930 * - Configuring the PMT module.
3931 * - Configuring TX and RX PBL.
3932 * - Configuring the TX and RX FIFO threshold level.
3933 * - Configuring the TX and RX OSF mode.
3935 * \param[in] dev – pointer to net device structure.
3936 * \param[in] ifr – pointer to IOCTL specific structure.
3937 * \param[in] cmd – IOCTL command.
3941 * \retval 0 - success
3942 * \retval negative - failure
/* ndo_do_ioctl entry point: services MII register access
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIPHY), the driver-private ioctl
 * and HW timestamp configuration, all under pdata->lock.
 * Returns 0 on success, negative errno on failure.
 */
static int eqos_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct ifr_data_struct *req = ifr->ifr_ifru.ifru_data;
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned int reg_val = 0;

	pr_debug("-->eqos_ioctl\n");

	/* ioctls require a running interface with an attached PHY */
	if ((!netif_running(dev)) || (!pdata->phydev)) {
		pr_debug("<--eqos_ioctl - error\n");

	spin_lock(&pdata->lock);

	/* SIOCGMIIPHY: report the PHY address */
	data->phy_id = pdata->phyaddr;
	pr_err("PHY ID: SIOCGMIIPHY\n");

	/* SIOCGMIIREG: read a PHY register (register number is 5 bits) */
	eqos_mdio_read_direct(pdata, pdata->phyaddr,
			      (data->reg_num & 0x1F), &reg_val);
	data->val_out = reg_val;
	pr_err("PHY ID: SIOCGMIIREG reg:%#x reg_val:%#x\n",
	       (data->reg_num & 0x1F), reg_val);

	pr_err("PHY ID: SIOCSMIIPHY\n");

	/* driver-private ioctl; the handler's result is also reported
	 * back through req->command_error */
	case EQOS_PRV_IOCTL:
		ret = eqos_handle_prv_ioctl(pdata, req);
		req->command_error = ret;

		ret = eqos_handle_hwtstamp_ioctl(pdata, ifr);

		pr_err("Unsupported IOCTL call\n");

	spin_unlock(&pdata->lock);

	pr_debug("<--eqos_ioctl\n");
4004 * \brief API to change MTU.
4006 * \details This function is invoked by upper layer when user changes
4007 * MTU (Maximum Transfer Unit). The MTU is used by the Network layer
4008 * to driver packet transmission. Ethernet has a default MTU of
4009 * 1500Bytes. This value can be changed with ifconfig -
4010 * ifconfig <interface_name> mtu <new_mtu_value>
4012 * \param[in] dev - pointer to net_device structure
4013 * \param[in] new_mtu - the new MTU for the device.
4017 * \retval 0 - on success and -ve on failure.
/* ndo_change_mtu hook: validates the requested MTU (only 1500, 4074 or
 * 9000 are accepted, must fit the PHY max frame size, not supported in
 * multi-queue or PG-test builds), then stops the device, resizes the RX
 * buffers and restarts it, all under hw_change_lock.
 */
static INT eqos_change_mtu(struct net_device *dev, INT new_mtu)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct platform_device *pdev = pdata->pdev;
	/* full on-wire frame length implied by the requested MTU */
	int max_frame = (new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	pr_debug("-->eqos_change_mtu: new_mtu:%d\n", new_mtu);

#ifdef EQOS_CONFIG_PGTEST
	dev_err(&pdev->dev, "jumbo frames not supported with PG test\n");

	/* MTU is fixed when multiple queues are in use */
	if (pdata->dt_cfg.use_multi_q) {
		"mtu cannot be modified in multi queue mode\n");

	if ((new_mtu != 1500) && (new_mtu != 4074) && (new_mtu != 9000)) {
		dev_err(&pdev->dev, "valid mtus are 1500, 4074, or 9000\n");

	if (max_frame > (pdata->dt_cfg.phy_max_frame_size)) {
		dev_err(&pdev->dev, "mtu exceeds phy max frame size of %d",
			pdata->dt_cfg.phy_max_frame_size);

	if (dev->mtu == new_mtu) {
		dev_err(&pdev->dev, "already configured to mtu %d\n", new_mtu);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n", dev->mtu, new_mtu);

	/* quiesce the hardware before resizing RX buffers */
	mutex_lock(&pdata->hw_change_lock);
	if (!pdata->hw_stopped)
		eqos_stop_dev(pdata);

	/* small frames share a fixed 2KiB buffer; larger frames get an
	 * aligned buffer sized to the frame */
	if (max_frame <= 2048) {
		pdata->rx_buffer_len = 2048;
		pdata->rx_buffer_len = ALIGN_SIZE(max_frame);
	pdata->rx_max_frame_size = max_frame;

	if (!pdata->hw_stopped)
		eqos_start_dev(pdata);

	mutex_unlock(&pdata->hw_change_lock);

	pr_debug("<--eqos_change_mtu\n");
4076 #ifdef EQOS_QUEUE_SELECT_ALGO
4077 u16 eqos_select_queue(struct net_device *dev,
4078 struct sk_buff *skb, void *accel_priv,
4079 select_queue_fallback_t fallback)
4081 int txqueue_select = -1;
4082 struct eqos_prv_data *pdata = netdev_priv(dev);
4083 struct eqos_cfg *pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg;
4086 pr_debug("-->eqos_select_queue\n");
4088 for (i = 0; i <= EQOS_TX_QUEUE_CNT; i++) {
4089 if (pdt_cfg->q_prio[i] == skb->priority) {
4095 if (txqueue_select < 0)
4098 pr_debug("<--eqos_select_queue txqueue-select:%d\n", txqueue_select);
4100 return txqueue_select;
/* Bit-serial CRC-32 over 'size' bits of 'data' using polynomial
 * 0x04c11db7, seeded with 'initval'.  A fresh input byte is loaded
 * every 8 bits; each iteration mixes one data bit (taken from the LSB
 * of my_data) with the CRC's top bit.
 * NOTE(review): the shift/XOR statements of the loop body are not
 * visible in this excerpt — confirm against the full source.
 */
unsigned int crc32_snps_le(unsigned int initval, unsigned char *data,
	unsigned int crc = initval;
	unsigned int poly = 0x04c11db7;
	unsigned int temp = 0;
	unsigned char my_data = 0;

	for (bit_count = 0; bit_count < size; bit_count++) {
		/* load the next input byte on every 8th bit */
		if ((bit_count % 8) == 0)
			my_data = data[bit_count / 8];
		DBGPR_FILTER("%s my_data = %x crc=%x\n", __func__, my_data,
		/* next feedback bit: CRC MSB xor current data LSB */
		temp = ((crc >> 31) ^ my_data) & 0x1;

	DBGPR_FILTER("%s my_data = %x crc=%x\n", __func__, my_data, crc);
4128 * \brief API to delete vid to HW filter.
4130 * \details This function is invoked by upper layer when a VLAN id is removed.
4131 * This function deletes the VLAN id from the HW filter.
4132 * vlan id can be removed with vconfig -
4133 * vconfig rem <interface_name > <vlan_id>
4135 * \param[in] dev - pointer to net_device structure
4136 * \param[in] vid - vlan id to be removed.
/* ndo_vlan_rx_kill_vid hook: remove a VLAN id from the HW filter.
 * In hash-filtering mode the vid's CRC selects a bit in the VLAN Hash
 * Table register, which is cleared here; otherwise the single perfect
 * VLAN-id filter is reset to VID 1 (writing 0 would pass all VLANs).
 */
static int eqos_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	unsigned short new_index, old_index;
	unsigned int enb_12bit_vhash;

	pr_err("-->eqos_vlan_rx_kill_vid: vid = %d\n", vid);

	if (pdata->vlan_hash_filtering) {
		/* upper 4 bits of the bit-reversed CRC-32 of the vid */
		    (bitrev32(~crc32_le(~0, (unsigned char *)&vid, 2)) >> 28);

		enb_12bit_vhash = hw_if->get_vlan_tag_comparison();
		if (enb_12bit_vhash) {
			/* negate 4-bit crc value for 12-bit VLAN hash
			 * comparison */
			new_index = (1 << (~crc32_val & 0xF));
			new_index = (1 << (crc32_val & 0xF));

		/* clear this vid's bit in the hash table register */
		old_index = hw_if->get_vlan_hash_table_reg();
		old_index &= ~new_index;
		hw_if->update_vlan_hash_table_reg(old_index);
		pdata->vlan_ht_or_id = old_index;
		/* By default, receive only VLAN pkt with VID = 1
		 * because writing 0 will pass all VLAN pkt */
		hw_if->update_vlan_id(1);
		pdata->vlan_ht_or_id = 1;

	pr_err("<--eqos_vlan_rx_kill_vid\n");

	/* FIXME: Check if any errors need to be returned in case of failure */
/* ndo_set_mac_address hook.  If a valid MAC address is already
 * programmed, the request takes the early-exit branch (NOTE(review):
 * that branch's return statement is not visible in this excerpt —
 * confirm its value); otherwise the standard eth_mac_addr() helper
 * validates and installs the new address.
 */
static int eqos_set_mac_address(struct net_device *dev, void *p)
	if (is_valid_ether_addr(dev->dev_addr))

	return eth_mac_addr(dev, p);
4188 * \brief API to add vid to HW filter.
4190 * \details This function is invoked by upper layer when a new VALN id is
4191 * registered. This function updates the HW filter with new VLAN id.
4192 * New vlan id can be added with vconfig -
4193 * vconfig add <interface_name > <vlan_id>
4195 * \param[in] dev - pointer to net_device structure
4196 * \param[in] vid - new vlan id.
/* ndo_vlan_rx_add_vid hook: add a VLAN id to the HW filter.
 * In hash-filtering mode the vid's CRC selects a bit to set in the
 * VLAN Hash Table register; otherwise the single perfect VLAN-id
 * filter is programmed with the vid directly.
 */
static int eqos_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	unsigned short new_index, old_index;
	unsigned int enb_12bit_vhash;

	pr_err("-->eqos_vlan_rx_add_vid: vid = %d\n", vid);

	if (pdata->vlan_hash_filtering) {
		/* The upper 4 bits of the calculated CRC are used to
		 * index the content of the VLAN Hash Table Reg.
		 */
		    (bitrev32(~crc32_le(~0, (unsigned char *)&vid, 2)) >> 28);

		/* These 4(0xF) bits determines the bit within the
		 * VLAN Hash Table Reg 0
		 */
		enb_12bit_vhash = hw_if->get_vlan_tag_comparison();
		if (enb_12bit_vhash) {
			/* negate 4-bit crc value for 12-bit VLAN hash
			 * comparison */
			new_index = (1 << (~crc32_val & 0xF));
			new_index = (1 << (crc32_val & 0xF));

		/* set this vid's bit in the hash table register */
		old_index = hw_if->get_vlan_hash_table_reg();
		old_index |= new_index;
		hw_if->update_vlan_hash_table_reg(old_index);
		pdata->vlan_ht_or_id = old_index;
		hw_if->update_vlan_id(vid);
		pdata->vlan_ht_or_id = vid;

	pr_err("<--eqos_vlan_rx_add_vid\n");

	/* FIXME: Check if any errors need to be returned in case of failure */
4244 * \brief API called to put device in powerdown mode
4246 * \details This function is invoked by ioctl function when the user issues an
4247 * ioctl command to move the device to power down state. Following operations
4248 * are performed in this function.
4252 * - Stop DMA TX and RX process.
4253 * - Enable power down mode using PMT module.
4255 * \param[in] dev – pointer to net device structure.
4256 * \param[in] wakeup_type – remote wake-on-lan or magic packet.
4257 * \param[in] caller – netif_detach gets called conditionally based
4258 * on caller, IOCTL or DRIVER-suspend
4262 * \retval zero on success and -ve number on failure.
/* Put the device into PMT power-down: stop the PHY, detach/disable the
 * netif queues and NAPI, halt TX/RX DMA, then arm the requested wakeup
 * sources (remote-wakeup and/or magic packet) in the PMT registers.
 * 'caller' distinguishes ioctl- from driver(suspend)-initiated entry;
 * pmt_lock serializes against eqos_powerup().
 */
INT eqos_powerdown(struct net_device *dev, UINT wakeup_type, UINT caller)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_powerdown\n");

	/* nothing to do if not running, or already powered down via ioctl */
	if (!dev || !netif_running(dev) ||
	    (caller == EQOS_IOCTL_CONTEXT && pdata->power_down)) {
		("Device is already powered down and will powerup for %s\n",
		 EQOS_POWER_DOWN_TYPE(pdata));
		pr_debug("<--eqos_powerdown\n");

	phy_stop(pdata->phydev);

	spin_lock(&pdata->pmt_lock);

	/* detach only on driver(suspend)-initiated power-down */
	if (caller == EQOS_DRIVER_CONTEXT)
		netif_device_detach(dev);

	netif_tx_disable(dev);
	eqos_all_ch_napi_disable(pdata);

	/* stop DMA TX/RX */
	eqos_stop_all_ch_tx_dma(pdata);
	eqos_stop_all_ch_rx_dma(pdata);

	/* enable power down mode by programming the PMT regs */
	if (wakeup_type & EQOS_REMOTE_WAKEUP)
		hw_if->enable_remote_pmt();
	if (wakeup_type & EQOS_MAGIC_WAKEUP)
		hw_if->enable_magic_pmt();
	pdata->power_down_type = wakeup_type;

	if (caller == EQOS_IOCTL_CONTEXT)
		pdata->power_down = 1;

	spin_unlock(&pdata->pmt_lock);

	pr_debug("<--eqos_powerdown\n");
4314 * \brief API to powerup the device
4316 * \details This function is invoked by ioctl function when the user issues an
4317 * ioctl command to move the device to out of power down state. Following
4318 * operations are performed in this function.
4319 * - Wakeup the device using PMT module if supported.
4321 * - Enable MAC and DMA TX and RX process.
4323 * - Starts the queue.
4325 * \param[in] dev – pointer to net device structure.
4326 * \param[in] caller – netif_attach gets called conditionally based
4327 * on caller, IOCTL or DRIVER-suspend
4331 * \retval zero on success and -ve number on failure.
/* Bring the device out of PMT power-down: disarm the wakeup sources
 * that were enabled in eqos_powerdown(), restart the PHY, MAC and DMA,
 * re-attach the netif (driver context only), re-enable NAPI and start
 * the TX queues.  pmt_lock serializes against eqos_powerdown().
 */
INT eqos_powerup(struct net_device *dev, UINT caller)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_powerup\n");

	/* nothing to do if not running, or not powered down via ioctl */
	if (!dev || !netif_running(dev) ||
	    (caller == EQOS_IOCTL_CONTEXT && !pdata->power_down)) {
		pr_err("Device is already powered up\n");

	spin_lock(&pdata->pmt_lock);

	/* disarm whichever wakeup sources were enabled on power-down */
	if (pdata->power_down_type & EQOS_MAGIC_WAKEUP) {
		hw_if->disable_magic_pmt();
		pdata->power_down_type &= ~EQOS_MAGIC_WAKEUP;

	if (pdata->power_down_type & EQOS_REMOTE_WAKEUP) {
		hw_if->disable_remote_pmt();
		pdata->power_down_type &= ~EQOS_REMOTE_WAKEUP;

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* enable MAC TX/RX */
	hw_if->start_mac_tx_rx();

	/* enable DMA TX/RX */
	eqos_start_all_ch_tx_dma(pdata);
	eqos_start_all_ch_rx_dma(pdata);

	/* re-attach only on driver(resume)-initiated power-up */
	if (caller == EQOS_DRIVER_CONTEXT)
		netif_device_attach(dev);

	eqos_napi_enable_mq(pdata);

	netif_tx_start_all_queues(dev);

	spin_unlock(&pdata->pmt_lock);

	pr_debug("<--eqos_powerup\n");
4386 * \brief API to configure remote wakeup
4388 * \details This function is invoked by ioctl function when the user issues an
4389 * ioctl command to move the device to power down state using remote wakeup.
4391 * \param[in] dev – pointer to net device structure.
4392 * \param[in] req – pointer to ioctl data structure.
4396 * \retval zero on success and -ve number on failure.
/* Program the remote-wakeup (wake-on-LAN pattern) filters supplied via
 * ioctl and then enter power-down with remote wakeup armed.  Rejected
 * when the interface is down, the HW lacks remote-wake support
 * (hw_feat.rwk_sel) or the device is already powered down.
 */
INT eqos_configure_remotewakeup(struct net_device *dev,
				struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	if (!dev || !netif_running(dev) || !pdata->hw_feat.rwk_sel
	    || pdata->power_down) {
		("Device is already powered down and will powerup for %s\n",
		 EQOS_POWER_DOWN_TYPE(pdata));

	/* load the user-supplied wakeup filter into the MAC */
	hw_if->configure_rwk_filter(req->rwk_filter_values,
				    req->rwk_filter_length);

	eqos_powerdown(dev, EQOS_REMOTE_WAKEUP, EQOS_IOCTL_CONTEXT);
4422 * \details This function is invoked by ioctl function when the user issues an
4423 * ioctl command to change the RX DMA PBL value. This function will program
4424 * the device to configure the user specified RX PBL value.
4426 * \param[in] pdata – pointer to private data structure.
4427 * \param[in] rx_pbl – RX DMA pbl value to be programmed.
/* Program the RX DMA PBL (programmable burst length) for queue 'qinx'.
 * Small values are written directly with PBLx8 disabled; larger values
 * are written as value/8 with PBLx8 enabled (NOTE(review): the branch
 * conditions are not visible in this excerpt).  The effective TX/RX
 * PBL values are then logged.
 */
static void eqos_config_rx_pbl(struct eqos_prv_data *pdata,
			       UINT rx_pbl, UINT qinx)
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_config_rx_pbl: %d\n", rx_pbl);

	hw_if->config_rx_pbl_val(qinx, rx_pbl);
	hw_if->config_pblx8(qinx, 0);

	/* PBLx8 mode: register holds the burst length divided by 8 */
	hw_if->config_rx_pbl_val(qinx, rx_pbl / 8);
	hw_if->config_pblx8(qinx, 1);

	/* report effective PBL values (x8 multiplier applied when set) */
	switch (pblx8_val) {
		pr_err("Tx PBL[%d] value: %d\n",
		       qinx, hw_if->get_tx_pbl_val(qinx));
		pr_err("Rx PBL[%d] value: %d\n",
		       qinx, hw_if->get_rx_pbl_val(qinx));

		pr_err("Tx PBL[%d] value: %d\n",
		       qinx, (hw_if->get_tx_pbl_val(qinx) * 8));
		pr_err("Rx PBL[%d] value: %d\n",
		       qinx, (hw_if->get_rx_pbl_val(qinx) * 8));

	pr_debug("<--eqos_config_rx_pbl\n");
4480 * \details This function is invoked by ioctl function when the user issues an
4481 * ioctl command to change the TX DMA PBL value. This function will program
4482 * the device to configure the user specified TX PBL value.
4484 * \param[in] pdata – pointer to private data structure.
4485 * \param[in] tx_pbl – TX DMA pbl value to be programmed.
/* Program the TX DMA PBL (programmable burst length) for queue 'qinx'.
 * Mirrors eqos_config_rx_pbl(): small values written directly with
 * PBLx8 off, larger values written as value/8 with PBLx8 on
 * (NOTE(review): branch conditions not visible in this excerpt), then
 * the effective TX/RX PBL values are logged.
 */
static void eqos_config_tx_pbl(struct eqos_prv_data *pdata,
			       UINT tx_pbl, UINT qinx)
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_config_tx_pbl: %d\n", tx_pbl);

	hw_if->config_tx_pbl_val(qinx, tx_pbl);
	hw_if->config_pblx8(qinx, 0);

	/* PBLx8 mode: register holds the burst length divided by 8 */
	hw_if->config_tx_pbl_val(qinx, tx_pbl / 8);
	hw_if->config_pblx8(qinx, 1);

	/* report effective PBL values (x8 multiplier applied when set) */
	switch (pblx8_val) {
		pr_err("Tx PBL[%d] value: %d\n",
		       qinx, hw_if->get_tx_pbl_val(qinx));
		pr_err("Rx PBL[%d] value: %d\n",
		       qinx, hw_if->get_rx_pbl_val(qinx));

		pr_err("Tx PBL[%d] value: %d\n",
		       qinx, (hw_if->get_tx_pbl_val(qinx) * 8));
		pr_err("Rx PBL[%d] value: %d\n",
		       qinx, (hw_if->get_rx_pbl_val(qinx) * 8));

	pr_debug("<--eqos_config_tx_pbl\n");
4538 * \details This function is invoked by ioctl function when the user issues an
4539 * ioctl command to select the DCB algorithm.
4541 * \param[in] pdata – pointer to private data structure.
4542 * \param[in] req – pointer to ioctl data structure.
4549 static void eqos_program_dcb_algorithm(struct eqos_prv_data *pdata,
4550 struct ifr_data_struct *req)
4552 struct eqos_dcb_algorithm l_dcb_struct, *u_dcb_struct =
4553 (struct eqos_dcb_algorithm *)req->ptr;
4554 struct hw_if_struct *hw_if = &pdata->hw_if;
4556 pr_debug("-->eqos_program_dcb_algorithm\n");
4558 if (copy_from_user(&l_dcb_struct, u_dcb_struct,
4559 sizeof(struct eqos_dcb_algorithm)))
4560 pr_err("Failed to fetch DCB Struct info from user\n");
4562 hw_if->set_tx_queue_operating_mode(l_dcb_struct.qinx,
4563 (UINT) l_dcb_struct.op_mode);
4564 hw_if->set_dcb_algorithm(l_dcb_struct.algorithm);
4565 hw_if->set_dcb_queue_weight(l_dcb_struct.qinx, l_dcb_struct.weight);
4567 pr_debug("<--eqos_program_dcb_algorithm\n");
4573 * \details This function is invoked by ioctl function when the user issues an
4574 * ioctl command to select the AVB algorithm. This function also configures other
4575 * parameters like send and idle slope, high and low credit.
4577 * \param[in] pdata – pointer to private data structure.
4578 * \param[in] req – pointer to ioctl data structure.
4585 static void eqos_program_avb_algorithm(struct eqos_prv_data *pdata,
4586 struct ifr_data_struct *req)
4588 struct eqos_avb_algorithm l_avb_struct, *u_avb_struct =
4589 (struct eqos_avb_algorithm *)req->ptr;
4590 struct hw_if_struct *hw_if = &pdata->hw_if;
4592 pr_debug("-->eqos_program_avb_algorithm\n");
4594 if (copy_from_user(&l_avb_struct, u_avb_struct,
4595 sizeof(struct eqos_avb_algorithm)))
4596 pr_err("Failed to fetch AVB Struct info from user\n");
4598 hw_if->set_tx_queue_operating_mode(l_avb_struct.qinx,
4599 (UINT) l_avb_struct.op_mode);
4600 hw_if->set_avb_algorithm(l_avb_struct.qinx, l_avb_struct.algorithm);
4601 hw_if->config_credit_control(l_avb_struct.qinx, l_avb_struct.cc);
4602 hw_if->config_send_slope(l_avb_struct.qinx, l_avb_struct.send_slope);
4603 hw_if->config_idle_slope(l_avb_struct.qinx, l_avb_struct.idle_slope);
4604 hw_if->config_high_credit(l_avb_struct.qinx, l_avb_struct.hi_credit);
4605 hw_if->config_low_credit(l_avb_struct.qinx, l_avb_struct.low_credit);
4607 pr_debug("<--eqos_program_avb_algorithm\n");
4613 * \brief API to read the registers & prints the value.
4614 * \details This function will read all the device register except
4615 * data register & prints the values.
4620 void dbgpr_regs(void)
4629 MAC_PMTCSR_RD(val0);
4630 MMC_RXICMP_ERR_OCTETS_RD(val1);
4631 MMC_RXICMP_GD_OCTETS_RD(val2);
4632 MMC_RXTCP_ERR_OCTETS_RD(val3);
4633 MMC_RXTCP_GD_OCTETS_RD(val4);
4634 MMC_RXUDP_ERR_OCTETS_RD(val5);
4636 pr_debug("dbgpr_regs: MAC_PMTCSR:%#x\n"
4637 "dbgpr_regs: MMC_RXICMP_ERR_OCTETS:%#x\n"
4638 "dbgpr_regs: MMC_RXICMP_GD_OCTETS:%#x\n"
4639 "dbgpr_regs: MMC_RXTCP_ERR_OCTETS:%#x\n"
4640 "dbgpr_regs: MMC_RXTCP_GD_OCTETS:%#x\n"
4641 "dbgpr_regs: MMC_RXUDP_ERR_OCTETS:%#x\n",
4642 val0, val1, val2, val3, val4, val5);
4644 MMC_RXUDP_GD_OCTETS_RD(val0);
4645 MMC_RXIPV6_NOPAY_OCTETS_RD(val1);
4646 MMC_RXIPV6_HDRERR_OCTETS_RD(val2);
4647 MMC_RXIPV6_GD_OCTETS_RD(val3);
4648 MMC_RXIPV4_UDSBL_OCTETS_RD(val4);
4649 MMC_RXIPV4_FRAG_OCTETS_RD(val5);
4651 pr_debug("dbgpr_regs: MMC_RXUDP_GD_OCTETS:%#x\n"
4652 "dbgpr_regs: MMC_RXIPV6_NOPAY_OCTETS:%#x\n"
4653 "dbgpr_regs: MMC_RXIPV6_HDRERR_OCTETS:%#x\n"
4654 "dbgpr_regs: MMC_RXIPV6_GD_OCTETS:%#x\n"
4655 "dbgpr_regs: MMC_RXIPV4_UDSBL_OCTETS:%#x\n"
4656 "dbgpr_regs: MMC_RXIPV4_FRAG_OCTETS:%#x\n",
4657 val0, val1, val2, val3, val4, val5);
4659 MMC_RXIPV4_NOPAY_OCTETS_RD(val0);
4660 MMC_RXIPV4_HDRERR_OCTETS_RD(val1);
4661 MMC_RXIPV4_GD_OCTETS_RD(val2);
4662 MMC_RXICMP_ERR_PKTS_RD(val3);
4663 MMC_RXICMP_GD_PKTS_RD(val4);
4664 MMC_RXTCP_ERR_PKTS_RD(val5);
4666 pr_debug("dbgpr_regs: MMC_RXIPV4_NOPAY_OCTETS:%#x\n"
4667 "dbgpr_regs: MMC_RXIPV4_HDRERR_OCTETS:%#x\n"
4668 "dbgpr_regs: MMC_RXIPV4_GD_OCTETS:%#x\n"
4669 "dbgpr_regs: MMC_RXICMP_ERR_PKTS:%#x\n"
4670 "dbgpr_regs: MMC_RXICMP_GD_PKTS:%#x\n"
4671 "dbgpr_regs: MMC_RXTCP_ERR_PKTS:%#x\n",
4672 val0, val1, val2, val3, val4, val5);
4674 MMC_RXTCP_GD_PKTS_RD(val0);
4675 MMC_RXUDP_ERR_PKTS_RD(val1);
4676 MMC_RXUDP_GD_PKTS_RD(val2);
4677 MMC_RXIPV6_NOPAY_PKTS_RD(val3);
4678 MMC_RXIPV6_HDRERR_PKTS_RD(val4);
4679 MMC_RXIPV6_GD_PKTS_RD(val5);
4681 pr_debug("dbgpr_regs: MMC_RXTCP_GD_PKTS:%#x\n"
4682 "dbgpr_regs: MMC_RXUDP_ERR_PKTS:%#x\n"
4683 "dbgpr_regs: MMC_RXUDP_GD_PKTS:%#x\n"
4684 "dbgpr_regs: MMC_RXIPV6_NOPAY_PKTS:%#x\n"
4685 "dbgpr_regs: MMC_RXIPV6_HDRERR_PKTS:%#x\n"
4686 "dbgpr_regs: MMC_RXIPV6_GD_PKTS:%#x\n",
4687 val0, val1, val2, val3, val4, val5);
4689 MMC_RXIPV4_UBSBL_PKTS_RD(val0);
4690 MMC_RXIPV4_FRAG_PKTS_RD(val1);
4691 MMC_RXIPV4_NOPAY_PKTS_RD(val2);
4692 MMC_RXIPV4_HDRERR_PKTS_RD(val3);
4693 MMC_RXIPV4_GD_PKTS_RD(val4);
4694 MMC_RXCTRLPACKETS_G_RD(val5);
4696 pr_debug("dbgpr_regs: MMC_RXIPV4_UBSBL_PKTS:%#x\n"
4697 "dbgpr_regs: MMC_RXIPV4_FRAG_PKTS:%#x\n"
4698 "dbgpr_regs: MMC_RXIPV4_NOPAY_PKTS:%#x\n"
4699 "dbgpr_regs: MMC_RXIPV4_HDRERR_PKTS:%#x\n"
4700 "dbgpr_regs: MMC_RXIPV4_GD_PKTS:%#x\n"
4701 "dbgpr_regs: MMC_RXCTRLPACKETS_G:%#x\n",
4702 val0, val1, val2, val3, val4, val5);
4704 MMC_RXRCVERROR_RD(val0);
4705 MMC_RXWATCHDOGERROR_RD(val1);
4706 MMC_RXVLANPACKETS_GB_RD(val2);
4707 MMC_RXFIFOOVERFLOW_RD(val3);
4708 MMC_RXPAUSEPACKETS_RD(val4);
4709 MMC_RXOUTOFRANGETYPE_RD(val5);
4711 pr_debug("dbgpr_regs: MMC_RXRCVERROR:%#x\n"
4712 "dbgpr_regs: MMC_RXWATCHDOGERROR:%#x\n"
4713 "dbgpr_regs: MMC_RXVLANPACKETS_GB:%#x\n"
4714 "dbgpr_regs: MMC_RXFIFOOVERFLOW:%#x\n"
4715 "dbgpr_regs: MMC_RXPAUSEPACKETS:%#x\n"
4716 "dbgpr_regs: MMC_RXOUTOFRANGETYPE:%#x\n",
4717 val0, val1, val2, val3, val4, val5);
4719 MMC_RXLENGTHERROR_RD(val0);
4720 MMC_RXUNICASTPACKETS_G_RD(val1);
4721 MMC_RX1024TOMAXOCTETS_GB_RD(val2);
4722 MMC_RX512TO1023OCTETS_GB_RD(val3);
4723 MMC_RX256TO511OCTETS_GB_RD(val4);
4724 MMC_RX128TO255OCTETS_GB_RD(val5);
4726 pr_debug("dbgpr_regs: MMC_RXLENGTHERROR:%#x\n"
4727 "dbgpr_regs: MMC_RXUNICASTPACKETS_G:%#x\n"
4728 "dbgpr_regs: MMC_RX1024TOMAXOCTETS_GB:%#x\n"
4729 "dbgpr_regs: MMC_RX512TO1023OCTETS_GB:%#x\n"
4730 "dbgpr_regs: MMC_RX256TO511OCTETS_GB:%#x\n"
4731 "dbgpr_regs: MMC_RX128TO255OCTETS_GB:%#x\n",
4732 val0, val1, val2, val3, val4, val5);
4734 MMC_RX65TO127OCTETS_GB_RD(val0);
4735 MMC_RX64OCTETS_GB_RD(val1);
4736 MMC_RXOVERSIZE_G_RD(val2);
4737 MMC_RXUNDERSIZE_G_RD(val3);
4738 MMC_RXJABBERERROR_RD(val4);
4739 MMC_RXRUNTERROR_RD(val5);
4741 pr_debug("dbgpr_regs: MMC_RX65TO127OCTETS_GB:%#x\n"
4742 "dbgpr_regs: MMC_RX64OCTETS_GB:%#x\n"
4743 "dbgpr_regs: MMC_RXOVERSIZE_G:%#x\n"
4744 "dbgpr_regs: MMC_RXUNDERSIZE_G:%#x\n"
4745 "dbgpr_regs: MMC_RXJABBERERROR:%#x\n"
4746 "dbgpr_regs: MMC_RXRUNTERROR:%#x\n",
4747 val0, val1, val2, val3, val4, val5);
4749 MMC_RXALIGNMENTERROR_RD(val0);
4750 MMC_RXCRCERROR_RD(val1);
4751 MMC_RXMULTICASTPACKETS_G_RD(val2);
4752 MMC_RXBROADCASTPACKETS_G_RD(val3);
4753 MMC_RXOCTETCOUNT_G_RD(val4);
4754 MMC_RXOCTETCOUNT_GB_RD(val5);
4756 pr_debug("dbgpr_regs: MMC_RXALIGNMENTERROR:%#x\n"
4757 "dbgpr_regs: MMC_RXCRCERROR:%#x\n"
4758 "dbgpr_regs: MMC_RXMULTICASTPACKETS_G:%#x\n"
4759 "dbgpr_regs: MMC_RXBROADCASTPACKETS_G:%#x\n"
4760 "dbgpr_regs: MMC_RXOCTETCOUNT_G:%#x\n"
4761 "dbgpr_regs: MMC_RXOCTETCOUNT_GB:%#x\n",
4762 val0, val1, val2, val3, val4, val5);
4764 MMC_RXPACKETCOUNT_GB_RD(val0);
4765 MMC_TXOVERSIZE_G_RD(val1);
4766 MMC_TXVLANPACKETS_G_RD(val2);
4767 MMC_TXPAUSEPACKETS_RD(val3);
4768 MMC_TXEXCESSDEF_RD(val4);
4769 MMC_TXPACKETSCOUNT_G_RD(val5);
4771 pr_debug("dbgpr_regs: MMC_RXPACKETCOUNT_GB:%#x\n"
4772 "dbgpr_regs: MMC_TXOVERSIZE_G:%#x\n"
4773 "dbgpr_regs: MMC_TXVLANPACKETS_G:%#x\n"
4774 "dbgpr_regs: MMC_TXPAUSEPACKETS:%#x\n"
4775 "dbgpr_regs: MMC_TXEXCESSDEF:%#x\n"
4776 "dbgpr_regs: MMC_TXPACKETSCOUNT_G:%#x\n",
4777 val0, val1, val2, val3, val4, val5);
4779 MMC_TXOCTETCOUNT_G_RD(val0);
4780 MMC_TXCARRIERERROR_RD(val1);
4781 MMC_TXEXESSCOL_RD(val2);
4782 MMC_TXLATECOL_RD(val3);
4783 MMC_TXDEFERRED_RD(val4);
4784 MMC_TXMULTICOL_G_RD(val5);
4786 pr_debug("dbgpr_regs: MMC_TXOCTETCOUNT_G:%#x\n"
4787 "dbgpr_regs: MMC_TXCARRIERERROR:%#x\n"
4788 "dbgpr_regs: MMC_TXEXESSCOL:%#x\n"
4789 "dbgpr_regs: MMC_TXLATECOL:%#x\n"
4790 "dbgpr_regs: MMC_TXDEFERRED:%#x\n"
4791 "dbgpr_regs: MMC_TXMULTICOL_G:%#x\n",
4792 val0, val1, val2, val3, val4, val5);
4794 MMC_TXSINGLECOL_G_RD(val0);
4795 MMC_TXUNDERFLOWERROR_RD(val1);
4796 MMC_TXBROADCASTPACKETS_GB_RD(val2);
4797 MMC_TXMULTICASTPACKETS_GB_RD(val3);
4798 MMC_TXUNICASTPACKETS_GB_RD(val4);
4799 MMC_TX1024TOMAXOCTETS_GB_RD(val5);
4801 pr_debug("dbgpr_regs: MMC_TXSINGLECOL_G:%#x\n"
4802 "dbgpr_regs: MMC_TXUNDERFLOWERROR:%#x\n"
4803 "dbgpr_regs: MMC_TXBROADCASTPACKETS_GB:%#x\n"
4804 "dbgpr_regs: MMC_TXMULTICASTPACKETS_GB:%#x\n"
4805 "dbgpr_regs: MMC_TXUNICASTPACKETS_GB:%#x\n"
4806 "dbgpr_regs: MMC_TX1024TOMAXOCTETS_GB:%#x\n",
4807 val0, val1, val2, val3, val4, val5);
4809 MMC_TX512TO1023OCTETS_GB_RD(val0);
4810 MMC_TX256TO511OCTETS_GB_RD(val1);
4811 MMC_TX128TO255OCTETS_GB_RD(val2);
4812 MMC_TX65TO127OCTETS_GB_RD(val3);
4813 MMC_TX64OCTETS_GB_RD(val4);
4814 MMC_TXMULTICASTPACKETS_G_RD(val5);
4816 pr_debug("dbgpr_regs: MMC_TX512TO1023OCTETS_GB:%#x\n"
4817 "dbgpr_regs: MMC_TX256TO511OCTETS_GB:%#x\n"
4818 "dbgpr_regs: MMC_TX128TO255OCTETS_GB:%#x\n"
4819 "dbgpr_regs: MMC_TX65TO127OCTETS_GB:%#x\n"
4820 "dbgpr_regs: MMC_TX64OCTETS_GB:%#x\n"
4821 "dbgpr_regs: MMC_TXMULTICASTPACKETS_G:%#x\n",
4822 val0, val1, val2, val3, val4, val5);
4824 MMC_TXBROADCASTPACKETS_G_RD(val0);
4825 MMC_TXPACKETCOUNT_GB_RD(val1);
4826 MMC_TXOCTETCOUNT_GB_RD(val2);
4827 MMC_IPC_INTR_RX_RD(val3);
4828 MMC_IPC_INTR_MASK_RX_RD(val4);
4829 MMC_INTR_MASK_TX_RD(val5);
4831 pr_debug("dbgpr_regs: MMC_TXBROADCASTPACKETS_G:%#x\n"
4832 "dbgpr_regs: MMC_TXPACKETCOUNT_GB:%#x\n"
4833 "dbgpr_regs: MMC_TXOCTETCOUNT_GB:%#x\n"
4834 "dbgpr_regs: MMC_IPC_INTR_RX:%#x\n"
4835 "dbgpr_regs: MMC_IPC_INTR_MASK_RX:%#x\n"
4836 "dbgpr_regs: MMC_INTR_MASK_TX:%#x\n",
4837 val0, val1, val2, val3, val4, val5);
4839 MMC_INTR_MASK_RX_RD(val0);
4840 MMC_INTR_TX_RD(val1);
4841 MMC_INTR_RX_RD(val2);
4846 pr_debug("dbgpr_regs: MMC_INTR_MASK_RX:%#x\n"
4847 "dbgpr_regs: MMC_INTR_TX:%#x\n"
4848 "dbgpr_regs: MMC_INTR_RX:%#x\n"
4849 "dbgpr_regs: MMC_CNTRL:%#x\n"
4850 "dbgpr_regs: MAC_MA1LR:%#x\n"
4851 "dbgpr_regs: MAC_MA1HR:%#x\n",
4852 val0, val1, val2, val3, val4, val5);
4857 MAC_GMIIDR_RD(val3);
4858 MAC_GMIIAR_RD(val4);
4861 pr_debug("dbgpr_regs: MAC_MA0LR:%#x\n"
4862 "dbgpr_regs: MAC_MA0HR:%#x\n"
4863 "dbgpr_regs: MAC_GPIOR:%#x\n"
4864 "dbgpr_regs: MAC_GMIIDR:%#x\n"
4865 "dbgpr_regs: MAC_GMIIAR:%#x\n"
4866 "dbgpr_regs: MAC_HFR2:%#x\n", val0, val1, val2, val3, val4, val5);
4875 pr_debug("dbgpr_regs: MAC_HFR1:%#x\n"
4876 "dbgpr_regs: MAC_HFR0:%#x\n"
4877 "dbgpr_regs: MAC_MDR:%#x\n"
4878 "dbgpr_regs: MAC_VR:%#x\n"
4879 "dbgpr_regs: MAC_HTR7:%#x\n"
4880 "dbgpr_regs: MAC_HTR6:%#x\n", val0, val1, val2, val3, val4, val5);
4889 pr_debug("dbgpr_regs: MAC_HTR5:%#x\n"
4890 "dbgpr_regs: MAC_HTR4:%#x\n"
4891 "dbgpr_regs: MAC_HTR3:%#x\n"
4892 "dbgpr_regs: MAC_HTR2:%#x\n"
4893 "dbgpr_regs: MAC_HTR1:%#x\n"
4894 "dbgpr_regs: MAC_HTR0:%#x\n", val0, val1, val2, val3, val4, val5);
4896 DMA_RIWTR7_RD(val0);
4897 DMA_RIWTR6_RD(val1);
4898 DMA_RIWTR5_RD(val2);
4899 DMA_RIWTR4_RD(val3);
4900 DMA_RIWTR3_RD(val4);
4901 DMA_RIWTR2_RD(val5);
4903 pr_debug("dbgpr_regs: DMA_RIWTR7:%#x\n"
4904 "dbgpr_regs: DMA_RIWTR6:%#x\n"
4905 "dbgpr_regs: DMA_RIWTR5:%#x\n"
4906 "dbgpr_regs: DMA_RIWTR4:%#x\n"
4907 "dbgpr_regs: DMA_RIWTR3:%#x\n"
4908 "dbgpr_regs: DMA_RIWTR2:%#x\n",
4909 val0, val1, val2, val3, val4, val5);
4911 DMA_RIWTR1_RD(val0);
4912 DMA_RIWTR0_RD(val1);
4913 DMA_RDRLR7_RD(val2);
4914 DMA_RDRLR6_RD(val3);
4915 DMA_RDRLR5_RD(val4);
4916 DMA_RDRLR4_RD(val5);
4918 pr_debug("dbgpr_regs: DMA_RIWTR1:%#x\n"
4919 "dbgpr_regs: DMA_RIWTR0:%#x\n"
4920 "dbgpr_regs: DMA_RDRLR7:%#x\n"
4921 "dbgpr_regs: DMA_RDRLR6:%#x\n"
4922 "dbgpr_regs: DMA_RDRLR5:%#x\n"
4923 "dbgpr_regs: DMA_RDRLR4:%#x\n",
4924 val0, val1, val2, val3, val4, val5);
4926 DMA_RDRLR3_RD(val0);
4927 DMA_RDRLR2_RD(val1);
4928 DMA_RDRLR1_RD(val2);
4929 DMA_RDRLR0_RD(val3);
4930 DMA_TDRLR7_RD(val4);
4931 DMA_TDRLR6_RD(val5);
4933 pr_debug("dbgpr_regs: DMA_RDRLR3:%#x\n"
4934 "dbgpr_regs: DMA_RDRLR2:%#x\n"
4935 "dbgpr_regs: DMA_RDRLR1:%#x\n"
4936 "dbgpr_regs: DMA_RDRLR0:%#x\n"
4937 "dbgpr_regs: DMA_TDRLR7:%#x\n"
4938 "dbgpr_regs: DMA_TDRLR6:%#x\n",
4939 val0, val1, val2, val3, val4, val5);
4941 DMA_TDRLR5_RD(val0);
4942 DMA_TDRLR4_RD(val1);
4943 DMA_TDRLR3_RD(val2);
4944 DMA_TDRLR2_RD(val3);
4945 DMA_TDRLR1_RD(val4);
4946 DMA_TDRLR0_RD(val5);
4948 pr_debug("dbgpr_regs: DMA_TDRLR5:%#x\n"
4949 "dbgpr_regs: DMA_TDRLR4:%#x\n"
4950 "dbgpr_regs: DMA_TDRLR3:%#x\n"
4951 "dbgpr_regs: DMA_TDRLR2:%#x\n"
4952 "dbgpr_regs: DMA_TDRLR1:%#x\n"
4953 "dbgpr_regs: DMA_TDRLR0:%#x\n",
4954 val0, val1, val2, val3, val4, val5);
4956 DMA_RDTP_RPDR7_RD(val0);
4957 DMA_RDTP_RPDR6_RD(val1);
4958 DMA_RDTP_RPDR5_RD(val2);
4959 DMA_RDTP_RPDR4_RD(val3);
4960 DMA_RDTP_RPDR3_RD(val4);
4961 DMA_RDTP_RPDR2_RD(val5);
4963 pr_debug("dbgpr_regs: DMA_RDTP_RPDR7:%#x\n"
4964 "dbgpr_regs: DMA_RDTP_RPDR6:%#x\n"
4965 "dbgpr_regs: DMA_RDTP_RPDR5:%#x\n"
4966 "dbgpr_regs: DMA_RDTP_RPDR4:%#x\n"
4967 "dbgpr_regs: DMA_RDTP_RPDR3:%#x\n"
4968 "dbgpr_regs: DMA_RDTP_RPDR2:%#x\n",
4969 val0, val1, val2, val3, val4, val5);
4971 DMA_RDTP_RPDR1_RD(val0);
4972 DMA_RDTP_RPDR0_RD(val1);
4973 DMA_TDTP_TPDR7_RD(val2);
4974 DMA_TDTP_TPDR6_RD(val3);
4975 DMA_TDTP_TPDR5_RD(val4);
4976 DMA_TDTP_TPDR4_RD(val5);
4978 pr_debug("dbgpr_regs: DMA_RDTP_RPDR1:%#x\n"
4979 "dbgpr_regs: DMA_RDTP_RPDR0:%#x\n"
4980 "dbgpr_regs: DMA_TDTP_TPDR7:%#x\n"
4981 "dbgpr_regs: DMA_TDTP_TPDR6:%#x\n"
4982 "dbgpr_regs: DMA_TDTP_TPDR5:%#x\n"
4983 "dbgpr_regs: DMA_TDTP_TPDR4:%#x\n",
4984 val0, val1, val2, val3, val4, val5);
4986 DMA_TDTP_TPDR3_RD(val0);
4987 DMA_TDTP_TPDR2_RD(val1);
4988 DMA_TDTP_TPDR1_RD(val2);
4989 DMA_TDTP_TPDR0_RD(val3);
4990 DMA_RDLAR7_RD(val4);
4991 DMA_RDLAR6_RD(val5);
4993 pr_debug("dbgpr_regs: DMA_TDTP_TPDR3:%#x\n"
4994 "dbgpr_regs: DMA_TDTP_TPDR2:%#x\n"
4995 "dbgpr_regs: DMA_TDTP_TPDR1:%#x\n"
4996 "dbgpr_regs: DMA_TDTP_TPDR0:%#x\n"
4997 "dbgpr_regs: DMA_RDLAR7:%#x\n"
4998 "dbgpr_regs: DMA_RDLAR6:%#x\n",
4999 val0, val1, val2, val3, val4, val5);
5001 DMA_RDLAR5_RD(val0);
5002 DMA_RDLAR4_RD(val1);
5003 DMA_RDLAR3_RD(val2);
5004 DMA_RDLAR2_RD(val3);
5005 DMA_RDLAR1_RD(val4);
5006 DMA_RDLAR0_RD(val5);
5008 pr_debug("dbgpr_regs: DMA_RDLAR5:%#x\n"
5009 "dbgpr_regs: DMA_RDLAR4:%#x\n"
5010 "dbgpr_regs: DMA_RDLAR3:%#x\n"
5011 "dbgpr_regs: DMA_RDLAR2:%#x\n"
5012 "dbgpr_regs: DMA_RDLAR1:%#x\n"
5013 "dbgpr_regs: DMA_RDLAR0:%#x\n",
5014 val0, val1, val2, val3, val4, val5);
5016 DMA_TDLAR7_RD(val0);
5017 DMA_TDLAR6_RD(val1);
5018 DMA_TDLAR5_RD(val2);
5019 DMA_TDLAR4_RD(val3);
5020 DMA_TDLAR3_RD(val4);
5021 DMA_TDLAR2_RD(val5);
5023 pr_debug("dbgpr_regs: DMA_TDLAR7:%#x\n"
5024 "dbgpr_regs: DMA_TDLAR6:%#x\n"
5025 "dbgpr_regs: DMA_TDLAR5:%#x\n"
5026 "dbgpr_regs: DMA_TDLAR4:%#x\n"
5027 "dbgpr_regs: DMA_TDLAR3:%#x\n"
5028 "dbgpr_regs: DMA_TDLAR2:%#x\n",
5029 val0, val1, val2, val3, val4, val5);
5031 DMA_TDLAR1_RD(val0);
5032 DMA_TDLAR0_RD(val1);
5038 pr_debug("dbgpr_regs: DMA_TDLAR1:%#x\n"
5039 "dbgpr_regs: DMA_TDLAR0:%#x\n"
5040 "dbgpr_regs: DMA_IER7:%#x\n"
5041 "dbgpr_regs: DMA_IER6:%#x\n"
5042 "dbgpr_regs: DMA_IER5:%#x\n"
5043 "dbgpr_regs: DMA_IER4:%#x\n", val0, val1, val2, val3, val4, val5);
5052 pr_debug("dbgpr_regs: DMA_IER3:%#x\n"
5053 "dbgpr_regs: DMA_IER2:%#x\n"
5054 "dbgpr_regs: DMA_IER1:%#x\n"
5055 "dbgpr_regs: DMA_IER0:%#x\n"
5056 "dbgpr_regs: MAC_IMR:%#x\n"
5057 "dbgpr_regs: MAC_ISR:%#x\n", val0, val1, val2, val3, val4, val5);
5066 pr_debug("dbgpr_regs: MTL_ISR:%#x\n"
5067 "dbgpr_regs: DMA_SR7:%#x\n"
5068 "dbgpr_regs: DMA_SR6:%#x\n"
5069 "dbgpr_regs: DMA_SR5:%#x\n"
5070 "dbgpr_regs: DMA_SR4:%#x\n"
5071 "dbgpr_regs: DMA_SR3:%#x\n", val0, val1, val2, val3, val4, val5);
5080 pr_debug("dbgpr_regs: DMA_SR2:%#x\n"
5081 "dbgpr_regs: DMA_SR1:%#x\n"
5082 "dbgpr_regs: DMA_SR0:%#x\n"
5083 "dbgpr_regs: DMA_ISR:%#x\n"
5084 "dbgpr_regs: DMA_DSR2:%#x\n"
5085 "dbgpr_regs: DMA_DSR1:%#x\n", val0, val1, val2, val3, val4, val5);
5091 DMA_CHRBAR7_RD(val4);
5092 DMA_CHRBAR6_RD(val5);
5094 pr_debug("dbgpr_regs: DMA_DSR0:%#x\n"
5095 "dbgpr_regs: MTL_Q0RDR:%#x\n"
5096 "dbgpr_regs: MTL_Q0ESR:%#x\n"
5097 "dbgpr_regs: MTL_Q0TDR:%#x\n"
5098 "dbgpr_regs: DMA_CHRBAR7:%#x\n"
5099 "dbgpr_regs: DMA_CHRBAR6:%#x\n",
5100 val0, val1, val2, val3, val4, val5);
5102 DMA_CHRBAR5_RD(val0);
5103 DMA_CHRBAR4_RD(val1);
5104 DMA_CHRBAR3_RD(val2);
5105 DMA_CHRBAR2_RD(val3);
5106 DMA_CHRBAR1_RD(val4);
5107 DMA_CHRBAR0_RD(val5);
5109 pr_debug("dbgpr_regs: DMA_CHRBAR5:%#x\n"
5110 "dbgpr_regs: DMA_CHRBAR4:%#x\n"
5111 "dbgpr_regs: DMA_CHRBAR3:%#x\n"
5112 "dbgpr_regs: DMA_CHRBAR2:%#x\n"
5113 "dbgpr_regs: DMA_CHRBAR1:%#x\n"
5114 "dbgpr_regs: DMA_CHRBAR0:%#x\n",
5115 val0, val1, val2, val3, val4, val5);
5117 DMA_CHTBAR7_RD(val0);
5118 DMA_CHTBAR6_RD(val1);
5119 DMA_CHTBAR5_RD(val2);
5120 DMA_CHTBAR4_RD(val3);
5121 DMA_CHTBAR3_RD(val4);
5122 DMA_CHTBAR2_RD(val5);
5124 pr_debug("dbgpr_regs: DMA_CHTBAR7:%#x\n"
5125 "dbgpr_regs: DMA_CHTBAR6:%#x\n"
5126 "dbgpr_regs: DMA_CHTBAR5:%#x\n"
5127 "dbgpr_regs: DMA_CHTBAR4:%#x\n"
5128 "dbgpr_regs: DMA_CHTBAR3:%#x\n"
5129 "dbgpr_regs: DMA_CHTBAR2:%#x\n",
5130 val0, val1, val2, val3, val4, val5);
5132 DMA_CHTBAR1_RD(val0);
5133 DMA_CHTBAR0_RD(val1);
5134 DMA_CHRDR7_RD(val2);
5135 DMA_CHRDR6_RD(val3);
5136 DMA_CHRDR5_RD(val4);
5137 DMA_CHRDR4_RD(val5);
5139 pr_debug("dbgpr_regs: DMA_CHTBAR1:%#x\n"
5140 "dbgpr_regs: DMA_CHTBAR0:%#x\n"
5141 "dbgpr_regs: DMA_CHRDR7:%#x\n"
5142 "dbgpr_regs: DMA_CHRDR6:%#x\n"
5143 "dbgpr_regs: DMA_CHRDR5:%#x\n"
5144 "dbgpr_regs: DMA_CHRDR4:%#x\n",
5145 val0, val1, val2, val3, val4, val5);
5147 DMA_CHRDR3_RD(val0);
5148 DMA_CHRDR2_RD(val1);
5149 DMA_CHRDR1_RD(val2);
5150 DMA_CHRDR0_RD(val3);
5151 DMA_CHTDR7_RD(val4);
5152 DMA_CHTDR6_RD(val5);
5154 pr_debug("dbgpr_regs: DMA_CHRDR3:%#x\n"
5155 "dbgpr_regs: DMA_CHRDR2:%#x\n"
5156 "dbgpr_regs: DMA_CHRDR1:%#x\n"
5157 "dbgpr_regs: DMA_CHRDR0:%#x\n"
5158 "dbgpr_regs: DMA_CHTDR7:%#x\n"
5159 "dbgpr_regs: DMA_CHTDR6:%#x\n",
5160 val0, val1, val2, val3, val4, val5);
5162 DMA_CHTDR5_RD(val0);
5163 DMA_CHTDR4_RD(val1);
5164 DMA_CHTDR3_RD(val2);
5165 DMA_CHTDR2_RD(val3);
5166 DMA_CHTDR1_RD(val4);
5167 DMA_CHTDR0_RD(val5);
5169 pr_debug("dbgpr_regs: DMA_CHTDR5:%#x\n"
5170 "dbgpr_regs: DMA_CHTDR4:%#x\n"
5171 "dbgpr_regs: DMA_CHTDR3:%#x\n"
5172 "dbgpr_regs: DMA_CHTDR2:%#x\n"
5173 "dbgpr_regs: DMA_CHTDR1:%#x\n"
5174 "dbgpr_regs: DMA_CHTDR0:%#x\n",
5175 val0, val1, val2, val3, val4, val5);
5177 DMA_SFCSR7_RD(val0);
5178 DMA_SFCSR6_RD(val1);
5179 DMA_SFCSR5_RD(val2);
5180 DMA_SFCSR4_RD(val3);
5181 DMA_SFCSR3_RD(val4);
5182 DMA_SFCSR2_RD(val5);
5184 pr_debug("dbgpr_regs: DMA_SFCSR7:%#x\n"
5185 "dbgpr_regs: DMA_SFCSR6:%#x\n"
5186 "dbgpr_regs: DMA_SFCSR5:%#x\n"
5187 "dbgpr_regs: DMA_SFCSR4:%#x\n"
5188 "dbgpr_regs: DMA_SFCSR3:%#x\n"
5189 "dbgpr_regs: DMA_SFCSR2:%#x\n",
5190 val0, val1, val2, val3, val4, val5);
5192 DMA_SFCSR1_RD(val0);
5193 DMA_SFCSR0_RD(val1);
5194 MAC_IVLANTIRR_RD(val2);
5195 MAC_VLANTIRR_RD(val3);
5196 MAC_VLANHTR_RD(val4);
5197 MAC_VLANTR_RD(val5);
5199 pr_debug("dbgpr_regs: DMA_SFCSR1:%#x\n"
5200 "dbgpr_regs: DMA_SFCSR0:%#x\n"
5201 "dbgpr_regs: MAC_IVLANTIRR:%#x\n"
5202 "dbgpr_regs: MAC_VLANTIRR:%#x\n"
5203 "dbgpr_regs: MAC_VLANHTR:%#x\n"
5204 "dbgpr_regs: MAC_VLANTR:%#x\n",
5205 val0, val1, val2, val3, val4, val5);
5211 MTL_Q0ROMR_RD(val4);
5214 pr_debug("dbgpr_regs: DMA_SBUS:%#x\n"
5215 "dbgpr_regs: DMA_BMR:%#x\n"
5216 "dbgpr_regs: MTL_Q0RCR:%#x\n"
5217 "dbgpr_regs: MTL_Q0OCR:%#x\n"
5218 "dbgpr_regs: MTL_Q0ROMR:%#x\n"
5219 "dbgpr_regs: MTL_Q0QR:%#x\n", val0, val1, val2, val3, val4, val5);
5223 MTL_Q0TOMR_RD(val2);
5224 MTL_RQDCM1R_RD(val3);
5225 MTL_RQDCM0R_RD(val4);
5228 pr_debug("dbgpr_regs: MTL_Q0ECR:%#x\n"
5229 "dbgpr_regs: MTL_Q0UCR:%#x\n"
5230 "dbgpr_regs: MTL_Q0TOMR:%#x\n"
5231 "dbgpr_regs: MTL_RQDCM1R:%#x\n"
5232 "dbgpr_regs: MTL_RQDCM0R:%#x\n"
5233 "dbgpr_regs: MTL_FDDR:%#x\n", val0, val1, val2, val3, val4, val5);
5239 MAC_TQPM1R_RD(val4);
5240 MAC_TQPM0R_RD(val5);
5242 pr_debug("dbgpr_regs: MTL_FDACS:%#x\n"
5243 "dbgpr_regs: MTL_OMR:%#x\n"
5244 "dbgpr_regs: MAC_RQC1R:%#x\n"
5245 "dbgpr_regs: MAC_RQC0R:%#x\n"
5246 "dbgpr_regs: MAC_TQPM1R:%#x\n"
5247 "dbgpr_regs: MAC_TQPM0R:%#x\n",
5248 val0, val1, val2, val3, val4, val5);
5251 MAC_QTFCR7_RD(val1);
5252 MAC_QTFCR6_RD(val2);
5253 MAC_QTFCR5_RD(val3);
5254 MAC_QTFCR4_RD(val4);
5255 MAC_QTFCR3_RD(val5);
5257 pr_debug("dbgpr_regs: MAC_RFCR:%#x\n"
5258 "dbgpr_regs: MAC_QTFCR7:%#x\n"
5259 "dbgpr_regs: MAC_QTFCR6:%#x\n"
5260 "dbgpr_regs: MAC_QTFCR5:%#x\n"
5261 "dbgpr_regs: MAC_QTFCR4:%#x\n"
5262 "dbgpr_regs: MAC_QTFCR3:%#x\n",
5263 val0, val1, val2, val3, val4, val5);
5265 MAC_QTFCR2_RD(val0);
5266 MAC_QTFCR1_RD(val1);
5267 MAC_Q0TFCR_RD(val2);
5268 DMA_AXI4CR7_RD(val3);
5269 DMA_AXI4CR6_RD(val4);
5270 DMA_AXI4CR5_RD(val5);
5272 pr_debug("dbgpr_regs: MAC_QTFCR2:%#x\n"
5273 "dbgpr_regs: MAC_QTFCR1:%#x\n"
5274 "dbgpr_regs: MAC_Q0TFCR:%#x\n"
5275 "dbgpr_regs: DMA_AXI4CR7:%#x\n"
5276 "dbgpr_regs: DMA_AXI4CR6:%#x\n"
5277 "dbgpr_regs: DMA_AXI4CR5:%#x\n",
5278 val0, val1, val2, val3, val4, val5);
5280 DMA_AXI4CR4_RD(val0);
5281 DMA_AXI4CR3_RD(val1);
5282 DMA_AXI4CR2_RD(val2);
5283 DMA_AXI4CR1_RD(val3);
5284 DMA_AXI4CR0_RD(val4);
5287 pr_debug("dbgpr_regs: DMA_AXI4CR4:%#x\n"
5288 "dbgpr_regs: DMA_AXI4CR3:%#x\n"
5289 "dbgpr_regs: DMA_AXI4CR2:%#x\n"
5290 "dbgpr_regs: DMA_AXI4CR1:%#x\n"
5291 "dbgpr_regs: DMA_AXI4CR0:%#x\n"
5292 "dbgpr_regs: DMA_RCR7:%#x\n", val0, val1, val2, val3, val4, val5);
5301 pr_debug("dbgpr_regs: DMA_RCR6:%#x\n"
5302 "dbgpr_regs: DMA_RCR5:%#x\n"
5303 "dbgpr_regs: DMA_RCR4:%#x\n"
5304 "dbgpr_regs: DMA_RCR3:%#x\n"
5305 "dbgpr_regs: DMA_RCR2:%#x\n"
5306 "dbgpr_regs: DMA_RCR1:%#x\n", val0, val1, val2, val3, val4, val5);
5315 pr_debug("dbgpr_regs: DMA_RCR0:%#x\n"
5316 "dbgpr_regs: DMA_TCR7:%#x\n"
5317 "dbgpr_regs: DMA_TCR6:%#x\n"
5318 "dbgpr_regs: DMA_TCR5:%#x\n"
5319 "dbgpr_regs: DMA_TCR4:%#x\n"
5320 "dbgpr_regs: DMA_TCR3:%#x\n", val0, val1, val2, val3, val4, val5);
5329 pr_debug("dbgpr_regs: DMA_TCR2:%#x\n"
5330 "dbgpr_regs: DMA_TCR1:%#x\n"
5331 "dbgpr_regs: DMA_TCR0:%#x\n"
5332 "dbgpr_regs: DMA_CR7:%#x\n"
5333 "dbgpr_regs: DMA_CR6:%#x\n"
5334 "dbgpr_regs: DMA_CR5:%#x\n", val0, val1, val2, val3, val4, val5);
5343 pr_debug("dbgpr_regs: DMA_CR4:%#x\n"
5344 "dbgpr_regs: DMA_CR3:%#x\n"
5345 "dbgpr_regs: DMA_CR2:%#x\n"
5346 "dbgpr_regs: DMA_CR1:%#x\n"
5347 "dbgpr_regs: DMA_CR0:%#x\n"
5348 "dbgpr_regs: MAC_WTR:%#x\n", val0, val1, val2, val3, val4, val5);
5354 pr_debug("dbgpr_regs: MAC_MPFR:%#x\n"
5355 "dbgpr_regs: MAC_MECR:%#x\n"
5356 "dbgpr_regs: MAC_MCR:%#x\n", val0, val1, val2);
5363 * \details This function is invoked by eqos_start_xmit and
5364 * process_tx_completions function for dumping the TX descriptor contents
5365 * which are prepared for packet transmission and which are transmitted by
5366 * device. It is mainly used during development phase for debug purpose. Use
5367 * of these functions may affect the performance during normal operation.
5369 * \param[in] pdata – pointer to private data structure.
5370 * \param[in] first_desc_idx – first descriptor index for the current
5372 * \param[in] last_desc_idx – last descriptor index for the current transfer.
5373 * \param[in] flag – to indicate from which function it is called.
/*
 * Dump the TX descriptor(s) in [first_desc_idx, last_desc_idx] of queue
 * qinx via pr_err, labelling each as a context descriptor or a normal
 * descriptor based on the TDES3 CTXT bit.
 * NOTE(review): several physical lines of this function (local
 * declarations of ctxt/lp_cnt/i, some braces and else-arms) are not
 * visible in this chunk; comments below describe only the visible logic.
 */
5378 void dump_tx_desc(struct eqos_prv_data *pdata, int first_desc_idx,
5379 int last_desc_idx, int flag, UINT qinx)
5382 struct s_tx_desc *desc = NULL;
/* Single-descriptor case: print exactly one descriptor. */
5385 if (first_desc_idx == last_desc_idx) {
5386 desc = GET_TX_DESC_PTR(qinx, first_desc_idx);
/* Read the CTXT bit from TDES3 into ctxt. */
5388 TX_NORMAL_DESC_TDES3_CTXT_RD(desc->tdes3, ctxt);
5390 pr_err("\n%s[%02d %4p %03d %s] = %#x:%#x:%#x:%#x\n",
5391 (ctxt == 1) ? "TX_CONTXT_DESC" : "ptx_desc",
5392 qinx, desc, first_desc_idx,
5393 ((flag == 1) ? "QUEUED FOR TRANSMISSION" :
5395 0) ? "FREED/FETCHED BY DEVICE" : "DEBUG DESC DUMP")),
5396 desc->tdes0, desc->tdes1, desc->tdes2, desc->tdes3);
/* Multi-descriptor case: compute the loop count, accounting for
 * wrap-around when the range crosses the end of the TX ring. */
5399 if (first_desc_idx > last_desc_idx)
5400 lp_cnt = last_desc_idx + TX_DESC_CNT - first_desc_idx;
5402 lp_cnt = last_desc_idx - first_desc_idx;
5404 for (i = first_desc_idx; lp_cnt >= 0; lp_cnt--) {
5405 desc = GET_TX_DESC_PTR(qinx, i);
5407 TX_NORMAL_DESC_TDES3_CTXT_RD(desc->tdes3, ctxt);
5409 pr_err("\n%s[%02d %4p %03d %s] = %#x:%#x:%#x:%#x\n",
5411 1) ? "TX_CONTXT_DESC" : "ptx_desc", qinx,
5414 1) ? "QUEUED FOR TRANSMISSION" :
5415 "FREED/FETCHED BY DEVICE"), desc->tdes0,
5416 desc->tdes1, desc->tdes2, desc->tdes3);
/* Advance the ring index with wrap-around handling. */
5417 INCR_TX_DESC_INDEX(i, 1);
5423 * \details This function is invoked by poll function for dumping the
5424 * RX descriptor contents. It is mainly used during development phase for
5425 * debug purpose. Use of these functions may affect the performance during
5428 * \param[in] pdata – pointer to private data structure.
/*
 * Dump a single received RX descriptor (all four RDES words) of queue
 * qinx via pr_err. Debug-only helper; see the block comment above.
 */
5433 void dump_rx_desc(UINT qinx, struct s_rx_desc *desc, int desc_idx)
5435 pr_err("\nprx_desc[%02d %4p %03d RECEIVED FROM DEVICE]"
5436 " = %#x:%#x:%#x:%#x",
5437 qinx, desc, desc_idx, desc->rdes0, desc->rdes1,
5438 desc->rdes2, desc->rdes3);
5442 * \details This function is invoked by start_xmit and poll function for
5443 * dumping the content of packet to be transmitted by device or received
5444 * from device. It is mainly used during development phase for debug purpose.
5445 * Use of these functions may affect the performance during normal operation.
5447 * \param[in] skb – pointer to socket buffer structure.
5448 * \param[in] len – length of packet to be transmitted/received.
5449 * \param[in] tx_rx – packet to be transmitted or received.
5450 * \param[in] desc_idx – descriptor index to be used for transmission or
5451 * reception of packet.
/*
 * Hex-dump an Ethernet frame held in skb->data: destination MAC,
 * source MAC, EtherType/length, then the payload, byte by byte.
 * tx_rx selects the "TX"/"RX" label; desc_idx is printed for reference.
 * NOTE(review): assumes skb->data holds at least `len` >= 14 bytes of a
 * plain Ethernet header — not validated here; callers must guarantee it.
 */
5456 void print_pkt(struct sk_buff *skb, int len, bool tx_rx, int desc_idx)
5459 unsigned char *buf = skb->data;
5462 ("\n\n/***********************************************************/\n");
5464 pr_err("%s pkt of %d Bytes [DESC index = %d]\n\n",
5465 (tx_rx ? "TX" : "RX"), len, desc_idx);
/* Bytes 0-5: destination MAC address, colon-separated. */
5466 pr_err("Dst MAC addr(6 bytes)\n");
5467 for (i = 0; i < 6; i++)
5468 printk("%#.2x%s", buf[i], (((i == 5) ? "" : ":")));
/* Bytes 6-11: source MAC address. */
5469 pr_err("\nSrc MAC addr(6 bytes)\n");
5470 for (i = 6; i <= 11; i++)
5471 printk("%#.2x%s", buf[i], (((i == 11) ? "" : ":")));
/* Bytes 12-13: EtherType / length field (big-endian). */
5472 i = (buf[12] << 8 | buf[13]);
5473 pr_err("\nType/Length(2 bytes)\n%#x", i);
/* Remaining bytes: payload dump. */
5475 pr_err("\nPay Load : %d bytes\n", (len - 14));
5476 for (i = 14, j = 1; i < len; i++, j++) {
5477 printk("%#.2x%s", buf[i], (((i == (len - 1)) ? "" : ":")));
5483 ("/*************************************************************/\n\n");
5487 * \details This function is invoked by probe function. This function will
5488 * initialize default receive coalesce parameters and sw timer value and store
5489 * it in respective receive data structure.
5491 * \param[in] pdata – pointer to private data structure.
/*
 * Initialize default RX interrupt-coalescing parameters for every RX
 * queue: enable the RX watchdog (use_riwt), set the frame-count
 * threshold, and program the default watchdog timeout derived from
 * EQOS_OPTIMAL_DMA_RIWT_USEC. Called from probe.
 */
5496 void eqos_init_rx_coalesce(struct eqos_prv_data *pdata)
5498 struct rx_ring *prx_ring = NULL;
5501 pr_debug("-->eqos_init_rx_coalesce\n");
5503 for (i = 0; i < EQOS_RX_QUEUE_CNT; i++) {
5504 prx_ring = GET_RX_WRAPPER_DESC(i);
5506 prx_ring->use_riwt = 1;
5507 prx_ring->rx_coal_frames = EQOS_RX_MAX_FRAMES;
/* Convert the optimal microsecond timeout into RIWT register units. */
5509 eqos_usec2riwt(EQOS_OPTIMAL_DMA_RIWT_USEC, pdata);
5512 pr_debug("<--eqos_init_rx_coalesce\n");
5516 * \details This function is invoked by open() function. This function will
5517 * clear MMC structure.
5519 * \param[in] pdata – pointer to private data structure.
/*
 * Reset the software MMC counter mirror at open() time. If the HW has
 * no MMC/RMON block (hw_feat.mmc_sel clear), only log an error.
 */
5524 static void eqos_mmc_setup(struct eqos_prv_data *pdata)
5526 pr_debug("-->eqos_mmc_setup\n");
5528 if (pdata->hw_feat.mmc_sel) {
5529 memset(&pdata->mmc, 0, sizeof(struct eqos_mmc_counters));
5531 pr_err("No MMC/RMON module available in the HW\n");
5533 pr_debug("<--eqos_mmc_setup\n");
/*
 * Read a 32-bit MMIO register through ioread32().
 * NOTE(review): the (void *) cast discards the volatile qualifier of
 * ptr; harmless with ioread32() (which performs a volatile access
 * internally) but worth confirming against the project's sparse/
 * __iomem annotations.
 */
5536 inline unsigned int eqos_reg_read(volatile ULONG *ptr)
5538 return ioread32((void *)ptr);
5542 * \details This function is invoked by ethtool function when user wants to
5543 * read MMC counters. This function will read the MMC if supported by core
5544 * and store it in eqos_mmc_counters structure. By default all the
5545 * MMC are programmed "read on reset" hence all the fields of the
5546 * eqos_mmc_counters are incremented.
5548 * Note: the MMC control register itself is initialized by the open()
5549 * function, which disables all MMC interrupts and configures all MMC
5550 * registers to clear on read.
5552 * \param[in] pdata – pointer to private data structure.
/*
 * Accumulate every hardware MMC counter into the software mirror
 * structure. Registers are configured clear-on-read (see comment
 * above), so each read both fetches and resets the HW counter; the
 * software fields therefore only ever grow.
 */
5557 void eqos_mmc_read(struct eqos_mmc_counters *mmc)
5559 pr_debug("-->eqos_mmc_read\n");
5561 /* MMC TX counter registers */
5562 mmc->mmc_tx_octetcount_gb += eqos_reg_read(MMC_TXOCTETCOUNT_GB_OFFSET);
5563 mmc->mmc_tx_framecount_gb += eqos_reg_read(MMC_TXPACKETCOUNT_GB_OFFSET);
5564 mmc->mmc_tx_broadcastframe_g +=
5565 eqos_reg_read(MMC_TXBROADCASTPACKETS_G_OFFSET);
5566 mmc->mmc_tx_multicastframe_g +=
5567 eqos_reg_read(MMC_TXMULTICASTPACKETS_G_OFFSET);
5568 mmc->mmc_tx_64_octets_gb += eqos_reg_read(MMC_TX64OCTETS_GB_OFFSET);
5569 mmc->mmc_tx_65_to_127_octets_gb +=
5570 eqos_reg_read(MMC_TX65TO127OCTETS_GB_OFFSET);
5571 mmc->mmc_tx_128_to_255_octets_gb +=
5572 eqos_reg_read(MMC_TX128TO255OCTETS_GB_OFFSET);
5573 mmc->mmc_tx_256_to_511_octets_gb +=
5574 eqos_reg_read(MMC_TX256TO511OCTETS_GB_OFFSET);
5575 mmc->mmc_tx_512_to_1023_octets_gb +=
5576 eqos_reg_read(MMC_TX512TO1023OCTETS_GB_OFFSET);
5577 mmc->mmc_tx_1024_to_max_octets_gb +=
5578 eqos_reg_read(MMC_TX1024TOMAXOCTETS_GB_OFFSET);
5579 mmc->mmc_tx_unicast_gb += eqos_reg_read(MMC_TXUNICASTPACKETS_GB_OFFSET);
5580 mmc->mmc_tx_multicast_gb +=
5581 eqos_reg_read(MMC_TXMULTICASTPACKETS_GB_OFFSET);
5582 mmc->mmc_tx_broadcast_gb +=
5583 eqos_reg_read(MMC_TXBROADCASTPACKETS_GB_OFFSET);
5584 mmc->mmc_tx_underflow_error +=
5585 eqos_reg_read(MMC_TXUNDERFLOWERROR_OFFSET);
5586 mmc->mmc_tx_singlecol_g += eqos_reg_read(MMC_TXSINGLECOL_G_OFFSET);
5587 mmc->mmc_tx_multicol_g += eqos_reg_read(MMC_TXMULTICOL_G_OFFSET);
5588 mmc->mmc_tx_deferred += eqos_reg_read(MMC_TXDEFERRED_OFFSET);
5589 mmc->mmc_tx_latecol += eqos_reg_read(MMC_TXLATECOL_OFFSET);
5590 mmc->mmc_tx_exesscol += eqos_reg_read(MMC_TXEXESSCOL_OFFSET);
5591 mmc->mmc_tx_carrier_error += eqos_reg_read(MMC_TXCARRIERERROR_OFFSET);
5592 mmc->mmc_tx_octetcount_g += eqos_reg_read(MMC_TXOCTETCOUNT_G_OFFSET);
5593 mmc->mmc_tx_framecount_g += eqos_reg_read(MMC_TXPACKETSCOUNT_G_OFFSET);
5594 mmc->mmc_tx_excessdef += eqos_reg_read(MMC_TXEXCESSDEF_OFFSET);
5595 mmc->mmc_tx_pause_frame += eqos_reg_read(MMC_TXPAUSEPACKETS_OFFSET);
5596 mmc->mmc_tx_vlan_frame_g += eqos_reg_read(MMC_TXVLANPACKETS_G_OFFSET);
5597 mmc->mmc_tx_osize_frame_g += eqos_reg_read(MMC_TXOVERSIZE_G_OFFSET);
5599 /* MMC RX counter registers */
5600 mmc->mmc_rx_framecount_gb += eqos_reg_read(MMC_RXPACKETCOUNT_GB_OFFSET);
5601 mmc->mmc_rx_octetcount_gb += eqos_reg_read(MMC_RXOCTETCOUNT_GB_OFFSET);
5602 mmc->mmc_rx_octetcount_g += eqos_reg_read(MMC_RXOCTETCOUNT_G_OFFSET);
5603 mmc->mmc_rx_broadcastframe_g +=
5604 eqos_reg_read(MMC_RXBROADCASTPACKETS_G_OFFSET);
5605 mmc->mmc_rx_multicastframe_g +=
5606 eqos_reg_read(MMC_RXMULTICASTPACKETS_G_OFFSET);
/* NOTE(review): field name "mmc_rx_crc_errror" carries a typo but is
 * part of the struct's public layout; cannot be renamed here. */
5607 mmc->mmc_rx_crc_errror += eqos_reg_read(MMC_RXCRCERROR_OFFSET);
5608 mmc->mmc_rx_align_error += eqos_reg_read(MMC_RXALIGNMENTERROR_OFFSET);
5609 mmc->mmc_rx_run_error += eqos_reg_read(MMC_RXRUNTERROR_OFFSET);
5610 mmc->mmc_rx_jabber_error += eqos_reg_read(MMC_RXJABBERERROR_OFFSET);
5611 mmc->mmc_rx_undersize_g += eqos_reg_read(MMC_RXUNDERSIZE_G_OFFSET);
5612 mmc->mmc_rx_oversize_g += eqos_reg_read(MMC_RXOVERSIZE_G_OFFSET);
5613 mmc->mmc_rx_64_octets_gb += eqos_reg_read(MMC_RX64OCTETS_GB_OFFSET);
5614 mmc->mmc_rx_65_to_127_octets_gb +=
5615 eqos_reg_read(MMC_RX65TO127OCTETS_GB_OFFSET);
5616 mmc->mmc_rx_128_to_255_octets_gb +=
5617 eqos_reg_read(MMC_RX128TO255OCTETS_GB_OFFSET);
5618 mmc->mmc_rx_256_to_511_octets_gb +=
5619 eqos_reg_read(MMC_RX256TO511OCTETS_GB_OFFSET);
5620 mmc->mmc_rx_512_to_1023_octets_gb +=
5621 eqos_reg_read(MMC_RX512TO1023OCTETS_GB_OFFSET);
5622 mmc->mmc_rx_1024_to_max_octets_gb +=
5623 eqos_reg_read(MMC_RX1024TOMAXOCTETS_GB_OFFSET);
5624 mmc->mmc_rx_unicast_g += eqos_reg_read(MMC_RXUNICASTPACKETS_G_OFFSET);
5625 mmc->mmc_rx_length_error += eqos_reg_read(MMC_RXLENGTHERROR_OFFSET);
5626 mmc->mmc_rx_outofrangetype +=
5627 eqos_reg_read(MMC_RXOUTOFRANGETYPE_OFFSET);
5628 mmc->mmc_rx_pause_frames += eqos_reg_read(MMC_RXPAUSEPACKETS_OFFSET);
5629 mmc->mmc_rx_fifo_overflow += eqos_reg_read(MMC_RXFIFOOVERFLOW_OFFSET);
5630 mmc->mmc_rx_vlan_frames_gb +=
5631 eqos_reg_read(MMC_RXVLANPACKETS_GB_OFFSET);
5632 mmc->mmc_rx_watchdog_error += eqos_reg_read(MMC_RXWATCHDOGERROR_OFFSET);
5633 mmc->mmc_rx_receive_error += eqos_reg_read(MMC_RXRCVERROR_OFFSET);
5634 mmc->mmc_rx_ctrl_frames_g += eqos_reg_read(MMC_RXCTRLPACKETS_G_OFFSET);
/* IPC (RX checksum offload) interrupt mask/status counters */
5637 mmc->mmc_rx_ipc_intr_mask += eqos_reg_read(MMC_IPC_INTR_MASK_RX_OFFSET);
5638 mmc->mmc_rx_ipc_intr += eqos_reg_read(MMC_IPC_INTR_RX_OFFSET);
/* IPv4 packet counters */
5641 mmc->mmc_rx_ipv4_gd += eqos_reg_read(MMC_RXIPV4_GD_PKTS_OFFSET);
5642 mmc->mmc_rx_ipv4_hderr += eqos_reg_read(MMC_RXIPV4_HDRERR_PKTS_OFFSET);
5643 mmc->mmc_rx_ipv4_nopay += eqos_reg_read(MMC_RXIPV4_NOPAY_PKTS_OFFSET);
5644 mmc->mmc_rx_ipv4_frag += eqos_reg_read(MMC_RXIPV4_FRAG_PKTS_OFFSET);
5645 mmc->mmc_rx_ipv4_udsbl += eqos_reg_read(MMC_RXIPV4_UBSBL_PKTS_OFFSET);
/* IPv6 packet counters */
5648 mmc->mmc_rx_ipv6_gd += eqos_reg_read(MMC_RXIPV6_GD_PKTS_OFFSET);
5649 mmc->mmc_rx_ipv6_hderr += eqos_reg_read(MMC_RXIPV6_HDRERR_PKTS_OFFSET);
5650 mmc->mmc_rx_ipv6_nopay += eqos_reg_read(MMC_RXIPV6_NOPAY_PKTS_OFFSET);
/* UDP/TCP/ICMP packet counters */
5653 mmc->mmc_rx_udp_gd += eqos_reg_read(MMC_RXUDP_GD_PKTS_OFFSET);
5654 mmc->mmc_rx_udp_err += eqos_reg_read(MMC_RXUDP_ERR_PKTS_OFFSET);
5655 mmc->mmc_rx_tcp_gd += eqos_reg_read(MMC_RXTCP_GD_PKTS_OFFSET);
5656 mmc->mmc_rx_tcp_err += eqos_reg_read(MMC_RXTCP_ERR_PKTS_OFFSET);
5657 mmc->mmc_rx_icmp_gd += eqos_reg_read(MMC_RXICMP_GD_PKTS_OFFSET);
5658 mmc->mmc_rx_icmp_err += eqos_reg_read(MMC_RXICMP_ERR_PKTS_OFFSET);
/* IPv4 octet counters */
5661 mmc->mmc_rx_ipv4_gd_octets +=
5662 eqos_reg_read(MMC_RXIPV4_GD_OCTETS_OFFSET);
5663 mmc->mmc_rx_ipv4_hderr_octets +=
5664 eqos_reg_read(MMC_RXIPV4_HDRERR_OCTETS_OFFSET);
5665 mmc->mmc_rx_ipv4_nopay_octets +=
5666 eqos_reg_read(MMC_RXIPV4_NOPAY_OCTETS_OFFSET);
5667 mmc->mmc_rx_ipv4_frag_octets +=
5668 eqos_reg_read(MMC_RXIPV4_FRAG_OCTETS_OFFSET);
5669 mmc->mmc_rx_ipv4_udsbl_octets +=
5670 eqos_reg_read(MMC_RXIPV4_UDSBL_OCTETS_OFFSET);
/* IPv6 octet counters */
5673 mmc->mmc_rx_ipv6_gd_octets +=
5674 eqos_reg_read(MMC_RXIPV6_GD_OCTETS_OFFSET);
5675 mmc->mmc_rx_ipv6_hderr_octets +=
5676 eqos_reg_read(MMC_RXIPV6_HDRERR_OCTETS_OFFSET);
5677 mmc->mmc_rx_ipv6_nopay_octets +=
5678 eqos_reg_read(MMC_RXIPV6_NOPAY_OCTETS_OFFSET);
/* UDP/TCP/ICMP octet counters */
5681 mmc->mmc_rx_udp_gd_octets += eqos_reg_read(MMC_RXUDP_GD_OCTETS_OFFSET);
5682 mmc->mmc_rx_udp_err_octets +=
5683 eqos_reg_read(MMC_RXUDP_ERR_OCTETS_OFFSET);
5684 mmc->mmc_rx_tcp_gd_octets += eqos_reg_read(MMC_RXTCP_GD_OCTETS_OFFSET);
5685 mmc->mmc_rx_tcp_err_octets +=
5686 eqos_reg_read(MMC_RXTCP_ERR_OCTETS_OFFSET);
5687 mmc->mmc_rx_icmp_gd_octets +=
5688 eqos_reg_read(MMC_RXICMP_GD_OCTETS_OFFSET);
5689 mmc->mmc_rx_icmp_err_octets +=
5690 eqos_reg_read(MMC_RXICMP_ERR_OCTETS_OFFSET);
5692 pr_debug("<--eqos_mmc_read\n");
/*
 * Net-device operations table registered with the networking core.
 * Maps each ndo_* hook to its EQOS implementation; ndo_select_queue
 * is only wired in when EQOS_QUEUE_SELECT_ALGO is defined.
 */
5695 static const struct net_device_ops eqos_netdev_ops = {
5696 .ndo_open = eqos_open,
5697 .ndo_stop = eqos_close,
5698 .ndo_start_xmit = eqos_start_xmit,
5699 .ndo_get_stats = eqos_get_stats,
5700 .ndo_set_rx_mode = eqos_set_rx_mode,
5701 .ndo_set_features = eqos_set_features,
5702 .ndo_do_ioctl = eqos_ioctl,
5703 .ndo_change_mtu = eqos_change_mtu,
5704 #ifdef EQOS_QUEUE_SELECT_ALGO
5705 .ndo_select_queue = eqos_select_queue,
5707 .ndo_vlan_rx_add_vid = eqos_vlan_rx_add_vid,
5708 .ndo_vlan_rx_kill_vid = eqos_vlan_rx_kill_vid,
5709 .ndo_set_mac_address = eqos_set_mac_address,
/*
 * Return a pointer to the shared net_device_ops table.
 * NOTE(review): the cast strips const from a const-qualified object;
 * callers must treat the returned table as read-only — writing through
 * this pointer is undefined behavior.
 */
5712 struct net_device_ops *eqos_get_netdev_ops(void)
5714 return (struct net_device_ops *)&eqos_netdev_ops;
/*
 * Mask all device interrupt sources, then wait for any in-flight
 * handlers to finish: per-channel interrupts are disabled via the HW
 * abstraction, after which synchronize_irq() is called on the common
 * IRQ and on each allocated per-channel RX/TX IRQ so no handler is
 * still executing when this returns.
 */
5718 static void eqos_disable_all_irqs(struct eqos_prv_data *pdata)
5720 struct hw_if_struct *hw_if = &pdata->hw_if;
5723 pr_debug("-->%s()\n", __func__);
5725 for (i = 0; i < pdata->num_chans; i++)
5726 hw_if->disable_chan_interrupts(i, pdata);
5728 /* disable mac interrupts */
5731 /* ensure irqs are not executing */
5732 synchronize_irq(pdata->common_irq);
5733 for (i = 0; i < pdata->num_chans; i++) {
/* Only synchronize IRQs that were actually requested, as tracked
 * by the rx/tx allocation masks. */
5734 if (pdata->rx_irq_alloc_mask & (1 << i))
5735 synchronize_irq(pdata->rx_irqs[i]);
5736 if (pdata->tx_irq_alloc_mask & (1 << i))
5737 synchronize_irq(pdata->tx_irqs[i]);
5740 pr_debug("<--%s()\n", __func__);
/*
 * Quiesce the device: stop the PHY, cut off all data sources (netif
 * TX, MAC RX, IRQs, NAPI), drain any in-flight TX threads via the
 * per-channel locks, stop DMA, and free all TX/RX skb memory. The
 * counterpart of eqos_start_dev() below.
 */
5743 void eqos_stop_dev(struct eqos_prv_data *pdata)
5745 struct hw_if_struct *hw_if = &pdata->hw_if;
5746 struct desc_if_struct *desc_if = &pdata->desc_if;
5749 pr_debug("-->%s()\n", __func__);
5751 #ifdef CONFIG_TEGRA_PTP_NOTIFIER
5752 /* Unregister broadcasting MAC timestamp to clients */
5753 tegra_unregister_hwtime_source();
5755 /* Stop the PHY state machine */
5757 phy_stop_machine(pdata->phydev);
5759 /* turn off sources of data into dev */
5760 netif_tx_disable(pdata->dev);
5762 hw_if->stop_mac_rx();
5763 eqos_disable_all_irqs(pdata);
5764 eqos_all_ch_napi_disable(pdata);
5766 /* Ensure no tx thread is running. We have
5767 * already prevented any new callers of or tx thread above.
5768 * Below will allow any remaining tx threads to complete.
/* Lock/unlock each channel TX lock as a barrier: any TX path still
 * holding the lock finishes before we proceed to stop DMA. */
5770 for (i = 0; i < pdata->num_chans; i++) {
5771 spin_lock(&pdata->chinfo[i].chan_tx_lock);
5772 spin_unlock(&pdata->chinfo[i].chan_tx_lock);
5776 eqos_stop_all_ch_tx_dma(pdata);
5778 /* disable MAC TX */
5779 hw_if->stop_mac_tx();
5782 eqos_stop_all_ch_rx_dma(pdata);
/* Stop the EEE control timer before releasing buffers. */
5784 del_timer_sync(&pdata->eee_ctrl_timer);
5786 /* return tx skbs */
5787 desc_if->tx_skb_free_mem(pdata, pdata->num_chans);
5790 desc_if->rx_skb_free_mem(pdata, pdata->num_chans);
5792 pr_debug("<--%s()\n", __func__);
/*
 * Bring the device back up after eqos_stop_dev(): CAR-reset and
 * pad-calibrate the HW, restore default MAC/DMA/queue configuration,
 * re-init descriptor rings, re-enable NAPI and RX filtering, restart
 * the PHY (and autonegotiation when a PCS is present), optionally
 * re-enable EEE, and finally restart the netif TX queues.
 */
5795 void eqos_start_dev(struct eqos_prv_data *pdata)
5797 struct hw_if_struct *hw_if = &pdata->hw_if;
5798 struct desc_if_struct *desc_if = &pdata->desc_if;
5800 pr_debug("-->%s()\n", __func__);
5802 /* issue CAR reset to device */
5803 hw_if->car_reset(pdata);
5804 hw_if->pad_calibrate(pdata);
5806 /* default configuration */
5807 eqos_default_common_confs(pdata);
5808 eqos_default_tx_confs(pdata);
5809 eqos_default_rx_confs(pdata);
5810 eqos_configure_rx_fun_ptr(pdata);
5812 desc_if->wrapper_tx_desc_init(pdata);
5813 desc_if->wrapper_rx_desc_init(pdata);
5815 eqos_napi_enable_mq(pdata);
5817 eqos_set_rx_mode(pdata->dev);
5818 eqos_mmc_setup(pdata);
5820 /* initializes MAC and DMA */
/* Program the 1us tick counter from the CSR clock rate. */
5823 MAC_1US_TIC_WR(pdata->csr_clock_speed - 1);
/* Restart autonegotiation when the PCS block is selected. */
5825 if (pdata->hw_feat.pcs_sel)
5826 hw_if->control_an(1, 0);
5828 if (pdata->phydev) {
/* Invalidate the cached duplex so the link-change handler
 * reprograms the MAC on the next PHY update. */
5831 pdata->oldduplex = -1;
5833 phy_start(pdata->phydev);
5834 phy_start_machine(pdata->phydev);
5836 #ifdef EQOS_ENABLE_EEE
5838 pdata->eee_enabled = eqos_eee_init(pdata);
5840 pdata->eee_enabled = false;
5842 pdata->eee_enabled = false;
5846 netif_tx_start_all_queues(pdata->dev);
5848 pr_debug("<--%s()\n", __func__);
/*
 * Workqueue handler that (re)establishes the isochronous bandwidth
 * reservation with the Tegra ISO manager when eth_iso_enable is set in
 * the DT configuration: reserve the configured bandwidth, then realize
 * the reservation. Failures are logged via dev_err.
 */
5851 void eqos_iso_work(struct work_struct *work)
5853 struct eqos_prv_data *pdata =
5854 container_of(work, struct eqos_prv_data, iso_work);
5855 struct phy_device *phydev = pdata->phydev;
5856 struct eqos_cfg *pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg;
5860 pr_debug("-->%s()\n", __func__);
5862 if (pdt_cfg->eth_iso_enable) {
5864 iso_bw = pdata->dt_cfg.iso_bw;
5868 ret = tegra_isomgr_reserve(pdata->isomgr_handle, iso_bw, 0);
5870 dev_err(&pdata->pdev->dev,
5871 "EQOS ISO BW %d reservation failed with %d\n",
5876 ret = tegra_isomgr_realize(pdata->isomgr_handle);
5878 dev_err(&pdata->pdev->dev,
5879 "EQOS ISO BW realize failed with %d\n", ret);
5882 pr_debug("<--%s()\n", __func__);
5884 void eqos_fbe_work(struct work_struct *work)
5886 struct eqos_prv_data *pdata =
5887 container_of(work, struct eqos_prv_data, fbe_work);
5891 pr_debug("-->%s()\n", __func__);
5893 mutex_lock(&pdata->hw_change_lock);
5894 if (pdata->hw_stopped)
5898 while (pdata->fbe_chan_mask) {
5899 if (pdata->fbe_chan_mask & 1) {
5900 DMA_SR_RD(i, dma_sr_reg);
5902 dev_err(&pdata->pdev->dev,
5903 "Fatal Bus Error on chan %d, SRreg=0x%.8x\n",
5906 pdata->fbe_chan_mask >>= 1;
5909 eqos_stop_dev(pdata);
5910 eqos_start_dev(pdata);
5912 mutex_unlock(&pdata->hw_change_lock);
5914 pr_debug("<--%s()\n", __func__);