1 /* =========================================================================
2 * The Synopsys DWC ETHER QOS Software Driver and documentation (hereinafter
3 * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
4 * otherwise expressly agreed to in writing between Synopsys and you.
6 * The Software IS NOT an item of Licensed Software or Licensed Product under
7 * any End User Software License Agreement or Agreement for Licensed Product
8 * with Synopsys or any supplement thereto. Permission is hereby granted,
9 * free of charge, to any person obtaining a copy of this software annotated
10 * with this license and the Software, to deal in the Software without
11 * restriction, including without limitation the rights to use, copy, modify,
12 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
13 * and to permit persons to whom the Software is furnished to do so, subject
14 * to the following conditions:
16 * The above copyright notice and this permission notice shall be included in
17 * all copies or substantial portions of the Software.
19 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
23 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
30 * ========================================================================= */
32 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
34 * This program is free software; you can redistribute it and/or modify it
35 * under the terms and conditions of the GNU General Public License,
36 * version 2, as published by the Free Software Foundation.
38 * This program is distributed in the hope it will be useful, but WITHOUT
39 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
40 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
44 * @brief: Driver functions.
47 #include <linux/gpio.h>
48 #include <linux/time.h>
53 extern ULONG eqos_base_addr;
56 #include <linux/inet_lro.h>
57 #include <soc/tegra/chip-id.h>
/* Last interrupt disposition observed by the interrupt handlers below;
 * set to negative -E_* codes on error events and positive S_* on benign ones. */
59 static INT eqos_status;
60 static int handle_txrx_completions(struct eqos_prv_data *pdata, int qinx);
62 /* raw spinlock to get HW PTP time and kernel time atomically */
63 static DEFINE_RAW_SPINLOCK(eqos_ts_lock);
65 /* SA(Source Address) operations on TX */
/* Default MAC addresses used for TX source-address insertion/replacement. */
66 unsigned char mac_addr0[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
67 unsigned char mac_addr1[6] = { 0x00, 0x66, 0x77, 0x88, 0x99, 0xaa };
69 /* module parameters for configuring the queue modes
70 * set default mode as GENERIC
72 /* Value of "2" enables mtl tx q */
73 static int q_op_mode[EQOS_MAX_TX_QUEUE_CNT] = {
84 /* Store the IRQ names to be used by /proc/interrupts */
/* 8 slots of 32 chars each — one per rx/tx IRQ name built in
 * request_txrx_irqs(); must outlive the IRQs since request_irq keeps
 * the name pointer. */
85 static char irq_names[8][32];
/* Runtime-writable (S_IRUGO | S_IWUSR); per-queue mode values documented
 * in the parm description below. */
87 module_param_array(q_op_mode, int, NULL, S_IRUGO | S_IWUSR);
88 MODULE_PARM_DESC(q_op_mode,
89 "MTL queue operation mode [0-DISABLED, 1-AVB, 2-DCB, 3-GENERIC]");
/* Stop TX DMA on every TX channel (0..EQOS_TX_QUEUE_CNT-1) via the
 * hardware-abstraction ops. Used on device stop/reset paths. */
91 void eqos_stop_all_ch_tx_dma(struct eqos_prv_data *pdata)
93 struct hw_if_struct *hw_if = &(pdata->hw_if);
96 pr_debug("-->eqos_stop_all_ch_tx_dma\n");
98 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++)
99 hw_if->stop_dma_tx(pdata, qinx);
101 pr_debug("<--eqos_stop_all_ch_tx_dma\n");
/* Byte-wise compare @addr against the two well-known PTP destination MAC
 * addresses (PTP1_MAC0..5 and PTP2_MAC0..5).
 * NOTE(review): the per-branch return statements fall outside this view;
 * presumably returns nonzero on a match and 0 otherwise — confirm in the
 * full source. */
104 static int is_ptp_addr(char *addr)
106 if ((addr[0] == PTP1_MAC0) &&
107 (addr[1] == PTP1_MAC1) &&
108 (addr[2] == PTP1_MAC2) &&
109 (addr[3] == PTP1_MAC3) &&
110 (addr[4] == PTP1_MAC4) && (addr[5] == PTP1_MAC5))
112 else if ((addr[0] == PTP2_MAC0) &&
113 (addr[1] == PTP2_MAC1) &&
114 (addr[2] == PTP2_MAC2) &&
115 (addr[3] == PTP2_MAC3) &&
116 (addr[4] == PTP2_MAC4) && (addr[5] == PTP2_MAC5))
122 /*Check if Channel 0 is PTP and has data 0xee
123 Check if Channel 1 is AV and has data 0xbb or 0xcc
124 Check if Channel 2 is AV and has data 0xdd*/
125 #ifdef ENABLE_CHANNEL_DATA_CHECK
/* Debug-only payload sanity check: reads the 16-bit word at byte offset 12
 * of the frame (the EtherType slot; 0xF788/0xF022 appear byte-swapped, i.e.
 * 0x88F7 PTP / 0x22F0 AVTP — TODO confirm endianness assumption) and the
 * payload byte at offset 80, and logs an error if the byte does not match
 * the expected per-queue pattern described above. */
126 static void check_channel_data(struct sk_buff *skb, unsigned int qinx,
130 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF788) &&
131 ((*(((char *)skb->data) + 80) & 0xFF) != 0xee)) ||
133 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF022) &&
134 (((*(((char *)skb->data) + 80) & 0xFF) != 0xbb) &&
135 ((*(((char *)skb->data) + 80) & 0xFF) != 0xcc))) ||
137 ((*(((short *)skb->data) + 6) & 0xFFFF) == 0xF022) &&
138 ((*(((char *)skb->data) + 80) & 0xFF) != 0xdd))) {
140 pr_err("Incorrect %s data 0x%x in Q %d\n",
141 ((is_rx) ? "RX" : "TX"), *(((char *)skb->data) + 80), qinx);
/* Stop RX DMA on every RX channel (0..EQOS_RX_QUEUE_CNT-1).
 * Note: unlike the TX variant, stop_dma_rx takes only the queue index. */
146 static void eqos_stop_all_ch_rx_dma(struct eqos_prv_data *pdata)
148 struct hw_if_struct *hw_if = &(pdata->hw_if);
151 pr_debug("-->eqos_stop_all_ch_rx_dma\n");
153 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
154 hw_if->stop_dma_rx(qinx);
156 pr_debug("<--eqos_stop_all_ch_rx_dma\n");
/* Start TX DMA on every TX channel via the HW abstraction layer. */
159 static void eqos_start_all_ch_tx_dma(struct eqos_prv_data *pdata)
161 struct hw_if_struct *hw_if = &(pdata->hw_if);
164 pr_debug("-->eqos_start_all_ch_tx_dma\n");
166 for (i = 0; i < EQOS_TX_QUEUE_CNT; i++)
167 hw_if->start_dma_tx(i);
169 pr_debug("<--eqos_start_all_ch_tx_dma\n");
/* Start RX DMA on every RX channel via the HW abstraction layer. */
172 static void eqos_start_all_ch_rx_dma(struct eqos_prv_data *pdata)
174 struct hw_if_struct *hw_if = &(pdata->hw_if);
177 pr_debug("-->eqos_start_all_ch_rx_dma\n");
179 for (i = 0; i < EQOS_RX_QUEUE_CNT; i++)
180 hw_if->start_dma_rx(i);
182 pr_debug("<--eqos_start_all_ch_rx_dma\n");
/* Enable the per-RX-queue NAPI contexts (multi-queue configuration). */
185 static void eqos_napi_enable_mq(struct eqos_prv_data *pdata)
187 struct eqos_rx_queue *rx_queue = NULL;
190 pr_debug("-->eqos_napi_enable_mq\n");
192 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
193 rx_queue = GET_RX_QUEUE_PTR(qinx);
194 napi_enable(&rx_queue->napi);
197 pr_debug("<--eqos_napi_enable_mq\n");
/* Disable NAPI on every RX queue. napi_disable() blocks until any
 * in-flight poll on that context completes. */
200 static void eqos_all_ch_napi_disable(struct eqos_prv_data *pdata)
202 struct eqos_rx_queue *rx_queue = NULL;
205 pr_debug("-->eqos_napi_disable\n");
207 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
208 rx_queue = GET_RX_QUEUE_PTR(qinx);
209 napi_disable(&rx_queue->napi);
212 pr_debug("<--eqos_napi_disable\n");
/* Mask RX interrupts on every RX channel via the HW abstraction layer. */
215 void eqos_disable_all_ch_rx_interrpt(struct eqos_prv_data *pdata)
217 struct hw_if_struct *hw_if = &(pdata->hw_if);
220 pr_debug("-->eqos_disable_all_ch_rx_interrpt\n");
222 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
223 hw_if->disable_rx_interrupt(qinx, pdata);
225 pr_debug("<--eqos_disable_all_ch_rx_interrpt\n");
/* Unmask RX interrupts on every RX channel via the HW abstraction layer. */
228 void eqos_enable_all_ch_rx_interrpt(struct eqos_prv_data *pdata)
230 struct hw_if_struct *hw_if = &(pdata->hw_if);
233 pr_debug("-->eqos_enable_all_ch_rx_interrpt\n");
235 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++)
236 hw_if->enable_rx_interrupt(qinx, pdata);
238 pr_debug("<--eqos_enable_all_ch_rx_interrpt\n");
/* Service per-channel DMA interrupts other than TI (transmit complete) and
 * RI (receive complete): process-stopped, buffer-unavailable, RX watchdog
 * and fatal bus error.  Bumps the matching xstats counters, records the
 * outcome in the module-wide eqos_status, and schedules fbe_work on a
 * fatal bus error.  TI/RI themselves are handled in
 * handle_ti_ri_chan_intrs(). */
241 void handle_non_ti_ri_chan_intrs(struct eqos_prv_data *pdata, int qinx)
246 pr_debug("-->%s(), chan=%d\n", __func__, qinx);
248 DMA_SR_RD(qinx, dma_sr);
250 DMA_IER_RD(qinx, dma_ier);
252 pr_debug("DMA_SR[%d] = %#lx, DMA_IER= %#lx\n", qinx, dma_sr, dma_ier);
254 /*on ufpga, update of DMA_IER is really slow, such that interrupt
255 * would happen, but read of IER returns old value. This would
256 * cause driver to return when there really was an interrupt asserted.
257 * so for now, comment this out.
259 /* process only those interrupts which we
/* On real silicon, mask the status with the enabled set now; the
 * unit-FPGA path defers this until after the ack (see comment above). */
262 if (!(tegra_platform_is_unit_fpga()))
263 dma_sr = (dma_sr & dma_ier);
265 /* mask off ri and ti */
/* bit 6 = RI, bit 0 = TI — cleared here so the ack below leaves them
 * for the TI/RI handler. */
266 dma_sr &= ~(((0x1) << 6) | 1);
271 /* ack non ti/ri ints */
/* Writing the set bits back to DMA_SR acknowledges them. */
272 DMA_SR_WR(qinx, dma_sr);
274 if ((GET_VALUE(dma_sr, DMA_SR_RBU_LPOS, DMA_SR_RBU_HPOS) & 1))
275 pdata->xstats.rx_buf_unavailable_irq_n[qinx]++;
277 if (tegra_platform_is_unit_fpga())
278 dma_sr = (dma_sr & dma_ier);
280 if (GET_VALUE(dma_sr, DMA_SR_TPS_LPOS, DMA_SR_TPS_HPOS) & 1) {
281 pdata->xstats.tx_process_stopped_irq_n[qinx]++;
282 eqos_status = -E_DMA_SR_TPS;
284 if (GET_VALUE(dma_sr, DMA_SR_TBU_LPOS, DMA_SR_TBU_HPOS) & 1) {
285 pdata->xstats.tx_buf_unavailable_irq_n[qinx]++;
286 eqos_status = -E_DMA_SR_TBU;
288 if (GET_VALUE(dma_sr, DMA_SR_RPS_LPOS, DMA_SR_RPS_HPOS) & 1) {
289 pdata->xstats.rx_process_stopped_irq_n[qinx]++;
290 eqos_status = -E_DMA_SR_RPS;
292 if (GET_VALUE(dma_sr, DMA_SR_RWT_LPOS, DMA_SR_RWT_HPOS) & 1) {
293 pdata->xstats.rx_watchdog_irq_n++;
294 eqos_status = S_DMA_SR_RWT;
296 if (GET_VALUE(dma_sr, DMA_SR_FBE_LPOS, DMA_SR_FBE_HPOS) & 1) {
297 pdata->xstats.fatal_bus_error_irq_n++;
/* Record the faulting channel for the deferred FBE handler. */
298 pdata->fbe_chan_mask |= (1 << qinx);
299 eqos_status = -E_DMA_SR_FBE;
/* Recovery is too heavy for IRQ context — defer to the fbe_work item. */
300 schedule_work(&pdata->fbe_work);
303 pr_debug("<--%s()\n", __func__);
/* Service the TI (transmit) / RI (receive) completion interrupts for one
 * channel: acknowledge the DMA status and virtual-interrupt status bits,
 * bump the normal-IRQ counters, then disable the channel's interrupts and
 * schedule NAPI for the combined tx/rx poll. */
306 void handle_ti_ri_chan_intrs(struct eqos_prv_data *pdata,
307 int qinx, int *pnapi_sched)
313 struct hw_if_struct *hw_if = &(pdata->hw_if);
315 struct eqos_rx_queue *rx_queue = NULL;
317 pr_debug("-->%s(), chan=%d\n", __func__, qinx);
319 rx_queue = GET_RX_QUEUE_PTR(qinx);
321 DMA_SR_RD(qinx, dma_sr);
323 DMA_IER_RD(qinx, dma_ier);
324 VIRT_INTR_CH_STAT_RD(qinx, ch_stat_reg);
325 VIRT_INTR_CH_CRTL_RD(qinx, ch_crtl_reg);
327 pr_debug("DMA_SR[%d] = %#lx, DMA_IER= %#lx\n", qinx, dma_sr, dma_ier);
329 pr_debug("VIRT_INTR_CH_STAT[%d] = %#x, VIRT_INTR_CH_CRTL= %#x\n",
330 qinx, ch_stat_reg, ch_crtl_reg);
332 /*on ufpga, update of DMA_IER is really slow, such that interrupt
333 * would happen, but read of IER returns old value. This would
334 * cause driver to return when there really was an interrupt asserted.
335 * so for now, comment this out.
337 /* process only those interrupts which we
/* Mask with the control (enable) register on silicon; deferred on ufpga. */
340 if (!(tegra_platform_is_unit_fpga()))
341 ch_stat_reg &= ch_crtl_reg;
343 if (ch_stat_reg == 0)
346 if (ch_stat_reg & VIRT_INTR_CH_CRTL_RX_WR_MASK) {
/* Ack RI (bit 6) plus the summary bit (bit 15 — TODO confirm against
 * the DMA_SR layout) and the virtual RX status. */
347 DMA_SR_WR(qinx, ((0x1) << 6) | ((0x1) << 15));
348 VIRT_INTR_CH_STAT_WR(qinx, VIRT_INTR_CH_CRTL_RX_WR_MASK);
349 pdata->xstats.rx_normal_irq_n[qinx]++;
352 if (tegra_platform_is_unit_fpga())
353 ch_stat_reg &= ch_crtl_reg;
355 if (ch_stat_reg & VIRT_INTR_CH_CRTL_TX_WR_MASK) {
/* Ack TI (bit 0) plus the summary bit and the virtual TX status. */
356 DMA_SR_WR(qinx, ((0x1) << 0) | ((0x1) << 15));
357 VIRT_INTR_CH_STAT_WR(qinx, VIRT_INTR_CH_CRTL_TX_WR_MASK);
358 pdata->xstats.tx_normal_irq_n[qinx]++;
/* Disable channel interrupts before scheduling so the poll loop can
 * re-enable them when done (standard NAPI pattern). */
361 if (likely(napi_schedule_prep(&rx_queue->napi))) {
362 hw_if->disable_chan_interrupts(qinx, pdata);
363 __napi_schedule(&rx_queue->napi);
365 /* Do nothing here. */
366 pr_alert("Ethernet Interrupt while in poll!\n");
368 pr_debug("<--%s()\n", __func__);
/* Handle MAC-level interrupts reported through DMA_ISR: PMT (wake) events,
 * RGMII/SMII link status, PCS link/auto-negotiation status, and LPI (EEE).
 * Updates carrier state and PCS speed/duplex bookkeeping accordingly. */
371 void handle_mac_intrs(struct eqos_prv_data *pdata, ULONG dma_isr)
378 struct net_device *dev = pdata->dev;
380 pr_debug("-->%s()\n", __func__);
384 /* Handle MAC interrupts */
385 if (GET_VALUE(dma_isr, DMA_ISR_MACIS_LPOS, DMA_ISR_MACIS_HPOS) & 1) {
386 /* handle only those MAC interrupts which are enabled */
388 mac_isr = (mac_isr & mac_imr);
391 * RemoteWake and MagicPacket events will be received by PHY supporting
392 * these features on silicon and can be used to wake up Tegra.
393 * Still let the below code be here in case we ever get this interrupt.
395 if (GET_VALUE(mac_isr, MAC_ISR_PMTIS_LPOS, MAC_ISR_PMTIS_HPOS) &
397 pdata->xstats.pmt_irq_n++;
398 eqos_status = S_MAC_ISR_PMTIS;
/* Reading PMTCSR clears the wake-event status — TODO confirm. */
399 MAC_PMTCSR_RD(mac_pmtcsr);
400 pr_debug("commonisr: PMTCSR : %#lx\n", mac_pmtcsr);
401 if (pdata->power_down)
402 eqos_powerup(pdata->dev, EQOS_IOCTL_CONTEXT);
405 /* RGMII/SMII interrupt */
407 (mac_isr, MAC_ISR_RGSMIIS_LPOS, MAC_ISR_RGSMIIS_HPOS) & 1) {
409 pr_debug("RGMII/SMII interrupt: MAC_PCS = %#lx\n",
411 #ifdef HWA_NV_1637630
414 /* Comment out this block of code(1637630)
415 * as it was preventing 10mb to work.
/* MAC_PCS bit 19 = link up; bit 16 = full duplex; bits 17-18 encode
 * speed (00=10M, 01=100M, 0x30000 pattern=1000M per the checks below). */
417 if ((mac_pcs & 0x80000) == 0x80000) {
419 netif_carrier_on(dev);
420 if ((mac_pcs & 0x10000) == 0x10000) {
421 pdata->pcs_duplex = 1;
422 hw_if->set_full_duplex();
424 pdata->pcs_duplex = 0;
425 hw_if->set_half_duplex();
428 if ((mac_pcs & 0x60000) == 0x0) {
429 pdata->pcs_speed = SPEED_10;
430 hw_if->set_mii_speed_10();
431 } else if ((mac_pcs & 0x60000) == 0x20000) {
432 pdata->pcs_speed = SPEED_100;
433 hw_if->set_mii_speed_100();
434 } else if ((mac_pcs & 0x60000) == 0x30000) {
435 pdata->pcs_speed = SPEED_1000;
436 hw_if->set_gmii_speed();
438 pr_err("Link is UP:%dMbps & %s duplex\n",
440 pdata->pcs_duplex ? "Full" : "Half");
442 pr_err("Link is Down\n");
444 netif_carrier_off(dev);
449 /* PCS Link Status interrupt */
451 (mac_isr, MAC_ISR_PCSLCHGIS_LPOS,
452 MAC_ISR_PCSLCHGIS_HPOS) & 1) {
453 pr_err("PCS Link Status interrupt\n");
455 if (GET_VALUE(mac_ans, MAC_ANS_LS_LPOS, MAC_ANS_LS_HPOS)
457 pr_err("Link: Up\n");
458 netif_carrier_on(dev);
461 pr_err("Link: Down\n");
462 netif_carrier_off(dev);
467 /* PCS Auto-Negotiation Complete interrupt */
469 (mac_isr, MAC_ISR_PCSANCIS_LPOS,
470 MAC_ISR_PCSANCIS_HPOS) & 1) {
471 pr_err("PCS Auto-Negotiation Complete interrupt\n");
/* LPI (Energy Efficient Ethernet) state change. */
476 if (GET_VALUE(mac_isr, MAC_ISR_LPI_LPOS, MAC_ISR_LPI_HPOS) & 1) {
477 eqos_handle_eee_interrupt(pdata);
481 pr_debug("<--%s()\n", __func__);
486 * Only used when multi irq is enabled
/* Common (non per-channel) ISR: walks every channel for non-TI/RI DMA
 * events, then dispatches MAC-level interrupts from DMA_ISR. */
489 irqreturn_t eqos_common_isr(int irq, void *device_id)
492 struct eqos_prv_data *pdata = (struct eqos_prv_data *)device_id;
495 pr_debug("-->%s()\n", __func__);
501 pr_debug("DMA_ISR = %#lx\n", dma_isr);
504 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++)
505 handle_non_ti_ri_chan_intrs(pdata, qinx);
507 handle_mac_intrs(pdata, dma_isr);
509 pr_debug("<--%s()\n", __func__);
515 /* Only used when multi irq is enabled.
516 * Will only handle tx/rx for one channel.
/* Per-channel ISR: maps the firing IRQ number back to its channel index
 * (rx_irqs[i]/tx_irqs[i] share a channel), then services that channel's
 * TI/RI interrupts under the channel's irq_lock. */
518 irqreturn_t eqos_ch_isr(int irq, void *device_id)
520 struct eqos_prv_data *pdata = (struct eqos_prv_data *)device_id;
525 i = smp_processor_id();
527 if ((irq == pdata->rx_irqs[0]) || (irq == pdata->tx_irqs[0]))
529 else if ((irq == pdata->rx_irqs[1]) || (irq == pdata->tx_irqs[1]))
531 else if ((irq == pdata->rx_irqs[2]) || (irq == pdata->tx_irqs[2]))
533 else if ((irq == pdata->rx_irqs[3]) || (irq == pdata->tx_irqs[3]))
536 pr_debug("-->%s(): cpu=%d, chan=%d\n", __func__, i, qinx);
539 handle_ti_ri_chan_intrs(pdata, qinx, &napi_sched);
541 pr_debug("%s(): irq %d not handled\n", __func__, irq);
/* The rx and tx IRQs of one channel can fire concurrently on different
 * CPUs; the per-channel spinlock serializes their status handling. */
545 spin_lock(&pdata->chinfo[qinx].irq_lock);
546 handle_ti_ri_chan_intrs(pdata, qinx, &napi_sched);
547 spin_unlock(&pdata->chinfo[qinx].irq_lock);
549 pr_debug("<--%s()\n", __func__);
556 * \brief API to get all hw features.
558 * \details This function is used to check what are all the different
559 * features the device supports.
561 * \param[in] pdata - pointer to driver private structure
566 void eqos_get_all_hw_features(struct eqos_prv_data *pdata)
568 unsigned int mac_hfr0;
569 unsigned int mac_hfr1;
570 unsigned int mac_hfr2;
572 pr_debug("-->eqos_get_all_hw_features\n");
/* The three MAC hardware-feature registers are decoded field-by-field
 * below using (reg >> shift) & mask. */
574 MAC_HFR0_RD(mac_hfr0);
575 MAC_HFR1_RD(mac_hfr1);
576 MAC_HFR2_RD(mac_hfr2);
578 memset(&pdata->hw_feat, 0, sizeof(pdata->hw_feat));
579 pdata->hw_feat.mii_sel = ((mac_hfr0 >> 0) & MAC_HFR0_MIISEL_MASK);
580 pdata->hw_feat.gmii_sel = ((mac_hfr0 >> 1) & MAC_HFR0_GMIISEL_MASK);
581 pdata->hw_feat.hd_sel = ((mac_hfr0 >> 2) & MAC_HFR0_HDSEL_MASK);
582 pdata->hw_feat.pcs_sel = ((mac_hfr0 >> 3) & MAC_HFR0_PCSSEL_MASK);
/* VLAN hash filtering is force-disabled regardless of what HFR0 says. */
583 pdata->hw_feat.vlan_hash_en = 0;
584 pdata->hw_feat.sma_sel = ((mac_hfr0 >> 5) & MAC_HFR0_SMASEL_MASK);
585 pdata->hw_feat.rwk_sel = ((mac_hfr0 >> 6) & MAC_HFR0_RWKSEL_MASK);
586 pdata->hw_feat.mgk_sel = ((mac_hfr0 >> 7) & MAC_HFR0_MGKSEL_MASK);
587 pdata->hw_feat.mmc_sel = ((mac_hfr0 >> 8) & MAC_HFR0_MMCSEL_MASK);
588 pdata->hw_feat.arp_offld_en =
589 ((mac_hfr0 >> 9) & MAC_HFR0_ARPOFFLDEN_MASK);
590 pdata->hw_feat.ts_sel = ((mac_hfr0 >> 12) & MAC_HFR0_TSSSEL_MASK);
591 pdata->hw_feat.eee_sel = ((mac_hfr0 >> 13) & MAC_HFR0_EEESEL_MASK);
592 pdata->hw_feat.tx_coe_sel = ((mac_hfr0 >> 14) & MAC_HFR0_TXCOESEL_MASK);
593 pdata->hw_feat.rx_coe_sel = ((mac_hfr0 >> 16) & MAC_HFR0_RXCOE_MASK);
594 pdata->hw_feat.mac_addr16_sel =
595 ((mac_hfr0 >> 18) & MAC_HFR0_ADDMACADRSEL_MASK);
596 pdata->hw_feat.mac_addr32_sel =
597 ((mac_hfr0 >> 23) & MAC_HFR0_MACADR32SEL_MASK);
598 pdata->hw_feat.mac_addr64_sel =
599 ((mac_hfr0 >> 24) & MAC_HFR0_MACADR64SEL_MASK);
600 pdata->hw_feat.tsstssel = ((mac_hfr0 >> 25) & MAC_HFR0_TSINTSEL_MASK);
601 pdata->hw_feat.sa_vlan_ins =
602 ((mac_hfr0 >> 27) & MAC_HFR0_SAVLANINS_MASK);
603 pdata->hw_feat.act_phy_sel =
604 ((mac_hfr0 >> 28) & MAC_HFR0_ACTPHYSEL_MASK);
606 pdata->hw_feat.rx_fifo_size =
607 ((mac_hfr1 >> 0) & MAC_HFR1_RXFIFOSIZE_MASK);
608 pdata->hw_feat.tx_fifo_size =
609 ((mac_hfr1 >> 6) & MAC_HFR1_TXFIFOSIZE_MASK);
610 pdata->hw_feat.adv_ts_hword =
611 ((mac_hfr1 >> 13) & MAC_HFR1_ADVTHWORD_MASK);
612 pdata->hw_feat.dcb_en = ((mac_hfr1 >> 16) & MAC_HFR1_DCBEN_MASK);
613 pdata->hw_feat.sph_en = ((mac_hfr1 >> 17) & MAC_HFR1_SPHEN_MASK);
614 pdata->hw_feat.tso_en = ((mac_hfr1 >> 18) & MAC_HFR1_TSOEN_MASK);
615 pdata->hw_feat.dma_debug_gen =
616 ((mac_hfr1 >> 19) & MAC_HFR1_DMADEBUGEN_MASK);
617 pdata->hw_feat.av_sel = ((mac_hfr1 >> 20) & MAC_HFR1_AVSEL_MASK);
618 pdata->hw_feat.lp_mode_en = ((mac_hfr1 >> 23) & MAC_HFR1_LPMODEEN_MASK);
/* With perfect L2 filtering compiled in, pretend there is no hash table. */
619 #ifdef ENABLE_PERFECT_L2_FILTER
620 pdata->hw_feat.hash_tbl_sz = 0;
622 pdata->hw_feat.hash_tbl_sz =
623 ((mac_hfr1 >> 24) & MAC_HFR1_HASHTBLSZ_MASK);
625 pdata->hw_feat.l3l4_filter_num =
626 ((mac_hfr1 >> 27) & MAC_HFR1_L3L4FILTERNUM_MASK);
628 pdata->hw_feat.rx_q_cnt = ((mac_hfr2 >> 0) & MAC_HFR2_RXQCNT_MASK);
629 pdata->hw_feat.tx_q_cnt = ((mac_hfr2 >> 6) & MAC_HFR2_TXQCNT_MASK);
630 pdata->hw_feat.rx_ch_cnt = ((mac_hfr2 >> 12) & MAC_HFR2_RXCHCNT_MASK);
631 pdata->hw_feat.tx_ch_cnt = ((mac_hfr2 >> 18) & MAC_HFR2_TXCHCNT_MASK);
632 pdata->hw_feat.pps_out_num =
633 ((mac_hfr2 >> 24) & MAC_HFR2_PPSOUTNUM_MASK);
634 pdata->hw_feat.aux_snap_num =
635 ((mac_hfr2 >> 28) & MAC_HFR2_AUXSNAPNUM_MASK);
/* Derive the usable MAC address register count from the largest
 * address-bank feature bit that is set. */
637 if (pdata->hw_feat.mac_addr64_sel)
638 pdata->max_addr_reg_cnt = 128;
639 else if (pdata->hw_feat.mac_addr32_sel)
640 pdata->max_addr_reg_cnt = 64;
641 else if (pdata->hw_feat.mac_addr16_sel)
642 pdata->max_addr_reg_cnt = 32;
644 pdata->max_addr_reg_cnt = 1;
/* Translate the encoded hash-table size into bins. */
646 switch (pdata->hw_feat.hash_tbl_sz) {
648 pdata->max_hash_table_size = 0;
651 pdata->max_hash_table_size = 64;
654 pdata->max_hash_table_size = 128;
657 pdata->max_hash_table_size = 256;
661 pr_debug("<--eqos_get_all_hw_features\n");
665 * \brief API to print all hw features.
667 * \details This function is used to print all the device feature.
669 * \param[in] pdata - pointer to driver private structure
/* Dump the decoded hw_feat capability flags to the kernel log (pr_err is
 * used for visibility, not because these are errors).  Also mirrors
 * vlan_hash_en into pdata->vlan_hash_filtering as a side effect. */
674 void eqos_print_all_hw_features(struct eqos_prv_data *pdata)
678 pr_debug("-->eqos_print_all_hw_features\n");
681 pr_err("=====================================================/\n");
683 pr_err("10/100 Mbps Support : %s\n",
684 pdata->hw_feat.mii_sel ? "YES" : "NO");
685 pr_err("1000 Mbps Support : %s\n",
686 pdata->hw_feat.gmii_sel ? "YES" : "NO");
687 pr_err("Half-duplex Support : %s\n",
688 pdata->hw_feat.hd_sel ? "YES" : "NO");
689 pr_err("PCS Registers(TBI/SGMII/RTBI PHY interface) : %s\n",
690 pdata->hw_feat.pcs_sel ? "YES" : "NO");
691 pr_err("VLAN Hash Filter Selected : %s\n",
692 pdata->hw_feat.vlan_hash_en ? "YES" : "NO");
/* Side effect: remember whether VLAN hash filtering is available. */
693 pdata->vlan_hash_filtering = pdata->hw_feat.vlan_hash_en;
694 pr_err("SMA (MDIO) Interface : %s\n",
695 pdata->hw_feat.sma_sel ? "YES" : "NO");
696 pr_err("PMT Remote Wake-up Packet Enable : %s\n",
697 pdata->hw_feat.rwk_sel ? "YES" : "NO");
698 pr_err("PMT Magic Packet Enable : %s\n",
699 pdata->hw_feat.mgk_sel ? "YES" : "NO");
700 pr_err("RMON/MMC Module Enable : %s\n",
701 pdata->hw_feat.mmc_sel ? "YES" : "NO");
702 pr_err("ARP Offload Enabled : %s\n",
703 pdata->hw_feat.arp_offld_en ? "YES" : "NO");
704 pr_err("IEEE 1588-2008 Timestamp Enabled : %s\n",
705 pdata->hw_feat.ts_sel ? "YES" : "NO");
706 pr_err("Energy Efficient Ethernet Enabled : %s\n",
707 pdata->hw_feat.eee_sel ? "YES" : "NO");
708 pr_err("Transmit Checksum Offload Enabled : %s\n",
709 pdata->hw_feat.tx_coe_sel ? "YES" : "NO");
710 pr_err("Receive Checksum Offload Enabled : %s\n",
711 pdata->hw_feat.rx_coe_sel ? "YES" : "NO");
712 pr_err("MAC Addresses 16–31 Selected : %s\n",
713 pdata->hw_feat.mac_addr16_sel ? "YES" : "NO");
714 pr_err("MAC Addresses 32–63 Selected : %s\n",
715 pdata->hw_feat.mac_addr32_sel ? "YES" : "NO");
716 pr_err("MAC Addresses 64–127 Selected : %s\n",
717 pdata->hw_feat.mac_addr64_sel ? "YES" : "NO");
/* Each switch below maps an encoded feature value to a display string. */
719 switch (pdata->hw_feat.tsstssel) {
733 pr_err("Timestamp System Time Source : %s\n", str);
734 pr_err("Source Address or VLAN Insertion Enable : %s\n",
735 pdata->hw_feat.sa_vlan_ins ? "YES" : "NO");
737 switch (pdata->hw_feat.act_phy_sel) {
765 pr_err("Active PHY Selected : %s\n", str);
767 switch (pdata->hw_feat.rx_fifo_size) {
807 pr_err("MTL Receive FIFO Size : %s\n", str);
809 switch (pdata->hw_feat.tx_fifo_size) {
849 pr_err("MTL Transmit FIFO Size : %s\n", str);
850 pr_err("IEEE 1588 High Word Register Enable : %s\n",
851 pdata->hw_feat.adv_ts_hword ? "YES" : "NO");
852 pr_err("DCB Feature Enable : %s\n",
853 pdata->hw_feat.dcb_en ? "YES" : "NO");
854 pr_err("Split Header Feature Enable : %s\n",
855 pdata->hw_feat.sph_en ? "YES" : "NO");
856 pr_err("TCP Segmentation Offload Enable : %s\n",
857 pdata->hw_feat.tso_en ? "YES" : "NO");
858 pr_err("DMA Debug Registers Enabled : %s\n",
859 pdata->hw_feat.dma_debug_gen ? "YES" : "NO");
860 pr_err("AV Feature Enabled : %s\n",
861 pdata->hw_feat.av_sel ? "YES" : "NO");
862 pr_err("Low Power Mode Enabled : %s\n",
863 pdata->hw_feat.lp_mode_en ? "YES" : "NO");
865 switch (pdata->hw_feat.hash_tbl_sz) {
867 str = "No hash table selected";
879 pr_err("Hash Table Size : %s\n", str);
881 ("Total number of L3 or L4 Filters : %d L3/L4 Filter\n",
882 pdata->hw_feat.l3l4_filter_num);
/* Queue/channel counts are stored zero-based in hardware, hence +1. */
883 pr_err("Number of MTL Receive Queues : %d\n",
884 (pdata->hw_feat.rx_q_cnt + 1));
885 pr_err("Number of MTL Transmit Queues : %d\n",
886 (pdata->hw_feat.tx_q_cnt + 1));
887 pr_err("Number of DMA Receive Channels : %d\n",
888 (pdata->hw_feat.rx_ch_cnt + 1));
889 pr_err("Number of DMA Transmit Channels : %d\n",
890 (pdata->hw_feat.tx_ch_cnt + 1));
892 switch (pdata->hw_feat.pps_out_num) {
894 str = "No PPS output";
897 str = "1 PPS output";
900 str = "2 PPS output";
903 str = "3 PPS output";
906 str = "4 PPS output";
911 pr_err("Number of PPS Outputs : %s\n", str);
913 switch (pdata->hw_feat.aux_snap_num) {
915 str = "No auxillary input";
918 str = "1 auxillary input";
921 str = "2 auxillary input";
924 str = "3 auxillary input";
927 str = "4 auxillary input";
932 pr_err("Number of Auxiliary Snapshot Inputs : %s", str);
935 pr_err("=====================================================/\n");
937 pr_debug("<--eqos_print_all_hw_features\n");
941 * \brief allocation of Rx skb's for default rx mode.
943 * \details This function is invoked by other api's for
944 * allocating the Rx skb's with default Rx mode.
946 * \param[in] pdata - pointer to private data structure.
947 * \param[in] buffer - pointer to wrapper receive buffer data structure.
948 * \param[in] gfp - the type of memory allocation.
952 * \retval 0 on success and -ve number on failure.
955 static int eqos_alloc_rx_buf(struct eqos_prv_data *pdata,
956 struct rx_swcx_desc *prx_swcx_desc, gfp_t gfp)
958 struct sk_buff *skb = prx_swcx_desc->skb;
960 pr_debug("-->eqos_alloc_rx_buf\n");
/* If the descriptor still holds a DMA mapping, the existing buffer can be
 * handled without a fresh allocation (reuse path — body outside this view). */
964 if (prx_swcx_desc->dma)
/* IP-aligned allocation keeps the IP header on a 4-byte boundary. */
970 __netdev_alloc_skb_ip_align(pdata->dev, pdata->rx_buffer_len, gfp);
972 prx_swcx_desc->skb = NULL;
973 pr_err("Failed to allocate skb\n");
976 prx_swcx_desc->skb = skb;
977 prx_swcx_desc->len = pdata->rx_buffer_len;
/* Map the skb data for device DMA; on mapping failure the descriptor
 * must not be handed to hardware. */
980 prx_swcx_desc->dma = dma_map_single(&pdata->pdev->dev, skb->data,
981 pdata->rx_buffer_len,
983 if (dma_mapping_error(&pdata->pdev->dev, prx_swcx_desc->dma)) {
984 pr_err("failed to do the RX dma map\n");
989 prx_swcx_desc->mapped_as_page = Y_FALSE;
991 pr_debug("<--eqos_alloc_rx_buf\n");
997 * \brief api to configure Rx function pointer after reset.
999 * \details This function will initialize the receive function pointers
1000 * which are used for allocating skb's and receiving the packets based
1001 * Rx mode - default.
1003 * \param[in] pdata - pointer to private data structure.
1008 static void eqos_configure_rx_fun_ptr(struct eqos_prv_data *pdata)
1010 pr_debug("-->eqos_configure_rx_fun_ptr\n");
1012 pdata->process_rx_completions = process_rx_completions;
1013 pdata->alloc_rx_buf = eqos_alloc_rx_buf;
1015 pr_debug("<--eqos_configure_rx_fun_ptr\n");
1019 * \brief api to initialize default values.
1021 * \details This function is used to initialize different parameters to
1022 * default values which are common parameters between Tx and Rx path.
1024 * \param[in] pdata - pointer to private data structure.
1029 static void eqos_default_common_confs(struct eqos_prv_data *pdata)
1031 pr_debug("-->eqos_default_common_confs\n");
1033 pdata->drop_tx_pktburstcnt = 1;
1034 pdata->mac_enable_count = 0;
1035 pdata->incr_incrx = EQOS_INCR_ENABLE;
/* Flow control defaults to both directions enabled. */
1036 pdata->flow_ctrl = EQOS_FLOW_CTRL_TX_RX;
1037 pdata->oldflow_ctrl = EQOS_FLOW_CTRL_TX_RX;
1038 pdata->power_down = 0;
/* No source-address insertion/replacement by default. */
1039 pdata->tx_sa_ctrl_via_desc = EQOS_SA0_NONE;
1040 pdata->tx_sa_ctrl_via_reg = EQOS_SA0_NONE;
/* HW timestamping off until explicitly enabled via ioctl. */
1041 pdata->hwts_tx_en = 0;
1042 pdata->hwts_rx_en = 0;
1043 pdata->l3_l4_filter = 0;
/* Hash filtering if the HW has a hash table, otherwise perfect filtering. */
1044 pdata->l2_filtering_mode = !!pdata->hw_feat.hash_tbl_sz;
1045 pdata->tx_path_in_lpi_mode = 0;
1046 pdata->use_lpi_tx_automate = true;
1047 pdata->eee_active = 0;
1048 pdata->one_nsec_accuracy = 1;
1050 pr_debug("<--eqos_default_common_confs\n");
1054 * \brief api to initialize Tx parameters.
1056 * \details This function is used to initialize all Tx
1057 * parameters to default values on reset.
1059 * \param[in] pdata - pointer to private data structure.
1060 * \param[in] qinx - DMA channel/queue number to be initialized.
1065 static void eqos_default_tx_confs_single_q(struct eqos_prv_data *pdata,
1068 struct eqos_tx_queue *queue_data = GET_TX_QUEUE_PTR(qinx);
1069 struct tx_ring *ptx_ring =
1070 GET_TX_WRAPPER_DESC(qinx);
1072 pr_debug("-->eqos_default_tx_confs_single_q\n");
/* Queue operating mode comes from the q_op_mode module parameter. */
1074 queue_data->q_op_mode = q_op_mode[qinx];
1076 ptx_ring->tx_threshold_val = EQOS_TX_THRESHOLD_32;
/* TSF = threshold/store-and-forward; OSF = operate on second frame. */
1077 ptx_ring->tsf_on = EQOS_TSF_ENABLE;
1078 ptx_ring->osf_on = EQOS_OSF_ENABLE;
1079 ptx_ring->tx_pbl = EQOS_PBL_16;
1080 ptx_ring->tx_vlan_tag_via_reg = Y_FALSE;
1081 ptx_ring->tx_vlan_tag_ctrl = EQOS_TX_VLAN_TAG_INSERT;
1082 ptx_ring->vlan_tag_present = 0;
1083 ptx_ring->context_setup = 0;
1084 ptx_ring->default_mss = 0;
1086 pr_debug("<--eqos_default_tx_confs_single_q\n");
1090 * \brief api to initialize Rx parameters.
1092 * \details This function is used to initialize all Rx
1093 * parameters to default values on reset.
1095 * \param[in] pdata - pointer to private data structure.
1096 * \param[in] qinx - DMA queue/channel number to be initialized.
1101 static void eqos_default_rx_confs_single_q(struct eqos_prv_data *pdata,
1104 struct rx_ring *prx_ring =
1105 GET_RX_WRAPPER_DESC(qinx);
1107 pr_debug("-->eqos_default_rx_confs_single_q\n");
1109 prx_ring->rx_threshold_val = EQOS_RX_THRESHOLD_64;
/* RX store-and-forward disabled by default (threshold mode). */
1110 prx_ring->rsf_on = EQOS_RSF_DISABLE;
1111 prx_ring->rx_pbl = EQOS_PBL_16;
/* Always strip both outer and inner VLAN tags on receive. */
1112 prx_ring->rx_outer_vlan_strip = EQOS_RX_VLAN_STRIP_ALWAYS;
1113 prx_ring->rx_inner_vlan_strip = EQOS_RX_VLAN_STRIP_ALWAYS;
1115 pr_debug("<--eqos_default_rx_confs_single_q\n");
/* Apply the default TX configuration to every TX queue. */
1118 static void eqos_default_tx_confs(struct eqos_prv_data *pdata)
1122 pr_debug("-->eqos_default_tx_confs\n");
1124 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++) {
1125 eqos_default_tx_confs_single_q(pdata, qinx);
1128 pr_debug("<--eqos_default_tx_confs\n");
/* Apply the default RX configuration to every RX queue. */
1131 static void eqos_default_rx_confs(struct eqos_prv_data *pdata)
1135 pr_debug("-->eqos_default_rx_confs\n");
1137 for (qinx = 0; qinx < EQOS_RX_QUEUE_CNT; qinx++) {
1138 eqos_default_rx_confs_single_q(pdata, qinx);
1141 pr_debug("<--eqos_default_rx_confs\n");
/* Release the common IRQ and every per-channel rx/tx IRQ that was
 * successfully requested (tracked by rx/tx_irq_alloc_mask), clearing each
 * IRQ's CPU affinity hint before freeing it. */
1144 void free_txrx_irqs(struct eqos_prv_data *pdata)
1148 pr_debug("-->%s()\n", __func__);
1150 free_irq(pdata->common_irq, pdata);
1152 for (i = 0; i < pdata->num_chans; i++) {
1153 if (pdata->rx_irq_alloc_mask & (1 << i)) {
/* Affinity hint must be dropped before free_irq. */
1154 irq_set_affinity_hint(pdata->rx_irqs[i], NULL);
1155 free_irq(pdata->rx_irqs[i], pdata);
1157 if (pdata->tx_irq_alloc_mask & (1 << i)) {
1158 irq_set_affinity_hint(pdata->tx_irqs[i], NULL);
1159 free_irq(pdata->tx_irqs[i], pdata);
1163 pr_debug("<--%s()\n", __func__);
/* Request the shared common IRQ plus one rx and one tx IRQ per channel,
 * naming each for /proc/interrupts (names stored in the static irq_names
 * array) and pinning each channel's IRQs to that channel's CPU.
 * Returns Y_SUCCESS or an error code after unwinding partial setup. */
1166 int request_txrx_irqs(struct eqos_prv_data *pdata)
1168 int ret = Y_SUCCESS;
1170 struct chan_data *pchinfo;
1171 struct platform_device *pdev = pdata->pdev;
1173 pr_debug("-->%s()\n", __func__);
1175 pdata->irq_number = pdata->dev->irq;
1177 ret = request_irq(pdata->common_irq,
1178 eqos_common_isr, IRQF_SHARED, "ether_qos.common_irq", pdata);
1179 if (ret != Y_SUCCESS) {
1180 pr_err("Unable to register %d\n", pdata->common_irq);
1182 goto err_common_irq;
1185 for (i = 0; i < pdata->num_chans; i++) {
/* Name must persist for the IRQ's lifetime, hence the static array. */
1187 snprintf(irq_names[j], 32, "%s.rx%d", dev_name(&pdev->dev), i);
1188 ret = request_irq(pdata->rx_irqs[i],
1189 eqos_ch_isr, 0, irq_names[j++], pdata);
1191 pr_err("Unable to register %d\n", pdata->rx_irqs[i]);
1195 snprintf(irq_names[j], 32, "%s.tx%d", dev_name(&pdev->dev), i);
1196 ret = request_irq(pdata->tx_irqs[i],
1197 eqos_ch_isr, 0, irq_names[j++], pdata);
1199 pr_err("Unable to register %d\n", pdata->tx_irqs[i]);
1203 pchinfo = &pdata->chinfo[i];
/* Steer both IRQs of the channel to its designated CPU and record the
 * successful allocation for free_txrx_irqs(). */
1205 irq_set_affinity_hint(pdata->rx_irqs[i],
1206 cpumask_of(pchinfo->cpu));
1207 pdata->rx_irq_alloc_mask |= (1 << i);
1209 irq_set_affinity_hint(pdata->tx_irqs[i],
1210 cpumask_of(pchinfo->cpu));
1211 pdata->tx_irq_alloc_mask |= (1 << i);
1213 pr_debug("<--%s()\n", __func__);
/* Error unwind: free everything requested so far. */
1218 free_txrx_irqs(pdata);
1219 free_irq(pdata->common_irq, pdata);
1222 pr_debug("<--%s(): error\n", __func__);
1228 * \brief API to open a device for data transmission & reception.
1230 * \details Opens the interface. The interface is opened whenever
1231 * ifconfig activates it. The open method should register any
1232 * system resource it needs like I/O ports, IRQ, DMA, etc,
1233 * turn on the hardware, and perform any other setup your device requires.
1235 * \param[in] dev - pointer to net_device structure
1239 * \retval 0 on success & negative number on failure.
1242 static int eqos_open(struct net_device *dev)
1244 struct eqos_prv_data *pdata = netdev_priv(dev);
1245 int ret = Y_SUCCESS;
1246 struct desc_if_struct *desc_if = &pdata->desc_if;
1248 pr_debug("-->eqos_open\n");
/* Refuse to come up without a valid unicast MAC address. */
1250 if (!is_valid_ether_addr(dev->dev_addr))
1251 return -EADDRNOTAVAIL;
1253 ret = request_txrx_irqs(pdata);
1254 if (ret != Y_SUCCESS)
1257 ret = desc_if->alloc_buff_and_desc(pdata);
1259 dev_err(&pdata->pdev->dev,
1260 "Failed to allocate buffer/descriptor memory\n");
1262 goto err_out_desc_buf_alloc_failed;
/* hw_change_lock serializes start/stop against other HW state changes. */
1265 mutex_lock(&pdata->hw_change_lock);
1266 eqos_start_dev(pdata);
1268 pdata->hw_stopped = false;
1269 mutex_unlock(&pdata->hw_change_lock);
1271 pr_debug("<--%s()\n", __func__);
1274 err_out_desc_buf_alloc_failed:
1275 free_txrx_irqs(pdata);
1278 pr_debug("<--%s()\n", __func__);
1283 * \brief API to close a device.
1285 * \details Stops the interface. The interface is stopped when it is brought
1286 * down. This function should reverse operations performed at open time.
1288 * \param[in] dev - pointer to net_device structure
1292 * \retval 0 on success & negative number on failure.
1295 static int eqos_close(struct net_device *dev)
1297 struct eqos_prv_data *pdata = netdev_priv(dev);
1298 struct desc_if_struct *desc_if = &pdata->desc_if;
1300 pr_debug("-->%s\n", __func__);
/* Mirror of eqos_open(): stop HW, free descriptors/buffers, free IRQs. */
1302 mutex_lock(&pdata->hw_change_lock);
1303 eqos_stop_dev(pdata);
1305 desc_if->free_buff_and_desc(pdata);
1306 free_txrx_irqs(pdata);
1308 pdata->hw_stopped = true;
1309 mutex_unlock(&pdata->hw_change_lock);
/* Flush deferred work after IRQs are gone so it cannot be re-queued. */
1311 /* cancel iso work */
1312 cancel_work_sync(&pdata->iso_work);
1313 /* Cancel FBE handling work */
1314 cancel_work_sync(&pdata->fbe_work);
1316 pr_debug("<--%s\n", __func__);
1321 * \brief API to configure the multicast address in device.
1323 * \details This function collects all the multicast addresse
1324 * and updates the device.
1326 * \param[in] dev - pointer to net_device structure.
1328 * \retval 0 if perfect filtering is selected & 1 if hash
1329 * filtering is selected.
/* Program the MAC multicast filter from the netdev MC list.
 * In hash-filtering mode (l2_filtering_mode set) each address is hashed
 * with bitrev32(~crc32_le(...)) and the top 6/7/8 bits (for a 64/128/256
 * entry table) select a bit in the Hash Table registers.  Otherwise each
 * address is written into a perfect-match MAC address register.
 * Returns 0 for perfect filtering, 1 for hash filtering (per the
 * original doc comment above this function).
 */
1331 static int eqos_prepare_mc_list(struct net_device *dev)
1333 struct eqos_prv_data *pdata = netdev_priv(dev);
1334 struct hw_if_struct *hw_if = &(pdata->hw_if);
1335 u32 mc_filter[EQOS_HTR_CNT];
1336 struct netdev_hw_addr *ha = NULL;
1340 DBGPR_FILTER("-->eqos_prepare_mc_list\n");
1342 if (pdata->l2_filtering_mode) {
1344 ("select HASH FILTERING for mc addresses: mc_count = %d\n",
1345 netdev_mc_count(dev));
1347 memset(mc_filter, 0, sizeof(mc_filter));
/* 64-entry hash table: CRC bits [31:26] index 2 x 32-bit registers. */
1349 if (pdata->max_hash_table_size == 64) {
1350 netdev_for_each_mc_addr(ha, dev) {
1352 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1353 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1354 ha->addr[3], ha->addr[4], ha->addr[5]);
1355 /* The upper 6 bits of the calculated CRC are used to
1356 * index the content of the Hash Table Reg 0 and 1.
1359 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1361 /* The most significant bit determines the register
1362 * to use (Hash Table Reg X, X = 0 and 1) while the
1363 * other 5(0x1F) bits determines the bit within the
1366 mc_filter[crc32_val >> 5] |=
1367 (1 << (crc32_val & 0x1F));
/* 128-entry hash table: CRC bits [31:25] index 4 x 32-bit registers. */
1369 } else if (pdata->max_hash_table_size == 128) {
1370 netdev_for_each_mc_addr(ha, dev) {
1372 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1373 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1374 ha->addr[3], ha->addr[4], ha->addr[5]);
1375 /* The upper 7 bits of the calculated CRC are used to
1376 * index the content of the Hash Table Reg 0,1,2 and 3.
1379 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1382 pr_err("crc_le = %#x, crc_be = %#x\n",
1383 bitrev32(~crc32_le(~0, ha->addr, 6)),
1384 bitrev32(~crc32_be(~0, ha->addr, 6)));
1386 /* The most significant 2 bits determines the register
1387 * to use (Hash Table Reg X, X = 0,1,2 and 3) while the
1388 * other 5(0x1F) bits determines the bit within the
1391 mc_filter[crc32_val >> 5] |=
1392 (1 << (crc32_val & 0x1F));
/* 256-entry hash table: CRC bits [31:24] index 8 x 32-bit registers. */
1394 } else if (pdata->max_hash_table_size == 256) {
1395 netdev_for_each_mc_addr(ha, dev) {
1397 ("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1398 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1399 ha->addr[3], ha->addr[4], ha->addr[5]);
1400 /* The upper 8 bits of the calculated CRC are used to
1401 * index the content of the Hash Table Reg 0,1,2,3,4,
1405 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1407 /* The most significant 3 bits determines the register
1408 * to use (Hash Table Reg X, X = 0,1,2,3,4,5,6 and 7) while
1409 * the other 5(0x1F) bits determines the bit within the
1412 mc_filter[crc32_val >> 5] |=
1413 (1 << (crc32_val & 0x1F));
/* Flush the computed bitmap into every hash-table register. */
1417 for (i = 0; i < EQOS_HTR_CNT; i++)
1418 hw_if->update_hash_table_reg(i, mc_filter[i]);
/* Perfect-filtering path: one MAC address register per MC address. */
1422 ("select PERFECT FILTERING for mc addresses, mc_count = %d, max_addr_reg_cnt = %d\n",
1423 netdev_mc_count(dev), pdata->max_addr_reg_cnt);
1425 netdev_for_each_mc_addr(ha, dev) {
1426 DBGPR_FILTER("mc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1428 ha->addr[0], ha->addr[1], ha->addr[2],
1429 ha->addr[3], ha->addr[4], ha->addr[5]);
1431 hw_if->update_mac_addr1_31_low_high_reg(i,
1435 hw_if->update_mac_addr32_127_low_high_reg(i,
/* Tagged-PTP mode: route PTP multicast addresses to the PTP channel. */
1439 if ((pdata->ptp_cfg.use_tagged_ptp) &&
1440 (is_ptp_addr(ha->addr)))
1441 hw_if->config_ptp_channel(pdata->ptp_cfg.
1448 DBGPR_FILTER("<--eqos_prepare_mc_list\n");
1454 * \brief API to configure the unicast address in device.
1456 * \details This function collects all the unicast addresses
1457 * and updates the device.
1459 * \param[in] dev - pointer to net_device structure.
1461 * \retval 0 if perfect filtering is selected & 1 if hash
1462 * filtering is selected.
/* Program the MAC unicast filter from the netdev UC list; mirror of
 * eqos_prepare_mc_list() for unicast addresses.  In hash mode the
 * device's own dev_addr is also folded into the hash table so the
 * default interface address still passes the filter.
 * Returns 0 for perfect filtering, 1 for hash filtering (per the
 * original doc comment above this function).
 */
1464 static int eqos_prepare_uc_list(struct net_device *dev)
1466 struct eqos_prv_data *pdata = netdev_priv(dev);
1467 struct hw_if_struct *hw_if = &(pdata->hw_if);
1468 u32 uc_filter[EQOS_HTR_CNT];
1469 struct netdev_hw_addr *ha = NULL;
1473 DBGPR_FILTER("-->eqos_prepare_uc_list\n");
1475 if (pdata->l2_filtering_mode) {
1477 ("select HASH FILTERING for uc addresses: uc_count = %d\n",
1478 netdev_uc_count(dev));
1480 memset(uc_filter, 0, sizeof(uc_filter));
/* Hash width depends on table size: top 6/7/8 CRC bits for 64/128/256. */
1482 if (pdata->max_hash_table_size == 64) {
1483 netdev_for_each_uc_addr(ha, dev) {
1485 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1486 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1487 ha->addr[3], ha->addr[4], ha->addr[5]);
1489 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1491 uc_filter[crc32_val >> 5] |=
1492 (1 << (crc32_val & 0x1F));
1494 } else if (pdata->max_hash_table_size == 128) {
1495 netdev_for_each_uc_addr(ha, dev) {
1497 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1498 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1499 ha->addr[3], ha->addr[4], ha->addr[5]);
1501 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1503 uc_filter[crc32_val >> 5] |=
1504 (1 << (crc32_val & 0x1F));
1506 } else if (pdata->max_hash_table_size == 256) {
1507 netdev_for_each_uc_addr(ha, dev) {
1509 ("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1510 i++, ha->addr[0], ha->addr[1], ha->addr[2],
1511 ha->addr[3], ha->addr[4], ha->addr[5]);
1513 (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
1515 uc_filter[crc32_val >> 5] |=
1516 (1 << (crc32_val & 0x1F));
/* configure hash value of real/default interface also */
1520 /* configure hash value of real/default interface also */
1522 ("real/default dev_addr = %#x:%#x:%#x:%#x:%#x:%#x\n",
1523 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1524 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1526 if (pdata->max_hash_table_size == 64) {
1528 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 26);
1529 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
1530 } else if (pdata->max_hash_table_size == 128) {
1532 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 25);
1533 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
1535 } else if (pdata->max_hash_table_size == 256) {
1537 (bitrev32(~crc32_le(~0, dev->dev_addr, 6)) >> 24);
1538 uc_filter[crc32_val >> 5] |= (1 << (crc32_val & 0x1F));
/* Flush the computed bitmap into every hash-table register. */
1541 for (i = 0; i < EQOS_HTR_CNT; i++)
1542 hw_if->update_hash_table_reg(i, uc_filter[i]);
/* Perfect-filtering path: one MAC address register per UC address. */
1546 ("select PERFECT FILTERING for uc addresses: uc_count = %d\n",
1547 netdev_uc_count(dev));
1549 netdev_for_each_uc_addr(ha, dev) {
1550 DBGPR_FILTER("uc addr[%d] = %#x:%#x:%#x:%#x:%#x:%#x\n",
1551 i, ha->addr[0], ha->addr[1], ha->addr[2],
1552 ha->addr[3], ha->addr[4], ha->addr[5]);
1554 hw_if->update_mac_addr1_31_low_high_reg(i,
1558 hw_if->update_mac_addr32_127_low_high_reg(i,
1565 DBGPR_FILTER("<--eqos_prepare_uc_list\n");
1571 * \brief API to set the device receive mode
1573 * \details The set_multicast_list function is called when the multicast list
1574 * for the device changes and when the flags change.
1576 * \param[in] dev - pointer to net_device structure.
/* .ndo_set_rx_mode handler: choose promiscuous / all-multi / hash /
 * perfect filtering from dev->flags and the MC/UC list sizes, then
 * commit the mode bits to the MAC packet-filter register.  Runs under
 * pdata->lock.  Falls back to promiscuous mode when the address count
 * exceeds the perfect-filter registers and no hash table is present.
 */
1580 static void eqos_set_rx_mode(struct net_device *dev)
1582 struct eqos_prv_data *pdata = netdev_priv(dev);
1583 struct hw_if_struct *hw_if = &(pdata->hw_if);
/* Mode flags eventually written to the MAC packet filter register:
 * promiscuous, hash-unicast, hash-multicast, pass-all-multicast,
 * hash-or-perfect. */
1584 unsigned char pr_mode = 0;
1585 unsigned char huc_mode = 0;
1586 unsigned char hmc_mode = 0;
1587 unsigned char pm_mode = 0;
1588 unsigned char hpf_mode = 0;
1591 DBGPR_FILTER("-->eqos_set_rx_mode\n");
1593 spin_lock(&pdata->lock);
1595 if (dev->flags & IFF_PROMISC) {
1597 ("PROMISCUOUS MODE (Accept all packets irrespective of DA)\n");
/* With ENABLE_PERFECT_L2_FILTER only IFF_ALLMULTI triggers pass-all;
 * otherwise an over-long MC list does too. */
1599 #ifdef ENABLE_PERFECT_L2_FILTER
1600 } else if ((dev->flags & IFF_ALLMULTI)) {
1602 } else if ((dev->flags & IFF_ALLMULTI) ||
1603 (netdev_mc_count(dev) > (pdata->max_hash_table_size))) {
1605 DBGPR_FILTER("pass all multicast pkt\n");
/* All-multi via hash table: set every hash bit. */
1607 if (pdata->max_hash_table_size) {
1608 for (i = 0; i < EQOS_HTR_CNT; i++)
1609 hw_if->update_hash_table_reg(i, 0xffffffff);
1611 } else if (!netdev_mc_empty(dev)) {
1612 DBGPR_FILTER("pass list of multicast pkt\n");
/* Too many MC addresses for perfect filters and no hash table:
 * promiscuous is the only remaining option. */
1613 if ((netdev_mc_count(dev) > (pdata->max_addr_reg_cnt - 1)) &&
1614 (!pdata->max_hash_table_size)) {
1615 /* switch to PROMISCUOUS mode */
1618 mode = eqos_prepare_mc_list(dev);
1620 /* Hash filtering for multicast */
1623 /* Perfect filtering for multicast */
1630 /* Handle multiple unicast addresses */
1631 if ((netdev_uc_count(dev) > (pdata->max_addr_reg_cnt - 1)) &&
1632 (!pdata->max_hash_table_size)) {
1633 /* switch to PROMISCUOUS mode */
1635 } else if (!netdev_uc_empty(dev)) {
1636 mode = eqos_prepare_uc_list(dev);
1638 /* Hash filtering for unicast */
1641 /* Perfect filtering for unicast */
/* Commit all mode bits in one register write. */
1647 hw_if->config_mac_pkt_filter_reg(pr_mode, huc_mode,
1648 hmc_mode, pm_mode, hpf_mode);
1650 spin_unlock(&pdata->lock);
1652 pr_debug("<--eqos_set_rx_mode\n");
1657 * \brief API to transmit the packets
1659 * \details The start_xmit function initiates the transmission of a packet.
1660 * The full packet (protocol headers and all) is contained in a socket buffer
1661 * (sk_buff) structure.
1663 * \param[in] skb - pointer to sk_buff structure
1664 * \param[in] dev - pointer to net_device structure
/* .ndo_start_xmit handler: map the skb onto TX descriptors for the
 * queue chosen by skb_get_queue_mapping(), handling VLAN tag insertion,
 * HW timestamp request, TSO and checksum offload, then kick the DMA via
 * hw_if->pre_xmit().  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 * Serialized per-channel by chan_tx_lock.
 */
1671 static int eqos_start_xmit(struct sk_buff *skb, struct net_device *dev)
1673 struct eqos_prv_data *pdata = netdev_priv(dev);
1674 UINT qinx = skb_get_queue_mapping(skb);
1676 struct tx_ring *ptx_ring = GET_TX_WRAPPER_DESC(qinx);
1677 struct s_tx_pkt_features *tx_pkt_features = GET_TX_PKT_FEATURES_PTR(qinx);
1680 struct hw_if_struct *hw_if = &pdata->hw_if;
1681 struct desc_if_struct *desc_if = &pdata->desc_if;
1682 INT retval = NETDEV_TX_OK;
1685 pr_debug("-->eqos_start_xmit: skb->len = %d, qinx = %u\n", skb->len, qinx);
/* Opportunistically reclaim completed descriptors once more than a
 * quarter of the ring is queued, before taking the channel lock. */
1687 if (ptx_ring->tx_pkt_queued > (TX_DESC_CNT >> 2))
1688 process_tx_completions(pdata->dev, pdata, qinx);
1690 spin_lock(&pdata->chinfo[qinx].chan_tx_lock);
/* Drop empty skbs handed down by the stack. */
1692 if (skb->len <= 0) {
1693 dev_kfree_skb_any(skb);
1694 pr_err("%s : Empty skb received from stack\n", dev->name);
1695 goto tx_netdev_return;
1699 memset(tx_pkt_features, 0, sizeof(struct s_tx_pkt_features));
/* VLAN offload: the pre-4.4 API was vlan_tx_tag_*(); newer kernels use
 * skb_vlan_tag_*().  Priority bits are folded into the tag's PCP field. */
1701 #ifdef EQOS_ENABLE_VLAN_TAG
1702 ptx_ring->vlan_tag_present = 0;
1703 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
1704 if (vlan_tx_tag_present(skb)) {
1705 USHORT vlan_tag = vlan_tx_tag_get(skb);
1707 if (skb_vlan_tag_present(skb)) {
1708 USHORT vlan_tag = skb_vlan_tag_get(skb);
1710 vlan_tag |= (skb->priority << 13);
1711 ptx_ring->vlan_tag_present = 1;
/* Only reprogram the VLAN context when the tag changed (or a context
 * setup is pending), via register or context descriptor as configured. */
1712 if (vlan_tag != ptx_ring->vlan_tag_id ||
1713 ptx_ring->context_setup == 1) {
1714 ptx_ring->vlan_tag_id = vlan_tag;
1715 if (Y_TRUE == ptx_ring->tx_vlan_tag_via_reg) {
1716 pr_err("VLAN control info update via reg\n");
1717 hw_if->enable_vlan_reg_control(ptx_ring);
1719 hw_if->enable_vlan_desc_control(pdata);
1720 TX_PKT_FEATURES_PKT_ATTRIBUTES_VLAN_PKT_WR
1721 (tx_pkt_features->pkt_attributes, 1);
1722 TX_PKT_FEATURES_VLAN_TAG_VT_WR
1723 (tx_pkt_features->vlan_tag, vlan_tag);
1726 pdata->xstats.tx_vlan_pkt_n++;
1730 /* check for hw tstamping */
1731 if (pdata->hw_feat.tsstssel && pdata->hwts_tx_en) {
1732 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1733 /* declare that device is doing timestamping */
1734 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1735 TX_PKT_FEATURES_PKT_ATTRIBUTES_PTP_ENABLE_WR
1736 (tx_pkt_features->pkt_attributes, 1);
1738 ("Got PTP pkt to transmit [qinx = %d, cur_tx = %d]\n",
1739 qinx, ptx_ring->cur_tx);
/* TSO preparation; on failure the skb is dropped (counted as OK to the
 * stack so it is not retried). */
1743 tso = desc_if->handle_tso(dev, skb);
1745 pr_err("Unable to handle TSO\n");
1746 dev_kfree_skb_any(skb);
1747 retval = NETDEV_TX_OK;
1748 goto tx_netdev_return;
1751 pdata->xstats.tx_tso_pkt_n++;
1752 TX_PKT_FEATURES_PKT_ATTRIBUTES_TSO_ENABLE_WR(tx_pkt_features->
1754 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1755 TX_PKT_FEATURES_PKT_ATTRIBUTES_CSUM_ENABLE_WR(tx_pkt_features->
/* Map skb fragments to software descriptor contexts; a full ring stops
 * the subqueue and reports NETDEV_TX_BUSY so the stack requeues. */
1760 cnt = desc_if->tx_swcx_alloc(dev, skb);
1763 ptx_ring->queue_stopped = 1;
1764 netif_stop_subqueue(dev, qinx);
1765 pr_debug("%s(): TX ring full for queue %d\n",
1767 retval = NETDEV_TX_BUSY;
1768 goto tx_netdev_return;
1770 dev_kfree_skb_any(skb);
1771 retval = NETDEV_TX_OK;
1772 goto tx_netdev_return;
/* NOTE(review): dev->trans_start direct write is the legacy pattern;
 * modern kernels use netif_trans_update() — confirm target kernel. */
1775 dev->trans_start = jiffies;
1777 ptx_ring->free_desc_cnt -= cnt;
1778 ptx_ring->tx_pkt_queued += cnt;
1780 #ifdef EQOS_ENABLE_TX_PKT_DUMP
1781 print_pkt(skb, skb->len, 1, (ptx_ring->cur_tx - 1));
1784 #ifdef ENABLE_CHANNEL_DATA_CHECK
1785 check_channel_data(skb, qinx, 0);
/* Leave LPI (EEE) mode before transmitting unless HW automation is on. */
1788 if ((pdata->eee_enabled) && (pdata->tx_path_in_lpi_mode) &&
1789 (!pdata->use_lpi_tx_automate))
1790 eqos_disable_eee_mode(pdata);
1792 /* fallback to software time stamping if core doesn't
1793 * support hardware time stamping */
1794 if ((pdata->hw_feat.tsstssel == 0) || (pdata->hwts_tx_en == 0))
1795 skb_tx_timestamp(skb);
1797 /* configure required descriptor fields for transmission */
1798 hw_if->pre_xmit(pdata, qinx);
1801 spin_unlock(&pdata->chinfo[qinx].chan_tx_lock);
1803 pr_debug("<--eqos_start_xmit\n");
/* Debug helper: decode the RX descriptor's PTP status word (rdes1) into
 * human-readable strings (timestamp dropped/available, PTP version,
 * transport, message type) and print them via DBGPR_PTP.  No effect on
 * the data path; returns early if RDES3.RS1V says rdes1 is invalid.
 */
1808 static void eqos_print_rx_tstamp_info(struct s_rx_desc *rxdesc,
1813 char *tstamp_dropped = NULL;
1814 char *tstamp_available = NULL;
1815 char *ptp_version = NULL;
1816 char *ptp_pkt_type = NULL;
1817 char *ptp_msg_type = NULL;
1819 DBGPR_PTP("-->eqos_print_rx_tstamp_info\n");
1821 /* status in rdes1 is not valid */
1822 if (!(rxdesc->rdes3 & EQOS_RDESC3_RS1V))
/* Bit decode of rdes1: 0x8000 drop, 0x4000 available, 0x2000 version,
 * 0x1000 transport, [11:8] PTP message type. */
1825 ptp_status = rxdesc->rdes1;
1826 tstamp_dropped = ((ptp_status & 0x8000) ? "YES" : "NO");
1827 tstamp_available = ((ptp_status & 0x4000) ? "YES" : "NO");
1829 ((ptp_status & 0x2000) ? "v2 (1588-2008)" : "v1 (1588-2002)");
1831 ((ptp_status & 0x1000) ? "ptp over Eth" : "ptp over IPv4/6");
1833 pkt_type = ((ptp_status & 0xF00) >> 8);
/* Message-type dispatch (switch cases fall in sampling gaps here). */
1836 ptp_msg_type = "NO PTP msg received";
1839 ptp_msg_type = "SYNC";
1842 ptp_msg_type = "Follow_Up";
1845 ptp_msg_type = "Delay_Req";
1848 ptp_msg_type = "Delay_Resp";
1851 ptp_msg_type = "Pdelay_Req";
1854 ptp_msg_type = "Pdelay_Resp";
1857 ptp_msg_type = "Pdelay_Resp_Follow_up";
1860 ptp_msg_type = "Announce";
1863 ptp_msg_type = "Management";
1866 ptp_msg_type = "Signaling";
1872 ptp_msg_type = "Reserved";
1875 ptp_msg_type = "PTP pkr with Reserved Msg Type";
1879 DBGPR_PTP("Rx timestamp detail for queue %d\n"
1880 "tstamp dropped = %s\n"
1881 "tstamp available = %s\n"
1882 "PTP version = %s\n"
1883 "PTP Pkt Type = %s\n"
1884 "PTP Msg Type = %s\n",
1885 qinx, tstamp_dropped, tstamp_available,
1886 ptp_version, ptp_pkt_type, ptp_msg_type);
1888 DBGPR_PTP("<--eqos_print_rx_tstamp_info\n");
1892 * \brief API to get rx time stamp value.
1894 * \details This function will read received packet's timestamp from
1895 * the descriptor and pass it to stack and also perform some sanity checks.
1897 * \param[in] pdata - pointer to private data structure.
1898 * \param[in] skb - pointer to sk_buff structure.
1899 * \param[in] prx_ring - pointer to wrapper receive descriptor structure.
1900 * \param[in] qinx - Queue/Channel number.
1904 * \retval 0 if no context descriptor
1905 * \retval 1 if timestamp is valid
1906 * \retval 2 if time stamp is corrupted
/* Read the RX hardware timestamp from the context descriptor that
 * follows the data descriptor, retrying up to 10 times while the device
 * finishes writing it, and store it in the skb's shared hwtstamps.
 * Advances cur_rx/dirty_rx past the context descriptor on success and
 * rewinds them when the timestamp never becomes available.
 * Per the original doc comment: returns 0 (no context desc),
 * 1 (valid timestamp), 2 (corrupted timestamp).
 */
1909 static unsigned char eqos_get_rx_hwtstamp(struct eqos_prv_data *pdata,
1910 struct sk_buff *skb,
1912 *prx_ring, unsigned int qinx)
1914 struct s_rx_desc *prx_desc =
1915 GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
1916 struct s_rx_context_desc *rx_context_desc = NULL;
1917 struct hw_if_struct *hw_if = &(pdata->hw_if);
1918 struct skb_shared_hwtstamps *shhwtstamp = NULL;
1922 DBGPR_PTP("-->eqos_get_rx_hwtstamp\n");
1924 eqos_print_rx_tstamp_info(prx_desc, qinx);
/* Step onto the context descriptor that carries the timestamp. */
1926 prx_ring->dirty_rx++;
1927 INCR_RX_DESC_INDEX(prx_ring->cur_rx, 1);
1928 rx_context_desc = GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
1930 DBGPR_PTP("\nRX_CONTEX_DESC[%d %4p %d RECEIVED FROM DEVICE]"
1931 " = %#x:%#x:%#x:%#x",
1932 qinx, rx_context_desc, prx_ring->cur_rx,
1933 rx_context_desc->rdes0, rx_context_desc->rdes1,
1934 rx_context_desc->rdes2, rx_context_desc->rdes3);
1936 /* check rx tstamp: poll until the device publishes it */
1937 for (retry = 0; retry < 10; retry++) {
1938 ret = hw_if->get_rx_tstamp_status(rx_context_desc);
1940 /* time stamp is valid */
1942 } else if (ret == 0) {
1943 pr_err("Device has not yet updated the context "
1944 "desc to hold Rx time stamp(retry = %d)\n",
1948 ("Error: Rx time stamp is corrupted(retry = %d)\n",
/* Retries exhausted: undo the descriptor advance so the context
 * descriptor is revisited on the next poll. */
1955 pr_err("Device has not yet updated the context "
1956 "desc to hold Rx time stamp(retry = %d)\n", retry);
1957 prx_ring->dirty_rx--;
1958 DECR_RX_DESC_INDEX(prx_ring->cur_rx);
1962 pdata->xstats.rx_timestamp_captured_n++;
1963 /* get valid tstamp */
1964 ns = hw_if->get_rx_tstamp(rx_context_desc);
/* Publish the timestamp to the stack via the skb's shared hwtstamps. */
1966 shhwtstamp = skb_hwtstamps(skb);
1967 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
1968 shhwtstamp->hwtstamp = ns_to_ktime(ns);
1970 DBGPR_PTP("<--eqos_get_rx_hwtstamp\n");
1976 * \brief API to get tx time stamp value.
1978 * \details This function will read timestamp from the descriptor
1979 * and pass it to stack and also perform some sanity checks.
1981 * \param[in] pdata - pointer to private data structure.
1982 * \param[in] txdesc - pointer to transmit descriptor structure.
1983 * \param[in] skb - pointer to sk_buff structure.
1987 * \retval 1 if time stamp is taken
1988 * \retval 0 if time stamp in not taken/valid
/* Read the TX hardware timestamp — from the descriptor normally, or
 * from MAC registers when drop-TX-status mode is enabled — and pass it
 * to the stack with skb_tstamp_tx().  Per the original doc comment:
 * returns 1 when a timestamp was captured, 0 otherwise.
 */
1991 static unsigned int eqos_get_tx_hwtstamp(struct eqos_prv_data *pdata,
1992 struct s_tx_desc *txdesc,
1993 struct sk_buff *skb)
1995 struct hw_if_struct *hw_if = &(pdata->hw_if);
1996 struct skb_shared_hwtstamps shhwtstamp;
1999 DBGPR_PTP("-->eqos_get_tx_hwtstamp\n");
/* Descriptor-based path: status and timestamp live in the TX desc. */
2001 if (hw_if->drop_tx_status_enabled() == 0) {
2002 /* check tx tstamp status */
2003 if (!hw_if->get_tx_tstamp_status(txdesc)) {
2005 ("tx timestamp is not captured for this packet\n");
2009 /* get the valid tstamp */
2010 ns = hw_if->get_tx_tstamp(txdesc);
2012 /* drop tx status mode is enabled, hence read time
2013 * stamp from register instead of descriptor */
2015 /* check tx tstamp status */
2016 if (!hw_if->get_tx_tstamp_status_via_reg()) {
2018 ("tx timestamp is not captured for this packet\n");
2022 /* get the valid tstamp */
2023 ns = hw_if->get_tx_tstamp_via_reg();
2026 pdata->xstats.tx_timestamp_captured_n++;
/* Hand the timestamp to the socket error queue / stack. */
2027 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2028 shhwtstamp.hwtstamp = ns_to_ktime(ns);
2029 /* pass tstamp to stack */
2030 skb_tstamp_tx(skb, &shhwtstamp);
2032 DBGPR_PTP("<--eqos_get_tx_hwtstamp\n");
2038 * \brief API to update the tx status.
2040 * \details This function is called in isr handler once after getting
2041 * transmit complete interrupt to update the transmitted packet status
2042 * and it does some house keeping work like updating the
2043 * private data structure variables.
2045 * \param[in] dev - pointer to net_device structure
2046 * \param[in] pdata - pointer to private data structure.
/* Reclaim completed TX descriptors for channel qinx: collect HW
 * timestamps, accumulate error statistics, free the skb/software
 * context, reset each descriptor and advance dirty_tx.  Wakes the
 * subqueue if it was stopped and re-arms EEE entry.  Serialized by the
 * per-channel chan_tx_lock.
 */
2051 static void process_tx_completions(struct net_device *dev,
2052 struct eqos_prv_data *pdata, UINT qinx)
2054 struct tx_ring *ptx_ring =
2055 GET_TX_WRAPPER_DESC(qinx);
2056 struct s_tx_desc *ptx_desc = NULL;
2057 struct tx_swcx_desc *ptx_swcx_desc = NULL;
2058 struct hw_if_struct *hw_if = &(pdata->hw_if);
2059 struct desc_if_struct *desc_if = &(pdata->desc_if);
2060 int err_incremented;
2061 unsigned int tstamp_taken = 0;
2063 pr_debug("-->%s(): ptx_ring->tx_pkt_queued = %d"
2064 " dirty_tx = %d, qinx = %u\n",
2066 ptx_ring->tx_pkt_queued, ptx_ring->dirty_tx, qinx);
2068 spin_lock(&pdata->chinfo[qinx].chan_tx_lock);
2070 pdata->xstats.tx_clean_n[qinx]++;
/* Walk from dirty_tx while queued descriptors remain; stop at the
 * first descriptor the hardware has not completed. */
2071 while (ptx_ring->tx_pkt_queued > 0) {
2072 ptx_desc = GET_TX_DESC_PTR(qinx, ptx_ring->dirty_tx);
2073 ptx_swcx_desc = GET_TX_BUF_PTR(qinx, ptx_ring->dirty_tx);
2076 if (!hw_if->tx_complete(ptx_desc))
2079 #ifdef EQOS_ENABLE_TX_DESC_DUMP
2080 dump_tx_desc(pdata, ptx_ring->dirty_tx, ptx_ring->dirty_tx,
2084 /* update the tx error if any by looking at last segment
2085 * for NORMAL descriptors
2087 if ((hw_if->get_tx_desc_ls(ptx_desc)) &&
2088 !(hw_if->get_tx_desc_ctxt(ptx_desc))) {
2089 if (ptx_swcx_desc->skb == NULL) {
2090 dev_err(&pdata->pdev->dev,
2091 "NULL SKB in process_tx_completions()\n");
2093 /* check whether skb support hw tstamp */
2094 if ((pdata->hw_feat.tsstssel) &&
2095 (skb_shinfo(ptx_swcx_desc->skb)->
2096 tx_flags & SKBTX_IN_PROGRESS)) {
2098 eqos_get_tx_hwtstamp(pdata, ptx_desc,
2099 ptx_swcx_desc->skb);
2102 ("passed tx timestamp to stack[qinx = %d, dirty_tx = %d]\n",
2103 qinx, ptx_ring->dirty_tx);
/* Error accounting: each optional hw_if hook is checked for presence
 * before use; err_incremented collapses them into one tx_errors bump. */
2107 err_incremented = 0;
2108 if (hw_if->tx_window_error) {
2109 if (hw_if->tx_window_error(ptx_desc)) {
2110 err_incremented = 1;
2111 dev->stats.tx_window_errors++;
2114 if (hw_if->tx_aborted_error) {
2115 if (hw_if->tx_aborted_error(ptx_desc)) {
2116 err_incremented = 1;
2117 dev->stats.tx_aborted_errors++;
2118 if (hw_if->tx_handle_aborted_error)
2120 tx_handle_aborted_error
2124 if (hw_if->tx_carrier_lost_error) {
2125 if (hw_if->tx_carrier_lost_error(ptx_desc)) {
2126 err_incremented = 1;
2127 dev->stats.tx_carrier_errors++;
2130 if (hw_if->tx_fifo_underrun) {
2131 if (hw_if->tx_fifo_underrun(ptx_desc)) {
2132 err_incremented = 1;
2133 dev->stats.tx_fifo_errors++;
2134 if (hw_if->tx_update_fifo_threshold)
2136 tx_update_fifo_threshold
2140 if (hw_if->tx_get_collision_count)
2141 dev->stats.collisions +=
2142 hw_if->tx_get_collision_count(ptx_desc);
2144 if (err_incremented == 1)
2145 dev->stats.tx_errors++;
2147 pdata->xstats.q_tx_pkt_n[qinx]++;
2148 pdata->xstats.tx_pkt_n++;
2149 dev->stats.tx_packets++;
2151 dev->stats.tx_bytes += ptx_swcx_desc->len;
2152 desc_if->tx_swcx_free(pdata, ptx_swcx_desc);
2154 /* reset the descriptor so that driver/host can reuse it */
2155 hw_if->tx_desc_reset(ptx_ring->dirty_tx, pdata, qinx);
2157 INCR_TX_DESC_INDEX(ptx_ring->dirty_tx, 1);
2158 ptx_ring->free_desc_cnt++;
2159 ptx_ring->tx_pkt_queued--;
/* Restart a queue stopped by eqos_start_xmit() once space exists. */
2162 if ((ptx_ring->queue_stopped == 1) && (ptx_ring->free_desc_cnt > 0)) {
2163 ptx_ring->queue_stopped = 0;
2164 netif_wake_subqueue(dev, qinx);
/* Re-enter LPI (EEE) after cleanup unless HW automation handles it. */
2167 if ((pdata->eee_enabled) && (!pdata->tx_path_in_lpi_mode) &&
2168 (!pdata->use_lpi_tx_automate)) {
2169 eqos_enable_eee_mode(pdata);
2170 mod_timer(&pdata->eee_ctrl_timer,
2171 EQOS_LPI_TIMER(EQOS_DEFAULT_LPI_TIMER));
2174 spin_unlock(&pdata->chinfo[qinx].chan_tx_lock);
2176 pr_debug("<--%s(): ptx_ring->tx_pkt_queued = %d\n",
2177 __func__, ptx_ring->tx_pkt_queued);
2180 #ifdef YDEBUG_FILTER
/* Debug-only (YDEBUG_FILTER) helper: decode the RX descriptor filter
 * status bits in rdes2 (valid only when rdes3 bit 27 is set) and print
 * which L2/L3/L4 filter matched or failed.
 */
2181 static void eqos_check_rx_filter_status(struct s_rx_desc *prx_desc)
2183 u32 rdes2 = prx_desc->rdes2;
2184 u32 rdes3 = prx_desc->rdes3;
2186 /* Receive Status rdes2 Valid ? */
2187 if ((rdes3 & 0x8000000) == 0x8000000) {
2188 if ((rdes2 & 0x400) == 0x400)
2189 pr_err("ARP pkt received\n");
2190 if ((rdes2 & 0x800) == 0x800)
2191 pr_err("ARP reply not generated\n");
2192 if ((rdes2 & 0x8000) == 0x8000)
2193 pr_err("VLAN pkt passed VLAN filter\n");
2194 if ((rdes2 & 0x10000) == 0x10000)
2195 pr_err("SA Address filter fail\n");
2196 if ((rdes2 & 0x20000) == 0x20000)
2197 pr_err("DA Addess filter fail\n");
2198 if ((rdes2 & 0x40000) == 0x40000)
2200 ("pkt passed the HASH filter in MAC and HASH value = %#x\n",
2201 (rdes2 >> 19) & 0xff);
2202 if ((rdes2 & 0x8000000) == 0x8000000)
2203 pr_err("L3 filter(%d) Match\n", ((rdes2 >> 29) & 0x7));
2204 if ((rdes2 & 0x10000000) == 0x10000000)
2205 pr_err("L4 filter(%d) Match\n", ((rdes2 >> 29) & 0x7));
2208 #endif /* YDEBUG_FILTER */
2210 /* pass skb to upper layer */
/* Hand a received skb to the network stack, choosing GRO, LRO or plain
 * netif_receive_skb() based on the device feature flags.  LRO delivery
 * sets lro_flush_needed so the caller flushes the LRO manager at the
 * end of the poll cycle.
 */
2211 static void eqos_receive_skb(struct eqos_prv_data *pdata,
2212 struct net_device *dev, struct sk_buff *skb,
2215 struct eqos_rx_queue *rx_queue = GET_RX_QUEUE_PTR(qinx);
2217 skb_record_rx_queue(skb, qinx);
2219 skb->protocol = eth_type_trans(skb, dev);
/* GRO takes priority; LRO only for packets the HW already checksummed. */
2221 if (dev->features & NETIF_F_GRO) {
2222 napi_gro_receive(&rx_queue->napi, skb);
2223 } else if ((dev->features & NETIF_F_LRO) &&
2224 (skb->ip_summed == CHECKSUM_UNNECESSARY)) {
2225 lro_receive_skb(&rx_queue->lro_mgr, skb, (void *)pdata);
2226 rx_queue->lro_flush_needed = 1;
2228 netif_receive_skb(skb);
2232 /* Receive Checksum Offload configuration */
/* Propagate the hardware RX checksum result to the skb: default to
 * CHECKSUM_NONE, and mark CHECKSUM_UNNECESSARY only when RXCSUM is
 * enabled, rdes1 status is valid (RDES3.RS1V) and the rdes1 error bits
 * (mask 0xC8) are all clear.
 */
2233 static inline void eqos_config_rx_csum(struct eqos_prv_data *pdata,
2234 struct sk_buff *skb,
2235 struct s_rx_desc *prx_desc)
2239 skb->ip_summed = CHECKSUM_NONE;
2241 if ((pdata->dev_state & NETIF_F_RXCSUM) == NETIF_F_RXCSUM) {
2242 /* Receive Status rdes1 Valid ? */
2243 if ((prx_desc->rdes3 & EQOS_RDESC3_RS1V)) {
2244 /* check(rdes1.IPCE bit) whether device has done csum correctly or not */
2245 RX_NORMAL_DESC_RDES1_RD(prx_desc->rdes1, rdes1);
2246 if ((rdes1 & 0xC8) == 0x0)
2247 skb->ip_summed = CHECKSUM_UNNECESSARY; /* csum done by device */
/* Extract the hardware-stripped VLAN tag from rdes0 and attach it to
 * the skb with __vlan_hwaccel_put_tag().  Only acts when CTAG RX
 * offload is enabled, rdes0 status is valid (RDES3.RS0V) and the
 * descriptor's length/type field reports a (double-)VLAN frame.
 */
2252 static inline void eqos_get_rx_vlan(struct eqos_prv_data *pdata,
2253 struct sk_buff *skb,
2254 struct s_rx_desc *prx_desc)
2256 USHORT vlan_tag = 0;
2258 if ((pdata->dev_state & NETIF_F_HW_VLAN_CTAG_RX) ==
2259 NETIF_F_HW_VLAN_CTAG_RX) {
2260 /* Receive Status rdes0 Valid ? */
2261 if ((prx_desc->rdes3 & EQOS_RDESC3_RS0V)) {
2262 /* device received frame with VLAN Tag or
2263 * double VLAN Tag ? */
2264 if (((prx_desc->rdes3 & EQOS_RDESC3_LT) ==
2266 ((prx_desc->rdes3 & EQOS_RDESC3_LT) ==
/* Low 16 bits of rdes0 hold the stripped tag. */
2268 vlan_tag = prx_desc->rdes0 & 0xffff;
2269 /* insert VLAN tag into skb */
2270 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2272 pdata->xstats.rx_vlan_pkt_n++;
2278 /* This api check for payload type and returns
2279 * 1 if payload load is TCP else returns 0;
/* Return 1 when the RX descriptor's payload-type field (rdes1.PT,
 * valid only when RDES3.RS1V is set) identifies TCP, else 0 — used to
 * decide LRO eligibility.
 */
2281 static int eqos_check_for_tcp_payload(struct s_rx_desc *rxdesc)
2286 if (rxdesc->rdes3 & EQOS_RDESC3_RS1V) {
2287 pt_type = rxdesc->rdes1 & EQOS_RDESC1_PT;
2288 if (pt_type == EQOS_RDESC1_PT_TCP)
2296 * \brief API to pass the Rx packets to stack if default mode
2299 * \details This function is invoked by main NAPI function in default
2300 * Rx mode. This function checks the
2301 * device descriptor for the packets and passes it to stack if any packtes
2302 * are received by device.
2304 * \param[in] pdata - pointer to private data structure.
2305 * \param[in] quota - maximum no. of packets that we are allowed to pass
2306 * to into the kernel.
2307 * \param[in] qinx - DMA channel/queue no. to be checked for packet.
2311 * \retval number of packets received.
/* Default-mode NAPI RX processing for channel qinx: walk the RX ring up
 * to 'quota' packets, unmap each completed buffer, apply copybreak for
 * small frames, propagate checksum/VLAN/timestamp metadata and deliver
 * the skb.  Refills the ring via realloc_skb once dirty_rx crosses the
 * realloc threshold.  Returns the number of packets received.
 */
2314 static int process_rx_completions(struct eqos_prv_data *pdata,
2315 int quota, UINT qinx)
2317 struct rx_ring *prx_ring =
2318 GET_RX_WRAPPER_DESC(qinx);
2319 struct net_device *dev = pdata->dev;
2320 struct desc_if_struct *desc_if = &pdata->desc_if;
2321 struct hw_if_struct *hw_if = &(pdata->hw_if);
2322 struct sk_buff *skb = NULL;
2324 struct rx_swcx_desc *prx_swcx_desc = NULL;
2325 struct s_rx_desc *prx_desc = NULL;
2327 UINT err_bits = EQOS_RDESC3_ES_BITS;
2328 u32 sw_cur_rx_desc_addr = 0;
2329 u32 hw_cur_rx_desc_addr = 0;
2333 pr_debug("-->%s(): qinx = %u, quota = %d\n", __func__, qinx, quota);
2335 hw_cur_rx_desc_addr = prx_ring->hw_last_rx_desc_addr;
2336 while (received < quota) {
2337 prx_swcx_desc = GET_RX_BUF_PTR(qinx, prx_ring->cur_rx);
2338 prx_desc = GET_RX_DESC_PTR(qinx, prx_ring->cur_rx);
2340 sw_cur_rx_desc_addr =
2341 GET_RX_DESC_DMA_ADDR(qinx, prx_ring->cur_rx);
2343 /* check for data availability */
2344 if (!(prx_desc->rdes3 & EQOS_RDESC3_OWN) &&
2345 prx_swcx_desc->skb) {
/* Guard against racing the DMA head: when SW catches up to the
 * HW-reported last descriptor, re-read it before consuming. */
2346 if (hw_cur_rx_desc_addr == sw_cur_rx_desc_addr) {
2348 prx_ring->hw_last_rx_desc_addr);
2349 if (prx_ring->hw_last_rx_desc_addr ==
2350 hw_cur_rx_desc_addr)
2352 hw_cur_rx_desc_addr =
2353 prx_ring->hw_last_rx_desc_addr;
2355 #ifdef EQOS_ENABLE_RX_DESC_DUMP
2356 dump_rx_desc(qinx, prx_desc, prx_ring->cur_rx);
2358 /* assign it to new skb */
2359 skb = prx_swcx_desc->skb;
2360 prx_swcx_desc->skb = NULL;
2362 dma_unmap_single(&pdata->pdev->dev, prx_swcx_desc->dma,
2363 pdata->rx_buffer_len,
2365 prx_swcx_desc->dma = 0;
2367 /* get the packet length */
2368 pkt_len = (prx_desc->rdes3 & EQOS_RDESC3_PL);
2370 #ifdef EQOS_ENABLE_RX_PKT_DUMP
2371 print_pkt(skb, pkt_len, 0, (prx_ring->cur_rx));
2374 #ifdef ENABLE_CHANNEL_DATA_CHECK
2375 check_channel_data(skb, qinx, 1);
2378 /* check for bad/oversized packet,
2379 * error is valid only for last descriptor
2380 * (OWN + LD bit set).
2382 if (tegra_platform_is_unit_fpga())
2383 err_bits = EQOS_RDESC3_CRC | EQOS_RDESC3_OF;
2385 if (!(prx_desc->rdes3 & err_bits) &&
2386 (prx_desc->rdes3 & EQOS_RDESC3_LD)) {
2387 /* pkt_len = pkt_len - 4; *//* CRC stripping */
2389 /* code added for copybreak, this should improve
2390 * performance for small pkts with large amount
2391 * of reassembly being done in the stack
2393 if (pkt_len < EQOS_COPYBREAK_DEFAULT) {
2394 struct sk_buff *new_skb =
2395 netdev_alloc_skb_ip_align(dev,
2398 skb_copy_to_linear_data_offset
2399 (new_skb, -NET_IP_ALIGN,
2400 (skb->data - NET_IP_ALIGN),
2401 (pkt_len + NET_IP_ALIGN));
2402 /* recycle actual desc skb */
2403 prx_swcx_desc->skb = skb;
2406 /* just continue the old skb */
2409 skb_put(skb, pkt_len);
2411 eqos_config_rx_csum(pdata, skb, prx_desc);
2413 #ifdef EQOS_ENABLE_VLAN_TAG
2414 eqos_get_rx_vlan(pdata, skb, prx_desc);
2417 #ifdef YDEBUG_FILTER
2418 eqos_check_rx_filter_status(prx_desc);
2421 if (pdata->hw_feat.tsstssel &&
2422 pdata->hwts_rx_en &&
2424 rx_tstamp_available(prx_desc)) {
2425 /* get rx tstamp if available */
2426 ret = eqos_get_rx_hwtstamp(pdata, skb,
2430 /* device has not yet updated
2431 * the CONTEXT desc to hold the
2432 * time stamp, hence delay the
2435 prx_swcx_desc->skb = skb;
2436 prx_swcx_desc->dma =
2437 dma_map_single(&pdata->
2440 pdata->rx_buffer_len,
2443 if (dma_mapping_error
2445 prx_swcx_desc->dma))
2447 ("failed to do the RX dma map\n");
2448 goto rx_tstmp_failed;
/* LRO without GRO: tag skb with TCP-payload hint for the LRO engine. */
2452 if (!(dev->features & NETIF_F_GRO) &&
2453 (dev->features & NETIF_F_LRO)) {
2455 eqos_check_for_tcp_payload
/* NOTE(review): dev->last_rx writes are legacy; removed/ignored in
 * modern kernels — confirm target kernel version. */
2459 dev->last_rx = jiffies;
2460 /* update the statistics */
2461 dev->stats.rx_packets++;
2462 dev->stats.rx_bytes += skb->len;
2463 eqos_receive_skb(pdata, dev, skb, qinx);
/* Error/oversize path: recycle the skb back onto the descriptor. */
2466 dump_rx_desc(qinx, prx_desc,
2468 if (!(prx_desc->rdes3 & EQOS_RDESC3_LD))
2469 pr_debug("Received oversized pkt,"
2470 "spanned across multiple desc\n");
2473 prx_swcx_desc->skb = skb;
2474 dev->stats.rx_errors++;
2475 eqos_update_rx_errors(dev,
2479 prx_ring->dirty_rx++;
2480 if (prx_ring->dirty_rx >=
2481 prx_ring->skb_realloc_threshold)
2482 desc_if->realloc_skb(pdata, qinx);
2484 INCR_RX_DESC_INDEX(prx_ring->cur_rx, 1);
2486 /* no more data to read */
/* Final refill for any remaining dirty descriptors before returning. */
2493 if (prx_ring->dirty_rx)
2494 desc_if->realloc_skb(pdata, qinx);
2496 pr_debug("<--%s(): received = %d, qinx=%d\n", __func__, received, qinx);
2502 * \brief API to update the rx status.
2504 * \details This function is called in poll function to update the
2505 * status of received packets.
2507 * \param[in] dev - pointer to net_device structure.
2508 * \param[in] rx_status - value of received packet status.
/* Translate RX status error bits into net_device_stats counters:
 * bit 24 -> CRC error, bit 20 -> frame/alignment error,
 * bit 21 -> FIFO overrun.
 */
2513 void eqos_update_rx_errors(struct net_device *dev, unsigned int rx_status)
2515 pr_debug("-->eqos_update_rx_errors\n");
2517 /* received pkt with crc error */
2518 if ((rx_status & 0x1000000))
2519 dev->stats.rx_crc_errors++;
2521 /* received frame alignment */
2522 if ((rx_status & 0x100000))
2523 dev->stats.rx_frame_errors++;
2525 /* receiver fifo overrun */
2526 if ((rx_status & 0x200000))
2527 dev->stats.rx_fifo_errors++;
2529 pr_debug("<--eqos_update_rx_errors\n");
/* Per-channel NAPI work: reclaim TX completions, then process RX
 * completions up to the channel's configured NAPI quota, updating
 * RX packet statistics and flushing LRO if any LRO skbs were queued.
 * Returns the number of RX packets processed.
 */
2532 static int handle_txrx_completions(struct eqos_prv_data *pdata, int qinx)
2534 struct eqos_rx_queue *rx_queue;
2536 int budget = pdata->dt_cfg.chan_napi_quota[qinx];
2538 pr_debug("-->%s(): chan=%d\n", __func__, qinx);
2540 rx_queue = GET_RX_QUEUE_PTR(qinx);
2542 /* check for tx descriptor status */
2543 process_tx_completions(pdata->dev, pdata, qinx);
2544 rx_queue->lro_flush_needed = 0;
/* process_rx_completions is an indirect call so an alternate RX mode
 * implementation can be plugged in via pdata. */
2546 received = pdata->process_rx_completions(pdata, budget, qinx);
2548 pdata->xstats.rx_pkt_n += received;
2549 pdata->xstats.q_rx_pkt_n[qinx] += received;
2551 if (rx_queue->lro_flush_needed)
2552 lro_flush_all(&rx_queue->lro_mgr);
2554 pr_debug("<--%s():\n", __func__);
/* NAPI poll epilogue: when fewer packets than the budget were handled,
 * complete NAPI and re-enable the channel's interrupts.  The non-GRO
 * path completes under pdata->lock with __napi_complete.
 * NOTE(review): __napi_complete is a legacy API removed in newer
 * kernels — confirm target kernel version.
 */
2559 static void do_txrx_post_processing(struct eqos_prv_data *pdata,
2560 struct napi_struct *napi,
2561 int received, int budget)
2563 struct eqos_rx_queue *rx_queue;
2565 struct hw_if_struct *hw_if = &(pdata->hw_if);
2567 pr_debug("-->%s():\n", __func__);
2569 /* If we processed all pkts, we are done;
2570 * tell the kernel & re-enable interrupt
2572 if (received < budget) {
2573 rx_queue = container_of(napi, struct eqos_rx_queue, napi);
2574 qinx = rx_queue->chan_num;
2575 hw_if = &pdata->hw_if;
2576 if (pdata->dev->features & NETIF_F_GRO) {
2577 /* to turn off polling */
2578 napi_complete(napi);
2580 /* Enable RX interrupt */
2581 hw_if->enable_chan_interrupts(qinx, pdata);
2584 spin_lock(&pdata->lock);
2585 __napi_complete(napi);
2587 /* Enable RX interrupt */
2588 hw_if->enable_chan_interrupts(qinx, pdata);
2590 spin_unlock(&pdata->lock);
2593 pr_debug("<--%s():\n", __func__);
2597 int eqos_napi_mq(struct napi_struct *napi, int budget)
2599 struct eqos_rx_queue *rx_queue =
2600 container_of(napi, struct eqos_rx_queue, napi);
2601 struct eqos_prv_data *pdata = rx_queue->pdata;
2603 int qinx = rx_queue->chan_num;
2606 pr_debug("-->%s(): budget = %d\n", __func__, budget);
2608 pdata->xstats.napi_poll_n++;
2609 received = handle_txrx_completions(pdata, qinx);
2611 do_txrx_post_processing(pdata, napi, received,
2612 pdata->dt_cfg.chan_napi_quota[qinx]);
2614 pr_debug("<--%s()\n", __func__);
2620 * \brief API to return the device/interface status.
2622 * \details The get_stats function is called whenever an application needs to
2623 * get statistics for the interface. For example, this happens when ifconfig
2624 * or netstat -i is run.
2626 * \param[in] dev - pointer to net_device structure.
2628 * \return net_device_stats structure
2630 * \retval net_device_stats - returns pointer to net_device_stats structure.
/* ndo_get_stats hook (ifconfig / netstat -i).
 * NOTE(review): the body is elided from this listing — presumably it
 * returns &dev->stats; confirm against the full file.
 */
static struct net_device_stats *eqos_get_stats(struct net_device *dev)
2641 * \brief User defined parameter setting API
2643 * \details This function is invoked by kernel to update the device
2644 * configuration to new features. This function supports enabling and
2645 * disabling of TX and RX csum features.
2647 * \param[in] dev – pointer to net device structure.
2648 * \param[in] features – device feature to be enabled/disabled.
/* ndo_set_features hook: syncs HW RX-checksum and VLAN tag offloads with
 * the requested feature mask.  pdata->dev_state caches what is currently
 * enabled in HW so redundant register writes are skipped.
 * NOTE(review): elided listing — braces, #endif after the declaration
 * block and the final return are not visible here.
 */
static int eqos_set_features(struct net_device *dev, netdev_features_t features)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	UINT dev_rxcsum_enable;	/* current HW state of RX csum offload */
#ifdef EQOS_ENABLE_VLAN_TAG
	UINT dev_rxvlan_enable, dev_txvlan_enable;
	pr_debug("-->eqos_set_features\n");
	/* RX checksum offload: only touch HW on an actual state change */
	if (pdata->hw_feat.rx_coe_sel) {
		dev_rxcsum_enable = !!(pdata->dev_state & NETIF_F_RXCSUM);
		if (((features & NETIF_F_RXCSUM) == NETIF_F_RXCSUM)
		    && !dev_rxcsum_enable) {
			hw_if->enable_rx_csum();
			pdata->dev_state |= NETIF_F_RXCSUM;
			pr_err("State change - rxcsum enable\n");
		} else if (((features & NETIF_F_RXCSUM) == 0)
			   && dev_rxcsum_enable) {
			hw_if->disable_rx_csum();
			pdata->dev_state &= ~NETIF_F_RXCSUM;
			pr_err("State change - rxcsum disable\n");
#ifdef EQOS_ENABLE_VLAN_TAG
	/* RX VLAN tag stripping on/off */
	dev_rxvlan_enable = !!(pdata->dev_state & NETIF_F_HW_VLAN_CTAG_RX);
	if (((features & NETIF_F_HW_VLAN_CTAG_RX) == NETIF_F_HW_VLAN_CTAG_RX)
	    && !dev_rxvlan_enable) {
		pdata->dev_state |= NETIF_F_HW_VLAN_CTAG_RX;
		/* NOTE(review): disable path below calls this via hw_if-> ;
		 * the receiver prefix looks elided from this listing */
		config_rx_outer_vlan_stripping(EQOS_RX_VLAN_STRIP_ALWAYS);
		pr_err("State change - rxvlan enable\n");
	} else if (((features & NETIF_F_HW_VLAN_CTAG_RX) == 0) &&
		   dev_rxvlan_enable) {
		pdata->dev_state &= ~NETIF_F_HW_VLAN_CTAG_RX;
		hw_if->config_rx_outer_vlan_stripping(EQOS_RX_NO_VLAN_STRIP);
		pr_err("State change - rxvlan disable\n");
	/* TX VLAN tag insertion: only dev_state tracking is visible here —
	 * no HW call in this listing */
	dev_txvlan_enable = !!(pdata->dev_state & NETIF_F_HW_VLAN_CTAG_TX);
	if (((features & NETIF_F_HW_VLAN_CTAG_TX) == NETIF_F_HW_VLAN_CTAG_TX)
	    && !dev_txvlan_enable) {
		pdata->dev_state |= NETIF_F_HW_VLAN_CTAG_TX;
		pr_err("State change - txvlan enable\n");
	} else if (((features & NETIF_F_HW_VLAN_CTAG_TX) == 0) &&
		   dev_txvlan_enable) {
		pdata->dev_state &= ~NETIF_F_HW_VLAN_CTAG_TX;
		pr_err("State change - txvlan disable\n");
#endif /* EQOS_ENABLE_VLAN_TAG */
	pr_debug("<--eqos_set_features\n");
2713 * \details This function is invoked by ioctl function when user issues
2714 * an ioctl command to enable/disable L3/L4 filtering.
2716 * \param[in] dev – pointer to net device structure.
2717 * \param[in] flags – flag to indicate whether L3/L4 filtering to be
2722 * \retval zero on success and -ve number on failure.
/* Globally enable/disable L3/L4 filtering (ioctl path).  Rejects a no-op
 * request (already in the requested state), then mirrors the new state in
 * pdata->l3_l4_filter and programs it into HW.
 * NOTE(review): elided listing — the flags parameter line, the early
 * return statements and the final return are not visible here.
 */
static int eqos_config_l3_l4_filtering(struct net_device *dev,
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	DBGPR_FILTER("-->eqos_config_l3_l4_filtering\n");

	/* no-op guards: requested state already active */
	if (flags && pdata->l3_l4_filter) {
		pr_err("L3/L4 filtering is already enabled\n");

	if (!flags && !pdata->l3_l4_filter) {
		pr_err("L3/L4 filtering is already disabled\n");

	pdata->l3_l4_filter = !!flags;	/* normalize to 0/1 */
	hw_if->config_l3_l4_filter_enable(pdata->l3_l4_filter);

	DBGPR_FILTER("Succesfully %s L3/L4 filtering\n",
		     (flags ? "ENABLED" : "DISABLED"));

	DBGPR_FILTER("<--eqos_config_l3_l4_filtering\n");
2755 * \details This function is invoked by ioctl function when user issues an
2756 * ioctl command to configure L3(IPv4) filtering. This function does following,
2757 * - enable/disable IPv4 filtering.
2758 * - select source/destination address matching.
2759 * - select perfect/inverse matching.
2760 * - Update the IPv4 address into MAC register.
2762 * \param[in] dev – pointer to net device structure.
2763 * \param[in] req – pointer to IOCTL specific structure.
2767 * \retval zero on success and -ve number on failure.
/* Configure one IPv4 (L3) filter from a user-space ioctl request:
 * copies the filter spec, validates the filter index against HW capacity,
 * lazily enables global L3/L4 filtering, programs match mode and address.
 * NOTE(review): elided listing — the -EFAULT return after copy_from_user
 * and the else keyword between the two update_ip4_addr calls are not
 * visible here.
 */
static int eqos_config_ip4_filters(struct net_device *dev,
				   struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_l3_l4_filter *u_l3_filter =
	    (struct eqos_l3_l4_filter *)req->ptr;	/* user pointer */
	struct eqos_l3_l4_filter l_l3_filter;	/* kernel-side copy */

	DBGPR_FILTER("-->eqos_config_ip4_filters\n");

	if (pdata->hw_feat.l3l4_filter_num == 0)
		return EQOS_NO_HW_SUPPORT;

	if (copy_from_user(&l_l3_filter, u_l3_filter,
			   sizeof(struct eqos_l3_l4_filter)))

	/* filter index must fit within the HW filter count */
	if ((l_l3_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
		pr_err("%d filter is not supported in the HW\n",
		       l_l3_filter.filter_no);
		return EQOS_NO_HW_SUPPORT;

	/* lazily enable the global L3/L4 filter block */
	if (!pdata->l3_l4_filter) {
		hw_if->config_l3_l4_filter_enable(1);
		pdata->l3_l4_filter = 1;

	/* configure the L3 filters (0 = IPv4) */
	hw_if->config_l3_filters(l_l3_filter.filter_no,
				 l_l3_filter.filter_enb_dis, 0,
				 l_l3_filter.src_dst_addr_match,
				 l_l3_filter.perfect_inverse_match);

	/* addr0 matches source, addr1 matches destination */
	if (!l_l3_filter.src_dst_addr_match)
		hw_if->update_ip4_addr0(l_l3_filter.filter_no,
					l_l3_filter.ip4_addr);
		hw_if->update_ip4_addr1(l_l3_filter.filter_no,
					l_l3_filter.ip4_addr);

	DBGPR_FILTER
	    ("Successfully %s IPv4 %s %s addressing filtering on %d filter\n",
	     (l_l3_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
	     (l_l3_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
	     (l_l3_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
	     l_l3_filter.filter_no);

	DBGPR_FILTER("<--eqos_config_ip4_filters\n");
2825 * \details This function is invoked by ioctl function when user issues an
2826 * ioctl command to configure L3(IPv6) filtering. This function does following,
2827 * - enable/disable IPv6 filtering.
2828 * - select source/destination address matching.
2829 * - select perfect/inverse matching.
2830 * - Update the IPv6 address into MAC register.
2832 * \param[in] dev – pointer to net device structure.
2833 * \param[in] req – pointer to IOCTL specific structure.
2837 * \retval zero on success and -ve number on failure.
/* Configure one IPv6 (L3) filter from a user-space ioctl request.
 * Same flow as eqos_config_ip4_filters, but passes ipv6 = 1 to
 * config_l3_filters and programs a single 128-bit address register.
 * NOTE(review): elided listing — the -EFAULT return after copy_from_user
 * and the final return are not visible here.
 */
static int eqos_config_ip6_filters(struct net_device *dev,
				   struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_l3_l4_filter *u_l3_filter =
	    (struct eqos_l3_l4_filter *)req->ptr;	/* user pointer */
	struct eqos_l3_l4_filter l_l3_filter;	/* kernel-side copy */

	DBGPR_FILTER("-->eqos_config_ip6_filters\n");

	if (pdata->hw_feat.l3l4_filter_num == 0)
		return EQOS_NO_HW_SUPPORT;

	if (copy_from_user(&l_l3_filter, u_l3_filter,
			   sizeof(struct eqos_l3_l4_filter)))

	/* filter index must fit within the HW filter count */
	if ((l_l3_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
		pr_err("%d filter is not supported in the HW\n",
		       l_l3_filter.filter_no);
		return EQOS_NO_HW_SUPPORT;

	/* lazily enable the global L3/L4 filter block */
	if (!pdata->l3_l4_filter) {
		hw_if->config_l3_l4_filter_enable(1);
		pdata->l3_l4_filter = 1;

	/* configure the L3 filters (1 = IPv6) */
	hw_if->config_l3_filters(l_l3_filter.filter_no,
				 l_l3_filter.filter_enb_dis, 1,
				 l_l3_filter.src_dst_addr_match,
				 l_l3_filter.perfect_inverse_match);

	hw_if->update_ip6_addr(l_l3_filter.filter_no, l_l3_filter.ip6_addr);

	DBGPR_FILTER
	    ("Successfully %s IPv6 %s %s addressing filtering on %d filter\n",
	     (l_l3_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
	     (l_l3_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
	     (l_l3_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
	     l_l3_filter.filter_no);

	DBGPR_FILTER("<--eqos_config_ip6_filters\n");
2890 * \details This function is invoked by ioctl function when user issues an
2891 * ioctl command to configure L4(TCP/UDP) filtering. This function does following,
2892 * - enable/disable L4 filtering.
2893 * - select TCP/UDP filtering.
2894 * - select source/destination port matching.
2895 * - select perfect/inverse matching.
2896 * - Update the port number into MAC register.
2898 * \param[in] dev – pointer to net device structure.
2899 * \param[in] req – pointer to IOCTL specific structure.
2900 * \param[in] tcp_udp – flag to indicate TCP/UDP filtering.
2904 * \retval zero on success and -ve number on failure.
/* Configure one L4 (TCP/UDP port) filter from a user-space ioctl request.
 *
 * @tcp_udp: 0 = TCP, 1 = UDP (per the log string below); presumably
 *           forwarded to config_l4_filters on the elided argument line.
 * NOTE(review): elided listing — the -EFAULT return after copy_from_user,
 * one argument line of config_l4_filters, the else between the da/sa port
 * updates and the final return are not visible here.
 */
static int eqos_config_tcp_udp_filters(struct net_device *dev,
				       struct ifr_data_struct *req, int tcp_udp)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_l3_l4_filter *u_l4_filter =
	    (struct eqos_l3_l4_filter *)req->ptr;	/* user pointer */
	struct eqos_l3_l4_filter l_l4_filter;	/* kernel-side copy */

	DBGPR_FILTER("-->eqos_config_tcp_udp_filters\n");

	if (pdata->hw_feat.l3l4_filter_num == 0)
		return EQOS_NO_HW_SUPPORT;

	if (copy_from_user(&l_l4_filter, u_l4_filter,
			   sizeof(struct eqos_l3_l4_filter)))

	/* filter index must fit within the HW filter count */
	if ((l_l4_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
		pr_err("%d filter is not supported in the HW\n",
		       l_l4_filter.filter_no);
		return EQOS_NO_HW_SUPPORT;

	/* lazily enable the global L3/L4 filter block */
	if (!pdata->l3_l4_filter) {
		hw_if->config_l3_l4_filter_enable(1);
		pdata->l3_l4_filter = 1;

	/* configure the L4 filters */
	hw_if->config_l4_filters(l_l4_filter.filter_no,
				 l_l4_filter.filter_enb_dis,
				 l_l4_filter.src_dst_addr_match,
				 l_l4_filter.perfect_inverse_match);

	/* destination-port match vs source-port match */
	if (l_l4_filter.src_dst_addr_match)
		hw_if->update_l4_da_port_no(l_l4_filter.filter_no,
					    l_l4_filter.port_no);
		hw_if->update_l4_sa_port_no(l_l4_filter.filter_no,
					    l_l4_filter.port_no);

	DBGPR_FILTER
	    ("Successfully %s %s %s %s Port number filtering on %d filter\n",
	     (l_l4_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
	     (tcp_udp ? "UDP" : "TCP"),
	     (l_l4_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
	     (l_l4_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
	     l_l4_filter.filter_no);

	DBGPR_FILTER("<--eqos_config_tcp_udp_filters\n");
2964 * \details This function is invoked by ioctl function when user issues an
2965 * ioctl command to configure VLAN filtering. This function does following,
2966 * - enable/disable VLAN filtering.
2967 * - select perfect/hash filtering.
2969 * \param[in] dev – pointer to net device structure.
2970 * \param[in] req – pointer to IOCTL specific structure.
2974 * \retval zero on success and -ve number on failure.
/* Configure VLAN filtering from a user-space ioctl request: copies the
 * spec, rejects hash mode when the HW lacks the VLAN hash table, then
 * programs enable/disable, perfect-vs-hash and perfect-vs-inverse match.
 * NOTE(review): elided listing — the -EFAULT return after copy_from_user,
 * the final return and part of the last DBGPR argument
 * (l_vlan_filter.perfect_inverse_match) are not visible here.
 */
static int eqos_config_vlan_filter(struct net_device *dev,
				   struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_vlan_filter *u_vlan_filter =
	    (struct eqos_vlan_filter *)req->ptr;	/* user pointer */
	struct eqos_vlan_filter l_vlan_filter;	/* kernel-side copy */

	DBGPR_FILTER("-->eqos_config_vlan_filter\n");

	if (copy_from_user(&l_vlan_filter, u_vlan_filter,
			   sizeof(struct eqos_vlan_filter)))

	/* hash filtering requires the HW VLAN hash table */
	if ((l_vlan_filter.perfect_hash) && (pdata->hw_feat.vlan_hash_en == 0)) {
		pr_err("VLAN HASH filtering is not supported\n");
		return EQOS_NO_HW_SUPPORT;

	/* configure the vlan filter */
	hw_if->config_vlan_filtering(l_vlan_filter.filter_enb_dis,
				     l_vlan_filter.perfect_hash,
				     l_vlan_filter.perfect_inverse_match);
	pdata->vlan_hash_filtering = l_vlan_filter.perfect_hash;

	DBGPR_FILTER("Successfully %s VLAN %s filtering and %s matching\n",
		     (l_vlan_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
		     (l_vlan_filter.perfect_hash ? "HASH" : "PERFECT"),
		     perfect_inverse_match ? "INVERSE" : "PERFECT"));

	DBGPR_FILTER("<--eqos_config_vlan_filter\n");
3015 * \details This function is invoked by ioctl function when user issues an
3016 * ioctl command to enable/disable ARP offloading feature.
3018 * \param[in] dev – pointer to net device structure.
3019 * \param[in] req – pointer to IOCTL specific structure.
3023 * \retval zero on success and -ve number on failure.
/* Enable/disable ARP offload (req->flags) and program the IP address the
 * MAC should answer ARP requests for.
 * NOTE(review): elided listing — the -EFAULT return after copy_from_user
 * and the final return are not visible here.
 */
static int eqos_config_arp_offload(struct net_device *dev,
				   struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_arp_offload *u_arp_offload =
	    (struct eqos_arp_offload *)req->ptr;	/* user pointer */
	struct eqos_arp_offload l_arp_offload;	/* kernel-side copy */

	pr_err("-->eqos_config_arp_offload\n");

	if (pdata->hw_feat.arp_offld_en == 0)
		return EQOS_NO_HW_SUPPORT;

	if (copy_from_user(&l_arp_offload, u_arp_offload,
			   sizeof(struct eqos_arp_offload)))

	/* enable/disable per req->flags, then set the target IP */
	hw_if->config_arp_offload(req->flags);
	hw_if->update_arp_offload_ip_addr(l_arp_offload.ip_addr);
	pdata->arp_offload = req->flags;

	pr_err("Successfully %s arp Offload\n",
	       (req->flags ? "ENABLED" : "DISABLED"));

	pr_err("<--eqos_config_arp_offload\n");
3058 * \details This function is invoked by ioctl function when user issues an
3059 * ioctl command to configure L2 destination addressing filtering mode. This
3060 * function does the following,
3061 * - selects perfect/hash filtering.
3062 * - selects perfect/inverse matching.
3064 * \param[in] dev – pointer to net device structure.
3065 * \param[in] req – pointer to IOCTL specific structure.
3069 * \retval zero on success and -ve number on failure.
/* Select the L2 destination-address filtering mode: perfect vs hash
 * (pdata->l2_filtering_mode 0/1, gated on HW capability) and
 * perfect vs inverse matching in HW.
 * NOTE(review): elided listing — the else keywords between the capability
 * checks, the -EFAULT return and the "int ret" declaration/final return
 * are not visible here.
 */
static int eqos_confing_l2_da_filter(struct net_device *dev,
				     struct ifr_data_struct *req)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct eqos_l2_da_filter *u_l2_da_filter =
	    (struct eqos_l2_da_filter *)req->ptr;	/* user pointer */
	struct eqos_l2_da_filter l_l2_da_filter;	/* kernel-side copy */

	DBGPR_FILTER("-->eqos_confing_l2_da_filter\n");

	if (copy_from_user(&l_l2_da_filter, u_l2_da_filter,
			   sizeof(struct eqos_l2_da_filter)))

	if (l_l2_da_filter.perfect_hash) {
		/* hash mode needs a HW hash table */
		if (pdata->hw_feat.hash_tbl_sz > 0)
			pdata->l2_filtering_mode = 1;
			ret = EQOS_NO_HW_SUPPORT;
		/* perfect mode needs more than one MAC address register */
		if (pdata->max_addr_reg_cnt > 1)
			pdata->l2_filtering_mode = 0;
			ret = EQOS_NO_HW_SUPPORT;

	/* configure L2 DA perfect/inverse_matching */
	hw_if->config_l2_da_perfect_inverse_match(l_l2_da_filter.
						  perfect_inverse_match);

	DBGPR_FILTER
	    ("Successfully selected L2 %s filtering and %s DA matching\n",
	     (l_l2_da_filter.perfect_hash ? "HASH" : "PERFECT"),
	     (l_l2_da_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"));

	DBGPR_FILTER("<--eqos_confing_l2_da_filter\n");
3114 * \details This function is invoked by ioctl function when user issues
3115 * an ioctl command to enable/disable mac loopback mode.
3117 * \param[in] dev – pointer to net device structure.
3118 * \param[in] flags – flag to indicate whether mac loopback mode to be
3123 * \retval zero on success and -ve number on failure.
/* Enable/disable MAC-internal loopback (ioctl path).  Rejects a no-op
 * request, then mirrors the state in pdata->mac_loopback_mode and
 * programs HW.
 * NOTE(review): elided listing — the flags parameter line, the early
 * returns and the final return are not visible here.
 */
int eqos_config_mac_loopback_mode(struct net_device *dev,
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_config_mac_loopback_mode\n");

	/* no-op guards: requested state already active */
	if (flags && pdata->mac_loopback_mode) {
		pr_err("MAC loopback mode is already enabled\n");

	if (!flags && !pdata->mac_loopback_mode) {
		pr_err("MAC loopback mode is already disabled\n");

	pdata->mac_loopback_mode = !!flags;	/* normalize to 0/1 */
	hw_if->config_mac_loopback_mode(flags);

	pr_err("Succesfully %s MAC loopback mode\n",
	       (flags ? "enabled" : "disabled"));

	pr_debug("<--eqos_config_mac_loopback_mode\n");
3153 static VOID eqos_config_timer_registers(struct eqos_prv_data *pdata)
3155 struct timespec now;
3156 struct hw_if_struct *hw_if = &(pdata->hw_if);
3159 pr_debug("-->eqos_config_timer_registers\n");
3161 /* program Sub Second Increment Reg */
3162 hw_if->config_sub_second_increment(EQOS_SYSCLOCK);
3165 * addend = 2^32/freq_div_ratio;
3167 * where, freq_div_ratio = EQOS_SYSCLOCK/50MHz
3169 * hence, addend = ((2^32) * 50MHz)/EQOS_SYSCLOCK;
3171 * NOTE: EQOS_SYSCLOCK should be >= 50MHz to
3172 * achive 20ns accuracy.
3174 * 2^x * y == (y << x), hence
3175 * 2^32 * 6250000 ==> (6250000 << 32)
3177 temp = (u64) (62500000ULL << 32);
3178 pdata->default_addend = div_u64(temp, 125000000);
3180 hw_if->config_addend(pdata->default_addend);
3182 /* initialize system time */
3183 getnstimeofday(&now);
3184 hw_if->init_systime(now.tv_sec, now.tv_nsec);
3186 pr_debug("-->eqos_config_timer_registers\n");
3190 * \details This function is invoked by ioctl function when user issues
3191 * an ioctl command to configure PTP offloading feature.
3193 * \param[in] pdata - pointer to private data structure.
3194 * \param[in] flags – Each bit in this variable carry some information related
3195 * double vlan processing.
3199 * \retval zero on success and -ve number on failure.
/* Configure PTP offloading from a user-space ioctl request: builds the
 * PTO control word and MAC timestamp-control word for the requested
 * clock role, then programs timestamping, the timer registers and the
 * offload engine.
 * NOTE(review): elided listing — the pto_cntrl/mac_tcr declarations, the
 * body of the disable branch (which presumably clears pto_cntrl/mac_tcr)
 * and the final return are not visible here.
 */
static int eqos_config_ptpoffload(struct eqos_prv_data *pdata,
				  struct eqos_config_ptpoffloading *u_conf_ptp)
	struct eqos_config_ptpoffloading l_conf_ptp;	/* kernel-side copy */
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	if (copy_from_user(&l_conf_ptp, u_conf_ptp,
			   sizeof(struct eqos_config_ptpoffloading))) {
		pr_err("Failed to fetch Double vlan Struct info from user\n");
		return EQOS_CONFIG_FAIL;

	pr_err("-->eqos_config_ptpoffload - %d\n", l_conf_ptp.mode);

	pto_cntrl = MAC_PTOCR_PTOEN;	/* enable ptp offloading */
	/* baseline: timestamping on, PTP-over-IP, v2, fine update */
	mac_tcr = MAC_TCR_TSENA | MAC_TCR_TSIPENA | MAC_TCR_TSVER2ENA
	    | MAC_TCR_TSCFUPDT | MAC_TCR_TSCTRLSSR;
	/* per-role additions to the control words */
	if (l_conf_ptp.mode == EQOS_PTP_ORDINARY_SLAVE) {
		mac_tcr |= MAC_TCR_TSEVENTENA;
		pdata->ptp_offloading_mode = EQOS_PTP_ORDINARY_SLAVE;

	} else if (l_conf_ptp.mode == EQOS_PTP_TRASPARENT_SLAVE) {
		pto_cntrl |= MAC_PTOCR_APDREQEN;
		mac_tcr |= MAC_TCR_TSEVENTENA;
		mac_tcr |= MAC_TCR_SNAPTYPSEL_1;
		pdata->ptp_offloading_mode = EQOS_PTP_TRASPARENT_SLAVE;

	} else if (l_conf_ptp.mode == EQOS_PTP_ORDINARY_MASTER) {
		pto_cntrl |= MAC_PTOCR_ASYNCEN;
		mac_tcr |= MAC_TCR_TSEVENTENA;
		mac_tcr |= MAC_TCR_TSMASTERENA;
		pdata->ptp_offloading_mode = EQOS_PTP_ORDINARY_MASTER;

	} else if (l_conf_ptp.mode == EQOS_PTP_TRASPARENT_MASTER) {
		pto_cntrl |= MAC_PTOCR_ASYNCEN | MAC_PTOCR_APDREQEN;
		mac_tcr |= MAC_TCR_SNAPTYPSEL_1;
		mac_tcr |= MAC_TCR_TSEVENTENA;
		mac_tcr |= MAC_TCR_TSMASTERENA;
		pdata->ptp_offloading_mode = EQOS_PTP_TRASPARENT_MASTER;

	} else if (l_conf_ptp.mode == EQOS_PTP_PEER_TO_PEER_TRANSPARENT) {
		pto_cntrl |= MAC_PTOCR_APDREQEN;
		mac_tcr |= MAC_TCR_SNAPTYPSEL_3;
		pdata->ptp_offloading_mode = EQOS_PTP_PEER_TO_PEER_TRANSPARENT;

	pdata->ptp_offload = 1;
	if (l_conf_ptp.en_dis == EQOS_PTP_OFFLOADING_DISABLE) {
		pdata->ptp_offload = 0;

	pto_cntrl |= (l_conf_ptp.domain_num << 8);	/* PTP domain field */
	hw_if->config_hw_time_stamping(mac_tcr);
	eqos_config_timer_registers(pdata);
	hw_if->config_ptpoffload_engine(pto_cntrl, l_conf_ptp.mc_uc);

	pr_err("<--eqos_config_ptpoffload\n");
3272 * \details This function is invoked by ioctl function when user issues
3273 * an ioctl command to enable/disable pfc.
3275 * \param[in] dev – pointer to net device structure.
3276 * \param[in] flags – flag to indicate whether pfc to be enabled/disabled.
3280 * \retval zero on success and -ve number on failure.
/* Enable/disable Priority-based Flow Control (ioctl path); requires the
 * DCB feature in HW.
 * NOTE(review): elided listing — the final return is not visible here.
 */
static int eqos_config_pfc(struct net_device *dev, unsigned int flags)
	struct eqos_prv_data *pdata = netdev_priv(dev);
	struct hw_if_struct *hw_if = &(pdata->hw_if);

	pr_debug("-->eqos_config_pfc\n");

	if (!pdata->hw_feat.dcb_en) {
		pr_err("PFC is not supported\n");
		return EQOS_NO_HW_SUPPORT;

	hw_if->config_pfc(flags);

	pr_err("Succesfully %s PFC(Priority Based Flow Control)\n",
	       (flags ? "enabled" : "disabled"));

	pr_debug("<--eqos_config_pfc\n");
/* Private timestamp ioctl: atomically sample a kernel clock
 * (CLOCK_REALTIME or CLOCK_MONOTONIC, per req.clockid) and the HW PTP
 * time, then return both to user space for clock correlation.  Both reads
 * happen under eqos_ts_lock with IRQs off to keep them close in time.
 * NOTE(review): elided listing — the ifr parameter line, ns/reminder
 * declarations, the default: label, break statements and the
 * copy_{from,to}_user error returns are not visible here.
 */
static int eqos_handle_prv_ts_ioctl(struct eqos_prv_data *pdata,
	struct hw_if_struct *hw_if = &pdata->hw_if;
	struct ifr_data_timestamp_struct req;	/* kernel copy of user req */
	unsigned long flags;	/* saved IRQ state for eqos_ts_lock */

	pr_debug("-->eqos_handle_prv_ts_ioctl\n");

	if (copy_from_user(&req, ifr->ifr_data, sizeof(req)))

	raw_spin_lock_irqsave(&eqos_ts_lock, flags);

	/* sample the requested kernel clock */
	switch (req.clockid) {
	case CLOCK_REALTIME:
		ktime_get_real_ts(&req.kernel_ts);

	case CLOCK_MONOTONIC:
		ktime_get_ts(&req.kernel_ts);

		pr_err("Unsupported clockid\n");

	/* sample the HW PTP clock under its own lock */
	raw_spin_lock(&pdata->ptp_lock);
	ns = hw_if->get_systime();
	raw_spin_unlock(&pdata->ptp_lock);

	raw_spin_unlock_irqrestore(&eqos_ts_lock, flags);

	/* split nanoseconds into a timespec */
	req.hw_ptp_ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
	req.hw_ptp_ts.tv_nsec = reminder;

	pr_debug("<--eqos_ptp_get_time: tv_sec = %ld, tv_nsec = %ld\n",
		 req.hw_ptp_ts.tv_sec, req.hw_ptp_ts.tv_nsec);

	if (copy_to_user(ifr->ifr_data, &req, sizeof(req)))

	pr_debug("<--eqos_handle_prv_ts_ioctl\n");
3359 * \brief Driver IOCTL routine
3361 * \details This function is invoked by main ioctl function when
3362 * users request to configure various device features like,
3363 * PMT module, TX and RX PBL, TX and RX FIFO threshold level,
3364 * TX and RX OSF mode, SA insert/replacement, L2/L3/L4 and
3365 * VLAN filtering, AVB/DCB algorithm etc.
3367 * \param[in] pdata – pointer to private data structure.
3368 * \param[in] req – pointer to ioctl structure.
3372 * \retval 0 - success
3373 * \retval negative - failure
/* Dispatch driver-private ioctl commands (PMT, thresholds, store-and-
 * forward/OSF modes, PBL, SA insertion, filters, AVB/DCB, loopback, ...).
 * NOTE(review): elided listing — the "switch (req->cmd)" line, the case
 * labels for the RSF/TSF/OSF/PFC commands, all break statements, the
 * "int ret" declaration and the final return are not visible here.
 */
static int eqos_handle_prv_ioctl(struct eqos_prv_data *pdata,
				 struct ifr_data_struct *req)
	unsigned int qinx = req->qinx;	/* queue the command targets */
	struct tx_ring *ptx_ring =
	    GET_TX_WRAPPER_DESC(qinx);
	struct rx_ring *prx_ring =
	    GET_RX_WRAPPER_DESC(qinx);
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	struct net_device *dev = pdata->dev;

	pr_debug("-->eqos_handle_prv_ioctl\n");

	/* validate the queue index before the ring pointers are used */
	if (qinx > EQOS_QUEUE_CNT) {
		pr_err("Queue number %d is invalid\n"
		       "Hardware has only %d Tx/Rx Queues\n",
		       qinx, EQOS_QUEUE_CNT);
		ret = EQOS_NO_HW_SUPPORT;

	/* --- power management: magic-packet / remote-wakeup --- */
	case EQOS_POWERUP_MAGIC_CMD:
		if (pdata->hw_feat.mgk_sel) {
			ret = eqos_powerup(dev, EQOS_IOCTL_CONTEXT);
				ret = EQOS_CONFIG_SUCCESS;
				ret = EQOS_CONFIG_FAIL;
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_POWERDOWN_MAGIC_CMD:
		if (pdata->hw_feat.mgk_sel) {
					     EQOS_IOCTL_CONTEXT);
				ret = EQOS_CONFIG_SUCCESS;
				ret = EQOS_CONFIG_FAIL;
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_POWERUP_REMOTE_WAKEUP_CMD:
		if (pdata->hw_feat.rwk_sel) {
			ret = eqos_powerup(dev, EQOS_IOCTL_CONTEXT);
				ret = EQOS_CONFIG_SUCCESS;
				ret = EQOS_CONFIG_FAIL;
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_POWERDOWN_REMOTE_WAKEUP_CMD:
		if (pdata->hw_feat.rwk_sel) {
			ret = eqos_configure_remotewakeup(dev, req);
				ret = EQOS_CONFIG_SUCCESS;
				ret = EQOS_CONFIG_FAIL;
			ret = EQOS_NO_HW_SUPPORT;

	/* --- per-queue FIFO thresholds --- */
	case EQOS_RX_THRESHOLD_CMD:
		prx_ring->rx_threshold_val = req->flags;
		hw_if->config_rx_threshold(qinx,
					   prx_ring->rx_threshold_val);
		pr_err("Configured Rx threshold with %d\n",
		       prx_ring->rx_threshold_val);

	case EQOS_TX_THRESHOLD_CMD:
		ptx_ring->tx_threshold_val = req->flags;
		hw_if->config_tx_threshold(qinx,
					   ptx_ring->tx_threshold_val);
		pr_err("Configured Tx threshold with %d\n",
		       ptx_ring->tx_threshold_val);

	/* --- store-and-forward / OSF modes (case labels elided) --- */
		prx_ring->rsf_on = req->flags;
		hw_if->config_rsf_mode(qinx, prx_ring->rsf_on);
		pr_err("Receive store and forward mode %s\n",
		       (prx_ring->rsf_on) ? "enabled" : "disabled");

		ptx_ring->tsf_on = req->flags;
		hw_if->config_tsf_mode(qinx, ptx_ring->tsf_on);
		pr_err("Transmit store and forward mode %s\n",
		       (ptx_ring->tsf_on) ? "enabled" : "disabled");

		ptx_ring->osf_on = req->flags;
		hw_if->config_osf_mode(qinx, ptx_ring->osf_on);
		pr_err("Transmit DMA OSF mode is %s\n",
		       (ptx_ring->osf_on) ? "enabled" : "disabled");

	/* --- AXI burst mode --- */
	case EQOS_INCR_INCRX_CMD:
		pdata->incr_incrx = req->flags;
		hw_if->config_incr_incrx_mode(pdata->incr_incrx);
		pr_err("%s mode is enabled\n",
		       (pdata->incr_incrx) ? "INCRX" : "INCR");

	/* --- programmable burst lengths --- */
	case EQOS_RX_PBL_CMD:
		prx_ring->rx_pbl = req->flags;
		eqos_config_rx_pbl(pdata, prx_ring->rx_pbl, qinx);

	case EQOS_TX_PBL_CMD:
		ptx_ring->tx_pbl = req->flags;
		eqos_config_tx_pbl(pdata, ptx_ring->tx_pbl, qinx);

	case EQOS_PTPOFFLOADING_CMD:
		if (pdata->hw_feat.tsstssel) {
			ret = eqos_config_ptpoffload(pdata, req->ptr);
			pr_err("No HW support for PTP\n");
			ret = EQOS_NO_HW_SUPPORT;

	/* --- source-address insertion/replacement (MAC0/MAC1, via TX
	 * descriptor or via register) --- */
	case EQOS_SA0_DESC_CMD:
		if (pdata->hw_feat.sa_vlan_ins) {
			pdata->tx_sa_ctrl_via_desc = req->flags;
			pdata->tx_sa_ctrl_via_reg = EQOS_SA0_NONE;
			if (req->flags == EQOS_SA0_NONE) {
				memcpy(pdata->mac_addr, pdata->dev->dev_addr,
				memcpy(pdata->mac_addr, mac_addr0,
			hw_if->configure_mac_addr0_reg(pdata->mac_addr);
			hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
			    ("SA will use MAC0 with descriptor for configuration %d\n",
			     pdata->tx_sa_ctrl_via_desc);
			    ("Device doesn't supports SA Insertion/Replacement\n");
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_SA1_DESC_CMD:
		if (pdata->hw_feat.sa_vlan_ins) {
			pdata->tx_sa_ctrl_via_desc = req->flags;
			pdata->tx_sa_ctrl_via_reg = EQOS_SA1_NONE;
			if (req->flags == EQOS_SA1_NONE) {
				memcpy(pdata->mac_addr, pdata->dev->dev_addr,
				memcpy(pdata->mac_addr, mac_addr1,
			hw_if->configure_mac_addr1_reg(pdata->mac_addr);
			hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
			    ("SA will use MAC1 with descriptor for configuration %d\n",
			     pdata->tx_sa_ctrl_via_desc);
			    ("Device doesn't supports SA Insertion/Replacement\n");
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_SA0_REG_CMD:
		if (pdata->hw_feat.sa_vlan_ins) {
			pdata->tx_sa_ctrl_via_reg = req->flags;
			pdata->tx_sa_ctrl_via_desc = EQOS_SA0_NONE;
			if (req->flags == EQOS_SA0_NONE) {
				memcpy(pdata->mac_addr, pdata->dev->dev_addr,
				memcpy(pdata->mac_addr, mac_addr0,
			hw_if->configure_mac_addr0_reg(pdata->mac_addr);
			hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
			    ("SA will use MAC0 with register for configuration %d\n",
			     pdata->tx_sa_ctrl_via_desc);
			    ("Device doesn't supports SA Insertion/Replacement\n");
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_SA1_REG_CMD:
		if (pdata->hw_feat.sa_vlan_ins) {
			pdata->tx_sa_ctrl_via_reg = req->flags;
			pdata->tx_sa_ctrl_via_desc = EQOS_SA1_NONE;
			if (req->flags == EQOS_SA1_NONE) {
				memcpy(pdata->mac_addr, pdata->dev->dev_addr,
				memcpy(pdata->mac_addr, mac_addr1,
			hw_if->configure_mac_addr1_reg(pdata->mac_addr);
			hw_if->configure_sa_via_reg(pdata->tx_sa_ctrl_via_reg);
			    ("SA will use MAC1 with register for configuration %d\n",
			     pdata->tx_sa_ctrl_via_desc);
			    ("Device doesn't supports SA Insertion/Replacement\n");
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_SETUP_CONTEXT_DESCRIPTOR:
		if (pdata->hw_feat.sa_vlan_ins) {
			ptx_ring->context_setup = req->context_setup;
			if (ptx_ring->context_setup == 1) {
				pr_err("Context descriptor will be transmitted"
				       " with every normal descriptor on %d DMA Channel\n",
				pr_err("Context descriptor will be setup"
				       " only if VLAN id changes %d\n", qinx);
			pr_err("Device doesn't support VLAN operations\n");
			ret = EQOS_NO_HW_SUPPORT;

	/* --- simple queries --- */
	case EQOS_GET_RX_QCNT:
		req->qinx = EQOS_RX_QUEUE_CNT;

	case EQOS_GET_TX_QCNT:
		req->qinx = EQOS_TX_QUEUE_CNT;

	case EQOS_GET_CONNECTED_SPEED:
		req->connected_speed = pdata->speed;

	/* --- scheduling algorithms --- */
	case EQOS_DCB_ALGORITHM:
		eqos_program_dcb_algorithm(pdata, req);

	case EQOS_AVB_ALGORITHM:
		eqos_program_avb_algorithm(pdata, req);

	/* --- filtering commands (delegate to the helpers above) --- */
	case EQOS_L3_L4_FILTER_CMD:
		if (pdata->hw_feat.l3l4_filter_num > 0) {
			ret = eqos_config_l3_l4_filtering(dev, req->flags);
				ret = EQOS_CONFIG_SUCCESS;
				ret = EQOS_CONFIG_FAIL;
			ret = EQOS_NO_HW_SUPPORT;

	case EQOS_IPV4_FILTERING_CMD:
		ret = eqos_config_ip4_filters(dev, req);

	case EQOS_IPV6_FILTERING_CMD:
		ret = eqos_config_ip6_filters(dev, req);

	case EQOS_UDP_FILTERING_CMD:
		ret = eqos_config_tcp_udp_filters(dev, req, 1);

	case EQOS_TCP_FILTERING_CMD:
		ret = eqos_config_tcp_udp_filters(dev, req, 0);

	case EQOS_VLAN_FILTERING_CMD:
		ret = eqos_config_vlan_filter(dev, req);

	case EQOS_L2_DA_FILTERING_CMD:
		ret = eqos_confing_l2_da_filter(dev, req);

	case EQOS_ARP_OFFLOAD_CMD:
		ret = eqos_config_arp_offload(dev, req);

	/* --- AXI bus tuning --- */
	case EQOS_AXI_PBL_CMD:
		pdata->axi_pbl = req->flags;
		hw_if->config_axi_pbl_val(pdata->axi_pbl);
		pr_err("AXI PBL value: %d\n", pdata->axi_pbl);

	case EQOS_AXI_WORL_CMD:
		pdata->axi_worl = req->flags;
		hw_if->config_axi_worl_val(pdata->axi_worl);
		pr_err("AXI WORL value: %d\n", pdata->axi_worl);

	case EQOS_AXI_RORL_CMD:
		pdata->axi_rorl = req->flags;
		hw_if->config_axi_rorl_val(pdata->axi_rorl);
		pr_err("AXI RORL value: %d\n", pdata->axi_rorl);

	/* --- loopback / isolation tests --- */
	case EQOS_MAC_LOOPBACK_MODE_CMD:
		ret = eqos_config_mac_loopback_mode(dev, req->flags);
			ret = EQOS_CONFIG_SUCCESS;
			ret = EQOS_CONFIG_FAIL;

		ret = eqos_config_pfc(dev, req->flags);

	case EQOS_PHY_LOOPBACK:
		ret = eqos_handle_phy_loopback(pdata, (void *)req);

	case EQOS_MEM_ISO_TEST:
		ret = eqos_handle_mem_iso_ioctl(pdata, (void *)req);

	case EQOS_CSR_ISO_TEST:
		ret = eqos_handle_csr_iso_ioctl(pdata, (void *)req);

		pr_err("Unsupported command call\n");

	pr_debug("<--eqos_handle_prv_ioctl\n");
3719 * \brief control hw timestamping.
3721 * \details This function is used to configure the MAC to enable/disable both
3722 * outgoing(Tx) and incoming(Rx) packets time stamping based on user input.
3724 * \param[in] pdata – pointer to private data structure.
3725 * \param[in] ifr – pointer to IOCTL specific structure.
3729 * \retval 0 - success
3730 * \retval negative - failure
3733 static int eqos_handle_hwtstamp_ioctl(struct eqos_prv_data *pdata,
/* Handles SIOCSHWTSTAMP: translates the user's hwtstamp_config (tx_type,
 * rx_filter) into MAC Timestamp Control Register (MAC_TCR) bit settings,
 * programs the timestamp control / sub-second-increment / addend registers,
 * initializes the PTP system time, and copies the accepted config back to
 * user space.  Returns 0 on success or a negative errno. */
3736 struct hw_if_struct *hw_if = &(pdata->hw_if);
3737 struct hwtstamp_config config;
/* MAC_TCR field accumulators; zero means "feature disabled" and the
 * selected rx_filter case below ORs in only the bits it needs. */
3740 u32 ptp_over_ipv4_udp = 0;
3741 u32 ptp_over_ipv6_udp = 0;
3742 u32 ptp_over_ethernet = 0;
3743 u32 snap_type_sel = 0;
3744 u32 ts_master_en = 0;
3745 u32 ts_event_en = 0;
3746 u32 av_8021asm_en = 0;
3749 struct timespec now;
3751 DBGPR_PTP("-->eqos_handle_hwtstamp_ioctl\n");
/* Bail out early if this core was synthesized without the timestamp
 * feature (hw_feat.tsstssel comes from the HW feature register). */
3753 if (!pdata->hw_feat.tsstssel) {
3754 pr_err("No hw timestamping is available in this core\n");
3758 if (copy_from_user(&config, ifr->ifr_data,
3759 sizeof(struct hwtstamp_config)))
3762 DBGPR_PTP("config.flags = %#x, tx_type = %#x, rx_filter = %#x\n",
3763 config.flags, config.tx_type, config.rx_filter);
3765 /* reserved for future extensions */
/* Tx timestamping is a simple on/off latch recorded in pdata. */
3769 switch (config.tx_type) {
3770 case HWTSTAMP_TX_OFF:
3771 pdata->hwts_tx_en = 0;
3773 case HWTSTAMP_TX_ON:
3774 pdata->hwts_tx_en = 1;
/* Rx filter: each case writes back the (possibly downgraded) filter it
 * actually supports and selects the matching MAC_TCR enable bits. */
3780 switch (config.rx_filter) {
3781 /* time stamp no incoming packet at all */
3782 case HWTSTAMP_FILTER_NONE:
3783 config.rx_filter = HWTSTAMP_FILTER_NONE;
3786 /* PTP v1, UDP, any kind of event packet */
3787 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3788 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
3789 /* take time stamp for all event messages */
3790 snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
3792 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3793 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3796 /* PTP v1, UDP, Sync packet */
3797 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3798 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
3799 /* take time stamp for SYNC messages only */
3800 ts_event_en = MAC_TCR_TSEVENTENA;
3802 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3803 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3806 /* PTP v1, UDP, Delay_req packet */
3807 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3808 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
3809 /* take time stamp for Delay_Req messages only */
3810 ts_master_en = MAC_TCR_TSMASTERENA;
3811 ts_event_en = MAC_TCR_TSEVENTENA;
3813 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3814 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3817 /* PTP v2, UDP, any kind of event packet */
3818 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3819 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
3820 ptp_v2 = MAC_TCR_TSVER2ENA;
3821 /* take time stamp for all event messages */
3822 snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
3824 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3825 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3828 /* PTP v2, UDP, Sync packet */
3829 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3830 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
3831 ptp_v2 = MAC_TCR_TSVER2ENA;
3832 /* take time stamp for SYNC messages only */
3833 ts_event_en = MAC_TCR_TSEVENTENA;
3835 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3836 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3839 /* PTP v2, UDP, Delay_req packet */
3840 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3841 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
3842 ptp_v2 = MAC_TCR_TSVER2ENA;
3843 /* take time stamp for Delay_Req messages only */
3844 ts_master_en = MAC_TCR_TSMASTERENA;
3845 ts_event_en = MAC_TCR_TSEVENTENA;
3847 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3848 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3851 /* PTP v2/802.AS1, any layer, any kind of event packet */
3852 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3853 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3854 ptp_v2 = MAC_TCR_TSVER2ENA;
3855 /* take time stamp for all event messages */
3856 snap_type_sel = MAC_TCR_SNAPTYPSEL_1;
3858 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3859 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3860 ptp_over_ethernet = MAC_TCR_TSIPENA;
3861 /* for VLAN tagged PTP, AV8021ASMEN bit should not be set */
3862 #ifdef DWC_1588_VLAN_UNTAGGED
3863 av_8021asm_en = MAC_TCR_AV8021ASMEN;
3867 /* PTP v2/802.AS1, any layer, Sync packet */
3868 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3869 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
3870 ptp_v2 = MAC_TCR_TSVER2ENA;
3871 /* take time stamp for SYNC messages only */
3872 ts_event_en = MAC_TCR_TSEVENTENA;
3874 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3875 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3876 ptp_over_ethernet = MAC_TCR_TSIPENA;
3877 av_8021asm_en = MAC_TCR_AV8021ASMEN;
3880 /* PTP v2/802.AS1, any layer, Delay_req packet */
3881 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3882 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
3883 ptp_v2 = MAC_TCR_TSVER2ENA;
3884 /* take time stamp for Delay_Req messages only */
3885 ts_master_en = MAC_TCR_TSMASTERENA;
3886 ts_event_en = MAC_TCR_TSEVENTENA;
3888 ptp_over_ipv4_udp = MAC_TCR_TSIPV4ENA;
3889 ptp_over_ipv6_udp = MAC_TCR_TSIPV6ENA;
3890 ptp_over_ethernet = MAC_TCR_TSIPENA;
3891 av_8021asm_en = MAC_TCR_AV8021ASMEN;
3894 /* time stamp any incoming packet */
3895 case HWTSTAMP_FILTER_ALL:
3896 config.rx_filter = HWTSTAMP_FILTER_ALL;
3897 tstamp_all = MAC_TCR_TSENALL;
/* Rx timestamping is considered enabled for any filter other than NONE. */
3904 ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
3906 if (!pdata->hwts_tx_en && !pdata->hwts_rx_en) {
3907 /* disable hw time stamping */
3908 hw_if->config_hw_time_stamping(mac_tcr);
/* Enable timestamping: TSENA + fine-correction update (TSCFUPDT) +
 * digital rollover (TSCTRLSSR) plus the per-filter bits gathered above. */
3911 (MAC_TCR_TSENA | MAC_TCR_TSCFUPDT | MAC_TCR_TSCTRLSSR |
3912 tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp
3913 | ptp_over_ipv4_udp | ts_event_en | ts_master_en |
3914 snap_type_sel | av_8021asm_en);
3916 if (!pdata->one_nsec_accuracy)
3917 mac_tcr &= ~MAC_TCR_TSCTRLSSR;
3919 hw_if->config_hw_time_stamping(mac_tcr);
3921 /* program Sub Second Increment Reg */
3922 hw_if->config_sub_second_increment(EQOS_SYSCLOCK);
3925 * addend = 2^32/freq_div_ratio;
3927 * where, freq_div_ratio = EQOS_SYSCLOCK/50MHz
3929 * hence, addend = ((2^32) * 50MHz)/EQOS_SYSCLOCK;
3931 * NOTE: EQOS_SYSCLOCK should be >= 50MHz to
3932 * achieve 20ns accuracy.
3934 * 2^x * y == (y << x), hence
3935 * 2^32 * 6250000 ==> (6250000 << 32)
/* NOTE(review): the comment above says 50MHz/6250000, but the code below
 * computes (2^32 * 62.5MHz) / 125MHz — i.e. a fixed addend of 2^31 for a
 * 125 MHz reference. Confirm which clock figures are current and update
 * the comment (or constants) accordingly. */
3937 temp = (u64) (62500000ULL << 32);
3938 pdata->default_addend = div_u64(temp, 125000000);
3940 hw_if->config_addend(pdata->default_addend);
3942 /* initialize system time */
3943 getnstimeofday(&now);
3944 hw_if->init_systime(now.tv_sec, now.tv_nsec);
3946 DBGPR_PTP("-->eqos registering get_ptp function\n");
3947 /* Register broadcasting MAC timestamp to clients */
3948 tegra_register_hwtime_source(hw_if->get_systime);
3951 DBGPR_PTP("config.flags = %#x, tx_type = %#x, rx_filter = %#x\n",
3952 config.flags, config.tx_type, config.rx_filter);
3954 DBGPR_PTP("<--eqos_handle_hwtstamp_ioctl\n");
/* Report the filter actually programmed back to user space. */
3956 return (copy_to_user(ifr->ifr_data, &config,
3957 sizeof(struct hwtstamp_config))) ? -EFAULT : 0;
3961 * \brief Driver IOCTL routine
3963 * \details This function is invoked by kernel when a user request an ioctl
3964 * which can't be handled by the generic interface code. Following operations
3965 * are performed in this functions.
3966 * - Configuring the PMT module.
3967 * - Configuring TX and RX PBL.
3968 * - Configuring the TX and RX FIFO threshold level.
3969 * - Configuring the TX and RX OSF mode.
3971 * \param[in] dev – pointer to net device structure.
3972 * \param[in] ifr – pointer to IOCTL specific structure.
3973 * \param[in] cmd – IOCTL command.
3977 * \retval 0 - success
3978 * \retval negative - failure
3981 static int eqos_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Driver ioctl entry point (ndo_do_ioctl): dispatches standard MII ioctls
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIPHY), the driver-private EQOS ioctls
 * and SIOCSHWTSTAMP to their handlers.  Returns 0 or a negative errno. */
3983 struct eqos_prv_data *pdata = netdev_priv(dev);
3984 struct ifr_data_struct *req = ifr->ifr_ifru.ifru_data;
3985 struct mii_ioctl_data *data = if_mii(ifr);
3986 unsigned int reg_val = 0;
3989 pr_debug("-->eqos_ioctl\n");
/* ioctls are only serviced on a running interface with an attached PHY. */
3991 if ((!netif_running(dev)) || (!pdata->phydev)) {
3992 pr_debug("<--eqos_ioctl - error\n");
/* Serialize all ioctl-driven register access. */
3996 spin_lock(&pdata->lock);
3999 data->phy_id = pdata->phyaddr;
4000 pr_err("PHY ID: SIOCGMIIPHY\n");
/* Read the requested PHY register (5-bit register number) into reg_val.
 * FIX: the third argument was mis-encoded as "®_val" (mojibake for
 * "&reg_val"); restored so the read-back below sees the fetched value. */
4005 eqos_mdio_read_direct(pdata, pdata->phyaddr,
4006 (data->reg_num & 0x1F), &reg_val);
4010 data->val_out = reg_val;
4011 pr_err("PHY ID: SIOCGMIIREG reg:%#x reg_val:%#x\n",
4012 (data->reg_num & 0x1F), reg_val);
4016 pr_err("PHY ID: SIOCSMIIPHY\n");
4019 case EQOS_PRV_IOCTL:
4020 ret = eqos_handle_prv_ioctl(pdata, req);
/* Mirror the handler's status into the user-visible request block. */
4021 req->command_error = ret;
4024 case EQOS_PRV_TS_IOCTL:
4025 ret = eqos_handle_prv_ts_ioctl(pdata, ifr);
4029 ret = eqos_handle_hwtstamp_ioctl(pdata, ifr);
4034 pr_err("Unsupported IOCTL call\n");
4036 spin_unlock(&pdata->lock);
4038 pr_debug("<--eqos_ioctl\n");
4044 * \brief API to change MTU.
4046 * \details This function is invoked by upper layer when user changes
4047 * MTU (Maximum Transfer Unit). The MTU is used by the Network layer
4048 * to driver packet transmission. Ethernet has a default MTU of
4049 * 1500Bytes. This value can be changed with ifconfig -
4050 * ifconfig <interface_name> mtu <new_mtu_value>
4052 * \param[in] dev - pointer to net_device structure
4053 * \param[in] new_mtu - the new MTU for the device.
4057 * \retval 0 - on success and -ve on failure.
4060 static INT eqos_change_mtu(struct net_device *dev, INT new_mtu)
/* ndo_change_mtu handler: validates the requested MTU against driver and
 * PHY limits, then stops the device, resizes the Rx buffers and restarts
 * it under hw_change_lock.  Returns 0 on success, negative on rejection. */
4062 struct eqos_prv_data *pdata = netdev_priv(dev);
4063 struct platform_device *pdev = pdata->pdev;
/* Total on-wire frame size: payload + L2 header + FCS + one VLAN tag. */
4064 int max_frame = (new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
4066 pr_debug("-->eqos_change_mtu: new_mtu:%d\n", new_mtu);
4068 #ifdef EQOS_CONFIG_PGTEST
4069 dev_err(&pdev->dev, "jumbo frames not supported with PG test\n");
/* Validation gates: multi-queue mode pins the MTU, only three discrete
 * MTU values are supported, and the frame must fit the PHY's limit. */
4072 if (pdata->dt_cfg.use_multi_q) {
4074 "mtu cannot be modified in multi queue mode\n");
4077 if ((new_mtu != 1500) && (new_mtu != 4074) && (new_mtu != 9000)) {
4078 dev_err(&pdev->dev, "valid mtus are 1500, 4074, or 9000\n");
4081 if (max_frame > (pdata->dt_cfg.phy_max_frame_size)) {
4082 dev_err(&pdev->dev, "mtu exceeds phy max frame size of %d",
4083 pdata->dt_cfg.phy_max_frame_size);
4086 if (dev->mtu == new_mtu) {
4087 dev_err(&pdev->dev, "already configured to mtu %d\n", new_mtu);
4091 dev_info(&pdev->dev, "changing MTU from %d to %d\n", dev->mtu, new_mtu);
/* Quiesce the hardware (if running) while Rx buffer geometry changes. */
4093 mutex_lock(&pdata->hw_change_lock);
4094 if (!pdata->hw_stopped)
4095 eqos_stop_dev(pdata);
/* Rx buffers are at least 2 KiB; larger frames get an aligned size. */
4097 if (max_frame <= 2048) {
4098 pdata->rx_buffer_len = 2048;
4100 pdata->rx_buffer_len = ALIGN_SIZE(max_frame);
4102 pdata->rx_max_frame_size = max_frame;
4106 if (!pdata->hw_stopped)
4107 eqos_start_dev(pdata);
4109 mutex_unlock(&pdata->hw_change_lock);
4111 pr_debug("<--eqos_change_mtu\n");
4116 #ifdef EQOS_QUEUE_SELECT_ALGO
/* ndo_select_queue handler: maps skb->priority to a Tx queue using the
 * device-tree supplied q_prio[] table; falls back when no queue matches. */
4117 u16 eqos_select_queue(struct net_device *dev,
4118 struct sk_buff *skb, void *accel_priv,
4119 select_queue_fallback_t fallback)
4121 int txqueue_select = -1;
4122 struct eqos_prv_data *pdata = netdev_priv(dev);
4123 struct eqos_cfg *pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg;
4126 pr_debug("-->eqos_select_queue\n");
/* NOTE(review): "<=" iterates EQOS_TX_QUEUE_CNT + 1 entries; if q_prio[]
 * holds exactly EQOS_TX_QUEUE_CNT elements this reads one past the end —
 * verify the array's declared size (likely should be "<"). */
4128 for (i = 0; i <= EQOS_TX_QUEUE_CNT; i++) {
4129 if (pdt_cfg->q_prio[i] == skb->priority) {
/* No priority match: defer to the kernel's fallback selector. */
4135 if (txqueue_select < 0)
4138 pr_debug("<--eqos_select_queue txqueue-select:%d\n", txqueue_select);
4140 return txqueue_select;
4144 unsigned int crc32_snps_le(unsigned int initval, unsigned char *data,
/* Bit-serial CRC-32 (polynomial 0x04C11DB7) over the first `size` BITS of
 * `data`, seeded with `initval`.  Bytes are consumed one bit at a time;
 * a fresh byte is loaded every 8 iterations.  Used for the Synopsys MAC
 * hash-filter CRC.  NOTE(review): per-bit shift/XOR lines sit between the
 * statements below; the bit order consumed from my_data should be
 * confirmed against the MAC filtering spec. */
4147 unsigned int crc = initval;
4148 unsigned int poly = 0x04c11db7;
4149 unsigned int temp = 0;
4150 unsigned char my_data = 0;
4152 for (bit_count = 0; bit_count < size; bit_count++) {
/* Load the next input byte at each byte boundary. */
4153 if ((bit_count % 8) == 0)
4154 my_data = data[bit_count / 8];
4155 DBGPR_FILTER("%s my_data = %x crc=%x\n", __func__, my_data,
/* Decide whether this step XORs in the polynomial: top CRC bit vs LSB
 * of the current data bit. */
4157 temp = ((crc >> 31) ^ my_data) & 0x1;
4163 DBGPR_FILTER("%s my_data = %x crc=%x\n", __func__, my_data, crc);
4168 * \brief API to delete vid to HW filter.
4170 * \details This function is invoked by upper layer when a VLAN id is removed.
4171 * This function deletes the VLAN id from the HW filter.
4172 * vlan id can be removed with vconfig -
4173 * vconfig rem <interface_name > <vlan_id>
4175 * \param[in] dev - pointer to net_device structure
4176 * \param[in] vid - vlan id to be removed.
4180 static int eqos_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
/* ndo_vlan_rx_kill_vid handler: removes `vid` from the MAC's VLAN filter.
 * In hash-filtering mode it clears the vid's bit in the VLAN hash table;
 * otherwise it falls back to exact-match filtering on VID 1. */
4182 struct eqos_prv_data *pdata = netdev_priv(dev);
4183 struct hw_if_struct *hw_if = &(pdata->hw_if);
4184 unsigned short new_index, old_index;
4186 unsigned int enb_12bit_vhash;
4188 pr_err("-->eqos_vlan_rx_kill_vid: vid = %d\n", vid);
4190 if (pdata->vlan_hash_filtering) {
/* Upper 4 bits of the bit-reversed CRC-32 of the 2-byte vid select the
 * hash-table bit (same computation as eqos_vlan_rx_add_vid). */
4192 (bitrev32(~crc32_le(~0, (unsigned char *)&vid, 2)) >> 28);
4194 enb_12bit_vhash = hw_if->get_vlan_tag_comparison();
4195 if (enb_12bit_vhash) {
4196 /* negate the 4-bit crc value for 12-bit VLAN hash comparison */
4197 new_index = (1 << (~crc32_val & 0xF));
4199 new_index = (1 << (crc32_val & 0xF));
/* Clear only this vid's bit, preserving other registered vids. */
4202 old_index = hw_if->get_vlan_hash_table_reg();
4203 old_index &= ~new_index;
4204 hw_if->update_vlan_hash_table_reg(old_index);
4205 pdata->vlan_ht_or_id = old_index;
4207 /* By default, receive only VLAN pkt with VID = 1
4208 * because writing 0 will pass all VLAN pkt */
4209 hw_if->update_vlan_id(1);
4210 pdata->vlan_ht_or_id = 1;
4213 pr_err("<--eqos_vlan_rx_kill_vid\n");
4215 /* FIXME: Check if any errors need to be returned in case of failure */
4219 static int eqos_set_mac_address(struct net_device *dev, void *p)
/* ndo_set_mac_address handler.  If the device already holds a valid MAC,
 * the (elided) branch body presumably rejects the change — confirm against
 * the full source; otherwise the new address is applied via eth_mac_addr(),
 * which validates it and copies it into dev->dev_addr. */
4221 if (is_valid_ether_addr(dev->dev_addr))
4224 return eth_mac_addr(dev, p);
4228 * \brief API to add vid to HW filter.
4230 * \details This function is invoked by upper layer when a new VALN id is
4231 * registered. This function updates the HW filter with new VLAN id.
4232 * New vlan id can be added with vconfig -
4233 * vconfig add <interface_name > <vlan_id>
4235 * \param[in] dev - pointer to net_device structure
4236 * \param[in] vid - new vlan id.
4240 static int eqos_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
/* ndo_vlan_rx_add_vid handler: registers `vid` with the MAC's VLAN filter.
 * In hash-filtering mode it sets the vid's bit in the VLAN hash table;
 * otherwise it programs the single exact-match VLAN ID register. */
4242 struct eqos_prv_data *pdata = netdev_priv(dev);
4243 struct hw_if_struct *hw_if = &(pdata->hw_if);
4244 unsigned short new_index, old_index;
4246 unsigned int enb_12bit_vhash;
4248 pr_err("-->eqos_vlan_rx_add_vid: vid = %d\n", vid);
4250 if (pdata->vlan_hash_filtering) {
4251 /* The upper 4 bits of the calculated CRC are used to
4252 * index the content of the VLAN Hash Table Reg.
4255 (bitrev32(~crc32_le(~0, (unsigned char *)&vid, 2)) >> 28);
4257 /* These 4(0xF) bits determines the bit within the
4258 * VLAN Hash Table Reg 0
4260 enb_12bit_vhash = hw_if->get_vlan_tag_comparison();
4261 if (enb_12bit_vhash) {
4262 /* negate the 4-bit crc value for 12-bit VLAN hash comparison */
4263 new_index = (1 << (~crc32_val & 0xF));
4265 new_index = (1 << (crc32_val & 0xF));
/* OR the new bit in so previously registered vids stay enabled. */
4268 old_index = hw_if->get_vlan_hash_table_reg();
4269 old_index |= new_index;
4270 hw_if->update_vlan_hash_table_reg(old_index);
4271 pdata->vlan_ht_or_id = old_index;
/* Exact-match mode: only one VID can be filtered at a time. */
4273 hw_if->update_vlan_id(vid);
4274 pdata->vlan_ht_or_id = vid;
4277 pr_err("<--eqos_vlan_rx_add_vid\n");
4279 /* FIXME: Check if any errors need to be returned in case of failure */
4284 * \brief API called to put device in powerdown mode
4286 * \details This function is invoked by ioctl function when the user issues an
4287 * ioctl command to move the device to power down state. Following operations
4288 * are performed in this function.
4292 * - Stop DMA TX and RX process.
4293 * - Enable power down mode using PMT module.
4295 * \param[in] dev – pointer to net device structure.
4296 * \param[in] wakeup_type – remote wake-on-lan or magic packet.
4297 * \param[in] caller – netif_detach gets called conditionally based
4298 * on caller, IOCTL or DRIVER-suspend
4302 * \retval zero on success and -ve number on failure.
4305 INT eqos_powerdown(struct net_device *dev, UINT wakeup_type, UINT caller)
/* Puts the device into PMT power-down: stops the PHY, detaches/quiesces
 * the netdev, halts all Tx/Rx DMA channels, then arms the requested
 * wake sources (remote wake and/or magic packet) in the PMT registers.
 * `caller` distinguishes ioctl-initiated vs driver(suspend)-initiated
 * power-down; only the ioctl path latches pdata->power_down. */
4307 struct eqos_prv_data *pdata = netdev_priv(dev);
4308 struct hw_if_struct *hw_if = &(pdata->hw_if);
4310 pr_debug("-->eqos_powerdown\n");
/* No-op if the interface is down or an ioctl power-down is already active. */
4312 if (!dev || !netif_running(dev) ||
4313 (caller == EQOS_IOCTL_CONTEXT && pdata->power_down)) {
4315 ("Device is already powered down and will powerup for %s\n",
4316 EQOS_POWER_DOWN_TYPE(pdata));
4317 pr_debug("<--eqos_powerdown\n");
4322 phy_stop(pdata->phydev);
/* pmt_lock serializes power state transitions against eqos_powerup(). */
4324 spin_lock(&pdata->pmt_lock);
4326 if (caller == EQOS_DRIVER_CONTEXT)
4327 netif_device_detach(dev);
4329 netif_tx_disable(dev);
4330 eqos_all_ch_napi_disable(pdata);
4332 /* stop DMA TX/RX */
4333 eqos_stop_all_ch_tx_dma(pdata);
4334 eqos_stop_all_ch_rx_dma(pdata);
4336 /* enable power down mode by programming the PMT regs */
4337 if (wakeup_type & EQOS_REMOTE_WAKEUP)
4338 hw_if->enable_remote_pmt();
4339 if (wakeup_type & EQOS_MAGIC_WAKEUP)
4340 hw_if->enable_magic_pmt();
/* Remember which wake sources were armed so powerup can disarm them. */
4341 pdata->power_down_type = wakeup_type;
4343 if (caller == EQOS_IOCTL_CONTEXT)
4344 pdata->power_down = 1;
4346 spin_unlock(&pdata->pmt_lock);
4348 pr_debug("-->eqos_powerdown\n");
4354 * \brief API to powerup the device
4356 * \details This function is invoked by ioctl function when the user issues an
4357 * ioctl command to move the device to out of power down state. Following
4358 * operations are performed in this function.
4359 * - Wakeup the device using PMT module if supported.
4361 * - Enable MAC and DMA TX and RX process.
4363 * - Starts the queue.
4365 * \param[in] dev – pointer to net device structure.
4366 * \param[in] caller – netif_attach gets called conditionally based
4367 * on caller, IOCTL or DRIVER-suspend
4371 * \retval zero on success and -ve number on failure.
4374 INT eqos_powerup(struct net_device *dev, UINT caller)
/* Reverses eqos_powerdown(): disarms the previously enabled PMT wake
 * sources, restarts the PHY, MAC and all DMA channels, re-attaches the
 * netdev (driver context only) and re-enables NAPI and the Tx queues. */
4376 struct eqos_prv_data *pdata = netdev_priv(dev);
4377 struct hw_if_struct *hw_if = &(pdata->hw_if);
4379 pr_debug("-->eqos_powerup\n");
/* No-op if the interface is down or (ioctl path) not powered down. */
4381 if (!dev || !netif_running(dev) ||
4382 (caller == EQOS_IOCTL_CONTEXT && !pdata->power_down)) {
4383 pr_err("Device is already powered up\n");
4387 spin_lock(&pdata->pmt_lock);
/* Disarm exactly the wake sources recorded by eqos_powerdown(). */
4389 if (pdata->power_down_type & EQOS_MAGIC_WAKEUP) {
4390 hw_if->disable_magic_pmt();
4391 pdata->power_down_type &= ~EQOS_MAGIC_WAKEUP;
4394 if (pdata->power_down_type & EQOS_REMOTE_WAKEUP) {
4395 hw_if->disable_remote_pmt();
4396 pdata->power_down_type &= ~EQOS_REMOTE_WAKEUP;
4399 pdata->power_down = 0;
4402 phy_start(pdata->phydev);
4404 /* enable MAC TX/RX */
4405 hw_if->start_mac_tx_rx();
4407 /* enable DMA TX/RX */
4408 eqos_start_all_ch_tx_dma(pdata);
4409 eqos_start_all_ch_rx_dma(pdata);
4411 if (caller == EQOS_DRIVER_CONTEXT)
4412 netif_device_attach(dev);
4414 eqos_napi_enable_mq(pdata);
4416 netif_tx_start_all_queues(dev);
4418 spin_unlock(&pdata->pmt_lock);
4420 pr_debug("<--eqos_powerup\n");
4426 * \brief API to configure remote wakeup
4428 * \details This function is invoked by ioctl function when the user issues an
4429 * ioctl command to move the device to power down state using remote wakeup.
4431 * \param[in] dev – pointer to net device structure.
4432 * \param[in] req – pointer to ioctl data structure.
4436 * \retval zero on success and -ve number on failure.
4439 INT eqos_configure_remotewakeup(struct net_device *dev,
4440 struct ifr_data_struct *req)
/* Programs the remote-wakeup packet filter from the user's ioctl request
 * and then powers the device down with remote wake-up armed. */
4442 struct eqos_prv_data *pdata = netdev_priv(dev);
4443 struct hw_if_struct *hw_if = &(pdata->hw_if);
/* Reject when the interface is down, the core lacks remote-wake support
 * (hw_feat.rwk_sel), or the device is already powered down. */
4445 if (!dev || !netif_running(dev) || !pdata->hw_feat.rwk_sel
4446 || pdata->power_down) {
4448 ("Device is already powered down and will powerup for %s\n",
4449 EQOS_POWER_DOWN_TYPE(pdata));
4453 hw_if->configure_rwk_filter(req->rwk_filter_values,
4454 req->rwk_filter_length);
4456 eqos_powerdown(dev, EQOS_REMOTE_WAKEUP, EQOS_IOCTL_CONTEXT);
4462 * \details This function is invoked by ioctl function when the user issues an
4463 * ioctl command to change the RX DMA PBL value. This function will program
4464 * the device to configure the user specified RX PBL value.
4466 * \param[in] pdata – pointer to private data structure.
4467 * \param[in] rx_pbl – RX DMA pbl value to be programmed.
4474 static void eqos_config_rx_pbl(struct eqos_prv_data *pdata,
4475 UINT rx_pbl, UINT qinx)
/* Programs the Rx DMA Programmable Burst Length for queue `qinx`.  Small
 * values are written directly with PBLx8 off; larger values are divided
 * by 8 with the PBLx8 multiplier enabled.  Afterwards the effective
 * Tx/Rx PBL values are printed, scaled by 8 when PBLx8 is in use. */
4477 struct hw_if_struct *hw_if = &(pdata->hw_if);
4480 pr_debug("-->eqos_config_rx_pbl: %d\n", rx_pbl);
/* Direct PBL programming path (PBLx8 disabled). */
4489 hw_if->config_rx_pbl_val(qinx, rx_pbl);
4490 hw_if->config_pblx8(qinx, 0);
/* PBLx8 path: hardware multiplies the programmed value by 8. */
4495 hw_if->config_rx_pbl_val(qinx, rx_pbl / 8);
4496 hw_if->config_pblx8(qinx, 1);
/* Report the effective PBL, accounting for the x8 multiplier. */
4501 switch (pblx8_val) {
4503 pr_err("Tx PBL[%d] value: %d\n",
4504 qinx, hw_if->get_tx_pbl_val(qinx));
4505 pr_err("Rx PBL[%d] value: %d\n",
4506 qinx, hw_if->get_rx_pbl_val(qinx));
4509 pr_err("Tx PBL[%d] value: %d\n",
4510 qinx, (hw_if->get_tx_pbl_val(qinx) * 8));
4511 pr_err("Rx PBL[%d] value: %d\n",
4512 qinx, (hw_if->get_rx_pbl_val(qinx) * 8));
4516 pr_debug("<--eqos_config_rx_pbl\n");
4520 * \details This function is invoked by ioctl function when the user issues an
4521 * ioctl command to change the TX DMA PBL value. This function will program
4522 * the device to configure the user specified TX PBL value.
4524 * \param[in] pdata – pointer to private data structure.
4525 * \param[in] tx_pbl – TX DMA pbl value to be programmed.
4532 static void eqos_config_tx_pbl(struct eqos_prv_data *pdata,
4533 UINT tx_pbl, UINT qinx)
/* Programs the Tx DMA Programmable Burst Length for queue `qinx`.
 * Mirrors eqos_config_rx_pbl(): small values go in directly with PBLx8
 * off; larger values are divided by 8 with PBLx8 enabled, and the
 * effective Tx/Rx PBL values are printed afterwards. */
4535 struct hw_if_struct *hw_if = &(pdata->hw_if);
4538 pr_debug("-->eqos_config_tx_pbl: %d\n", tx_pbl);
/* Direct PBL programming path (PBLx8 disabled). */
4547 hw_if->config_tx_pbl_val(qinx, tx_pbl);
4548 hw_if->config_pblx8(qinx, 0);
/* PBLx8 path: hardware multiplies the programmed value by 8. */
4553 hw_if->config_tx_pbl_val(qinx, tx_pbl / 8);
4554 hw_if->config_pblx8(qinx, 1);
/* Report the effective PBL, accounting for the x8 multiplier. */
4559 switch (pblx8_val) {
4561 pr_err("Tx PBL[%d] value: %d\n",
4562 qinx, hw_if->get_tx_pbl_val(qinx));
4563 pr_err("Rx PBL[%d] value: %d\n",
4564 qinx, hw_if->get_rx_pbl_val(qinx));
4567 pr_err("Tx PBL[%d] value: %d\n",
4568 qinx, (hw_if->get_tx_pbl_val(qinx) * 8));
4569 pr_err("Rx PBL[%d] value: %d\n",
4570 qinx, (hw_if->get_rx_pbl_val(qinx) * 8));
4574 pr_debug("<--eqos_config_tx_pbl\n");
4578 * \details This function is invoked by ioctl function when the user issues an
4579 * ioctl command to select the DCB algorithm.
4581 * \param[in] pdata – pointer to private data structure.
4582 * \param[in] req – pointer to ioctl data structure.
4589 static void eqos_program_dcb_algorithm(struct eqos_prv_data *pdata,
4590 struct ifr_data_struct *req)
/* Copies a DCB configuration from user space and programs the Tx queue
 * operating mode, DCB arbitration algorithm and queue weight. */
4592 struct eqos_dcb_algorithm l_dcb_struct, *u_dcb_struct =
4593 (struct eqos_dcb_algorithm *)req->ptr;
4594 struct hw_if_struct *hw_if = &pdata->hw_if;
4596 pr_debug("-->eqos_program_dcb_algorithm\n");
4598 if (copy_from_user(&l_dcb_struct, u_dcb_struct,
4599 sizeof(struct eqos_dcb_algorithm))) {
4600 pr_err("Failed to fetch DCB Struct info from user\n");
/* FIX: previously only logged and fell through, programming the hardware
 * with uninitialized stack contents; bail out on a failed copy instead. */
return;
}
4602 hw_if->set_tx_queue_operating_mode(l_dcb_struct.qinx,
4603 (UINT) l_dcb_struct.op_mode);
4604 hw_if->set_dcb_algorithm(l_dcb_struct.algorithm);
4605 hw_if->set_dcb_queue_weight(l_dcb_struct.qinx, l_dcb_struct.weight);
4607 pr_debug("<--eqos_program_dcb_algorithm\n");
4613 * \details This function is invoked by ioctl function when the user issues an
4614 * ioctl command to select the AVB algorithm. This function also configures other
4615 * parameters like send and idle slope, high and low credit.
4617 * \param[in] pdata – pointer to private data structure.
4618 * \param[in] req – pointer to ioctl data structure.
4625 static void eqos_program_avb_algorithm(struct eqos_prv_data *pdata,
4626 struct ifr_data_struct *req)
/* Copies an AVB configuration from user space and programs the Tx queue
 * operating mode, AVB algorithm, credit control and the send/idle slope
 * and high/low credit parameters for credit-based shaping. */
4628 struct eqos_avb_algorithm l_avb_struct, *u_avb_struct =
4629 (struct eqos_avb_algorithm *)req->ptr;
4630 struct hw_if_struct *hw_if = &pdata->hw_if;
4632 pr_debug("-->eqos_program_avb_algorithm\n");
4634 if (copy_from_user(&l_avb_struct, u_avb_struct,
4635 sizeof(struct eqos_avb_algorithm))) {
4636 pr_err("Failed to fetch AVB Struct info from user\n");
/* FIX: previously only logged and fell through, programming the hardware
 * with uninitialized stack contents; bail out on a failed copy instead. */
return;
}
4638 hw_if->set_tx_queue_operating_mode(l_avb_struct.qinx,
4639 (UINT) l_avb_struct.op_mode);
4640 hw_if->set_avb_algorithm(l_avb_struct.qinx, l_avb_struct.algorithm);
4641 hw_if->config_credit_control(l_avb_struct.qinx, l_avb_struct.cc);
4642 hw_if->config_send_slope(l_avb_struct.qinx, l_avb_struct.send_slope);
4643 hw_if->config_idle_slope(l_avb_struct.qinx, l_avb_struct.idle_slope);
4644 hw_if->config_high_credit(l_avb_struct.qinx, l_avb_struct.hi_credit);
4645 hw_if->config_low_credit(l_avb_struct.qinx, l_avb_struct.low_credit);
4647 pr_debug("<--eqos_program_avb_algorithm\n");
4653 * \brief API to read the registers & prints the value.
4654 * \details This function will read all the device register except
4655 * data register & prints the values.
4660 void dbgpr_regs(void)
4669 MAC_PMTCSR_RD(val0);
4670 MMC_RXICMP_ERR_OCTETS_RD(val1);
4671 MMC_RXICMP_GD_OCTETS_RD(val2);
4672 MMC_RXTCP_ERR_OCTETS_RD(val3);
4673 MMC_RXTCP_GD_OCTETS_RD(val4);
4674 MMC_RXUDP_ERR_OCTETS_RD(val5);
4676 pr_debug("dbgpr_regs: MAC_PMTCSR:%#x\n"
4677 "dbgpr_regs: MMC_RXICMP_ERR_OCTETS:%#x\n"
4678 "dbgpr_regs: MMC_RXICMP_GD_OCTETS:%#x\n"
4679 "dbgpr_regs: MMC_RXTCP_ERR_OCTETS:%#x\n"
4680 "dbgpr_regs: MMC_RXTCP_GD_OCTETS:%#x\n"
4681 "dbgpr_regs: MMC_RXUDP_ERR_OCTETS:%#x\n",
4682 val0, val1, val2, val3, val4, val5);
4684 MMC_RXUDP_GD_OCTETS_RD(val0);
4685 MMC_RXIPV6_NOPAY_OCTETS_RD(val1);
4686 MMC_RXIPV6_HDRERR_OCTETS_RD(val2);
4687 MMC_RXIPV6_GD_OCTETS_RD(val3);
4688 MMC_RXIPV4_UDSBL_OCTETS_RD(val4);
4689 MMC_RXIPV4_FRAG_OCTETS_RD(val5);
4691 pr_debug("dbgpr_regs: MMC_RXUDP_GD_OCTETS:%#x\n"
4692 "dbgpr_regs: MMC_RXIPV6_NOPAY_OCTETS:%#x\n"
4693 "dbgpr_regs: MMC_RXIPV6_HDRERR_OCTETS:%#x\n"
4694 "dbgpr_regs: MMC_RXIPV6_GD_OCTETS:%#x\n"
4695 "dbgpr_regs: MMC_RXIPV4_UDSBL_OCTETS:%#x\n"
4696 "dbgpr_regs: MMC_RXIPV4_FRAG_OCTETS:%#x\n",
4697 val0, val1, val2, val3, val4, val5);
4699 MMC_RXIPV4_NOPAY_OCTETS_RD(val0);
4700 MMC_RXIPV4_HDRERR_OCTETS_RD(val1);
4701 MMC_RXIPV4_GD_OCTETS_RD(val2);
4702 MMC_RXICMP_ERR_PKTS_RD(val3);
4703 MMC_RXICMP_GD_PKTS_RD(val4);
4704 MMC_RXTCP_ERR_PKTS_RD(val5);
4706 pr_debug("dbgpr_regs: MMC_RXIPV4_NOPAY_OCTETS:%#x\n"
4707 "dbgpr_regs: MMC_RXIPV4_HDRERR_OCTETS:%#x\n"
4708 "dbgpr_regs: MMC_RXIPV4_GD_OCTETS:%#x\n"
4709 "dbgpr_regs: MMC_RXICMP_ERR_PKTS:%#x\n"
4710 "dbgpr_regs: MMC_RXICMP_GD_PKTS:%#x\n"
4711 "dbgpr_regs: MMC_RXTCP_ERR_PKTS:%#x\n",
4712 val0, val1, val2, val3, val4, val5);
4714 MMC_RXTCP_GD_PKTS_RD(val0);
4715 MMC_RXUDP_ERR_PKTS_RD(val1);
4716 MMC_RXUDP_GD_PKTS_RD(val2);
4717 MMC_RXIPV6_NOPAY_PKTS_RD(val3);
4718 MMC_RXIPV6_HDRERR_PKTS_RD(val4);
4719 MMC_RXIPV6_GD_PKTS_RD(val5);
4721 pr_debug("dbgpr_regs: MMC_RXTCP_GD_PKTS:%#x\n"
4722 "dbgpr_regs: MMC_RXUDP_ERR_PKTS:%#x\n"
4723 "dbgpr_regs: MMC_RXUDP_GD_PKTS:%#x\n"
4724 "dbgpr_regs: MMC_RXIPV6_NOPAY_PKTS:%#x\n"
4725 "dbgpr_regs: MMC_RXIPV6_HDRERR_PKTS:%#x\n"
4726 "dbgpr_regs: MMC_RXIPV6_GD_PKTS:%#x\n",
4727 val0, val1, val2, val3, val4, val5);
4729 MMC_RXIPV4_UBSBL_PKTS_RD(val0);
4730 MMC_RXIPV4_FRAG_PKTS_RD(val1);
4731 MMC_RXIPV4_NOPAY_PKTS_RD(val2);
4732 MMC_RXIPV4_HDRERR_PKTS_RD(val3);
4733 MMC_RXIPV4_GD_PKTS_RD(val4);
4734 MMC_RXCTRLPACKETS_G_RD(val5);
4736 pr_debug("dbgpr_regs: MMC_RXIPV4_UBSBL_PKTS:%#x\n"
4737 "dbgpr_regs: MMC_RXIPV4_FRAG_PKTS:%#x\n"
4738 "dbgpr_regs: MMC_RXIPV4_NOPAY_PKTS:%#x\n"
4739 "dbgpr_regs: MMC_RXIPV4_HDRERR_PKTS:%#x\n"
4740 "dbgpr_regs: MMC_RXIPV4_GD_PKTS:%#x\n"
4741 "dbgpr_regs: MMC_RXCTRLPACKETS_G:%#x\n",
4742 val0, val1, val2, val3, val4, val5);
4744 MMC_RXRCVERROR_RD(val0);
4745 MMC_RXWATCHDOGERROR_RD(val1);
4746 MMC_RXVLANPACKETS_GB_RD(val2);
4747 MMC_RXFIFOOVERFLOW_RD(val3);
4748 MMC_RXPAUSEPACKETS_RD(val4);
4749 MMC_RXOUTOFRANGETYPE_RD(val5);
4751 pr_debug("dbgpr_regs: MMC_RXRCVERROR:%#x\n"
4752 "dbgpr_regs: MMC_RXWATCHDOGERROR:%#x\n"
4753 "dbgpr_regs: MMC_RXVLANPACKETS_GB:%#x\n"
4754 "dbgpr_regs: MMC_RXFIFOOVERFLOW:%#x\n"
4755 "dbgpr_regs: MMC_RXPAUSEPACKETS:%#x\n"
4756 "dbgpr_regs: MMC_RXOUTOFRANGETYPE:%#x\n",
4757 val0, val1, val2, val3, val4, val5);
4759 MMC_RXLENGTHERROR_RD(val0);
4760 MMC_RXUNICASTPACKETS_G_RD(val1);
4761 MMC_RX1024TOMAXOCTETS_GB_RD(val2);
4762 MMC_RX512TO1023OCTETS_GB_RD(val3);
4763 MMC_RX256TO511OCTETS_GB_RD(val4);
4764 MMC_RX128TO255OCTETS_GB_RD(val5);
4766 pr_debug("dbgpr_regs: MMC_RXLENGTHERROR:%#x\n"
4767 "dbgpr_regs: MMC_RXUNICASTPACKETS_G:%#x\n"
4768 "dbgpr_regs: MMC_RX1024TOMAXOCTETS_GB:%#x\n"
4769 "dbgpr_regs: MMC_RX512TO1023OCTETS_GB:%#x\n"
4770 "dbgpr_regs: MMC_RX256TO511OCTETS_GB:%#x\n"
4771 "dbgpr_regs: MMC_RX128TO255OCTETS_GB:%#x\n",
4772 val0, val1, val2, val3, val4, val5);
4774 MMC_RX65TO127OCTETS_GB_RD(val0);
4775 MMC_RX64OCTETS_GB_RD(val1);
4776 MMC_RXOVERSIZE_G_RD(val2);
4777 MMC_RXUNDERSIZE_G_RD(val3);
4778 MMC_RXJABBERERROR_RD(val4);
4779 MMC_RXRUNTERROR_RD(val5);
4781 pr_debug("dbgpr_regs: MMC_RX65TO127OCTETS_GB:%#x\n"
4782 "dbgpr_regs: MMC_RX64OCTETS_GB:%#x\n"
4783 "dbgpr_regs: MMC_RXOVERSIZE_G:%#x\n"
4784 "dbgpr_regs: MMC_RXUNDERSIZE_G:%#x\n"
4785 "dbgpr_regs: MMC_RXJABBERERROR:%#x\n"
4786 "dbgpr_regs: MMC_RXRUNTERROR:%#x\n",
4787 val0, val1, val2, val3, val4, val5);
4789 MMC_RXALIGNMENTERROR_RD(val0);
4790 MMC_RXCRCERROR_RD(val1);
4791 MMC_RXMULTICASTPACKETS_G_RD(val2);
4792 MMC_RXBROADCASTPACKETS_G_RD(val3);
4793 MMC_RXOCTETCOUNT_G_RD(val4);
4794 MMC_RXOCTETCOUNT_GB_RD(val5);
4796 pr_debug("dbgpr_regs: MMC_RXALIGNMENTERROR:%#x\n"
4797 "dbgpr_regs: MMC_RXCRCERROR:%#x\n"
4798 "dbgpr_regs: MMC_RXMULTICASTPACKETS_G:%#x\n"
4799 "dbgpr_regs: MMC_RXBROADCASTPACKETS_G:%#x\n"
4800 "dbgpr_regs: MMC_RXOCTETCOUNT_G:%#x\n"
4801 "dbgpr_regs: MMC_RXOCTETCOUNT_GB:%#x\n",
4802 val0, val1, val2, val3, val4, val5);
4804 MMC_RXPACKETCOUNT_GB_RD(val0);
4805 MMC_TXOVERSIZE_G_RD(val1);
4806 MMC_TXVLANPACKETS_G_RD(val2);
4807 MMC_TXPAUSEPACKETS_RD(val3);
4808 MMC_TXEXCESSDEF_RD(val4);
4809 MMC_TXPACKETSCOUNT_G_RD(val5);
4811 pr_debug("dbgpr_regs: MMC_RXPACKETCOUNT_GB:%#x\n"
4812 "dbgpr_regs: MMC_TXOVERSIZE_G:%#x\n"
4813 "dbgpr_regs: MMC_TXVLANPACKETS_G:%#x\n"
4814 "dbgpr_regs: MMC_TXPAUSEPACKETS:%#x\n"
4815 "dbgpr_regs: MMC_TXEXCESSDEF:%#x\n"
4816 "dbgpr_regs: MMC_TXPACKETSCOUNT_G:%#x\n",
4817 val0, val1, val2, val3, val4, val5);
4819 MMC_TXOCTETCOUNT_G_RD(val0);
4820 MMC_TXCARRIERERROR_RD(val1);
4821 MMC_TXEXESSCOL_RD(val2);
4822 MMC_TXLATECOL_RD(val3);
4823 MMC_TXDEFERRED_RD(val4);
4824 MMC_TXMULTICOL_G_RD(val5);
4826 pr_debug("dbgpr_regs: MMC_TXOCTETCOUNT_G:%#x\n"
4827 "dbgpr_regs: MMC_TXCARRIERERROR:%#x\n"
4828 "dbgpr_regs: MMC_TXEXESSCOL:%#x\n"
4829 "dbgpr_regs: MMC_TXLATECOL:%#x\n"
4830 "dbgpr_regs: MMC_TXDEFERRED:%#x\n"
4831 "dbgpr_regs: MMC_TXMULTICOL_G:%#x\n",
4832 val0, val1, val2, val3, val4, val5);
4834 MMC_TXSINGLECOL_G_RD(val0);
4835 MMC_TXUNDERFLOWERROR_RD(val1);
4836 MMC_TXBROADCASTPACKETS_GB_RD(val2);
4837 MMC_TXMULTICASTPACKETS_GB_RD(val3);
4838 MMC_TXUNICASTPACKETS_GB_RD(val4);
4839 MMC_TX1024TOMAXOCTETS_GB_RD(val5);
4841 pr_debug("dbgpr_regs: MMC_TXSINGLECOL_G:%#x\n"
4842 "dbgpr_regs: MMC_TXUNDERFLOWERROR:%#x\n"
4843 "dbgpr_regs: MMC_TXBROADCASTPACKETS_GB:%#x\n"
4844 "dbgpr_regs: MMC_TXMULTICASTPACKETS_GB:%#x\n"
4845 "dbgpr_regs: MMC_TXUNICASTPACKETS_GB:%#x\n"
4846 "dbgpr_regs: MMC_TX1024TOMAXOCTETS_GB:%#x\n",
4847 val0, val1, val2, val3, val4, val5);
4849 MMC_TX512TO1023OCTETS_GB_RD(val0);
4850 MMC_TX256TO511OCTETS_GB_RD(val1);
4851 MMC_TX128TO255OCTETS_GB_RD(val2);
4852 MMC_TX65TO127OCTETS_GB_RD(val3);
4853 MMC_TX64OCTETS_GB_RD(val4);
4854 MMC_TXMULTICASTPACKETS_G_RD(val5);
4856 pr_debug("dbgpr_regs: MMC_TX512TO1023OCTETS_GB:%#x\n"
4857 "dbgpr_regs: MMC_TX256TO511OCTETS_GB:%#x\n"
4858 "dbgpr_regs: MMC_TX128TO255OCTETS_GB:%#x\n"
4859 "dbgpr_regs: MMC_TX65TO127OCTETS_GB:%#x\n"
4860 "dbgpr_regs: MMC_TX64OCTETS_GB:%#x\n"
4861 "dbgpr_regs: MMC_TXMULTICASTPACKETS_G:%#x\n",
4862 val0, val1, val2, val3, val4, val5);
4864 MMC_TXBROADCASTPACKETS_G_RD(val0);
4865 MMC_TXPACKETCOUNT_GB_RD(val1);
4866 MMC_TXOCTETCOUNT_GB_RD(val2);
4867 MMC_IPC_INTR_RX_RD(val3);
4868 MMC_IPC_INTR_MASK_RX_RD(val4);
4869 MMC_INTR_MASK_TX_RD(val5);
4871 pr_debug("dbgpr_regs: MMC_TXBROADCASTPACKETS_G:%#x\n"
4872 "dbgpr_regs: MMC_TXPACKETCOUNT_GB:%#x\n"
4873 "dbgpr_regs: MMC_TXOCTETCOUNT_GB:%#x\n"
4874 "dbgpr_regs: MMC_IPC_INTR_RX:%#x\n"
4875 "dbgpr_regs: MMC_IPC_INTR_MASK_RX:%#x\n"
4876 "dbgpr_regs: MMC_INTR_MASK_TX:%#x\n",
4877 val0, val1, val2, val3, val4, val5);
4879 MMC_INTR_MASK_RX_RD(val0);
4880 MMC_INTR_TX_RD(val1);
4881 MMC_INTR_RX_RD(val2);
4886 pr_debug("dbgpr_regs: MMC_INTR_MASK_RX:%#x\n"
4887 "dbgpr_regs: MMC_INTR_TX:%#x\n"
4888 "dbgpr_regs: MMC_INTR_RX:%#x\n"
4889 "dbgpr_regs: MMC_CNTRL:%#x\n"
4890 "dbgpr_regs: MAC_MA1LR:%#x\n"
4891 "dbgpr_regs: MAC_MA1HR:%#x\n",
4892 val0, val1, val2, val3, val4, val5);
4897 MAC_GMIIDR_RD(val3);
4898 MAC_GMIIAR_RD(val4);
4901 pr_debug("dbgpr_regs: MAC_MA0LR:%#x\n"
4902 "dbgpr_regs: MAC_MA0HR:%#x\n"
4903 "dbgpr_regs: MAC_GPIOR:%#x\n"
4904 "dbgpr_regs: MAC_GMIIDR:%#x\n"
4905 "dbgpr_regs: MAC_GMIIAR:%#x\n"
4906 "dbgpr_regs: MAC_HFR2:%#x\n", val0, val1, val2, val3, val4, val5);
4915 pr_debug("dbgpr_regs: MAC_HFR1:%#x\n"
4916 "dbgpr_regs: MAC_HFR0:%#x\n"
4917 "dbgpr_regs: MAC_MDR:%#x\n"
4918 "dbgpr_regs: MAC_VR:%#x\n"
4919 "dbgpr_regs: MAC_HTR7:%#x\n"
4920 "dbgpr_regs: MAC_HTR6:%#x\n", val0, val1, val2, val3, val4, val5);
4929 pr_debug("dbgpr_regs: MAC_HTR5:%#x\n"
4930 "dbgpr_regs: MAC_HTR4:%#x\n"
4931 "dbgpr_regs: MAC_HTR3:%#x\n"
4932 "dbgpr_regs: MAC_HTR2:%#x\n"
4933 "dbgpr_regs: MAC_HTR1:%#x\n"
4934 "dbgpr_regs: MAC_HTR0:%#x\n", val0, val1, val2, val3, val4, val5);
4936 DMA_RIWTR7_RD(val0);
4937 DMA_RIWTR6_RD(val1);
4938 DMA_RIWTR5_RD(val2);
4939 DMA_RIWTR4_RD(val3);
4940 DMA_RIWTR3_RD(val4);
4941 DMA_RIWTR2_RD(val5);
4943 pr_debug("dbgpr_regs: DMA_RIWTR7:%#x\n"
4944 "dbgpr_regs: DMA_RIWTR6:%#x\n"
4945 "dbgpr_regs: DMA_RIWTR5:%#x\n"
4946 "dbgpr_regs: DMA_RIWTR4:%#x\n"
4947 "dbgpr_regs: DMA_RIWTR3:%#x\n"
4948 "dbgpr_regs: DMA_RIWTR2:%#x\n",
4949 val0, val1, val2, val3, val4, val5);
4951 DMA_RIWTR1_RD(val0);
4952 DMA_RIWTR0_RD(val1);
4953 DMA_RDRLR7_RD(val2);
4954 DMA_RDRLR6_RD(val3);
4955 DMA_RDRLR5_RD(val4);
4956 DMA_RDRLR4_RD(val5);
4958 pr_debug("dbgpr_regs: DMA_RIWTR1:%#x\n"
4959 "dbgpr_regs: DMA_RIWTR0:%#x\n"
4960 "dbgpr_regs: DMA_RDRLR7:%#x\n"
4961 "dbgpr_regs: DMA_RDRLR6:%#x\n"
4962 "dbgpr_regs: DMA_RDRLR5:%#x\n"
4963 "dbgpr_regs: DMA_RDRLR4:%#x\n",
4964 val0, val1, val2, val3, val4, val5);
4966 DMA_RDRLR3_RD(val0);
4967 DMA_RDRLR2_RD(val1);
4968 DMA_RDRLR1_RD(val2);
4969 DMA_RDRLR0_RD(val3);
4970 DMA_TDRLR7_RD(val4);
4971 DMA_TDRLR6_RD(val5);
4973 pr_debug("dbgpr_regs: DMA_RDRLR3:%#x\n"
4974 "dbgpr_regs: DMA_RDRLR2:%#x\n"
4975 "dbgpr_regs: DMA_RDRLR1:%#x\n"
4976 "dbgpr_regs: DMA_RDRLR0:%#x\n"
4977 "dbgpr_regs: DMA_TDRLR7:%#x\n"
4978 "dbgpr_regs: DMA_TDRLR6:%#x\n",
4979 val0, val1, val2, val3, val4, val5);
4981 DMA_TDRLR5_RD(val0);
4982 DMA_TDRLR4_RD(val1);
4983 DMA_TDRLR3_RD(val2);
4984 DMA_TDRLR2_RD(val3);
4985 DMA_TDRLR1_RD(val4);
4986 DMA_TDRLR0_RD(val5);
4988 pr_debug("dbgpr_regs: DMA_TDRLR5:%#x\n"
4989 "dbgpr_regs: DMA_TDRLR4:%#x\n"
4990 "dbgpr_regs: DMA_TDRLR3:%#x\n"
4991 "dbgpr_regs: DMA_TDRLR2:%#x\n"
4992 "dbgpr_regs: DMA_TDRLR1:%#x\n"
4993 "dbgpr_regs: DMA_TDRLR0:%#x\n",
4994 val0, val1, val2, val3, val4, val5);
4996 DMA_RDTP_RPDR7_RD(val0);
4997 DMA_RDTP_RPDR6_RD(val1);
4998 DMA_RDTP_RPDR5_RD(val2);
4999 DMA_RDTP_RPDR4_RD(val3);
5000 DMA_RDTP_RPDR3_RD(val4);
5001 DMA_RDTP_RPDR2_RD(val5);
5003 pr_debug("dbgpr_regs: DMA_RDTP_RPDR7:%#x\n"
5004 "dbgpr_regs: DMA_RDTP_RPDR6:%#x\n"
5005 "dbgpr_regs: DMA_RDTP_RPDR5:%#x\n"
5006 "dbgpr_regs: DMA_RDTP_RPDR4:%#x\n"
5007 "dbgpr_regs: DMA_RDTP_RPDR3:%#x\n"
5008 "dbgpr_regs: DMA_RDTP_RPDR2:%#x\n",
5009 val0, val1, val2, val3, val4, val5);
5011 DMA_RDTP_RPDR1_RD(val0);
5012 DMA_RDTP_RPDR0_RD(val1);
5013 DMA_TDTP_TPDR7_RD(val2);
5014 DMA_TDTP_TPDR6_RD(val3);
5015 DMA_TDTP_TPDR5_RD(val4);
5016 DMA_TDTP_TPDR4_RD(val5);
5018 pr_debug("dbgpr_regs: DMA_RDTP_RPDR1:%#x\n"
5019 "dbgpr_regs: DMA_RDTP_RPDR0:%#x\n"
5020 "dbgpr_regs: DMA_TDTP_TPDR7:%#x\n"
5021 "dbgpr_regs: DMA_TDTP_TPDR6:%#x\n"
5022 "dbgpr_regs: DMA_TDTP_TPDR5:%#x\n"
5023 "dbgpr_regs: DMA_TDTP_TPDR4:%#x\n",
5024 val0, val1, val2, val3, val4, val5);
5026 DMA_TDTP_TPDR3_RD(val0);
5027 DMA_TDTP_TPDR2_RD(val1);
5028 DMA_TDTP_TPDR1_RD(val2);
5029 DMA_TDTP_TPDR0_RD(val3);
5030 DMA_RDLAR7_RD(val4);
5031 DMA_RDLAR6_RD(val5);
5033 pr_debug("dbgpr_regs: DMA_TDTP_TPDR3:%#x\n"
5034 "dbgpr_regs: DMA_TDTP_TPDR2:%#x\n"
5035 "dbgpr_regs: DMA_TDTP_TPDR1:%#x\n"
5036 "dbgpr_regs: DMA_TDTP_TPDR0:%#x\n"
5037 "dbgpr_regs: DMA_RDLAR7:%#x\n"
5038 "dbgpr_regs: DMA_RDLAR6:%#x\n",
5039 val0, val1, val2, val3, val4, val5);
5041 DMA_RDLAR5_RD(val0);
5042 DMA_RDLAR4_RD(val1);
5043 DMA_RDLAR3_RD(val2);
5044 DMA_RDLAR2_RD(val3);
5045 DMA_RDLAR1_RD(val4);
5046 DMA_RDLAR0_RD(val5);
5048 pr_debug("dbgpr_regs: DMA_RDLAR5:%#x\n"
5049 "dbgpr_regs: DMA_RDLAR4:%#x\n"
5050 "dbgpr_regs: DMA_RDLAR3:%#x\n"
5051 "dbgpr_regs: DMA_RDLAR2:%#x\n"
5052 "dbgpr_regs: DMA_RDLAR1:%#x\n"
5053 "dbgpr_regs: DMA_RDLAR0:%#x\n",
5054 val0, val1, val2, val3, val4, val5);
5056 DMA_TDLAR7_RD(val0);
5057 DMA_TDLAR6_RD(val1);
5058 DMA_TDLAR5_RD(val2);
5059 DMA_TDLAR4_RD(val3);
5060 DMA_TDLAR3_RD(val4);
5061 DMA_TDLAR2_RD(val5);
5063 pr_debug("dbgpr_regs: DMA_TDLAR7:%#x\n"
5064 "dbgpr_regs: DMA_TDLAR6:%#x\n"
5065 "dbgpr_regs: DMA_TDLAR5:%#x\n"
5066 "dbgpr_regs: DMA_TDLAR4:%#x\n"
5067 "dbgpr_regs: DMA_TDLAR3:%#x\n"
5068 "dbgpr_regs: DMA_TDLAR2:%#x\n",
5069 val0, val1, val2, val3, val4, val5);
5071 DMA_TDLAR1_RD(val0);
5072 DMA_TDLAR0_RD(val1);
5078 pr_debug("dbgpr_regs: DMA_TDLAR1:%#x\n"
5079 "dbgpr_regs: DMA_TDLAR0:%#x\n"
5080 "dbgpr_regs: DMA_IER7:%#x\n"
5081 "dbgpr_regs: DMA_IER6:%#x\n"
5082 "dbgpr_regs: DMA_IER5:%#x\n"
5083 "dbgpr_regs: DMA_IER4:%#x\n", val0, val1, val2, val3, val4, val5);
5092 pr_debug("dbgpr_regs: DMA_IER3:%#x\n"
5093 "dbgpr_regs: DMA_IER2:%#x\n"
5094 "dbgpr_regs: DMA_IER1:%#x\n"
5095 "dbgpr_regs: DMA_IER0:%#x\n"
5096 "dbgpr_regs: MAC_IMR:%#x\n"
5097 "dbgpr_regs: MAC_ISR:%#x\n", val0, val1, val2, val3, val4, val5);
5106 pr_debug("dbgpr_regs: MTL_ISR:%#x\n"
5107 "dbgpr_regs: DMA_SR7:%#x\n"
5108 "dbgpr_regs: DMA_SR6:%#x\n"
5109 "dbgpr_regs: DMA_SR5:%#x\n"
5110 "dbgpr_regs: DMA_SR4:%#x\n"
5111 "dbgpr_regs: DMA_SR3:%#x\n", val0, val1, val2, val3, val4, val5);
5120 pr_debug("dbgpr_regs: DMA_SR2:%#x\n"
5121 "dbgpr_regs: DMA_SR1:%#x\n"
5122 "dbgpr_regs: DMA_SR0:%#x\n"
5123 "dbgpr_regs: DMA_ISR:%#x\n"
5124 "dbgpr_regs: DMA_DSR2:%#x\n"
5125 "dbgpr_regs: DMA_DSR1:%#x\n", val0, val1, val2, val3, val4, val5);
5131 DMA_CHRBAR7_RD(val4);
5132 DMA_CHRBAR6_RD(val5);
5134 pr_debug("dbgpr_regs: DMA_DSR0:%#x\n"
5135 "dbgpr_regs: MTL_Q0RDR:%#x\n"
5136 "dbgpr_regs: MTL_Q0ESR:%#x\n"
5137 "dbgpr_regs: MTL_Q0TDR:%#x\n"
5138 "dbgpr_regs: DMA_CHRBAR7:%#x\n"
5139 "dbgpr_regs: DMA_CHRBAR6:%#x\n",
5140 val0, val1, val2, val3, val4, val5);
5142 DMA_CHRBAR5_RD(val0);
5143 DMA_CHRBAR4_RD(val1);
5144 DMA_CHRBAR3_RD(val2);
5145 DMA_CHRBAR2_RD(val3);
5146 DMA_CHRBAR1_RD(val4);
5147 DMA_CHRBAR0_RD(val5);
5149 pr_debug("dbgpr_regs: DMA_CHRBAR5:%#x\n"
5150 "dbgpr_regs: DMA_CHRBAR4:%#x\n"
5151 "dbgpr_regs: DMA_CHRBAR3:%#x\n"
5152 "dbgpr_regs: DMA_CHRBAR2:%#x\n"
5153 "dbgpr_regs: DMA_CHRBAR1:%#x\n"
5154 "dbgpr_regs: DMA_CHRBAR0:%#x\n",
5155 val0, val1, val2, val3, val4, val5);
5157 DMA_CHTBAR7_RD(val0);
5158 DMA_CHTBAR6_RD(val1);
5159 DMA_CHTBAR5_RD(val2);
5160 DMA_CHTBAR4_RD(val3);
5161 DMA_CHTBAR3_RD(val4);
5162 DMA_CHTBAR2_RD(val5);
5164 pr_debug("dbgpr_regs: DMA_CHTBAR7:%#x\n"
5165 "dbgpr_regs: DMA_CHTBAR6:%#x\n"
5166 "dbgpr_regs: DMA_CHTBAR5:%#x\n"
5167 "dbgpr_regs: DMA_CHTBAR4:%#x\n"
5168 "dbgpr_regs: DMA_CHTBAR3:%#x\n"
5169 "dbgpr_regs: DMA_CHTBAR2:%#x\n",
5170 val0, val1, val2, val3, val4, val5);
5172 DMA_CHTBAR1_RD(val0);
5173 DMA_CHTBAR0_RD(val1);
5174 DMA_CHRDR7_RD(val2);
5175 DMA_CHRDR6_RD(val3);
5176 DMA_CHRDR5_RD(val4);
5177 DMA_CHRDR4_RD(val5);
5179 pr_debug("dbgpr_regs: DMA_CHTBAR1:%#x\n"
5180 "dbgpr_regs: DMA_CHTBAR0:%#x\n"
5181 "dbgpr_regs: DMA_CHRDR7:%#x\n"
5182 "dbgpr_regs: DMA_CHRDR6:%#x\n"
5183 "dbgpr_regs: DMA_CHRDR5:%#x\n"
5184 "dbgpr_regs: DMA_CHRDR4:%#x\n",
5185 val0, val1, val2, val3, val4, val5);
5187 DMA_CHRDR3_RD(val0);
5188 DMA_CHRDR2_RD(val1);
5189 DMA_CHRDR1_RD(val2);
5190 DMA_CHRDR0_RD(val3);
5191 DMA_CHTDR7_RD(val4);
5192 DMA_CHTDR6_RD(val5);
5194 pr_debug("dbgpr_regs: DMA_CHRDR3:%#x\n"
5195 "dbgpr_regs: DMA_CHRDR2:%#x\n"
5196 "dbgpr_regs: DMA_CHRDR1:%#x\n"
5197 "dbgpr_regs: DMA_CHRDR0:%#x\n"
5198 "dbgpr_regs: DMA_CHTDR7:%#x\n"
5199 "dbgpr_regs: DMA_CHTDR6:%#x\n",
5200 val0, val1, val2, val3, val4, val5);
5202 DMA_CHTDR5_RD(val0);
5203 DMA_CHTDR4_RD(val1);
5204 DMA_CHTDR3_RD(val2);
5205 DMA_CHTDR2_RD(val3);
5206 DMA_CHTDR1_RD(val4);
5207 DMA_CHTDR0_RD(val5);
5209 pr_debug("dbgpr_regs: DMA_CHTDR5:%#x\n"
5210 "dbgpr_regs: DMA_CHTDR4:%#x\n"
5211 "dbgpr_regs: DMA_CHTDR3:%#x\n"
5212 "dbgpr_regs: DMA_CHTDR2:%#x\n"
5213 "dbgpr_regs: DMA_CHTDR1:%#x\n"
5214 "dbgpr_regs: DMA_CHTDR0:%#x\n",
5215 val0, val1, val2, val3, val4, val5);
5217 DMA_SFCSR7_RD(val0);
5218 DMA_SFCSR6_RD(val1);
5219 DMA_SFCSR5_RD(val2);
5220 DMA_SFCSR4_RD(val3);
5221 DMA_SFCSR3_RD(val4);
5222 DMA_SFCSR2_RD(val5);
5224 pr_debug("dbgpr_regs: DMA_SFCSR7:%#x\n"
5225 "dbgpr_regs: DMA_SFCSR6:%#x\n"
5226 "dbgpr_regs: DMA_SFCSR5:%#x\n"
5227 "dbgpr_regs: DMA_SFCSR4:%#x\n"
5228 "dbgpr_regs: DMA_SFCSR3:%#x\n"
5229 "dbgpr_regs: DMA_SFCSR2:%#x\n",
5230 val0, val1, val2, val3, val4, val5);
5232 DMA_SFCSR1_RD(val0);
5233 DMA_SFCSR0_RD(val1);
5234 MAC_IVLANTIRR_RD(val2);
5235 MAC_VLANTIRR_RD(val3);
5236 MAC_VLANHTR_RD(val4);
5237 MAC_VLANTR_RD(val5);
5239 pr_debug("dbgpr_regs: DMA_SFCSR1:%#x\n"
5240 "dbgpr_regs: DMA_SFCSR0:%#x\n"
5241 "dbgpr_regs: MAC_IVLANTIRR:%#x\n"
5242 "dbgpr_regs: MAC_VLANTIRR:%#x\n"
5243 "dbgpr_regs: MAC_VLANHTR:%#x\n"
5244 "dbgpr_regs: MAC_VLANTR:%#x\n",
5245 val0, val1, val2, val3, val4, val5);
5251 MTL_Q0ROMR_RD(val4);
5254 pr_debug("dbgpr_regs: DMA_SBUS:%#x\n"
5255 "dbgpr_regs: DMA_BMR:%#x\n"
5256 "dbgpr_regs: MTL_Q0RCR:%#x\n"
5257 "dbgpr_regs: MTL_Q0OCR:%#x\n"
5258 "dbgpr_regs: MTL_Q0ROMR:%#x\n"
5259 "dbgpr_regs: MTL_Q0QR:%#x\n", val0, val1, val2, val3, val4, val5);
5263 MTL_Q0TOMR_RD(val2);
5264 MTL_RQDCM1R_RD(val3);
5265 MTL_RQDCM0R_RD(val4);
5268 pr_debug("dbgpr_regs: MTL_Q0ECR:%#x\n"
5269 "dbgpr_regs: MTL_Q0UCR:%#x\n"
5270 "dbgpr_regs: MTL_Q0TOMR:%#x\n"
5271 "dbgpr_regs: MTL_RQDCM1R:%#x\n"
5272 "dbgpr_regs: MTL_RQDCM0R:%#x\n"
5273 "dbgpr_regs: MTL_FDDR:%#x\n", val0, val1, val2, val3, val4, val5);
5279 MAC_TQPM1R_RD(val4);
5280 MAC_TQPM0R_RD(val5);
5282 pr_debug("dbgpr_regs: MTL_FDACS:%#x\n"
5283 "dbgpr_regs: MTL_OMR:%#x\n"
5284 "dbgpr_regs: MAC_RQC1R:%#x\n"
5285 "dbgpr_regs: MAC_RQC0R:%#x\n"
5286 "dbgpr_regs: MAC_TQPM1R:%#x\n"
5287 "dbgpr_regs: MAC_TQPM0R:%#x\n",
5288 val0, val1, val2, val3, val4, val5);
5291 MAC_QTFCR7_RD(val1);
5292 MAC_QTFCR6_RD(val2);
5293 MAC_QTFCR5_RD(val3);
5294 MAC_QTFCR4_RD(val4);
5295 MAC_QTFCR3_RD(val5);
5297 pr_debug("dbgpr_regs: MAC_RFCR:%#x\n"
5298 "dbgpr_regs: MAC_QTFCR7:%#x\n"
5299 "dbgpr_regs: MAC_QTFCR6:%#x\n"
5300 "dbgpr_regs: MAC_QTFCR5:%#x\n"
5301 "dbgpr_regs: MAC_QTFCR4:%#x\n"
5302 "dbgpr_regs: MAC_QTFCR3:%#x\n",
5303 val0, val1, val2, val3, val4, val5);
5305 MAC_QTFCR2_RD(val0);
5306 MAC_QTFCR1_RD(val1);
5307 MAC_Q0TFCR_RD(val2);
5308 DMA_AXI4CR7_RD(val3);
5309 DMA_AXI4CR6_RD(val4);
5310 DMA_AXI4CR5_RD(val5);
5312 pr_debug("dbgpr_regs: MAC_QTFCR2:%#x\n"
5313 "dbgpr_regs: MAC_QTFCR1:%#x\n"
5314 "dbgpr_regs: MAC_Q0TFCR:%#x\n"
5315 "dbgpr_regs: DMA_AXI4CR7:%#x\n"
5316 "dbgpr_regs: DMA_AXI4CR6:%#x\n"
5317 "dbgpr_regs: DMA_AXI4CR5:%#x\n",
5318 val0, val1, val2, val3, val4, val5);
5320 DMA_AXI4CR4_RD(val0);
5321 DMA_AXI4CR3_RD(val1);
5322 DMA_AXI4CR2_RD(val2);
5323 DMA_AXI4CR1_RD(val3);
5324 DMA_AXI4CR0_RD(val4);
5327 pr_debug("dbgpr_regs: DMA_AXI4CR4:%#x\n"
5328 "dbgpr_regs: DMA_AXI4CR3:%#x\n"
5329 "dbgpr_regs: DMA_AXI4CR2:%#x\n"
5330 "dbgpr_regs: DMA_AXI4CR1:%#x\n"
5331 "dbgpr_regs: DMA_AXI4CR0:%#x\n"
5332 "dbgpr_regs: DMA_RCR7:%#x\n", val0, val1, val2, val3, val4, val5);
5341 pr_debug("dbgpr_regs: DMA_RCR6:%#x\n"
5342 "dbgpr_regs: DMA_RCR5:%#x\n"
5343 "dbgpr_regs: DMA_RCR4:%#x\n"
5344 "dbgpr_regs: DMA_RCR3:%#x\n"
5345 "dbgpr_regs: DMA_RCR2:%#x\n"
5346 "dbgpr_regs: DMA_RCR1:%#x\n", val0, val1, val2, val3, val4, val5);
5355 pr_debug("dbgpr_regs: DMA_RCR0:%#x\n"
5356 "dbgpr_regs: DMA_TCR7:%#x\n"
5357 "dbgpr_regs: DMA_TCR6:%#x\n"
5358 "dbgpr_regs: DMA_TCR5:%#x\n"
5359 "dbgpr_regs: DMA_TCR4:%#x\n"
5360 "dbgpr_regs: DMA_TCR3:%#x\n", val0, val1, val2, val3, val4, val5);
5369 pr_debug("dbgpr_regs: DMA_TCR2:%#x\n"
5370 "dbgpr_regs: DMA_TCR1:%#x\n"
5371 "dbgpr_regs: DMA_TCR0:%#x\n"
5372 "dbgpr_regs: DMA_CR7:%#x\n"
5373 "dbgpr_regs: DMA_CR6:%#x\n"
5374 "dbgpr_regs: DMA_CR5:%#x\n", val0, val1, val2, val3, val4, val5);
5383 pr_debug("dbgpr_regs: DMA_CR4:%#x\n"
5384 "dbgpr_regs: DMA_CR3:%#x\n"
5385 "dbgpr_regs: DMA_CR2:%#x\n"
5386 "dbgpr_regs: DMA_CR1:%#x\n"
5387 "dbgpr_regs: DMA_CR0:%#x\n"
5388 "dbgpr_regs: MAC_WTR:%#x\n", val0, val1, val2, val3, val4, val5);
5394 pr_debug("dbgpr_regs: MAC_MPFR:%#x\n"
5395 "dbgpr_regs: MAC_MECR:%#x\n"
5396 "dbgpr_regs: MAC_MCR:%#x\n", val0, val1, val2);
5403 * \details This function is invoked by eqos_start_xmit and
5404 * process_tx_completions function for dumping the TX descriptor contents
5405 * which are prepared for packet transmission and which are transmitted by
5406 * device. It is mainly used during development phase for debug purpose. Use
5407 of this function may affect the performance during normal operation.
5409 * \param[in] pdata – pointer to private data structure.
5410 * \param[in] first_desc_idx – first descriptor index for the current
5412 * \param[in] last_desc_idx – last descriptor index for the current transfer.
5413 * \param[in] flag – to indicate from which function it is called.
5418 void dump_tx_desc(struct eqos_prv_data *pdata, int first_desc_idx,
5419 int last_desc_idx, int flag, UINT qinx)
/* Debug-only dump of one or more TX descriptors on queue 'qinx'.
 * 'flag' selects the label printed with each descriptor (1 = queued for
 * transmission, 0 = freed/fetched by device, other = plain debug dump).
 * NOTE(review): this is a partial listing — braces, local declarations
 * (i, lp_cnt, ctxt) and some else-branches are elided from view.
 */
5422 struct s_tx_desc *desc = NULL;
/* Single-descriptor case: dump exactly one entry and return. */
5425 if (first_desc_idx == last_desc_idx) {
5426 desc = GET_TX_DESC_PTR(qinx, first_desc_idx);
/* ctxt flags a TX context descriptor vs. a normal data descriptor. */
5428 TX_NORMAL_DESC_TDES3_CTXT_RD(desc->tdes3, ctxt);
5430 pr_err("\n%s[%02d %4p %03d %s] = %#x:%#x:%#x:%#x\n",
5431 (ctxt == 1) ? "TX_CONTXT_DESC" : "ptx_desc",
5432 qinx, desc, first_desc_idx,
5433 ((flag == 1) ? "QUEUED FOR TRANSMISSION" :
5435 0) ? "FREED/FETCHED BY DEVICE" : "DEBUG DESC DUMP")),
5436 desc->tdes0, desc->tdes1, desc->tdes2, desc->tdes3);
/* Range case: lp_cnt is the number of descriptors to visit; the first
 * branch accounts for a range that wraps past the end of the ring. */
5439 if (first_desc_idx > last_desc_idx)
5440 lp_cnt = last_desc_idx + TX_DESC_CNT - first_desc_idx;
5442 lp_cnt = last_desc_idx - first_desc_idx;
/* Walk the ring from first_desc_idx, wrapping the index with
 * INCR_TX_DESC_INDEX, and dump each descriptor's four tdes words. */
5444 for (i = first_desc_idx; lp_cnt >= 0; lp_cnt--) {
5445 desc = GET_TX_DESC_PTR(qinx, i);
5447 TX_NORMAL_DESC_TDES3_CTXT_RD(desc->tdes3, ctxt);
5449 pr_err("\n%s[%02d %4p %03d %s] = %#x:%#x:%#x:%#x\n",
5451 1) ? "TX_CONTXT_DESC" : "ptx_desc", qinx,
5454 1) ? "QUEUED FOR TRANSMISSION" :
5455 "FREED/FETCHED BY DEVICE"), desc->tdes0,
5456 desc->tdes1, desc->tdes2, desc->tdes3);
5457 INCR_TX_DESC_INDEX(i, 1);
5463 * \details This function is invoked by poll function for dumping the
5464 * RX descriptor contents. It is mainly used during development phase for
5465 debug purpose. Use of this function may affect the performance during
5468 * \param[in] pdata – pointer to private data structure.
5473 void dump_rx_desc(UINT qinx, struct s_rx_desc *desc, int desc_idx)
/* Debug-only dump of a single RX descriptor: prints the queue index,
 * descriptor pointer, ring index, and all four rdes words. */
5475 pr_err("\nprx_desc[%02d %4p %03d RECEIVED FROM DEVICE]"
5476 " = %#x:%#x:%#x:%#x",
5477 qinx, desc, desc_idx, desc->rdes0, desc->rdes1,
5478 desc->rdes2, desc->rdes3);
5482 * \details This function is invoked by start_xmit and poll function for
5483 * dumping the content of packet to be transmitted by device or received
5484 * from device. It is mainly used during development phase for debug purpose.
5485 * Use of these functions may affect the performance during normal operation.
5487 * \param[in] skb – pointer to socket buffer structure.
5488 * \param[in] len – length of packet to be transmitted/received.
5489 * \param[in] tx_rx – packet to be transmitted or received.
5490 * \param[in] desc_idx – descriptor index to be used for transmission or
5491 * reception of packet.
5496 void print_pkt(struct sk_buff *skb, int len, bool tx_rx, int desc_idx)
/* Debug-only hex dump of a packet: Ethernet destination/source MAC,
 * Type/Length field, then the payload bytes. tx_rx: true = TX, false = RX.
 * NOTE(review): assumes the buffer holds at least a 14-byte Ethernet
 * header (len >= 14) — confirm with callers before reuse. */
5499 unsigned char *buf = skb->data;
5502 ("\n\n/***********************************************************/\n");
5504 pr_err("%s pkt of %d Bytes [DESC index = %d]\n\n",
5505 (tx_rx ? "TX" : "RX"), len, desc_idx);
/* Bytes 0-5: destination MAC, printed colon-separated. */
5506 pr_err("Dst MAC addr(6 bytes)\n");
5507 for (i = 0; i < 6; i++)
5508 printk("%#.2x%s", buf[i], (((i == 5) ? "" : ":")));
/* Bytes 6-11: source MAC. */
5509 pr_err("\nSrc MAC addr(6 bytes)\n");
5510 for (i = 6; i <= 11; i++)
5511 printk("%#.2x%s", buf[i], (((i == 11) ? "" : ":")));
/* Bytes 12-13: EtherType / length field, big-endian on the wire. */
5512 i = (buf[12] << 8 | buf[13]);
5513 pr_err("\nType/Length(2 bytes)\n%#x", i);
/* Everything after the 14-byte header is dumped as payload. */
5515 pr_err("\nPay Load : %d bytes\n", (len - 14));
5516 for (i = 14, j = 1; i < len; i++, j++) {
5517 printk("%#.2x%s", buf[i], (((i == (len - 1)) ? "" : ":")));
5523 ("/*************************************************************/\n\n");
5527 * \details This function is invoked by probe function. This function will
5528 * initialize default receive coalesce parameters and sw timer value and store
5529 * it in respective receive data structure.
5531 * \param[in] pdata – pointer to private data structure.
5536 void eqos_init_rx_coalesce(struct eqos_prv_data *pdata)
/* Set default RX interrupt-coalescing parameters on every RX queue:
 * enable the RX interrupt watchdog (use_riwt), set the frame-coalescing
 * count, and convert the optimal microsecond timeout into a RIWT value. */
5538 struct rx_ring *prx_ring = NULL;
5541 pr_debug("-->eqos_init_rx_coalesce\n");
5543 for (i = 0; i < EQOS_RX_QUEUE_CNT; i++) {
5544 prx_ring = GET_RX_WRAPPER_DESC(i);
5546 prx_ring->use_riwt = 1;
5547 prx_ring->rx_coal_frames = EQOS_RX_MAX_FRAMES;
/* NOTE(review): the assignment target for this call is elided in this
 * listing — presumably prx_ring->rx_riwt; confirm against full source. */
5549 eqos_usec2riwt(EQOS_OPTIMAL_DMA_RIWT_USEC, pdata);
5552 pr_debug("<--eqos_init_rx_coalesce\n");
5556 * \details This function is invoked by open() function. This function will
5557 * clear MMC structure.
5559 * \param[in] pdata – pointer to private data structure.
5564 static void eqos_mmc_setup(struct eqos_prv_data *pdata)
/* Clear the software mirror of the MAC Management Counters (MMC) when the
 * hardware supports the MMC/RMON block (hw_feat.mmc_sel); otherwise log
 * an error. Called from the open path. */
5566 pr_debug("-->eqos_mmc_setup\n");
5568 if (pdata->hw_feat.mmc_sel) {
5569 memset(&pdata->mmc, 0, sizeof(struct eqos_mmc_counters));
5571 pr_err("No MMC/RMON module available in the HW\n");
5573 pr_debug("<--eqos_mmc_setup\n");
5576 inline unsigned int eqos_reg_read(volatile ULONG *ptr)
/* 32-bit MMIO read helper used by the MMC counter readers below.
 * NOTE(review): the (void *) cast discards the volatile qualifier (and
 * any __iomem annotation); ioread32() itself provides the MMIO access
 * semantics, but sparse/static checkers may flag this cast. */
5578 return ioread32((void *)ptr);
5582 * \details This function is invoked by ethtool function when user wants to
5583 * read MMC counters. This function will read the MMC if supported by core
5584 * and store it in eqos_mmc_counters structure. By default all the
5585 * MMC are programmed "read on reset" hence all the fields of the
5586 * eqos_mmc_counters are incremented.
5588 * Note: the MMC control register is initialized at open() time so that
5589 * all MMC interrupts are disabled and all
5590 * MMC registers are configured to clear on read.
5592 * \param[in] pdata – pointer to private data structure.
5597 void eqos_mmc_read(struct eqos_mmc_counters *mmc)
/* Read every hardware MMC (MAC Management Counter) register and
 * accumulate it (+=) into the software mirror; the hardware counters are
 * configured clear-on-read, so the software copy holds the running totals.
 * NOTE(review): the field name 'mmc_rx_crc_errror' carries a spelling
 * typo inherited from the struct declaration; it cannot be fixed here
 * without touching the header. */
5599 pr_debug("-->eqos_mmc_read\n");
5601 /* MMC TX counter registers */
5602 mmc->mmc_tx_octetcount_gb += eqos_reg_read(MMC_TXOCTETCOUNT_GB_OFFSET);
5603 mmc->mmc_tx_framecount_gb += eqos_reg_read(MMC_TXPACKETCOUNT_GB_OFFSET);
5604 mmc->mmc_tx_broadcastframe_g +=
5605 eqos_reg_read(MMC_TXBROADCASTPACKETS_G_OFFSET);
5606 mmc->mmc_tx_multicastframe_g +=
5607 eqos_reg_read(MMC_TXMULTICASTPACKETS_G_OFFSET);
5608 mmc->mmc_tx_64_octets_gb += eqos_reg_read(MMC_TX64OCTETS_GB_OFFSET);
5609 mmc->mmc_tx_65_to_127_octets_gb +=
5610 eqos_reg_read(MMC_TX65TO127OCTETS_GB_OFFSET);
5611 mmc->mmc_tx_128_to_255_octets_gb +=
5612 eqos_reg_read(MMC_TX128TO255OCTETS_GB_OFFSET);
5613 mmc->mmc_tx_256_to_511_octets_gb +=
5614 eqos_reg_read(MMC_TX256TO511OCTETS_GB_OFFSET);
5615 mmc->mmc_tx_512_to_1023_octets_gb +=
5616 eqos_reg_read(MMC_TX512TO1023OCTETS_GB_OFFSET);
5617 mmc->mmc_tx_1024_to_max_octets_gb +=
5618 eqos_reg_read(MMC_TX1024TOMAXOCTETS_GB_OFFSET);
5619 mmc->mmc_tx_unicast_gb += eqos_reg_read(MMC_TXUNICASTPACKETS_GB_OFFSET);
5620 mmc->mmc_tx_multicast_gb +=
5621 eqos_reg_read(MMC_TXMULTICASTPACKETS_GB_OFFSET);
5622 mmc->mmc_tx_broadcast_gb +=
5623 eqos_reg_read(MMC_TXBROADCASTPACKETS_GB_OFFSET);
5624 mmc->mmc_tx_underflow_error +=
5625 eqos_reg_read(MMC_TXUNDERFLOWERROR_OFFSET);
5626 mmc->mmc_tx_singlecol_g += eqos_reg_read(MMC_TXSINGLECOL_G_OFFSET);
5627 mmc->mmc_tx_multicol_g += eqos_reg_read(MMC_TXMULTICOL_G_OFFSET);
5628 mmc->mmc_tx_deferred += eqos_reg_read(MMC_TXDEFERRED_OFFSET);
5629 mmc->mmc_tx_latecol += eqos_reg_read(MMC_TXLATECOL_OFFSET);
5630 mmc->mmc_tx_exesscol += eqos_reg_read(MMC_TXEXESSCOL_OFFSET);
5631 mmc->mmc_tx_carrier_error += eqos_reg_read(MMC_TXCARRIERERROR_OFFSET);
5632 mmc->mmc_tx_octetcount_g += eqos_reg_read(MMC_TXOCTETCOUNT_G_OFFSET);
5633 mmc->mmc_tx_framecount_g += eqos_reg_read(MMC_TXPACKETSCOUNT_G_OFFSET);
5634 mmc->mmc_tx_excessdef += eqos_reg_read(MMC_TXEXCESSDEF_OFFSET);
5635 mmc->mmc_tx_pause_frame += eqos_reg_read(MMC_TXPAUSEPACKETS_OFFSET);
5636 mmc->mmc_tx_vlan_frame_g += eqos_reg_read(MMC_TXVLANPACKETS_G_OFFSET);
5637 mmc->mmc_tx_osize_frame_g += eqos_reg_read(MMC_TXOVERSIZE_G_OFFSET);
5639 /* MMC RX counter registers */
5640 mmc->mmc_rx_framecount_gb += eqos_reg_read(MMC_RXPACKETCOUNT_GB_OFFSET);
5641 mmc->mmc_rx_octetcount_gb += eqos_reg_read(MMC_RXOCTETCOUNT_GB_OFFSET);
5642 mmc->mmc_rx_octetcount_g += eqos_reg_read(MMC_RXOCTETCOUNT_G_OFFSET);
5643 mmc->mmc_rx_broadcastframe_g +=
5644 eqos_reg_read(MMC_RXBROADCASTPACKETS_G_OFFSET);
5645 mmc->mmc_rx_multicastframe_g +=
5646 eqos_reg_read(MMC_RXMULTICASTPACKETS_G_OFFSET);
5647 mmc->mmc_rx_crc_errror += eqos_reg_read(MMC_RXCRCERROR_OFFSET);
5648 mmc->mmc_rx_align_error += eqos_reg_read(MMC_RXALIGNMENTERROR_OFFSET);
5649 mmc->mmc_rx_run_error += eqos_reg_read(MMC_RXRUNTERROR_OFFSET);
5650 mmc->mmc_rx_jabber_error += eqos_reg_read(MMC_RXJABBERERROR_OFFSET);
5651 mmc->mmc_rx_undersize_g += eqos_reg_read(MMC_RXUNDERSIZE_G_OFFSET);
5652 mmc->mmc_rx_oversize_g += eqos_reg_read(MMC_RXOVERSIZE_G_OFFSET);
5653 mmc->mmc_rx_64_octets_gb += eqos_reg_read(MMC_RX64OCTETS_GB_OFFSET);
5654 mmc->mmc_rx_65_to_127_octets_gb +=
5655 eqos_reg_read(MMC_RX65TO127OCTETS_GB_OFFSET);
5656 mmc->mmc_rx_128_to_255_octets_gb +=
5657 eqos_reg_read(MMC_RX128TO255OCTETS_GB_OFFSET);
5658 mmc->mmc_rx_256_to_511_octets_gb +=
5659 eqos_reg_read(MMC_RX256TO511OCTETS_GB_OFFSET);
5660 mmc->mmc_rx_512_to_1023_octets_gb +=
5661 eqos_reg_read(MMC_RX512TO1023OCTETS_GB_OFFSET);
5662 mmc->mmc_rx_1024_to_max_octets_gb +=
5663 eqos_reg_read(MMC_RX1024TOMAXOCTETS_GB_OFFSET);
5664 mmc->mmc_rx_unicast_g += eqos_reg_read(MMC_RXUNICASTPACKETS_G_OFFSET);
5665 mmc->mmc_rx_length_error += eqos_reg_read(MMC_RXLENGTHERROR_OFFSET);
5666 mmc->mmc_rx_outofrangetype +=
5667 eqos_reg_read(MMC_RXOUTOFRANGETYPE_OFFSET);
5668 mmc->mmc_rx_pause_frames += eqos_reg_read(MMC_RXPAUSEPACKETS_OFFSET);
5669 mmc->mmc_rx_fifo_overflow += eqos_reg_read(MMC_RXFIFOOVERFLOW_OFFSET);
5670 mmc->mmc_rx_vlan_frames_gb +=
5671 eqos_reg_read(MMC_RXVLANPACKETS_GB_OFFSET);
5672 mmc->mmc_rx_watchdog_error += eqos_reg_read(MMC_RXWATCHDOGERROR_OFFSET);
5673 mmc->mmc_rx_receive_error += eqos_reg_read(MMC_RXRCVERROR_OFFSET);
5674 mmc->mmc_rx_ctrl_frames_g += eqos_reg_read(MMC_RXCTRLPACKETS_G_OFFSET);
/* IPC (RX checksum-offload) interrupt status/mask registers */
5677 mmc->mmc_rx_ipc_intr_mask += eqos_reg_read(MMC_IPC_INTR_MASK_RX_OFFSET);
5678 mmc->mmc_rx_ipc_intr += eqos_reg_read(MMC_IPC_INTR_RX_OFFSET);
/* IPv4 RX packet counters */
5681 mmc->mmc_rx_ipv4_gd += eqos_reg_read(MMC_RXIPV4_GD_PKTS_OFFSET);
5682 mmc->mmc_rx_ipv4_hderr += eqos_reg_read(MMC_RXIPV4_HDRERR_PKTS_OFFSET);
5683 mmc->mmc_rx_ipv4_nopay += eqos_reg_read(MMC_RXIPV4_NOPAY_PKTS_OFFSET);
5684 mmc->mmc_rx_ipv4_frag += eqos_reg_read(MMC_RXIPV4_FRAG_PKTS_OFFSET);
5685 mmc->mmc_rx_ipv4_udsbl += eqos_reg_read(MMC_RXIPV4_UBSBL_PKTS_OFFSET);
/* IPv6 RX packet counters */
5688 mmc->mmc_rx_ipv6_gd += eqos_reg_read(MMC_RXIPV6_GD_PKTS_OFFSET);
5689 mmc->mmc_rx_ipv6_hderr += eqos_reg_read(MMC_RXIPV6_HDRERR_PKTS_OFFSET);
5690 mmc->mmc_rx_ipv6_nopay += eqos_reg_read(MMC_RXIPV6_NOPAY_PKTS_OFFSET);
/* L4 (UDP/TCP/ICMP) RX packet counters */
5693 mmc->mmc_rx_udp_gd += eqos_reg_read(MMC_RXUDP_GD_PKTS_OFFSET);
5694 mmc->mmc_rx_udp_err += eqos_reg_read(MMC_RXUDP_ERR_PKTS_OFFSET);
5695 mmc->mmc_rx_tcp_gd += eqos_reg_read(MMC_RXTCP_GD_PKTS_OFFSET);
5696 mmc->mmc_rx_tcp_err += eqos_reg_read(MMC_RXTCP_ERR_PKTS_OFFSET);
5697 mmc->mmc_rx_icmp_gd += eqos_reg_read(MMC_RXICMP_GD_PKTS_OFFSET);
5698 mmc->mmc_rx_icmp_err += eqos_reg_read(MMC_RXICMP_ERR_PKTS_OFFSET);
/* IPv4 RX octet counters */
5701 mmc->mmc_rx_ipv4_gd_octets +=
5702 eqos_reg_read(MMC_RXIPV4_GD_OCTETS_OFFSET);
5703 mmc->mmc_rx_ipv4_hderr_octets +=
5704 eqos_reg_read(MMC_RXIPV4_HDRERR_OCTETS_OFFSET);
5705 mmc->mmc_rx_ipv4_nopay_octets +=
5706 eqos_reg_read(MMC_RXIPV4_NOPAY_OCTETS_OFFSET);
5707 mmc->mmc_rx_ipv4_frag_octets +=
5708 eqos_reg_read(MMC_RXIPV4_FRAG_OCTETS_OFFSET);
5709 mmc->mmc_rx_ipv4_udsbl_octets +=
5710 eqos_reg_read(MMC_RXIPV4_UDSBL_OCTETS_OFFSET);
/* IPv6 RX octet counters */
5713 mmc->mmc_rx_ipv6_gd_octets +=
5714 eqos_reg_read(MMC_RXIPV6_GD_OCTETS_OFFSET);
5715 mmc->mmc_rx_ipv6_hderr_octets +=
5716 eqos_reg_read(MMC_RXIPV6_HDRERR_OCTETS_OFFSET);
5717 mmc->mmc_rx_ipv6_nopay_octets +=
5718 eqos_reg_read(MMC_RXIPV6_NOPAY_OCTETS_OFFSET);
/* L4 RX octet counters */
5721 mmc->mmc_rx_udp_gd_octets += eqos_reg_read(MMC_RXUDP_GD_OCTETS_OFFSET);
5722 mmc->mmc_rx_udp_err_octets +=
5723 eqos_reg_read(MMC_RXUDP_ERR_OCTETS_OFFSET);
5724 mmc->mmc_rx_tcp_gd_octets += eqos_reg_read(MMC_RXTCP_GD_OCTETS_OFFSET);
5725 mmc->mmc_rx_tcp_err_octets +=
5726 eqos_reg_read(MMC_RXTCP_ERR_OCTETS_OFFSET);
5727 mmc->mmc_rx_icmp_gd_octets +=
5728 eqos_reg_read(MMC_RXICMP_GD_OCTETS_OFFSET);
5729 mmc->mmc_rx_icmp_err_octets +=
5730 eqos_reg_read(MMC_RXICMP_ERR_OCTETS_OFFSET);
5732 pr_debug("<--eqos_mmc_read\n");
/* net_device_ops vtable registered with the networking core; exposed to
 * the probe path through eqos_get_netdev_ops() below. */
5735 static const struct net_device_ops eqos_netdev_ops = {
5736 .ndo_open = eqos_open,
5737 .ndo_stop = eqos_close,
5738 .ndo_start_xmit = eqos_start_xmit,
5739 .ndo_get_stats = eqos_get_stats,
5740 .ndo_set_rx_mode = eqos_set_rx_mode,
5741 .ndo_set_features = eqos_set_features,
5742 .ndo_do_ioctl = eqos_ioctl,
5743 .ndo_change_mtu = eqos_change_mtu,
/* Custom TX queue selection, compiled in only when the select-queue
 * algorithm is enabled at build time. */
5744 #ifdef EQOS_QUEUE_SELECT_ALGO
5745 .ndo_select_queue = eqos_select_queue,
5747 .ndo_vlan_rx_add_vid = eqos_vlan_rx_add_vid,
5748 .ndo_vlan_rx_kill_vid = eqos_vlan_rx_kill_vid,
5749 .ndo_set_mac_address = eqos_set_mac_address,
5752 struct net_device_ops *eqos_get_netdev_ops(void)
/* Accessor returning the shared eqos_netdev_ops table.
 * NOTE(review): the cast discards const from a static const object;
 * callers must never write through the returned pointer (doing so would
 * be undefined behavior). */
5754 return (struct net_device_ops *)&eqos_netdev_ops;
5758 static void eqos_disable_all_irqs(struct eqos_prv_data *pdata)
/* Mask every interrupt source (per-channel and MAC) and then
 * synchronize_irq() each allocated IRQ line so no handler is still
 * executing when this function returns. Used by the stop path. */
5760 struct hw_if_struct *hw_if = &pdata->hw_if;
5763 pr_debug("-->%s()\n", __func__);
5765 for (i = 0; i < pdata->num_chans; i++)
5766 hw_if->disable_chan_interrupts(i, pdata);
5768 /* disable mac interrupts */
5771 /* ensure irqs are not executing */
5772 synchronize_irq(pdata->common_irq);
/* Per-channel RX/TX IRQs are only synchronized when the corresponding
 * bit in the alloc mask shows the line was actually requested. */
5773 for (i = 0; i < pdata->num_chans; i++) {
5774 if (pdata->rx_irq_alloc_mask & (1 << i))
5775 synchronize_irq(pdata->rx_irqs[i]);
5776 if (pdata->tx_irq_alloc_mask & (1 << i))
5777 synchronize_irq(pdata->tx_irqs[i]);
5780 pr_debug("<--%s()\n", __func__);
5783 void eqos_stop_dev(struct eqos_prv_data *pdata)
/* Quiesce the device: stop the PHY (or drop it into low-power mode when
 * the driver supports it), cut off TX sources, disable IRQs and NAPI,
 * stop TX/RX DMA and the MAC, then release all TX/RX skbs. Used on
 * close/suspend and by the fatal-bus-error recovery path. */
5785 struct hw_if_struct *hw_if = &pdata->hw_if;
5786 struct desc_if_struct *desc_if = &pdata->desc_if;
5789 pr_debug("-->%s()\n", __func__);
5791 #ifdef CONFIG_TEGRA_PTP_NOTIFIER
5792 /* Unregister broadcasting MAC timestamp to clients */
5793 tegra_unregister_hwtime_source();
/* PHYs with a low_power_mode hook are parked in low-power state; other
 * PHYs are stopped and held in reset via the reset GPIO. */
5796 if (pdata->phydev && pdata->phydev->drv &&
5797 pdata->phydev->drv->low_power_mode) {
5798 pdata->phydev->drv->low_power_mode(pdata->phydev, true);
5799 if (!pdata->suspended)
5800 phy_stop_interrupts(pdata->phydev);
5801 } else if (pdata->phydev) {
5803 phy_stop(pdata->phydev);
5804 gpio_set_value(pdata->phy_reset_gpio, 0);
5807 /* Stop the PHY state machine */
5809 phy_stop_machine(pdata->phydev);
5811 /* turn off sources of data into dev */
5812 netif_tx_disable(pdata->dev);
5814 hw_if->stop_mac_rx();
5815 eqos_disable_all_irqs(pdata);
5816 eqos_all_ch_napi_disable(pdata);
5818 /* Ensure no tx thread is running. We have
5819 * already prevented any new callers of the tx path above.
5820 * Taking and releasing each channel's tx lock lets any
5821 * remaining tx threads complete before we tear down DMA.
5822 for (i = 0; i < pdata->num_chans; i++) {
5823 spin_lock(&pdata->chinfo[i].chan_tx_lock);
5824 spin_unlock(&pdata->chinfo[i].chan_tx_lock);
5828 eqos_stop_all_ch_tx_dma(pdata);
5830 /* disable MAC TX */
5831 hw_if->stop_mac_tx();
5834 eqos_stop_all_ch_rx_dma(pdata);
5836 del_timer_sync(&pdata->eee_ctrl_timer);
5838 /* return tx skbs */
5839 desc_if->tx_skb_free_mem(pdata, pdata->num_chans);
5842 desc_if->rx_skb_free_mem(pdata, pdata->num_chans);
5844 pr_debug("<--%s()\n", __func__);
5847 void eqos_start_dev(struct eqos_prv_data *pdata)
/* Bring the device (back) up: release the PHY from reset/low-power,
 * CAR-reset and pad-calibrate the MAC, restore default MAC/DMA/queue
 * configuration and descriptor rings, re-enable NAPI, restart the PHY
 * state machine and EEE, then reopen the TX queues. Counterpart of
 * eqos_stop_dev(). */
5849 struct hw_if_struct *hw_if = &pdata->hw_if;
5850 struct desc_if_struct *desc_if = &pdata->desc_if;
5852 pr_debug("-->%s()\n", __func__);
5854 if (pdata->phydev->drv->low_power_mode) {
5855 /* Reset the PHY; the Broadcom PHY needs a minimum 2us delay. */
5856 pr_debug("%s(): exit from iddq-lp mode\n", __func__);
5857 gpio_set_value(pdata->phy_reset_gpio, 0);
5858 usleep_range(10, 11);
5859 gpio_set_value(pdata->phy_reset_gpio, 1);
5860 pdata->phydev->drv->low_power_mode(pdata->phydev, false);
5861 } else if (!gpio_get_value(pdata->phy_reset_gpio))
5863 /* deassert phy reset */
5864 gpio_set_value(pdata->phy_reset_gpio, 1);
5867 /* issue CAR reset to device */
5868 hw_if->car_reset(pdata);
5869 hw_if->pad_calibrate(pdata);
5871 if (pdata->phydev->drv->low_power_mode) {
5872 if (pdata->suspended == 0 && netif_running(pdata->dev))
5873 phy_start_interrupts(pdata->phydev);
5876 /* default configuration */
5877 eqos_default_common_confs(pdata);
5878 eqos_default_tx_confs(pdata);
5879 eqos_default_rx_confs(pdata);
5880 eqos_configure_rx_fun_ptr(pdata);
/* Rebuild the TX/RX descriptor rings before restarting DMA. */
5882 desc_if->wrapper_tx_desc_init(pdata);
5883 desc_if->wrapper_rx_desc_init(pdata);
5885 eqos_napi_enable_mq(pdata);
5887 eqos_set_rx_mode(pdata->dev);
5888 eqos_mmc_setup(pdata);
5890 /* initializes MAC and DMA */
/* Program the 1us tick reference from the CSR clock rate. */
5893 MAC_1US_TIC_WR(pdata->csr_clock_speed - 1);
5895 if (pdata->hw_feat.pcs_sel)
5896 hw_if->control_an(1, 0);
/* Restart the PHY: invalidate the cached duplex so the link-change
 * handler reprograms the MAC on the next state update. */
5898 if (pdata->phydev) {
5901 pdata->oldduplex = -1;
5903 phy_start(pdata->phydev);
5904 phy_start_machine(pdata->phydev);
5906 #ifdef EQOS_ENABLE_EEE
5908 pdata->eee_enabled = eqos_eee_init(pdata);
5910 pdata->eee_enabled = false;
5912 pdata->eee_enabled = false;
5916 netif_tx_start_all_queues(pdata->dev);
5918 pr_debug("<--%s()\n", __func__);
5921 void eqos_iso_work(struct work_struct *work)
/* Deferred work: when isochronous bandwidth is enabled in the DT config,
 * reserve and then realize the configured ISO bandwidth with the Tegra
 * isomgr; failures are reported via dev_err.
 * NOTE(review): 'phydev' is taken here but not used in the visible
 * lines — its use (if any) is in elided code; confirm in full source. */
5923 struct eqos_prv_data *pdata =
5924 container_of(work, struct eqos_prv_data, iso_work);
5925 struct phy_device *phydev = pdata->phydev;
5926 struct eqos_cfg *pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg;
5930 pr_debug("-->%s()\n", __func__);
5932 if (pdt_cfg->eth_iso_enable) {
5934 iso_bw = pdata->dt_cfg.iso_bw;
/* Reserve first; realize only makes an earlier reservation active. */
5938 ret = tegra_isomgr_reserve(pdata->isomgr_handle, iso_bw, 0);
5940 dev_err(&pdata->pdev->dev,
5941 "EQOS ISO BW %d reservation failed with %d\n",
5946 ret = tegra_isomgr_realize(pdata->isomgr_handle);
5948 dev_err(&pdata->pdev->dev,
5949 "EQOS ISO BW realize failed with %d\n", ret);
5952 pr_debug("<--%s()\n", __func__);
5954 void eqos_fbe_work(struct work_struct *work)
5956 struct eqos_prv_data *pdata =
5957 container_of(work, struct eqos_prv_data, fbe_work);
5961 pr_debug("-->%s()\n", __func__);
5963 mutex_lock(&pdata->hw_change_lock);
5964 if (pdata->hw_stopped)
5968 while (pdata->fbe_chan_mask) {
5969 if (pdata->fbe_chan_mask & 1) {
5970 DMA_SR_RD(i, dma_sr_reg);
5972 dev_err(&pdata->pdev->dev,
5973 "Fatal Bus Error on chan %d, SRreg=0x%.8x\n",
5976 pdata->fbe_chan_mask >>= 1;
5979 eqos_stop_dev(pdata);
5980 eqos_start_dev(pdata);
5982 mutex_unlock(&pdata->hw_change_lock);
5984 pr_debug("<--%s()\n", __func__);