1 /* bnx2x_stats.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2012 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include "bnx2x_stats.h"
21 #include "bnx2x_cmn.h"
27 * General service functions
/* Fold a {hi, lo} pair of u32 words (high word first in memory) into a
 * single long.  @hiref points at the high word; the low word follows it.
 * NOTE(review): only the 64-bit branch is visible in this view; the
 * 32-bit fallback presumably returns just the low word - confirm.
 */
30 static inline long bnx2x_hilo(u32 *hiref)
32 u32 lo = *(hiref + 1);
33 #if (BITS_PER_LONG == 64)
36 return HILO_U64(hi, lo);
/* Return how many bytes of struct host_port_stats should be DMAEd to
 * the MCP.  The length depends on which convention the bootcode (BC)
 * supports: newest BCs advertise the size via shmem2, older ones get a
 * fixed prefix, optionally extended by the PFC counters.
 */
42 static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
46 /* 'newest' convention - shmem2 contains the size of the port stats */
47 if (SHMEM2_HAS(bp, sizeof_port_stats)) {
48 u32 size = SHMEM2_RD(bp, sizeof_port_stats);
52 /* prevent newer BC from causing buffer overflow */
53 if (res > sizeof(struct host_port_stats))
54 res = sizeof(struct host_port_stats);
57 /* Older convention - all BCs support the port stats' fields up until
58 * the 'not_used' field
61 res = offsetof(struct host_port_stats, not_used) + 4;
63 /* if PFC stats are supported by the MFW, DMA them as well */
64 if (bp->flags & BC_SUPPORTS_PFC_STATS) {
/* Extend the span to cover the PFC frame counters; the "+ 4"
 * includes the final u32 (pfc_frames_tx_hi) itself.
 */
65 res += offsetof(struct host_port_stats,
67 offsetof(struct host_port_stats,
68 pfc_frames_tx_hi) + 4 ;
/* Sanity: the result must fit into the two DMAE reads issued by
 * bnx2x_stats_pmf_update() (each capped at DMAE_LEN32_RD_MAX).
 */
74 WARN_ON(res > 2 * DMAE_LEN32_RD_MAX);
79 * Init service functions
82 /* Post the next statistics ramrod. Protect it with the spin in
83 * order to ensure the strict order between statistics ramrods
84 * (each ramrod has a sequence number passed in a
85 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
/* Send the next statistics-query ramrod to the FW.  The stats_lock
 * serializes ramrod posting so that the sequence number in
 * fw_stats_req->hdr.drv_stats_counter stays strictly ordered; the
 * unlocked check of stats_pending is re-done under the lock below.
 */
88 static void bnx2x_storm_stats_post(struct bnx2x *bp)
90 if (!bp->stats_pending) {
93 spin_lock_bh(&bp->stats_lock);
/* Re-check under the lock: another context may have posted
 * a ramrod between the unlocked test above and here.
 */
95 if (bp->stats_pending) {
96 spin_unlock_bh(&bp->stats_lock);
/* Stamp this request with the next sequence number (little-endian,
 * as the FW expects) and advance the driver-side counter.
 */
100 bp->fw_stats_req->hdr.drv_stats_counter =
101 cpu_to_le16(bp->stats_counter++);
103 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
104 bp->fw_stats_req->hdr.drv_stats_counter);
108 /* send FW stats ramrod */
109 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
110 U64_HI(bp->fw_stats_req_mapping),
111 U64_LO(bp->fw_stats_req_mapping),
112 NONE_CONNECTION_TYPE);
114 bp->stats_pending = 1;
116 spin_unlock_bh(&bp->stats_lock);
/* Kick off the HW statistics DMAE transfers prepared earlier.  If an
 * executer chain was built (executer_idx != 0) it is loaded via the
 * DMAE "loader" mechanism; otherwise a single pre-built command in
 * bp->stats_dmae is posted directly (func stats only).
 */
120 static void bnx2x_hw_stats_post(struct bnx2x *bp)
122 struct dmae_command *dmae = &bp->stats_dmae;
123 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* Pre-set the completion word; bnx2x_stats_comp() polls on it. */
125 *stats_comp = DMAE_COMP_VAL;
126 if (CHIP_REV_IS_SLOW(bp))
129 /* Update MCP's statistics if possible */
131 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
132 sizeof(bp->func_stats));
135 if (bp->executer_idx) {
136 int loader_idx = PMF_DMAE_C(bp);
137 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
138 true, DMAE_COMP_GRC);
139 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
/* Build a "loader" command: DMA the first executer command
 * (dmae[0] in slowpath memory) into the DMAE command memory at
 * slot loader_idx + 1, whose GO register then chains the rest.
 */
141 memset(dmae, 0, sizeof(struct dmae_command));
142 dmae->opcode = opcode;
143 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
144 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
145 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
146 sizeof(struct dmae_command) *
147 (loader_idx + 1)) >> 2;
148 dmae->dst_addr_hi = 0;
149 dmae->len = sizeof(struct dmae_command) >> 2;
152 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
153 dmae->comp_addr_hi = 0;
157 bnx2x_post_dmae(bp, dmae, loader_idx);
159 } else if (bp->func_stx) {
/* No executer chain - post the single stats_dmae command. */
161 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
/* Poll the slowpath completion word until the DMAE stats transfers
 * posted by bnx2x_hw_stats_post() finish, sleeping ~1ms per iteration.
 * Logs an error on timeout (bounded by a countdown not visible here).
 */
165 static int bnx2x_stats_comp(struct bnx2x *bp)
167 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
171 while (*stats_comp != DMAE_COMP_VAL) {
173 BNX2X_ERR("timeout waiting for stats finished\n");
/* NOTE(review): usleep_range() with min == max gives the scheduler
 * no coalescing slack; prefer usleep_range(1000, 2000) per
 * Documentation/timers/timers-howto.
 */
177 usleep_range(1000, 1000);
183 * Statistics service functions
/* On becoming PMF, read the current port statistics back from the MCP
 * (GRC address bp->port.port_stx) into host memory, so accumulation
 * continues from the values the previous PMF left.  Done as two DMAE
 * reads because the stats area can exceed one command's max length.
 */
186 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
188 struct dmae_command *dmae;
190 int loader_idx = PMF_DMAE_C(bp);
191 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* Only meaningful for the PMF, and only if the MCP gave us a
 * port-stats address.
 */
194 if (!bp->port.pmf || !bp->port.port_stx) {
199 bp->executer_idx = 0;
201 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
/* First chunk: up to DMAE_LEN32_RD_MAX dwords, GRC completion so the
 * next command in the chain is triggered automatically.
 */
203 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
204 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
205 dmae->src_addr_lo = bp->port.port_stx >> 2;
206 dmae->src_addr_hi = 0;
207 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
208 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
209 dmae->len = DMAE_LEN32_RD_MAX;
210 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
211 dmae->comp_addr_hi = 0;
/* Second chunk: the remainder, with PCI completion so the host can
 * poll stats_comp for the end of the whole sequence.
 */
214 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
215 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
216 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
217 dmae->src_addr_hi = 0;
218 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
219 DMAE_LEN32_RD_MAX * 4);
220 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
221 DMAE_LEN32_RD_MAX * 4);
222 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
224 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
225 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
226 dmae->comp_val = DMAE_COMP_VAL;
/* Fire the chain and wait for it synchronously. */
229 bnx2x_hw_stats_post(bp);
230 bnx2x_stats_comp(bp);
/* Build the DMAE executer chain that runs on every stats cycle for the
 * PMF: write host port/func stats out to the MCP (GRC), then read the
 * active MAC's counters (EMAC, BMAC1/2 or MSTAT depending on chip and
 * link) and the NIG counters into host memory.  The last command uses
 * PCI completion so bnx2x_stats_comp() can detect the end of the chain.
 */
233 static void bnx2x_port_stats_init(struct bnx2x *bp)
235 struct dmae_command *dmae;
236 int port = BP_PORT(bp);
238 int loader_idx = PMF_DMAE_C(bp);
240 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* MAC stats are only meaningful with link up, and only the PMF owns
 * the port statistics.
 */
243 if (!bp->link_vars.link_up || !bp->port.pmf) {
248 bp->executer_idx = 0;
/* --- host -> MCP: publish port stats --- */
251 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
252 true, DMAE_COMP_GRC);
254 if (bp->port.port_stx) {
256 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
257 dmae->opcode = opcode;
258 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
259 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
260 dmae->dst_addr_lo = bp->port.port_stx >> 2;
261 dmae->dst_addr_hi = 0;
262 dmae->len = bnx2x_get_port_stats_dma_len(bp);
263 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
264 dmae->comp_addr_hi = 0;
/* --- host -> MCP: publish function stats (if func_stx given) --- */
270 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
271 dmae->opcode = opcode;
272 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
273 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
274 dmae->dst_addr_lo = bp->func_stx >> 2;
275 dmae->dst_addr_hi = 0;
276 dmae->len = sizeof(struct host_func_stats) >> 2;
277 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
278 dmae->comp_addr_hi = 0;
/* --- MAC -> host: read MAC hardware counters --- */
283 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
284 true, DMAE_COMP_GRC);
286 /* EMAC is special */
287 if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
288 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
290 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
291 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
292 dmae->opcode = opcode;
293 dmae->src_addr_lo = (mac_addr +
294 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
295 dmae->src_addr_hi = 0;
296 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
298 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
299 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
300 dmae->comp_addr_hi = 0;
303 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
305 dmae->opcode = opcode;
306 dmae->src_addr_lo = (mac_addr +
307 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
308 dmae->src_addr_hi = 0;
/* This single register lands at its matching offset inside struct
 * emac_stats (rx_stat_falsecarriererrors).
 */
309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
310 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
311 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
312 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
314 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
315 dmae->comp_addr_hi = 0;
318 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
320 dmae->opcode = opcode;
321 dmae->src_addr_lo = (mac_addr +
322 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
323 dmae->src_addr_hi = 0;
324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
325 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
327 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
328 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
330 dmae->comp_addr_hi = 0;
333 u32 tx_src_addr_lo, rx_src_addr_lo;
336 /* configure the params according to MAC type */
337 switch (bp->link_vars.mac_type) {
/* BMAC (E1x uses BIGMAC register layout, E2 uses BIGMAC2) */
339 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
340 NIG_REG_INGRESS_BMAC0_MEM);
342 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
343 BIGMAC_REGISTER_TX_STAT_GTBYT */
344 if (CHIP_IS_E1x(bp)) {
345 tx_src_addr_lo = (mac_addr +
346 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
347 tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
348 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
349 rx_src_addr_lo = (mac_addr +
350 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
351 rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
352 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
354 tx_src_addr_lo = (mac_addr +
355 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
356 tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
357 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
358 rx_src_addr_lo = (mac_addr +
359 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
360 rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
361 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
365 case MAC_TYPE_UMAC: /* handled by MSTAT */
366 case MAC_TYPE_XMAC: /* handled by MSTAT */
/* E3 chips expose UMAC/XMAC counters through the MSTAT block. */
368 mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
369 tx_src_addr_lo = (mac_addr +
370 MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
371 rx_src_addr_lo = (mac_addr +
372 MSTAT_REG_RX_STAT_GR64_LO) >> 2;
373 tx_len = sizeof(bp->slowpath->
374 mac_stats.mstat_stats.stats_tx) >> 2;
375 rx_len = sizeof(bp->slowpath->
376 mac_stats.mstat_stats.stats_rx) >> 2;
/* TX MAC counters */
381 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
382 dmae->opcode = opcode;
383 dmae->src_addr_lo = tx_src_addr_lo;
384 dmae->src_addr_hi = 0;
386 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
387 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
388 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
389 dmae->comp_addr_hi = 0;
/* RX MAC counters, placed right after the TX block in mac_stats */
393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
394 dmae->opcode = opcode;
395 dmae->src_addr_hi = 0;
396 dmae->src_addr_lo = rx_src_addr_lo;
398 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
400 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
402 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
403 dmae->comp_addr_hi = 0;
/* --- NIG -> host (E1x/E2 only; E3 has no per-port NIG stats) --- */
408 if (!CHIP_IS_E3(bp)) {
409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
410 dmae->opcode = opcode;
411 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
412 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
413 dmae->src_addr_hi = 0;
414 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
415 offsetof(struct nig_stats, egress_mac_pkt0_lo));
416 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
417 offsetof(struct nig_stats, egress_mac_pkt0_lo));
418 dmae->len = (2*sizeof(u32)) >> 2;
419 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
420 dmae->comp_addr_hi = 0;
423 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
424 dmae->opcode = opcode;
425 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
426 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
427 dmae->src_addr_hi = 0;
428 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
429 offsetof(struct nig_stats, egress_mac_pkt1_lo));
430 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
431 offsetof(struct nig_stats, egress_mac_pkt1_lo));
432 dmae->len = (2*sizeof(u32)) >> 2;
433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
434 dmae->comp_addr_hi = 0;
/* Final command: BRB discard counters, with PCI completion so the
 * host can detect the end of the whole chain via stats_comp.
 */
438 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
439 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
440 true, DMAE_COMP_PCI);
441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
442 NIG_REG_STAT0_BRB_DISCARD) >> 2;
443 dmae->src_addr_hi = 0;
444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
446 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
448 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
449 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
450 dmae->comp_val = DMAE_COMP_VAL;
/* Prepare a single DMAE command (in bp->stats_dmae) that writes the
 * host function statistics out to the MCP at bp->func_stx.  Used when
 * this function is not the PMF, so no full port-stats chain is built.
 */
455 static void bnx2x_func_stats_init(struct bnx2x *bp)
457 struct dmae_command *dmae = &bp->stats_dmae;
458 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* executer_idx stays 0, so bnx2x_hw_stats_post() posts stats_dmae
 * directly instead of going through the loader chain.
 */
466 bp->executer_idx = 0;
467 memset(dmae, 0, sizeof(struct dmae_command));
469 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
470 true, DMAE_COMP_PCI);
471 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
472 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
473 dmae->dst_addr_lo = bp->func_stx >> 2;
474 dmae->dst_addr_hi = 0;
475 dmae->len = sizeof(struct host_func_stats) >> 2;
476 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
477 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
478 dmae->comp_val = DMAE_COMP_VAL;
/* Start a statistics cycle: (re)build the DMAE program - the full port
 * chain for the PMF, or just the func-stats command otherwise - then
 * kick both the HW DMAE transfers and the FW storm-stats ramrod.
 */
483 static void bnx2x_stats_start(struct bnx2x *bp)
486 bnx2x_port_stats_init(bp);
488 else if (bp->func_stx)
489 bnx2x_func_stats_init(bp);
491 bnx2x_hw_stats_post(bp);
492 bnx2x_storm_stats_post(bp);
/* State-machine action for becoming PMF: wait out any in-flight DMAE,
 * pull the previous PMF's port stats from the MCP, then start a normal
 * statistics cycle.
 */
495 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
497 bnx2x_stats_comp(bp);
498 bnx2x_stats_pmf_update(bp);
499 bnx2x_stats_start(bp);
/* Restart the statistics cycle: wait for outstanding DMAE completions,
 * then begin a fresh cycle (used e.g. after link changes).
 */
502 static void bnx2x_stats_restart(struct bnx2x *bp)
504 bnx2x_stats_comp(bp);
505 bnx2x_stats_start(bp);
/* Fold the freshly DMAEd BMAC counters into the accumulated port stats
 * (pstats->mac_stx[1]) and mirror the pause/PFC totals into eth_stats.
 * E1x chips use the bmac1_stats layout, later chips bmac2_stats; the
 * UPDATE_STAT64 macros rely on the local variable name "new" to pick
 * the right source struct.
 */
508 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
510 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
511 struct bnx2x_eth_stats *estats = &bp->eth_stats;
517 if (CHIP_IS_E1x(bp)) {
518 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
520 /* the macros below will use "bmac1_stats" type */
521 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
522 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
523 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
524 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
525 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
526 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
527 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
/* grxpf feeds two accumulators: xoff-entered and the generic
 * pause-frame counter.
 */
528 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
529 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
531 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
532 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
533 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
534 UPDATE_STAT64(tx_stat_gt127,
535 tx_stat_etherstatspkts65octetsto127octets);
536 UPDATE_STAT64(tx_stat_gt255,
537 tx_stat_etherstatspkts128octetsto255octets);
538 UPDATE_STAT64(tx_stat_gt511,
539 tx_stat_etherstatspkts256octetsto511octets);
540 UPDATE_STAT64(tx_stat_gt1023,
541 tx_stat_etherstatspkts512octetsto1023octets);
542 UPDATE_STAT64(tx_stat_gt1518,
543 tx_stat_etherstatspkts1024octetsto1522octets);
544 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
545 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
546 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
547 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
548 UPDATE_STAT64(tx_stat_gterr,
549 tx_stat_dot3statsinternalmactransmiterrors);
550 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
553 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
555 /* the macros below will use "bmac2_stats" type */
556 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
557 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
558 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
559 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
560 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
561 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
562 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
563 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
564 UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
565 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
566 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
567 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
568 UPDATE_STAT64(tx_stat_gt127,
569 tx_stat_etherstatspkts65octetsto127octets);
570 UPDATE_STAT64(tx_stat_gt255,
571 tx_stat_etherstatspkts128octetsto255octets);
572 UPDATE_STAT64(tx_stat_gt511,
573 tx_stat_etherstatspkts256octetsto511octets);
574 UPDATE_STAT64(tx_stat_gt1023,
575 tx_stat_etherstatspkts512octetsto1023octets);
576 UPDATE_STAT64(tx_stat_gt1518,
577 tx_stat_etherstatspkts1024octetsto1522octets);
578 UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
579 UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
580 UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
581 UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
582 UPDATE_STAT64(tx_stat_gterr,
583 tx_stat_dot3statsinternalmactransmiterrors);
584 UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
586 /* collect PFC stats */
/* BMAC2 PFC counters are copied (not accumulated) into pstats. */
587 pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
588 pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
590 pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
591 pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
/* Mirror the accumulated pause/PFC totals into the ethtool stats. */
594 estats->pause_frames_received_hi =
595 pstats->mac_stx[1].rx_stat_mac_xpf_hi;
596 estats->pause_frames_received_lo =
597 pstats->mac_stx[1].rx_stat_mac_xpf_lo;
599 estats->pause_frames_sent_hi =
600 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
601 estats->pause_frames_sent_lo =
602 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
604 estats->pfc_frames_received_hi =
605 pstats->pfc_frames_rx_hi;
606 estats->pfc_frames_received_lo =
607 pstats->pfc_frames_rx_lo;
608 estats->pfc_frames_sent_hi =
609 pstats->pfc_frames_tx_hi;
610 estats->pfc_frames_sent_lo =
611 pstats->pfc_frames_tx_lo;
/* Fold the freshly DMAEd MSTAT counters (E3 UMAC/XMAC) into the
 * accumulated port stats and mirror derived totals into eth_stats.
 * MSTAT exposes no "over 1522" bucket directly, so it is synthesized
 * below from the 2047/4095/9216/16383 buckets.
 */
614 static void bnx2x_mstat_stats_update(struct bnx2x *bp)
616 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
617 struct bnx2x_eth_stats *estats = &bp->eth_stats;
619 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
621 ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
622 ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
623 ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
624 ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
625 ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
626 ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
627 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
628 ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
629 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
630 ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
632 /* collect pfc stats */
633 ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
634 pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
635 ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
636 pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
638 ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
639 ADD_STAT64(stats_tx.tx_gt127,
640 tx_stat_etherstatspkts65octetsto127octets);
641 ADD_STAT64(stats_tx.tx_gt255,
642 tx_stat_etherstatspkts128octetsto255octets);
643 ADD_STAT64(stats_tx.tx_gt511,
644 tx_stat_etherstatspkts256octetsto511octets);
645 ADD_STAT64(stats_tx.tx_gt1023,
646 tx_stat_etherstatspkts512octetsto1023octets);
647 ADD_STAT64(stats_tx.tx_gt1518,
648 tx_stat_etherstatspkts1024octetsto1522octets);
649 ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
651 ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
652 ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
653 ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
655 ADD_STAT64(stats_tx.tx_gterr,
656 tx_stat_dot3statsinternalmactransmiterrors);
657 ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
/* Export the 1024-1522 bucket directly... */
659 estats->etherstatspkts1024octetsto1522octets_hi =
660 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
661 estats->etherstatspkts1024octetsto1522octets_lo =
662 pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
/* ...and build "over 1522" as the sum of all larger buckets. */
664 estats->etherstatspktsover1522octets_hi =
665 pstats->mac_stx[1].tx_stat_mac_2047_hi;
666 estats->etherstatspktsover1522octets_lo =
667 pstats->mac_stx[1].tx_stat_mac_2047_lo;
669 ADD_64(estats->etherstatspktsover1522octets_hi,
670 pstats->mac_stx[1].tx_stat_mac_4095_hi,
671 estats->etherstatspktsover1522octets_lo,
672 pstats->mac_stx[1].tx_stat_mac_4095_lo);
674 ADD_64(estats->etherstatspktsover1522octets_hi,
675 pstats->mac_stx[1].tx_stat_mac_9216_hi,
676 estats->etherstatspktsover1522octets_lo,
677 pstats->mac_stx[1].tx_stat_mac_9216_lo);
679 ADD_64(estats->etherstatspktsover1522octets_hi,
680 pstats->mac_stx[1].tx_stat_mac_16383_hi,
681 estats->etherstatspktsover1522octets_lo,
682 pstats->mac_stx[1].tx_stat_mac_16383_lo);
/* Mirror the accumulated pause/PFC totals into the ethtool stats. */
684 estats->pause_frames_received_hi =
685 pstats->mac_stx[1].rx_stat_mac_xpf_hi;
686 estats->pause_frames_received_lo =
687 pstats->mac_stx[1].rx_stat_mac_xpf_lo;
689 estats->pause_frames_sent_hi =
690 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
691 estats->pause_frames_sent_lo =
692 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
694 estats->pfc_frames_received_hi =
695 pstats->pfc_frames_rx_hi;
696 estats->pfc_frames_received_lo =
697 pstats->pfc_frames_rx_lo;
698 estats->pfc_frames_sent_hi =
699 pstats->pfc_frames_tx_hi;
700 estats->pfc_frames_sent_lo =
701 pstats->pfc_frames_tx_lo;
/* Fold the freshly DMAEd EMAC counters into the accumulated port stats
 * (the UPDATE_EXTEND_STAT macros widen the 32-bit HW values into the
 * hi/lo pairs of pstats->mac_stx[1]) and derive the ethtool pause
 * totals.  EMAC has separate xon/xoff counters, so pause totals are
 * built by summing the two.
 */
704 static void bnx2x_emac_stats_update(struct bnx2x *bp)
706 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
707 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
708 struct bnx2x_eth_stats *estats = &bp->eth_stats;
710 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
711 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
712 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
713 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
714 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
715 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
716 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
717 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
718 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
719 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
720 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
721 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
722 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
723 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
724 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
725 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
726 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
727 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
728 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
729 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
730 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
731 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
732 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
733 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
734 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
735 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
736 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
737 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
738 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
739 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
740 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
/* pause received = xon received + xoff received */
742 estats->pause_frames_received_hi =
743 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
744 estats->pause_frames_received_lo =
745 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
746 ADD_64(estats->pause_frames_received_hi,
747 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
748 estats->pause_frames_received_lo,
749 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
/* pause sent = xon sent + xoff sent */
751 estats->pause_frames_sent_hi =
752 pstats->mac_stx[1].tx_stat_outxonsent_hi;
753 estats->pause_frames_sent_lo =
754 pstats->mac_stx[1].tx_stat_outxonsent_lo;
755 ADD_64(estats->pause_frames_sent_hi,
756 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
757 estats->pause_frames_sent_lo,
758 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
/* Process the results of the HW stats DMAE cycle: dispatch to the
 * per-MAC-type updater, accumulate NIG deltas (BRB discard/truncate),
 * fold the NIG egress packet counters (non-E3 only), snapshot the
 * accumulated MAC stats into eth_stats, and read the E3 EEE LPI
 * counter.  Also sanity-checks the shmem NIG timer watermark.
 */
761 static int bnx2x_hw_stats_update(struct bnx2x *bp)
763 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
764 struct nig_stats *old = &(bp->port.old_nig_stats);
765 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
766 struct bnx2x_eth_stats *estats = &bp->eth_stats;
772 switch (bp->link_vars.mac_type) {
774 bnx2x_bmac_stats_update(bp);
778 bnx2x_emac_stats_update(bp);
783 bnx2x_mstat_stats_update(bp);
786 case MAC_TYPE_NONE: /* unreached */
788 "stats updated by DMAE but no MAC active\n");
791 default: /* unreached */
792 BNX2X_ERR("Unknown MAC type\n");
/* NIG counters are read-and-wrap; accumulate the delta vs the
 * previous snapshot.
 */
795 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
796 new->brb_discard - old->brb_discard);
797 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
798 new->brb_truncate - old->brb_truncate);
800 if (!CHIP_IS_E3(bp)) {
801 UPDATE_STAT64_NIG(egress_mac_pkt0,
802 etherstatspkts1024octetsto1522octets);
803 UPDATE_STAT64_NIG(egress_mac_pkt1,
804 etherstatspktsover1522octets);
/* Save this cycle's NIG readings for the next delta. */
807 memcpy(old, new, sizeof(struct nig_stats));
/* Bulk-copy the accumulated MAC stats block into eth_stats; the two
 * structures are laid out field-for-field starting at
 * rx_stat_ifhcinbadoctets_hi.
 */
809 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
810 sizeof(struct mac_stx));
811 estats->brb_drop_hi = pstats->brb_drop_hi;
812 estats->brb_drop_lo = pstats->brb_drop_lo;
814 pstats->host_port_stats_counter++;
816 if (CHIP_IS_E3(bp)) {
817 u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
818 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
819 estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
/* Track (and report once per increase) the max NIG timer value the
 * MFW observed.
 */
824 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
825 if (nig_timer_max != estats->nig_timer_max) {
826 estats->nig_timer_max = nig_timer_max;
827 BNX2X_ERR("NIG timer max (%u)\n",
828 estats->nig_timer_max);
/* Consume the FW (storm) statistics for the last completed ramrod:
 * verify all four storms have caught up to the last sequence number,
 * then accumulate per-queue RX/TX byte and packet counters into the
 * per-queue, per-function and global eth stats.  Returns non-zero-ish
 * early (exact value outside this view) when any storm is stale, so
 * the caller retries on the next cycle.
 */
835 static int bnx2x_storm_stats_update(struct bnx2x *bp)
837 struct tstorm_per_port_stats *tport =
838 &bp->fw_stats_data->port.tstorm_port_statistics;
839 struct tstorm_per_pf_stats *tfunc =
840 &bp->fw_stats_data->pf.tstorm_pf_statistics;
841 struct host_func_stats *fstats = &bp->func_stats;
842 struct bnx2x_eth_stats *estats = &bp->eth_stats;
843 struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
844 struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
846 u16 cur_stats_counter;
848 /* Make sure we use the value of the counter
849 * used for sending the last stats ramrod.
851 spin_lock_bh(&bp->stats_lock);
852 cur_stats_counter = bp->stats_counter - 1;
853 spin_unlock_bh(&bp->stats_lock);
855 /* are storm stats valid? */
/* Each storm echoes the ramrod's sequence number when it finishes
 * writing its stats; a mismatch means that storm's data is stale.
 */
856 if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
858 "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
859 le16_to_cpu(counters->xstats_counter), bp->stats_counter);
863 if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
865 "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
866 le16_to_cpu(counters->ustats_counter), bp->stats_counter);
870 if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
872 "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
873 le16_to_cpu(counters->cstats_counter), bp->stats_counter);
877 if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
879 "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
880 le16_to_cpu(counters->tstats_counter), bp->stats_counter);
/* Recomputed from scratch each cycle (summed per queue below). */
884 estats->error_bytes_received_hi = 0;
885 estats->error_bytes_received_lo = 0;
887 for_each_eth_queue(bp, i) {
888 struct bnx2x_fastpath *fp = &bp->fp[i];
889 struct tstorm_per_queue_stats *tclient =
890 &bp->fw_stats_data->queue_stats[i].
891 tstorm_queue_statistics;
892 struct tstorm_per_queue_stats *old_tclient =
893 &bnx2x_fp_stats(bp, fp)->old_tclient;
894 struct ustorm_per_queue_stats *uclient =
895 &bp->fw_stats_data->queue_stats[i].
896 ustorm_queue_statistics;
897 struct ustorm_per_queue_stats *old_uclient =
898 &bnx2x_fp_stats(bp, fp)->old_uclient;
899 struct xstorm_per_queue_stats *xclient =
900 &bp->fw_stats_data->queue_stats[i].
901 xstorm_queue_statistics;
902 struct xstorm_per_queue_stats *old_xclient =
903 &bnx2x_fp_stats(bp, fp)->old_xclient;
904 struct bnx2x_eth_q_stats *qstats =
905 &bnx2x_fp_stats(bp, fp)->eth_q_stats;
906 struct bnx2x_eth_q_stats_old *qstats_old =
907 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
911 DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
912 i, xclient->ucast_pkts_sent,
913 xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
915 DP(BNX2X_MSG_STATS, "---------------\n");
/* RX byte counters per cast type */
917 UPDATE_QSTAT(tclient->rcv_bcast_bytes,
918 total_broadcast_bytes_received);
919 UPDATE_QSTAT(tclient->rcv_mcast_bytes,
920 total_multicast_bytes_received);
921 UPDATE_QSTAT(tclient->rcv_ucast_bytes,
922 total_unicast_bytes_received);
925 * sum to total_bytes_received all
926 * unicast/multicast/broadcast
928 qstats->total_bytes_received_hi =
929 qstats->total_broadcast_bytes_received_hi;
930 qstats->total_bytes_received_lo =
931 qstats->total_broadcast_bytes_received_lo;
933 ADD_64(qstats->total_bytes_received_hi,
934 qstats->total_multicast_bytes_received_hi,
935 qstats->total_bytes_received_lo,
936 qstats->total_multicast_bytes_received_lo);
938 ADD_64(qstats->total_bytes_received_hi,
939 qstats->total_unicast_bytes_received_hi,
940 qstats->total_bytes_received_lo,
941 qstats->total_unicast_bytes_received_lo);
943 qstats->valid_bytes_received_hi =
944 qstats->total_bytes_received_hi;
945 qstats->valid_bytes_received_lo =
946 qstats->total_bytes_received_lo;
/* RX packet counters and discards */
949 UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
950 total_unicast_packets_received);
951 UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
952 total_multicast_packets_received);
953 UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
954 total_broadcast_packets_received);
955 UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
956 etherstatsoverrsizepkts);
957 UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
/* Packets dropped for lack of buffers were counted as received by
 * tstorm; subtract them, then account them as no_buff discards.
 */
959 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
960 total_unicast_packets_received);
961 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
962 total_multicast_packets_received);
963 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
964 total_broadcast_packets_received);
965 UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
966 UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
967 UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
/* TX byte counters per cast type */
969 UPDATE_QSTAT(xclient->bcast_bytes_sent,
970 total_broadcast_bytes_transmitted);
971 UPDATE_QSTAT(xclient->mcast_bytes_sent,
972 total_multicast_bytes_transmitted);
973 UPDATE_QSTAT(xclient->ucast_bytes_sent,
974 total_unicast_bytes_transmitted);
977 * sum to total_bytes_transmitted all
978 * unicast/multicast/broadcast
980 qstats->total_bytes_transmitted_hi =
981 qstats->total_unicast_bytes_transmitted_hi;
982 qstats->total_bytes_transmitted_lo =
983 qstats->total_unicast_bytes_transmitted_lo;
985 ADD_64(qstats->total_bytes_transmitted_hi,
986 qstats->total_broadcast_bytes_transmitted_hi,
987 qstats->total_bytes_transmitted_lo,
988 qstats->total_broadcast_bytes_transmitted_lo);
990 ADD_64(qstats->total_bytes_transmitted_hi,
991 qstats->total_multicast_bytes_transmitted_hi,
992 qstats->total_bytes_transmitted_lo,
993 qstats->total_multicast_bytes_transmitted_lo);
995 UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
996 total_unicast_packets_transmitted);
997 UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
998 total_multicast_packets_transmitted);
999 UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
1000 total_broadcast_packets_transmitted);
1002 UPDATE_EXTEND_TSTAT(checksum_discard,
1003 total_packets_received_checksum_discarded);
1004 UPDATE_EXTEND_TSTAT(ttl0_discard,
1005 total_packets_received_ttl0_discarded);
1007 UPDATE_EXTEND_XSTAT(error_drop_pkts,
1008 total_transmitted_dropped_packets_error);
1010 /* TPA aggregations completed */
1011 UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
1012 /* Number of network frames aggregated by TPA */
1013 UPDATE_EXTEND_E_USTAT(coalesced_pkts,
1014 total_tpa_aggregated_frames);
1015 /* Total number of bytes in completed TPA aggregations */
1016 UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
1018 UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
/* Roll this queue's totals up into the function-level stats. */
1020 UPDATE_FSTAT_QSTAT(total_bytes_received);
1021 UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
1022 UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
1023 UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
1024 UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
1025 UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
1026 UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
1027 UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
1028 UPDATE_FSTAT_QSTAT(valid_bytes_received);
/* total_bytes_received also includes MAC-level bad octets and FW
 * error bytes; error_bytes_received collects both as well.
 */
1031 ADD_64(estats->total_bytes_received_hi,
1032 estats->rx_stat_ifhcinbadoctets_hi,
1033 estats->total_bytes_received_lo,
1034 estats->rx_stat_ifhcinbadoctets_lo);
1036 ADD_64(estats->total_bytes_received_hi,
1037 le32_to_cpu(tfunc->rcv_error_bytes.hi),
1038 estats->total_bytes_received_lo,
1039 le32_to_cpu(tfunc->rcv_error_bytes.lo));
1041 ADD_64(estats->error_bytes_received_hi,
1042 le32_to_cpu(tfunc->rcv_error_bytes.hi),
1043 estats->error_bytes_received_lo,
1044 le32_to_cpu(tfunc->rcv_error_bytes.lo));
1046 UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
1048 ADD_64(estats->error_bytes_received_hi,
1049 estats->rx_stat_ifhcinbadoctets_hi,
1050 estats->error_bytes_received_lo,
1051 estats->rx_stat_ifhcinbadoctets_lo);
/* Port-level FW discard counters (delta vs saved old values). */
1054 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
1055 UPDATE_FW_STAT(mac_filter_discard);
1056 UPDATE_FW_STAT(mf_tag_discard);
1057 UPDATE_FW_STAT(brb_truncate_discard);
1058 UPDATE_FW_STAT(mac_discard);
/* Mark the func-stats snapshot consistent for the MCP reader. */
1061 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
1063 bp->stats_pending = 0;
1068 static void bnx2x_net_stats_update(struct bnx2x *bp)
1070 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1071 struct net_device_stats *nstats = &bp->dev->stats;
1075 nstats->rx_packets =
1076 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
1077 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
1078 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
1080 nstats->tx_packets =
1081 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
1082 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
1083 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
1085 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
1087 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
1089 tmp = estats->mac_discard;
1090 for_each_rx_queue(bp, i) {
1091 struct tstorm_per_queue_stats *old_tclient =
1092 &bp->fp_stats[i].old_tclient;
1093 tmp += le32_to_cpu(old_tclient->checksum_discard);
1095 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1097 nstats->tx_dropped = 0;
1100 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
1102 nstats->collisions =
1103 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
1105 nstats->rx_length_errors =
1106 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
1107 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
1108 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
1109 bnx2x_hilo(&estats->brb_truncate_hi);
1110 nstats->rx_crc_errors =
1111 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
1112 nstats->rx_frame_errors =
1113 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
1114 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
1115 nstats->rx_missed_errors = 0;
1117 nstats->rx_errors = nstats->rx_length_errors +
1118 nstats->rx_over_errors +
1119 nstats->rx_crc_errors +
1120 nstats->rx_frame_errors +
1121 nstats->rx_fifo_errors +
1122 nstats->rx_missed_errors;
1124 nstats->tx_aborted_errors =
1125 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
1126 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
1127 nstats->tx_carrier_errors =
1128 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
1129 nstats->tx_fifo_errors = 0;
1130 nstats->tx_heartbeat_errors = 0;
1131 nstats->tx_window_errors = 0;
1133 nstats->tx_errors = nstats->tx_aborted_errors +
1134 nstats->tx_carrier_errors +
1135 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
/* Accumulate the driver-maintained (non-HW, non-FW) per-queue counters -
 * xoff events, rx discards/allocation failures, checksum errors - into
 * the global eth_stats block.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp_stats[i].eth_q_stats_old;

		/* NOTE(review): UPDATE_ESTAT_QSTAT appears to expand against
		 * the local names 'estats'/'qstats'/'qstats_old' - confirm in
		 * bnx2x_stats.h before renaming any of them.
		 */
		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
1155 static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
1159 if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
1160 val = SHMEM2_RD(bp, edebug_driver_if[1]);
1162 if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
/* Periodic statistics refresh: consume the completed HW DMAE and FW
 * (storm) statistics, fold them into the netdev/driver counters and
 * post the next round of requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* statistics may be disabled via the edebug driver interface */
	if (bnx2x_edebug_stats_stopped(bp))

	/* previous DMAE transaction has not completed yet - try later */
	if (*stats_comp != DMAE_COMP_VAL)

		bnx2x_hw_stats_update(bp);

	/* non-zero return: FW has not answered the last stats ramrod;
	 * three consecutive misses are treated as a fatal FW error.
	 */
	if (bnx2x_storm_stats_update(bp)) {
		if (bp->stats_pending++ == 3) {
			BNX2X_ERR("storm stats were not updated for 3 times\n");

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;

		netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
			   estats->brb_drop_lo, estats->brb_truncate_lo);

	/* kick off the next HW DMAE + FW ramrod statistics cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
/* Build DMAE commands that flush the port statistics to the shmem
 * port_stx area and the function statistics to func_stx.
 * NOTE(review): several guard lines (e.g. the branch choosing GRC vs.
 * PCI completion, and the func_stx check) fall outside this extract.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
	struct dmae_command *dmae;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* base opcode: PCI -> GRC copy; completion type is added per command */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* complete into the GRC loader (chains the next command) */
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_GRC);
		/* stand-alone command: complete with a PCI write */
			dmae->opcode = bnx2x_dmae_opcode_add_comp(
						opcode, DMAE_COMP_PCI);

		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2; /* dword address */
		dmae->dst_addr_hi = 0;
		dmae->len = bnx2x_get_port_stats_dma_len(bp);

			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;

			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

	/* function statistics -> func_stx, completing into stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2; /* length in dwords */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;
/* Collect the last completed HW/FW statistics on the way to
 * STATS_STATE_DISABLED, then flush port/function stats to shmem.
 * NOTE(review): the conditions guarding the calls below (PMF check,
 * 'update' test) fall outside this extract.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
	bnx2x_stats_comp(bp);

		/* a zero return value means a valid snapshot was consumed */
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

		bnx2x_net_stats_update(bp);

			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
1285 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
/* Statistics state machine: bnx2x_stats_stm[state][event] names the
 * handler to invoke and the state to move to.  Driven exclusively by
 * bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update, STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop, STATS_STATE_DISABLED}
/* Main entry point of the statistics state machine: run the action for
 * (current state, event) and advance to the next state.
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
	enum bnx2x_stats_state state;
	if (unlikely(bp->panic))

	/* the state transition is done atomically under stats_lock ... */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	/* ... but the action runs outside it: some actions (e.g. those
	 * reaching bnx2x_storm_stats_post) take stats_lock themselves.
	 */
	bnx2x_stats_stm[state][event].action(bp);

	/* UPDATE fires on every stats tick - only log it when timer
	 * messages were explicitly enabled.
	 */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
/* Program and fire a single DMAE command that copies the current host
 * port-statistics buffer out to the port_stx area in shmem.  Must only
 * be called on a PMF with a valid port_stx address.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: caller must be a PMF and port_stx must be set */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");

	bp->executer_idx = 0;

	/* PCI -> GRC copy of the port stats, completing with a PCI write */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					 true, DMAE_COMP_PCI);
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2; /* byte addr -> dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = bnx2x_get_port_stats_dma_len(bp);
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* issue the command and wait for it to complete */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
/* Prepare the statistics ramrod data once, so that afterwards we only
 * need to increment the statistics counter and send the ramrod each
 * time statistics are requested.
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
	int first_queue_query_index;
	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;

	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;

	/* header: number of query entries; counter starts from zero */
	stats_hdr->cmd_num = bp->fw_stats_num;
	stats_hdr->drv_stats_counter = 0;

	/* storm_counters struct contains the counters of completed
	 * statistics requests per storm which are incremented by FW
	 * each time it completes handling a statistics ramrod. We will
	 * check these counters in the timer handler and discard a
	 * (statistics) ramrod completion.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, storm_counters);

	stats_hdr->stats_counters_addrs.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	stats_hdr->stats_counters_addrs.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/* prepare to the first stats ramrod (will be completed with
	 * the counters equal to zero) - init counters to something different.
	 */
	memset(&bp->fw_stats_data->storm_counters, 0xff,
	       sizeof(struct stats_counter));

	/**** Port FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, port);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PORT;
	/* For port query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	/* For port query funcID is a DONT CARE */
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** PF FW statistics data ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, pf);

	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_PF;
	/* For PF query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));

	/**** FCoE FW statistics data ****/
	/* NOTE(review): the guard enabling this section (FCoE support) is
	 * outside this extract.
	 */
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, fcoe);

		&bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];

	cur_query_entry->kind = STATS_TYPE_FCOE;
	/* For FCoE query index is a DONT CARE */
	cur_query_entry->index = BP_PORT(bp);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo =
		cpu_to_le32(U64_LO(cur_data_offset));

	/**** Clients' queries ****/
	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* first queue query index depends whether FCoE offloaded request will
	 * be included in the ramrod
	 */
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
		first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;

	for_each_eth_queue(bp, i) {
			query[first_queue_query_index + i];

		cur_query_entry->kind = STATS_TYPE_QUEUE;
		/* per-queue entries are keyed by the queue's stats id */
		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
		cur_query_entry->address.hi =
			cpu_to_le32(U64_HI(cur_data_offset));
		cur_query_entry->address.lo =
			cpu_to_le32(U64_LO(cur_data_offset));

		/* per-queue data areas are laid out back to back */
		cur_data_offset += sizeof(struct per_queue_stats);

	/* add FCoE queue query if needed */
			query[first_queue_query_index + i];

	cur_query_entry->kind = STATS_TYPE_QUEUE;
	cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
	cur_query_entry->address.hi =
		cpu_to_le32(U64_HI(cur_data_offset));
	cur_query_entry->address.lo =
		cpu_to_le32(U64_LO(cur_data_offset));
/* (Re)initialize statistics bookkeeping for this function: shmem stats
 * addresses, NIG baseline counters, per-queue mirrors and the FW
 * statistics ramrod request.  bp->stats_init distinguishes a cold init
 * (clear everything) from a re-init.
 */
void bnx2x_stats_init(struct bnx2x *bp)
	int port = BP_PORT(bp);	/* absolute port index */
	int mb_idx = BP_FW_MB_IDX(bp);

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
		/* no MCP - no shmem locations to report into */
		bp->port.port_stx = 0;

	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* pmf should retrieve port statistics from SP on a non-init */
	if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);

	/* snapshot the current NIG counters as the 'old' baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
		REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
		REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	if (!CHIP_IS_E3(bp)) {
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];

		memset(&fp_stats->old_tclient, 0,
		       sizeof(fp_stats->old_tclient));
		memset(&fp_stats->old_uclient, 0,
		       sizeof(fp_stats->old_uclient));
		memset(&fp_stats->old_xclient, 0,
		       sizeof(fp_stats->old_xclient));
		/* clear the accumulated mirrors only on a cold init */
		if (bp->stats_init) {
			memset(&fp_stats->eth_q_stats, 0,
			       sizeof(fp_stats->eth_q_stats));
			memset(&fp_stats->eth_q_stats_old, 0,
			       sizeof(fp_stats->eth_q_stats_old));

	/* Prepare statistics ramrod data */
	bnx2x_prep_fw_stats_req(bp);

	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
	if (bp->stats_init) {
		memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
		memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
		memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
		memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
		memset(&bp->func_stats, 0, sizeof(bp->func_stats));

		/* Clean SP from previous statistics */
		memset(bnx2x_sp(bp, func_stats), 0,
		       sizeof(struct host_func_stats));
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf && bp->port.port_stx)
		bnx2x_port_stats_base_init(bp);

	/* mark the end of statistics initialization */
	bp->stats_init = false;
/* Snapshot the counters that must survive into the *_old mirrors
 * (per-queue byte counts, rx_dropped, port FW discard counters) so that
 * accumulation can resume from them later.
 */
void bnx2x_save_statistics(struct bnx2x *bp)
	struct net_device_stats *nstats = &bp->dev->stats;

	/* save queue statistics */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;

		/* NOTE(review): UPDATE_QSTAT_OLD appears to expand against the
		 * local names 'qstats'/'qstats_old' - confirm in bnx2x_stats.h
		 * before renaming them.
		 */
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
		UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
		UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
		UPDATE_QSTAT_OLD(total_tpa_bytes_lo);

	/* save net_device_stats statistics */
	bp->net_stats_old.rx_dropped = nstats->rx_dropped;

	/* store port firmware statistics */
	if (bp->port.pmf && IS_MF(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;

		UPDATE_FW_STAT_OLD(mac_filter_discard);
		UPDATE_FW_STAT_OLD(mf_tag_discard);
		UPDATE_FW_STAT_OLD(brb_truncate_discard);
		UPDATE_FW_STAT_OLD(mac_discard);
1612 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1616 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1617 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1618 struct per_queue_stats *fcoe_q_stats =
1619 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
1621 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1622 &fcoe_q_stats->tstorm_queue_statistics;
1624 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1625 &fcoe_q_stats->ustorm_queue_statistics;
1627 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1628 &fcoe_q_stats->xstorm_queue_statistics;
1630 struct fcoe_statistics_params *fw_fcoe_stat =
1631 &bp->fw_stats_data->fcoe;
1633 memset(afex_stats, 0, sizeof(struct afex_stats));
1635 for_each_eth_queue(bp, i) {
1636 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
1638 ADD_64(afex_stats->rx_unicast_bytes_hi,
1639 qstats->total_unicast_bytes_received_hi,
1640 afex_stats->rx_unicast_bytes_lo,
1641 qstats->total_unicast_bytes_received_lo);
1643 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1644 qstats->total_broadcast_bytes_received_hi,
1645 afex_stats->rx_broadcast_bytes_lo,
1646 qstats->total_broadcast_bytes_received_lo);
1648 ADD_64(afex_stats->rx_multicast_bytes_hi,
1649 qstats->total_multicast_bytes_received_hi,
1650 afex_stats->rx_multicast_bytes_lo,
1651 qstats->total_multicast_bytes_received_lo);
1653 ADD_64(afex_stats->rx_unicast_frames_hi,
1654 qstats->total_unicast_packets_received_hi,
1655 afex_stats->rx_unicast_frames_lo,
1656 qstats->total_unicast_packets_received_lo);
1658 ADD_64(afex_stats->rx_broadcast_frames_hi,
1659 qstats->total_broadcast_packets_received_hi,
1660 afex_stats->rx_broadcast_frames_lo,
1661 qstats->total_broadcast_packets_received_lo);
1663 ADD_64(afex_stats->rx_multicast_frames_hi,
1664 qstats->total_multicast_packets_received_hi,
1665 afex_stats->rx_multicast_frames_lo,
1666 qstats->total_multicast_packets_received_lo);
/* sum to rx_frames_discarded all discarded
 * packets due to size, ttl0 and checksum
1671 ADD_64(afex_stats->rx_frames_discarded_hi,
1672 qstats->total_packets_received_checksum_discarded_hi,
1673 afex_stats->rx_frames_discarded_lo,
1674 qstats->total_packets_received_checksum_discarded_lo);
1676 ADD_64(afex_stats->rx_frames_discarded_hi,
1677 qstats->total_packets_received_ttl0_discarded_hi,
1678 afex_stats->rx_frames_discarded_lo,
1679 qstats->total_packets_received_ttl0_discarded_lo);
1681 ADD_64(afex_stats->rx_frames_discarded_hi,
1682 qstats->etherstatsoverrsizepkts_hi,
1683 afex_stats->rx_frames_discarded_lo,
1684 qstats->etherstatsoverrsizepkts_lo);
1686 ADD_64(afex_stats->rx_frames_dropped_hi,
1687 qstats->no_buff_discard_hi,
1688 afex_stats->rx_frames_dropped_lo,
1689 qstats->no_buff_discard_lo);
1691 ADD_64(afex_stats->tx_unicast_bytes_hi,
1692 qstats->total_unicast_bytes_transmitted_hi,
1693 afex_stats->tx_unicast_bytes_lo,
1694 qstats->total_unicast_bytes_transmitted_lo);
1696 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1697 qstats->total_broadcast_bytes_transmitted_hi,
1698 afex_stats->tx_broadcast_bytes_lo,
1699 qstats->total_broadcast_bytes_transmitted_lo);
1701 ADD_64(afex_stats->tx_multicast_bytes_hi,
1702 qstats->total_multicast_bytes_transmitted_hi,
1703 afex_stats->tx_multicast_bytes_lo,
1704 qstats->total_multicast_bytes_transmitted_lo);
1706 ADD_64(afex_stats->tx_unicast_frames_hi,
1707 qstats->total_unicast_packets_transmitted_hi,
1708 afex_stats->tx_unicast_frames_lo,
1709 qstats->total_unicast_packets_transmitted_lo);
1711 ADD_64(afex_stats->tx_broadcast_frames_hi,
1712 qstats->total_broadcast_packets_transmitted_hi,
1713 afex_stats->tx_broadcast_frames_lo,
1714 qstats->total_broadcast_packets_transmitted_lo);
1716 ADD_64(afex_stats->tx_multicast_frames_hi,
1717 qstats->total_multicast_packets_transmitted_hi,
1718 afex_stats->tx_multicast_frames_lo,
1719 qstats->total_multicast_packets_transmitted_lo);
1721 ADD_64(afex_stats->tx_frames_dropped_hi,
1722 qstats->total_transmitted_dropped_packets_error_hi,
1723 afex_stats->tx_frames_dropped_lo,
1724 qstats->total_transmitted_dropped_packets_error_lo);
1727 /* now add FCoE statistics which are collected separately
1728 * (both offloaded and non offloaded)
1731 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1733 afex_stats->rx_unicast_bytes_lo,
1734 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1736 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1737 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1738 afex_stats->rx_unicast_bytes_lo,
1739 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1741 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1742 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1743 afex_stats->rx_broadcast_bytes_lo,
1744 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1746 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1747 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1748 afex_stats->rx_multicast_bytes_lo,
1749 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1751 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1753 afex_stats->rx_unicast_frames_lo,
1754 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1756 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1758 afex_stats->rx_unicast_frames_lo,
1759 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1761 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1763 afex_stats->rx_broadcast_frames_lo,
1764 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1766 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1768 afex_stats->rx_multicast_frames_lo,
1769 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1771 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1773 afex_stats->rx_frames_discarded_lo,
1774 fcoe_q_tstorm_stats->checksum_discard);
1776 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1778 afex_stats->rx_frames_discarded_lo,
1779 fcoe_q_tstorm_stats->pkts_too_big_discard);
1781 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1783 afex_stats->rx_frames_discarded_lo,
1784 fcoe_q_tstorm_stats->ttl0_discard);
1786 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1788 afex_stats->rx_frames_dropped_lo,
1789 fcoe_q_tstorm_stats->no_buff_discard);
1791 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1793 afex_stats->rx_frames_dropped_lo,
1794 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1796 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1798 afex_stats->rx_frames_dropped_lo,
1799 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1801 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1803 afex_stats->rx_frames_dropped_lo,
1804 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1806 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1808 afex_stats->rx_frames_dropped_lo,
1809 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1811 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1813 afex_stats->rx_frames_dropped_lo,
1814 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1816 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1818 afex_stats->tx_unicast_bytes_lo,
1819 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1821 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1822 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1823 afex_stats->tx_unicast_bytes_lo,
1824 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1826 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1827 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1828 afex_stats->tx_broadcast_bytes_lo,
1829 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1831 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1832 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1833 afex_stats->tx_multicast_bytes_lo,
1834 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1836 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1838 afex_stats->tx_unicast_frames_lo,
1839 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1841 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1843 afex_stats->tx_unicast_frames_lo,
1844 fcoe_q_xstorm_stats->ucast_pkts_sent);
1846 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1848 afex_stats->tx_broadcast_frames_lo,
1849 fcoe_q_xstorm_stats->bcast_pkts_sent);
1851 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1853 afex_stats->tx_multicast_frames_lo,
1854 fcoe_q_xstorm_stats->mcast_pkts_sent);
1856 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1858 afex_stats->tx_frames_dropped_lo,
1859 fcoe_q_xstorm_stats->error_drop_pkts);
1862 /* if port stats are requested, add them to the PMF
1863 * stats, as anyway they will be accumulated by the
1864 * MCP before sent to the switch
1866 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1867 ADD_64(afex_stats->rx_frames_dropped_hi,
1869 afex_stats->rx_frames_dropped_lo,
1870 estats->mac_filter_discard);
1871 ADD_64(afex_stats->rx_frames_dropped_hi,
1873 afex_stats->rx_frames_dropped_lo,
1874 estats->brb_truncate_discard);
1875 ADD_64(afex_stats->rx_frames_discarded_hi,
1877 afex_stats->rx_frames_discarded_lo,
1878 estats->mac_discard);