/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

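/* Allocate and zero the DMA-coherent backing memory for a queue of
 * 'len' entries of 'entry_size' bytes each.  Returns -ENOMEM on failure;
 * be_queue_free() above is its counterpart.
 */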
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

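/* Enable or disable host interrupts by flipping the HOSTINTR bit in the
 * membar control register, reached via PCI config space.  The register is
 * read first so the write is skipped when the bit already has the requested
 * value (and skipped entirely after an EEH error).
 */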
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

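/* The helpers below ring the RQ/TXQ/EQ/CQ doorbells.  Each doorbell word
 * encodes the ring id plus a count (buffers posted, or events/completions
 * popped); the wmb() before the RQ/TXQ writes ensures the queue entries are
 * visible to the device before the doorbell is rung.
 */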
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac, false,
                                       adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id[0], 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

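/* Fold a 16-bit HW counter into a 32-bit accumulator: the low 16 bits
 * mirror the HW value and the high 16 bits count wraps.  E.g. if *acc is
 * 0x0001FFF0 and the HW counter wraps to val=0x0005, the new value is
 * 0x00020005.
 */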
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        if (lancer_chip(adapter))
                goto done;

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* this erx HW counter wraps around after 65535; the driver
                 * accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
done:
        return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

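/* If the 802.1p priority requested by the stack is not enabled in the
 * adapter's priority bitmap, rewrite the tag to carry the adapter's
 * recommended priority instead; the VLAN id bits are left untouched.
 */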
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

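/* Build the WRB chain for one skb: a header WRB, one WRB for the linear
 * part (if any), one per page fragment, and an optional dummy WRB to keep
 * the count even.  Returns the number of data bytes queued; on a DMA
 * mapping error the queue head is rewound, every mapping made so far is
 * undone via unmap_tx_frag(), and 0 is returned.
 */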
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is a new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

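/* Count this PF's VFs by walking all PCI devices from the same vendor and
 * matching virtfns whose physfn is our pdev.  Depending on vf_state, either
 * the total or only the guest-assigned (PCI_DEV_FLAGS_ASSIGNED) VFs are
 * counted; returns 0 if the device has no SR-IOV capability.
 */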
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

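/* Adaptive interrupt coalescing: once a second, derive the RX packet rate
 * and scale the EQ delay with it (a delay step of 8 per 110K pps, clamped
 * to [min_eqd, max_eqd], with very low values mapped to 0).  The new delay
 * is pushed to FW only when it differs from the current one.
 */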
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

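/* Look up the page_info slot for an RX fragment and, if this fragment was
 * the last user of its (possibly shared) page, unmap the page from the
 * device.  The caller owns the page reference afterwards.
 */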
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

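/* The two parsers below unpack a hardware RX completion descriptor into the
 * driver's be_rx_compl_info.  They are field-for-field identical; only the
 * AMAP layout differs between the v0 (legacy) and v1 (BE3-native)
 * completion formats.
 */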
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

1440 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1441 {
1442         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1443         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1444         struct be_adapter *adapter = rxo->adapter;
1445
1446         /* For checking the valid bit it is OK to use either definition, as
1447          * the valid bit is at the same position in both v0 and v1 Rx compls */
1448         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1449                 return NULL;
1450
1451         rmb();
1452         be_dws_le_to_cpu(compl, sizeof(*compl));
1453
1454         if (adapter->be3_native)
1455                 be_parse_rx_compl_v1(compl, rxcp);
1456         else
1457                 be_parse_rx_compl_v0(compl, rxcp);
1458
1459         if (rxcp->vlanf) {
1460                 /* The vlanf bit may be wrongly set in some cards;
1461                  * ignore it if vtm is not set */
1462                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1463                         rxcp->vlanf = 0;
1464
1465                 if (!lancer_chip(adapter))
1466                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1467
1468                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1469                     !adapter->vlan_tag[rxcp->vlan_tag])
1470                         rxcp->vlanf = 0;
1471         }
1472
1473         /* As the compl has been parsed, reset it; we won't touch it again */
1474         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1475
1476         queue_tail_inc(&rxo->cq);
1477         return rxcp;
1478 }
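
/* A minimal sketch of the valid-bit protocol followed above
 * (hypothetical types, not driver code): hardware writes a completion
 * entry and sets its valid bit last, so the consumer tests the valid
 * bit, orders the remaining reads with rmb(), parses the entry, clears
 * the valid bit so the slot reads as empty on the ring's next wrap,
 * and only then advances the tail.
 *
 *	struct compl { u32 dw[4]; };	// this sketch keeps the valid
 *					// bit in dw[3], bit 0
 *	static struct compl *cq_next(struct compl *ring, u16 *tail, u16 len)
 *	{
 *		struct compl *c = &ring[*tail];
 *
 *		if (!(c->dw[3] & 1))
 *			return NULL;		// hw hasn't written it yet
 *		rmb();				// valid bit before payload
 *		c->dw[3] &= ~1;			// reset for the next wrap
 *		*tail = (*tail + 1) % len;
 *		return c;
 *	}
 */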
1479
1480 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1481 {
1482         u32 order = get_order(size);
1483
1484         if (order > 0)
1485                 gfp |= __GFP_COMP;
1486         return alloc_pages(gfp, order);
1487 }
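
/* Example of the sizes involved: with the default rx_frag_size of 2048
 * and 4 KiB pages, get_order(2048) == 0, so a plain page is allocated
 * and holds two fragments. If rx_frag_size were 8192, get_order(8192)
 * == 1 and __GFP_COMP would turn the two physical pages into one
 * compound page, so the per-fragment get_page()/put_page() calls made
 * by be_post_rx_frags() operate on the allocation as a whole.
 */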
1488
1489 /*
1490  * Allocate a page, split it to fragments of size rx_frag_size and post as
1491  * receive buffers to BE
1492  */
1493 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1494 {
1495         struct be_adapter *adapter = rxo->adapter;
1496         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1497         struct be_queue_info *rxq = &rxo->q;
1498         struct page *pagep = NULL;
1499         struct be_eth_rx_d *rxd;
1500         u64 page_dmaaddr = 0, frag_dmaaddr;
1501         u32 posted, page_offset = 0;
1502
1503         page_info = &rxo->page_info_tbl[rxq->head];
1504         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1505                 if (!pagep) {
1506                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1507                         if (unlikely(!pagep)) {
1508                                 rx_stats(rxo)->rx_post_fail++;
1509                                 break;
1510                         }
1511                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1512                                                     0, adapter->big_page_size,
1513                                                     DMA_FROM_DEVICE);
1514                         page_info->page_offset = 0;
1515                 } else {
1516                         get_page(pagep);
1517                         page_info->page_offset = page_offset + rx_frag_size;
1518                 }
1519                 page_offset = page_info->page_offset;
1520                 page_info->page = pagep;
1521                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1522                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1523
1524                 rxd = queue_head_node(rxq);
1525                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1526                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1527
1528                 /* Any space left in the current big page for another frag? */
1529                 if ((page_offset + rx_frag_size + rx_frag_size) >
1530                                         adapter->big_page_size) {
1531                         pagep = NULL;
1532                         page_info->last_page_user = true;
1533                 }
1534
1535                 prev_page_info = page_info;
1536                 queue_head_inc(rxq);
1537                 page_info = &rxo->page_info_tbl[rxq->head];
1538         }
1539         if (pagep)
1540                 prev_page_info->last_page_user = true;
1541
1542         if (posted) {
1543                 atomic_add(posted, &rxq->used);
1544                 be_rxq_notify(adapter, rxq->id, posted);
1545         } else if (atomic_read(&rxq->used) == 0) {
1546                 /* Let be_worker replenish when memory is available */
1547                 rxo->rx_post_starved = true;
1548         }
1549 }
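
/* A worked trace of the splitting scheme above, assuming the default
 * rx_frag_size of 2048 and a 4 KiB big page (two fragments per page):
 *
 *	frag 0: pagep == NULL -> fresh page, dma_map_page(),
 *		page_offset = 0, page refcount == 1
 *	frag 1: get_page() -> refcount == 2, page_offset = 2048;
 *		no room for a third fragment, so pagep is dropped and
 *		last_page_user = true
 *
 * Each consumed fragment later does a put_page(); the DMA mapping
 * itself is torn down only when the last_page_user fragment is
 * processed, keeping the refcount and the mapping lifetime in sync.
 */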
1550
1551 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1552 {
1553         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1554
1555         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1556                 return NULL;
1557
1558         rmb();
1559         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1560
1561         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1562
1563         queue_tail_inc(tx_cq);
1564         return txcp;
1565 }
1566
1567 static u16 be_tx_compl_process(struct be_adapter *adapter,
1568                 struct be_tx_obj *txo, u16 last_index)
1569 {
1570         struct be_queue_info *txq = &txo->q;
1571         struct be_eth_wrb *wrb;
1572         struct sk_buff **sent_skbs = txo->sent_skb_list;
1573         struct sk_buff *sent_skb;
1574         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1575         bool unmap_skb_hdr = true;
1576
1577         sent_skb = sent_skbs[txq->tail];
1578         BUG_ON(!sent_skb);
1579         sent_skbs[txq->tail] = NULL;
1580
1581         /* skip header wrb */
1582         queue_tail_inc(txq);
1583
1584         do {
1585                 cur_index = txq->tail;
1586                 wrb = queue_tail_node(txq);
1587                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1588                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1589                 unmap_skb_hdr = false;
1590
1591                 num_wrbs++;
1592                 queue_tail_inc(txq);
1593         } while (cur_index != last_index);
1594
1595         kfree_skb(sent_skb);
1596         return num_wrbs;
1597 }
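
/* The completion names only the last wrb of a packet; everything from
 * the current tail up to and including that index belongs to the same
 * packet. A worked example with txq->len == 8:
 *
 *	tail == 6, last_index == 1 (the ring has wrapped)
 *	hdr wrb at 6, frag wrbs at 7, 0 and 1  ->  num_wrbs == 4
 *
 * which is why the count starts at 1 for the header wrb and the loop
 * runs until cur_index catches up with last_index.
 */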
1598
1599 /* Return the number of events in the event queue */
1600 static inline int events_get(struct be_eq_obj *eqo)
1601 {
1602         struct be_eq_entry *eqe;
1603         int num = 0;
1604
1605         do {
1606                 eqe = queue_tail_node(&eqo->q);
1607                 if (eqe->evt == 0)
1608                         break;
1609
1610                 rmb();
1611                 eqe->evt = 0;
1612                 num++;
1613                 queue_tail_inc(&eqo->q);
1614         } while (true);
1615
1616         return num;
1617 }
1618
1619 static int event_handle(struct be_eq_obj *eqo)
1620 {
1621         bool rearm = false;
1622         int num = events_get(eqo);
1623
1624         /* Deal with any spurious interrupts that come without events */
1625         if (!num)
1626                 rearm = true;
1627
1628         if (num || msix_enabled(eqo->adapter))
1629                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1630
1631         if (num)
1632                 napi_schedule(&eqo->napi);
1633
1634         return num;
1635 }
1636
1637 /* Leaves the EQ in a disarmed state */
1638 static void be_eq_clean(struct be_eq_obj *eqo)
1639 {
1640         int num = events_get(eqo);
1641
1642         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1643 }
1644
1645 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1646 {
1647         struct be_rx_page_info *page_info;
1648         struct be_queue_info *rxq = &rxo->q;
1649         struct be_queue_info *rx_cq = &rxo->cq;
1650         struct be_rx_compl_info *rxcp;
1651         u16 tail;
1652
1653         /* First cleanup pending rx completions */
1654         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1655                 be_rx_compl_discard(rxo, rxcp);
1656                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1657         }
1658
1659         /* Then free posted rx buffers that were not used */
1660         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1661         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1662                 page_info = get_rx_page_info(rxo, tail);
1663                 put_page(page_info->page);
1664                 memset(page_info, 0, sizeof(*page_info));
1665         }
1666         BUG_ON(atomic_read(&rxq->used));
1667         rxq->tail = rxq->head = 0;
1668 }
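
/* The tail computed above is simply "head minus the entries still
 * posted", in unsigned modular arithmetic. For example, with
 * rxq->len == 1024, head == 10 and used == 20:
 *
 *	tail = (10 + 1024 - 20) % 1024 = 1014
 *
 * so entries 1014..1023 and 0..9 are the 20 posted-but-unconsumed
 * buffers whose pages the loop releases.
 */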
1669
1670 static void be_tx_compl_clean(struct be_adapter *adapter)
1671 {
1672         struct be_tx_obj *txo;
1673         struct be_queue_info *txq;
1674         struct be_eth_tx_compl *txcp;
1675         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1676         struct sk_buff *sent_skb;
1677         bool dummy_wrb;
1678         int i, pending_txqs;
1679
1680         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1681         do {
1682                 pending_txqs = adapter->num_tx_qs;
1683
1684                 for_all_tx_queues(adapter, txo, i) {
1685                         txq = &txo->q;
1686                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1687                                 end_idx =
1688                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1689                                                       wrb_index, txcp);
1690                                 num_wrbs += be_tx_compl_process(adapter, txo,
1691                                                                 end_idx);
1692                                 cmpl++;
1693                         }
1694                         if (cmpl) {
1695                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1696                                 atomic_sub(num_wrbs, &txq->used);
1697                                 cmpl = 0;
1698                                 num_wrbs = 0;
1699                         }
1700                         if (atomic_read(&txq->used) == 0)
1701                                 pending_txqs--;
1702                 }
1703
1704                 if (pending_txqs == 0 || ++timeo > 200)
1705                         break;
1706
1707                 mdelay(1);
1708         } while (true);
1709
1710         for_all_tx_queues(adapter, txo, i) {
1711                 txq = &txo->q;
1712                 if (atomic_read(&txq->used))
1713                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1714                                 atomic_read(&txq->used));
1715
1716                 /* free posted tx skbs for which compls will never arrive */
1717                 while (atomic_read(&txq->used)) {
1718                         sent_skb = txo->sent_skb_list[txq->tail];
1719                         end_idx = txq->tail;
1720                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1721                                                    &dummy_wrb);
1722                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1723                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1724                         atomic_sub(num_wrbs, &txq->used);
1725                 }
1726         }
1727 }
1728
1729 static void be_evt_queues_destroy(struct be_adapter *adapter)
1730 {
1731         struct be_eq_obj *eqo;
1732         int i;
1733
1734         for_all_evt_queues(adapter, eqo, i) {
1735                 if (eqo->q.created) {
1736                         be_eq_clean(eqo);
1737                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1738                 }
1739                 be_queue_free(adapter, &eqo->q);
1740         }
1741 }
1742
1743 static int be_evt_queues_create(struct be_adapter *adapter)
1744 {
1745         struct be_queue_info *eq;
1746         struct be_eq_obj *eqo;
1747         int i, rc;
1748
1749         adapter->num_evt_qs = num_irqs(adapter);
1750
1751         for_all_evt_queues(adapter, eqo, i) {
1752                 eqo->adapter = adapter;
1753                 eqo->tx_budget = BE_TX_BUDGET;
1754                 eqo->idx = i;
1755                 eqo->max_eqd = BE_MAX_EQD;
1756                 eqo->enable_aic = true;
1757
1758                 eq = &eqo->q;
1759                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1760                                         sizeof(struct be_eq_entry));
1761                 if (rc)
1762                         return rc;
1763
1764                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1765                 if (rc)
1766                         return rc;
1767         }
1768         return 0;
1769 }
1770
1771 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1772 {
1773         struct be_queue_info *q;
1774
1775         q = &adapter->mcc_obj.q;
1776         if (q->created)
1777                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1778         be_queue_free(adapter, q);
1779
1780         q = &adapter->mcc_obj.cq;
1781         if (q->created)
1782                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1783         be_queue_free(adapter, q);
1784 }
1785
1786 /* Must be called only after TX qs are created as MCC shares TX EQ */
1787 static int be_mcc_queues_create(struct be_adapter *adapter)
1788 {
1789         struct be_queue_info *q, *cq;
1790
1791         cq = &adapter->mcc_obj.cq;
1792         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1793                         sizeof(struct be_mcc_compl)))
1794                 goto err;
1795
1796         /* Use the default EQ for MCC completions */
1797         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1798                 goto mcc_cq_free;
1799
1800         q = &adapter->mcc_obj.q;
1801         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1802                 goto mcc_cq_destroy;
1803
1804         if (be_cmd_mccq_create(adapter, q, cq))
1805                 goto mcc_q_free;
1806
1807         return 0;
1808
1809 mcc_q_free:
1810         be_queue_free(adapter, q);
1811 mcc_cq_destroy:
1812         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1813 mcc_cq_free:
1814         be_queue_free(adapter, cq);
1815 err:
1816         return -1;
1817 }
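
/* The error path above is the usual kernel unwind ladder: each label
 * releases exactly what was set up before the failing step, in reverse
 * order. A minimal sketch of the shape, with hypothetical helpers:
 *
 *	if (alloc_a())
 *		goto err;
 *	if (alloc_b())
 *		goto free_a;
 *	if (alloc_c())
 *		goto free_b;
 *	return 0;
 * free_b:
 *	free_b();
 * free_a:
 *	free_a();
 * err:
 *	return -1;
 */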
1818
1819 static void be_tx_queues_destroy(struct be_adapter *adapter)
1820 {
1821         struct be_queue_info *q;
1822         struct be_tx_obj *txo;
1823         u8 i;
1824
1825         for_all_tx_queues(adapter, txo, i) {
1826                 q = &txo->q;
1827                 if (q->created)
1828                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1829                 be_queue_free(adapter, q);
1830
1831                 q = &txo->cq;
1832                 if (q->created)
1833                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1834                 be_queue_free(adapter, q);
1835         }
1836 }
1837
1838 static int be_num_txqs_want(struct be_adapter *adapter)
1839 {
1840         if (sriov_want(adapter) || be_is_mc(adapter) ||
1841             lancer_chip(adapter) || !be_physfn(adapter) ||
1842             adapter->generation == BE_GEN2)
1843                 return 1;
1844         else
1845                 return MAX_TX_QS;
1846 }
1847
1848 static int be_tx_cqs_create(struct be_adapter *adapter)
1849 {
1850         struct be_queue_info *cq, *eq;
1851         int status;
1852         struct be_tx_obj *txo;
1853         u8 i;
1854
1855         adapter->num_tx_qs = be_num_txqs_want(adapter);
1856         if (adapter->num_tx_qs != MAX_TX_QS) {
1857                 rtnl_lock();
1858                 netif_set_real_num_tx_queues(adapter->netdev,
1859                         adapter->num_tx_qs);
1860                 rtnl_unlock();
1861         }
1862
1863         for_all_tx_queues(adapter, txo, i) {
1864                 cq = &txo->cq;
1865                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1866                                         sizeof(struct be_eth_tx_compl));
1867                 if (status)
1868                         return status;
1869
1870                 /* If num_evt_qs is less than num_tx_qs, then more than
1871                  * one txq shares an eq
1872                  */
1873                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1874                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1875                 if (status)
1876                         return status;
1877         }
1878         return 0;
1879 }
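
/* The i % num_evt_qs mapping above spreads the TX CQs round-robin over
 * the event queues. For example, with num_tx_qs == 8 and
 * num_evt_qs == 4:
 *
 *	txq 0, 4 -> eq0;  txq 1, 5 -> eq1;
 *	txq 2, 6 -> eq2;  txq 3, 7 -> eq3
 *
 * be_poll() walks the same stride (i = eqo->idx; i += num_evt_qs), so
 * each EQ services exactly the TXQs that were mapped onto it here.
 */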
1880
1881 static int be_tx_qs_create(struct be_adapter *adapter)
1882 {
1883         struct be_tx_obj *txo;
1884         int i, status;
1885
1886         for_all_tx_queues(adapter, txo, i) {
1887                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1888                                         sizeof(struct be_eth_wrb));
1889                 if (status)
1890                         return status;
1891
1892                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1893                 if (status)
1894                         return status;
1895         }
1896
1897         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1898                  adapter->num_tx_qs);
1899         return 0;
1900 }
1901
1902 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1903 {
1904         struct be_queue_info *q;
1905         struct be_rx_obj *rxo;
1906         int i;
1907
1908         for_all_rx_queues(adapter, rxo, i) {
1909                 q = &rxo->cq;
1910                 if (q->created)
1911                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1912                 be_queue_free(adapter, q);
1913         }
1914 }
1915
1916 static int be_rx_cqs_create(struct be_adapter *adapter)
1917 {
1918         struct be_queue_info *eq, *cq;
1919         struct be_rx_obj *rxo;
1920         int rc, i;
1921
1922         /* We'll create as many RSS rings as there are irqs, plus one default
1923          * (non-RSS) ring. With only one irq there's no use creating RSS
1924          * rings, so just the default RX queue is created. */
1925         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1926                                 num_irqs(adapter) + 1 : 1;
1927         if (adapter->num_rx_qs != MAX_RX_QS) {
1928                 rtnl_lock();
1929                 netif_set_real_num_rx_queues(adapter->netdev,
1930                                              adapter->num_rx_qs);
1931                 rtnl_unlock();
1932         }
1933
1934         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1935         for_all_rx_queues(adapter, rxo, i) {
1936                 rxo->adapter = adapter;
1937                 cq = &rxo->cq;
1938                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1939                                 sizeof(struct be_eth_rx_compl));
1940                 if (rc)
1941                         return rc;
1942
1943                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1944                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1945                 if (rc)
1946                         return rc;
1947         }
1948
1949         dev_info(&adapter->pdev->dev,
1950                  "created %d RSS queue(s) and 1 default RX queue\n",
1951                  adapter->num_rx_qs - 1);
1952         return 0;
1953 }
1954
1955 static irqreturn_t be_intx(int irq, void *dev)
1956 {
1957         struct be_adapter *adapter = dev;
1958         int num_evts;
1959
1960         /* With INTx only one EQ is used */
1961         num_evts = event_handle(&adapter->eq_obj[0]);
1962         if (num_evts)
1963                 return IRQ_HANDLED;
1964         else
1965                 return IRQ_NONE;
1966 }
1967
1968 static irqreturn_t be_msix(int irq, void *dev)
1969 {
1970         struct be_eq_obj *eqo = dev;
1971
1972         event_handle(eqo);
1973         return IRQ_HANDLED;
1974 }
1975
1976 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1977 {
1978         return rxcp->tcpf && !rxcp->err;
1979 }
1980
1981 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1982                         int budget)
1983 {
1984         struct be_adapter *adapter = rxo->adapter;
1985         struct be_queue_info *rx_cq = &rxo->cq;
1986         struct be_rx_compl_info *rxcp;
1987         u32 work_done;
1988
1989         for (work_done = 0; work_done < budget; work_done++) {
1990                 rxcp = be_rx_compl_get(rxo);
1991                 if (!rxcp)
1992                         break;
1993
1994                 /* Is it a flush compl that has no data? */
1995                 if (unlikely(rxcp->num_rcvd == 0))
1996                         goto loop_continue;
1997
1998                 /* Discard a compl with partial DMA (Lancer B0) */
1999                 if (unlikely(!rxcp->pkt_size)) {
2000                         be_rx_compl_discard(rxo, rxcp);
2001                         goto loop_continue;
2002                 }
2003
2004                 /* On BE drop pkts that arrive due to imperfect filtering in
2005                  * promiscuous mode on some SKUs
2006                  */
2007                 if (unlikely(rxcp->port != adapter->port_num &&
2008                                 !lancer_chip(adapter))) {
2009                         be_rx_compl_discard(rxo, rxcp);
2010                         goto loop_continue;
2011                 }
2012
2013                 if (do_gro(rxcp))
2014                         be_rx_compl_process_gro(rxo, napi, rxcp);
2015                 else
2016                         be_rx_compl_process(rxo, rxcp);
2017 loop_continue:
2018                 be_rx_stats_update(rxo, rxcp);
2019         }
2020
2021         if (work_done) {
2022                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2023
2024                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2025                         be_post_rx_frags(rxo, GFP_ATOMIC);
2026         }
2027
2028         return work_done;
2029 }
2030
2031 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2032                           int budget, int idx)
2033 {
2034         struct be_eth_tx_compl *txcp;
2035         int num_wrbs = 0, work_done;
2036
2037         for (work_done = 0; work_done < budget; work_done++) {
2038                 txcp = be_tx_compl_get(&txo->cq);
2039                 if (!txcp)
2040                         break;
2041                 num_wrbs += be_tx_compl_process(adapter, txo,
2042                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2043                                         wrb_index, txcp));
2044         }
2045
2046         if (work_done) {
2047                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2048                 atomic_sub(num_wrbs, &txo->q.used);
2049
2050                 /* As Tx wrbs have been freed up, wake up netdev queue
2051                  * if it was stopped due to lack of tx wrbs.  */
2052                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2053                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2054                         netif_wake_subqueue(adapter->netdev, idx);
2055                 }
2056
2057                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2058                 tx_stats(txo)->tx_compl += work_done;
2059                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2060         }
2061         return (work_done < budget); /* Done */
2062 }
2063
2064 int be_poll(struct napi_struct *napi, int budget)
2065 {
2066         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067         struct be_adapter *adapter = eqo->adapter;
2068         int max_work = 0, work, i;
2069         bool tx_done;
2070
2071         /* Process all TXQs serviced by this EQ */
2072         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2074                                         eqo->tx_budget, i);
2075                 if (!tx_done)
2076                         max_work = budget;
2077         }
2078
2079         /* This loop will iterate twice for EQ0, for which completions of
2080          * the last RXQ (the default one) are also processed here.
2081          * For the other EQs the loop iterates only once.
2082          */
2083         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2084                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2085                 max_work = max(work, max_work);
2086         }
2087
2088         if (is_mcc_eqo(eqo))
2089                 be_process_mcc(adapter);
2090
2091         if (max_work < budget) {
2092                 napi_complete(napi);
2093                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2094         } else {
2095                 /* As we'll continue in polling mode, count and clear events */
2096                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2097         }
2098         return max_work;
2099 }
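
/* be_poll() follows the standard NAPI contract: when less than the
 * budget was consumed, napi_complete() is called and the EQ re-armed;
 * when the budget was exhausted, the EQ stays disarmed and the full
 * budget is returned so the core keeps polling. A minimal sketch of
 * that contract, with hypothetical process_rx()/rearm_irq() helpers:
 *
 *	static int poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = process_rx(napi, budget); // up to budget pkts
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			rearm_irq(napi);
 *		}
 *		return done;
 *	}
 */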
2100
2101 void be_detect_error(struct be_adapter *adapter)
2102 {
2103         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2104         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2105         u32 i;
2106
2107         if (be_crit_error(adapter))
2108                 return;
2109
2110         if (lancer_chip(adapter)) {
2111                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2112                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2113                         sliport_err1 = ioread32(adapter->db +
2114                                         SLIPORT_ERROR1_OFFSET);
2115                         sliport_err2 = ioread32(adapter->db +
2116                                         SLIPORT_ERROR2_OFFSET);
2117                 }
2118         } else {
2119                 pci_read_config_dword(adapter->pdev,
2120                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2121                 pci_read_config_dword(adapter->pdev,
2122                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2123                 pci_read_config_dword(adapter->pdev,
2124                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2125                 pci_read_config_dword(adapter->pdev,
2126                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2127
2128                 ue_lo = (ue_lo & ~ue_lo_mask);
2129                 ue_hi = (ue_hi & ~ue_hi_mask);
2130         }
2131
2132         /* On certain platforms BE hardware can indicate spurious UEs.
2133          * Allow the h/w to stop working completely in case of a real UE;
2134          * hence hw_error is not set for UE detection, only for the
2135          * SLIPORT errors checked below. */
2136         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2137                 adapter->hw_error = true;
2138                 dev_err(&adapter->pdev->dev,
2139                         "Error detected in the card\n");
2140                 dev_err(&adapter->pdev->dev,
2141                         "ERR: sliport status 0x%x\n", sliport_status);
2142                 dev_err(&adapter->pdev->dev,
2143                         "ERR: sliport error1 0x%x\n", sliport_err1);
2144                 dev_err(&adapter->pdev->dev,
2145                         "ERR: sliport error2 0x%x\n", sliport_err2);
2146         }
2150
2151         if (ue_lo) {
2152                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2153                         if (ue_lo & 1)
2154                                 dev_err(&adapter->pdev->dev,
2155                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2156                 }
2157         }
2158
2159         if (ue_hi) {
2160                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2161                         if (ue_hi & 1)
2162                                 dev_err(&adapter->pdev->dev,
2163                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2164                 }
2165         }
2167 }
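
/* The decode loops above walk each unmasked UE register one bit at a
 * time. For example, ue_lo == 0x5 after masking logs bits 0 and 2:
 *
 *	iteration 0: ue_lo & 1 -> "UE: CEV bit set",  ue_lo becomes 0x2
 *	iteration 1: bit clear,                       ue_lo becomes 0x1
 *	iteration 2: ue_lo & 1 -> "UE: DBUF bit set", ue_lo becomes 0
 *
 * and the loop terminates as soon as the shifted value reads zero.
 */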
2168
2169 static void be_msix_disable(struct be_adapter *adapter)
2170 {
2171         if (msix_enabled(adapter)) {
2172                 pci_disable_msix(adapter->pdev);
2173                 adapter->num_msix_vec = 0;
2174         }
2175 }
2176
2177 static uint be_num_rss_want(struct be_adapter *adapter)
2178 {
2179         u32 num = 0;
2180         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2181              !sriov_want(adapter) && be_physfn(adapter)) {
2182                 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2183                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2184         }
2185         return num;
2186 }
2187
2188 static void be_msix_enable(struct be_adapter *adapter)
2189 {
2190 #define BE_MIN_MSIX_VECTORS             1
2191         int i, status, num_vec, num_roce_vec = 0;
2192         struct device *dev = &adapter->pdev->dev;
2193
2194         /* If RSS queues are not used, we still need a vec for the default RX Q */
2195         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2196         if (be_roce_supported(adapter)) {
2197                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2198                                         (num_online_cpus() + 1));
2199                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2200                 num_vec += num_roce_vec;
2201                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2202         }
2203         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2204
2205         for (i = 0; i < num_vec; i++)
2206                 adapter->msix_entries[i].entry = i;
2207
2208         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2209         if (status == 0) {
2210                 goto done;
2211         } else if (status >= BE_MIN_MSIX_VECTORS) {
2212                 num_vec = status;
2213                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2214                                 num_vec) == 0)
2215                         goto done;
2216         }
2217
2218         dev_warn(dev, "MSIx enable failed\n");
2219         return;
2220 done:
2221         if (be_roce_supported(adapter)) {
2222                 if (num_vec > num_roce_vec) {
2223                         adapter->num_msix_vec = num_vec - num_roce_vec;
2224                         adapter->num_msix_roce_vec =
2225                                 num_vec - adapter->num_msix_vec;
2226                 } else {
2227                         adapter->num_msix_vec = num_vec;
2228                         adapter->num_msix_roce_vec = 0;
2229                 }
2230         } else
2231                 adapter->num_msix_vec = num_vec;
2232         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2234 }
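
/* The retry above relies on the semantics of the legacy
 * pci_enable_msix(): it returns 0 on success, a negative errno on hard
 * failure, and a positive count when fewer vectors are available than
 * requested, in which case the caller may retry with that count. For
 * example, asking for 8 vectors on a host that can only grant 4:
 *
 *	status = pci_enable_msix(pdev, entries, 8);	// returns 4
 *	if (status > 0)
 *		status = pci_enable_msix(pdev, entries, status); // 0
 */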
2235
2236 static inline int be_msix_vec_get(struct be_adapter *adapter,
2237                                 struct be_eq_obj *eqo)
2238 {
2239         return adapter->msix_entries[eqo->idx].vector;
2240 }
2241
2242 static int be_msix_register(struct be_adapter *adapter)
2243 {
2244         struct net_device *netdev = adapter->netdev;
2245         struct be_eq_obj *eqo;
2246         int status, i, vec;
2247
2248         for_all_evt_queues(adapter, eqo, i) {
2249                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2250                 vec = be_msix_vec_get(adapter, eqo);
2251                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2252                 if (status)
2253                         goto err_msix;
2254         }
2255
2256         return 0;
2257 err_msix:
2258         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2259                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2260         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2261                 status);
2262         be_msix_disable(adapter);
2263         return status;
2264 }
2265
2266 static int be_irq_register(struct be_adapter *adapter)
2267 {
2268         struct net_device *netdev = adapter->netdev;
2269         int status;
2270
2271         if (msix_enabled(adapter)) {
2272                 status = be_msix_register(adapter);
2273                 if (status == 0)
2274                         goto done;
2275                 /* INTx is not supported for VF */
2276                 if (!be_physfn(adapter))
2277                         return status;
2278         }
2279
2280         /* INTx */
2281         netdev->irq = adapter->pdev->irq;
2282         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2283                         adapter);
2284         if (status) {
2285                 dev_err(&adapter->pdev->dev,
2286                         "INTx request IRQ failed - err %d\n", status);
2287                 return status;
2288         }
2289 done:
2290         adapter->isr_registered = true;
2291         return 0;
2292 }
2293
2294 static void be_irq_unregister(struct be_adapter *adapter)
2295 {
2296         struct net_device *netdev = adapter->netdev;
2297         struct be_eq_obj *eqo;
2298         int i;
2299
2300         if (!adapter->isr_registered)
2301                 return;
2302
2303         /* INTx */
2304         if (!msix_enabled(adapter)) {
2305                 free_irq(netdev->irq, adapter);
2306                 goto done;
2307         }
2308
2309         /* MSIx */
2310         for_all_evt_queues(adapter, eqo, i)
2311                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2312
2313 done:
2314         adapter->isr_registered = false;
2315 }
2316
2317 static void be_rx_qs_destroy(struct be_adapter *adapter)
2318 {
2319         struct be_queue_info *q;
2320         struct be_rx_obj *rxo;
2321         int i;
2322
2323         for_all_rx_queues(adapter, rxo, i) {
2324                 q = &rxo->q;
2325                 if (q->created) {
2326                         be_cmd_rxq_destroy(adapter, q);
2327                         /* After the rxq is invalidated, wait for a grace time
2328                          * of 1ms for all dma to end and the flush compl to
2329                          * arrive
2330                          */
2331                         mdelay(1);
2332                         be_rx_cq_clean(rxo);
2333                 }
2334                 be_queue_free(adapter, q);
2335         }
2336 }
2337
2338 static int be_close(struct net_device *netdev)
2339 {
2340         struct be_adapter *adapter = netdev_priv(netdev);
2341         struct be_eq_obj *eqo;
2342         int i;
2343
2344         be_roce_dev_close(adapter);
2345
2346         be_async_mcc_disable(adapter);
2347
2348         if (!lancer_chip(adapter))
2349                 be_intr_set(adapter, false);
2350
2351         for_all_evt_queues(adapter, eqo, i) {
2352                 napi_disable(&eqo->napi);
2353                 if (msix_enabled(adapter))
2354                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2355                 else
2356                         synchronize_irq(netdev->irq);
2357                 be_eq_clean(eqo);
2358         }
2359
2360         be_irq_unregister(adapter);
2361
2362         /* Wait for all pending tx completions to arrive so that
2363          * all tx skbs are freed.
2364          */
2365         be_tx_compl_clean(adapter);
2366
2367         be_rx_qs_destroy(adapter);
2368         return 0;
2369 }
2370
2371 static int be_rx_qs_create(struct be_adapter *adapter)
2372 {
2373         struct be_rx_obj *rxo;
2374         int rc, i, j;
2375         u8 rsstable[128];
2376
2377         for_all_rx_queues(adapter, rxo, i) {
2378                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2379                                     sizeof(struct be_eth_rx_d));
2380                 if (rc)
2381                         return rc;
2382         }
2383
2384         /* The FW would like the default RXQ to be created first */
2385         rxo = default_rxo(adapter);
2386         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2387                                adapter->if_handle, false, &rxo->rss_id);
2388         if (rc)
2389                 return rc;
2390
2391         for_all_rss_queues(adapter, rxo, i) {
2392                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2393                                        rx_frag_size, adapter->if_handle,
2394                                        true, &rxo->rss_id);
2395                 if (rc)
2396                         return rc;
2397         }
2398
2399         if (be_multi_rxq(adapter)) {
2400                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2401                         for_all_rss_queues(adapter, rxo, i) {
2402                                 if ((j + i) >= 128)
2403                                         break;
2404                                 rsstable[j + i] = rxo->rss_id;
2405                         }
2406                 }
2407                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2408                 if (rc)
2409                         return rc;
2410         }
2411
2412         /* First time posting */
2413         for_all_rx_queues(adapter, rxo, i)
2414                 be_post_rx_frags(rxo, GFP_KERNEL);
2415         return 0;
2416 }
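
/* The RSS indirection table built above just repeats the RSS ring ids
 * round-robin across all 128 slots. For example, with four RSS rings
 * whose rss_ids are 1..4 (num_rx_qs == 5):
 *
 *	rsstable[] = 1, 2, 3, 4, 1, 2, 3, 4, ...	(128 entries)
 *
 * so the hardware's hash-modulo-128 lookup spreads flows evenly over
 * the RSS rings, while the default (non-RSS) queue stays out of the
 * table.
 */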
2417
2418 static int be_open(struct net_device *netdev)
2419 {
2420         struct be_adapter *adapter = netdev_priv(netdev);
2421         struct be_eq_obj *eqo;
2422         struct be_rx_obj *rxo;
2423         struct be_tx_obj *txo;
2424         u8 link_status;
2425         int status, i;
2426
2427         status = be_rx_qs_create(adapter);
2428         if (status)
2429                 goto err;
2430
2431         be_irq_register(adapter);
2432
2433         if (!lancer_chip(adapter))
2434                 be_intr_set(adapter, true);
2435
2436         for_all_rx_queues(adapter, rxo, i)
2437                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2438
2439         for_all_tx_queues(adapter, txo, i)
2440                 be_cq_notify(adapter, txo->cq.id, true, 0);
2441
2442         be_async_mcc_enable(adapter);
2443
2444         for_all_evt_queues(adapter, eqo, i) {
2445                 napi_enable(&eqo->napi);
2446                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2447         }
2448
2449         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2450         if (!status)
2451                 be_link_status_update(adapter, link_status);
2452
2453         be_roce_dev_open(adapter);
2454         return 0;
2455 err:
2456         be_close(adapter->netdev);
2457         return -EIO;
2458 }
2459
2460 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2461 {
2462         struct be_dma_mem cmd;
2463         int status = 0;
2464         u8 mac[ETH_ALEN];
2465
2466         memset(mac, 0, ETH_ALEN);
2467
2468         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2469         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2470                                     GFP_KERNEL);
2471         if (cmd.va == NULL)
2472                 return -1;
2473         memset(cmd.va, 0, cmd.size);
2474
2475         if (enable) {
2476                 status = pci_write_config_dword(adapter->pdev,
2477                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2478                 if (status) {
2479                         dev_err(&adapter->pdev->dev,
2480                                 "Could not enable Wake-on-lan\n");
2481                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2482                                           cmd.dma);
2483                         return status;
2484                 }
2485                 status = be_cmd_enable_magic_wol(adapter,
2486                                 adapter->netdev->dev_addr, &cmd);
2487                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2488                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2489         } else {
2490                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2491                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2492                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2493         }
2494
2495         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2496         return status;
2497 }
2498
2499 /*
2500  * Generate a seed MAC address from the PF MAC address using jhash.
2501  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2502  * These addresses are programmed in the ASIC by the PF and the VF driver
2503  * queries for the MAC address during its probe.
2504  */
2505 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2506 {
2507         u32 vf;
2508         int status = 0;
2509         u8 mac[ETH_ALEN];
2510         struct be_vf_cfg *vf_cfg;
2511
2512         be_vf_eth_addr_generate(adapter, mac);
2513
2514         for_all_vfs(adapter, vf_cfg, vf) {
2515                 if (lancer_chip(adapter)) {
2516                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2517                 } else {
2518                         status = be_cmd_pmac_add(adapter, mac,
2519                                                  vf_cfg->if_handle,
2520                                                  &vf_cfg->pmac_id, vf + 1);
2521                 }
2522
2523                 if (status)
2524                         dev_err(&adapter->pdev->dev,
2525                         "MAC address assignment failed for VF %d\n", vf);
2526                 else
2527                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2528
2529                 mac[5] += 1;
2530         }
2531         return status;
2532 }
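
/* Only the last octet of the seed is bumped per VF, so the addresses
 * come out consecutive. For example, with a hypothetical seed of
 * 02:00:c9:ab:cd:00:
 *
 *	VF0 -> 02:00:c9:ab:cd:00, VF1 -> 02:00:c9:ab:cd:01, ...
 *
 * Incrementing only mac[5] would wrap past 0xff, but num_vfs is capped
 * (see MAX_VFS in be_get_config()) far below 256, so the scheme stays
 * collision-free.
 */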
2533
2534 static void be_vf_clear(struct be_adapter *adapter)
2535 {
2536         struct be_vf_cfg *vf_cfg;
2537         u32 vf;
2538
2539         if (be_find_vfs(adapter, ASSIGNED)) {
2540                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2541                 goto done;
2542         }
2543
2544         for_all_vfs(adapter, vf_cfg, vf) {
2545                 if (lancer_chip(adapter))
2546                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2547                 else
2548                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2549                                         vf_cfg->pmac_id, vf + 1);
2550
2551                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2552         }
2553         pci_disable_sriov(adapter->pdev);
2554 done:
2555         kfree(adapter->vf_cfg);
2556         adapter->num_vfs = 0;
2557 }
2558
2559 static int be_clear(struct be_adapter *adapter)
2560 {
2561         int i = 1;
2562
2563         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2564                 cancel_delayed_work_sync(&adapter->work);
2565                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2566         }
2567
2568         if (sriov_enabled(adapter))
2569                 be_vf_clear(adapter);
2570
2571         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2572                 be_cmd_pmac_del(adapter, adapter->if_handle,
2573                         adapter->pmac_id[i], 0);
2574
2575         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2576
2577         be_mcc_queues_destroy(adapter);
2578         be_rx_cqs_destroy(adapter);
2579         be_tx_queues_destroy(adapter);
2580         be_evt_queues_destroy(adapter);
2581
2582         be_msix_disable(adapter);
2583         return 0;
2584 }
2585
2586 static int be_vf_setup_init(struct be_adapter *adapter)
2587 {
2588         struct be_vf_cfg *vf_cfg;
2589         int vf;
2590
2591         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2592                                   GFP_KERNEL);
2593         if (!adapter->vf_cfg)
2594                 return -ENOMEM;
2595
2596         for_all_vfs(adapter, vf_cfg, vf) {
2597                 vf_cfg->if_handle = -1;
2598                 vf_cfg->pmac_id = -1;
2599         }
2600         return 0;
2601 }
2602
2603 static int be_vf_setup(struct be_adapter *adapter)
2604 {
2605         struct be_vf_cfg *vf_cfg;
2606         struct device *dev = &adapter->pdev->dev;
2607         u32 cap_flags, en_flags, vf;
2608         u16 def_vlan, lnk_speed;
2609         int status, enabled_vfs;
2610
2611         enabled_vfs = be_find_vfs(adapter, ENABLED);
2612         if (enabled_vfs) {
2613                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2614                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2615                 return 0;
2616         }
2617
2618         if (num_vfs > adapter->dev_num_vfs) {
2619                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2620                          adapter->dev_num_vfs, num_vfs);
2621                 num_vfs = adapter->dev_num_vfs;
2622         }
2623
2624         status = pci_enable_sriov(adapter->pdev, num_vfs);
2625         if (!status) {
2626                 adapter->num_vfs = num_vfs;
2627         } else {
2628                 /* Platform doesn't support SRIOV though device supports it */
2629                 dev_warn(dev, "SRIOV enable failed\n");
2630                 return 0;
2631         }
2632
2633         status = be_vf_setup_init(adapter);
2634         if (status)
2635                 goto err;
2636
2637         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2638                                 BE_IF_FLAGS_MULTICAST;
2639         for_all_vfs(adapter, vf_cfg, vf) {
2640                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2641                                           &vf_cfg->if_handle, vf + 1);
2642                 if (status)
2643                         goto err;
2644         }
2645
2646         if (!enabled_vfs) {
2647                 status = be_vf_eth_addr_config(adapter);
2648                 if (status)
2649                         goto err;
2650         }
2651
2652         for_all_vfs(adapter, vf_cfg, vf) {
2653                 lnk_speed = 1000;
2654                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2655                 if (status)
2656                         goto err;
2657                 vf_cfg->tx_rate = lnk_speed * 10;
2658
2659                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2660                                 vf + 1, vf_cfg->if_handle);
2661                 if (status)
2662                         goto err;
2663                 vf_cfg->def_vid = def_vlan;
2664         }
2665         return 0;
2666 err:
2667         return status;
2668 }
2669
2670 static void be_setup_init(struct be_adapter *adapter)
2671 {
2672         adapter->vlan_prio_bmap = 0xff;
2673         adapter->phy.link_speed = -1;
2674         adapter->if_handle = -1;
2675         adapter->be3_native = false;
2676         adapter->promiscuous = false;
2677         adapter->eq_next_idx = 0;
2678 }
2679
2680 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2681                            bool *active_mac, u32 *pmac_id)
2682 {
2683         int status = 0;
2684
2685         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2686                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2687                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2688                         *active_mac = true;
2689                 else
2690                         *active_mac = false;
2691
2692                 return status;
2693         }
2694
2695         if (lancer_chip(adapter)) {
2696                 status = be_cmd_get_mac_from_list(adapter, mac,
2697                                                   active_mac, pmac_id, 0);
2698                 if (*active_mac) {
2699                         status = be_cmd_mac_addr_query(adapter, mac, false,
2700                                                        if_handle, *pmac_id);
2701                 }
2702         } else if (be_physfn(adapter)) {
2703                 /* For BE3, for PF get permanent MAC */
2704                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2705                 *active_mac = false;
2706         } else {
2707                 /* For BE3, for VF get soft MAC assigned by PF */
2708                 status = be_cmd_mac_addr_query(adapter, mac, false,
2709                                                if_handle, 0);
2710                 *active_mac = true;
2711         }
2712         return status;
2713 }
2714
2715 /* Routine to query per-function resource limits */
2716 static int be_get_config(struct be_adapter *adapter)
2717 {
2718         int pos;
2719         u16 dev_num_vfs;
2720
2721         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2722         if (pos) {
2723                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2724                                      &dev_num_vfs);
2725                 if (!lancer_chip(adapter))
2726                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727                 adapter->dev_num_vfs = dev_num_vfs;
2728         }
2729         return 0;
2730 }
2731
2732 static int be_setup(struct be_adapter *adapter)
2733 {
2734         struct device *dev = &adapter->pdev->dev;
2735         u32 cap_flags, en_flags;
2736         u32 tx_fc, rx_fc;
2737         int status;
2738         u8 mac[ETH_ALEN];
2739         bool active_mac;
2740
2741         be_setup_init(adapter);
2742
2743         be_get_config(adapter);
2744
2745         be_cmd_req_native_mode(adapter);
2746
2747         be_msix_enable(adapter);
2748
2749         status = be_evt_queues_create(adapter);
2750         if (status)
2751                 goto err;
2752
2753         status = be_tx_cqs_create(adapter);
2754         if (status)
2755                 goto err;
2756
2757         status = be_rx_cqs_create(adapter);
2758         if (status)
2759                 goto err;
2760
2761         status = be_mcc_queues_create(adapter);
2762         if (status)
2763                 goto err;
2764
2765         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2769
2770         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2771                 cap_flags |= BE_IF_FLAGS_RSS;
2772                 en_flags |= BE_IF_FLAGS_RSS;
2773         }
2774
2775         if (lancer_chip(adapter) && !be_physfn(adapter)) {
2776                 en_flags = BE_IF_FLAGS_UNTAGGED |
2777                             BE_IF_FLAGS_BROADCAST |
2778                             BE_IF_FLAGS_MULTICAST;
2779                 cap_flags = en_flags;
2780         }
2781
2782         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2783                                   &adapter->if_handle, 0);
2784         if (status != 0)
2785                 goto err;
2786
2787         memset(mac, 0, ETH_ALEN);
2788         active_mac = false;
2789         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2790                                  &active_mac, &adapter->pmac_id[0]);
2791         if (status != 0)
2792                 goto err;
2793
2794         if (!active_mac) {
2795                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2796                                          &adapter->pmac_id[0], 0);
2797                 if (status != 0)
2798                         goto err;
2799         }
2800
2801         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2802                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2803                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2804         }
2805
2806         status = be_tx_qs_create(adapter);
2807         if (status)
2808                 goto err;
2809
2810         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2811
2812         if (adapter->vlans_added)
2813                 be_vid_config(adapter);
2814
2815         be_set_rx_mode(adapter->netdev);
2816
2817         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2818
2819         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2820                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2821                                         adapter->rx_fc);
2822
2823         if (be_physfn(adapter) && num_vfs) {
2824                 if (adapter->dev_num_vfs)
2825                         be_vf_setup(adapter);
2826                 else
2827                         dev_warn(dev, "device doesn't support SRIOV\n");
2828         }
2829
2830         be_cmd_get_phy_info(adapter);
2831         if (be_pause_supported(adapter))
2832                 adapter->phy.fc_autoneg = 1;
2833
2834         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2835         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2836         return 0;
2837 err:
2838         be_clear(adapter);
2839         return status;
2840 }
2841
2842 #ifdef CONFIG_NET_POLL_CONTROLLER
2843 static void be_netpoll(struct net_device *netdev)
2844 {
2845         struct be_adapter *adapter = netdev_priv(netdev);
2846         struct be_eq_obj *eqo;
2847         int i;
2848
2849         for_all_evt_queues(adapter, eqo, i)
2850                 event_handle(eqo);
2851
2853 }
2854 #endif
2855
2856 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2857 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2858
2859 static bool be_flash_redboot(struct be_adapter *adapter,
2860                         const u8 *p, u32 img_start, int image_size,
2861                         int hdr_size)
2862 {
2863         u32 crc_offset;
2864         u8 flashed_crc[4];
2865         int status;
2866
2867         crc_offset = hdr_size + img_start + image_size - 4;
2868
2869         p += crc_offset;
2870
2871         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2872                         (image_size - 4));
2873         if (status) {
2874                 dev_err(&adapter->pdev->dev,
2875                 "could not get crc from flash, not flashing redboot\n");
2876                 return false;
2877         }
2878
2879         /* update redboot only if the flashed crc does not match */
2880         return memcmp(flashed_crc, p, 4) != 0;
2884 }
2885
2886 static bool phy_flashing_required(struct be_adapter *adapter)
2887 {
2888         return (adapter->phy.phy_type == TN_8022 &&
2889                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2890 }
2891
2892 static bool is_comp_in_ufi(struct be_adapter *adapter,
2893                            struct flash_section_info *fsec, int type)
2894 {
2895         int i = 0, img_type = 0;
2896         struct flash_section_info_g2 *fsec_g2 = NULL;
2897
2898         if (adapter->generation != BE_GEN3)
2899                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900
2901         for (i = 0; i < MAX_FLASH_COMP; i++) {
2902                 if (fsec_g2)
2903                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2904                 else
2905                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2906
2907                 if (img_type == type)
2908                         return true;
2909         }
2910         return false;
2912 }
2913
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
                                                int header_size,
                                                const struct firmware *fw)
2917 {
2918         struct flash_section_info *fsec = NULL;
2919         const u8 *p = fw->data;
2920
2921         p += header_size;
        while (p + sizeof(flash_cookie) <= fw->data + fw->size) {
2923                 fsec = (struct flash_section_info *)p;
2924                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2925                         return fsec;
                p += 32;        /* scan in 32-byte strides for the cookie */
2927         }
2928         return NULL;
2929 }
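
/*
 * The scan above assumes the section-info header starts on a 32-byte
 * boundary somewhere past the file headers.  A self-contained sketch of
 * the same search with an explicit bound (illustrative names only):
 */
static const u8 *find_cookie(const u8 *buf, size_t len,
                             const void *cookie, size_t cookie_len)
{
        const u8 *p;

        /* step in 32-byte strides; never compare past the buffer end */
        for (p = buf; p + cookie_len <= buf + len; p += 32) {
                if (!memcmp(p, cookie, cookie_len))
                        return p;
        }
        return NULL;
}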
2930
2931 static int be_flash_data(struct be_adapter *adapter,
2932                          const struct firmware *fw,
2933                          struct be_dma_mem *flash_cmd,
2934                          int num_of_images)
2936 {
2937         int status = 0, i, filehdr_size = 0;
2938         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939         u32 total_bytes = 0, flash_op;
2940         int num_bytes;
2941         const u8 *p = fw->data;
2942         struct be_cmd_write_flashrom *req = flash_cmd->va;
2943         const struct flash_comp *pflashcomp;
2944         int num_comp, hdr_size;
2945         struct flash_section_info *fsec = NULL;
2946
        static const struct flash_comp gen3_flash_types[] = {
2948                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2949                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2950                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2951                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2952                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2953                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2954                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2955                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2956                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2957                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2958                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2959                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2960                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2961                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2962                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2963                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2964                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2965                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2966                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2967                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2968         };
2969
        static const struct flash_comp gen2_flash_types[] = {
2971                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2972                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2973                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2974                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2975                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2976                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2977                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2978                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2979                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2980                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2981                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2982                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2983                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2984                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2985                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2986                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2987         };
2988
2989         if (adapter->generation == BE_GEN3) {
2990                 pflashcomp = gen3_flash_types;
2991                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2992                 num_comp = ARRAY_SIZE(gen3_flash_types);
2993         } else {
2994                 pflashcomp = gen2_flash_types;
2995                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2996                 num_comp = ARRAY_SIZE(gen2_flash_types);
2997         }
        /* Get flash section info */
2999         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3000         if (!fsec) {
3001                 dev_err(&adapter->pdev->dev,
                        "Invalid cookie. UFI image may be corrupted\n");
                return -EINVAL;
3004         }
3005         for (i = 0; i < num_comp; i++) {
3006                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3007                         continue;
3008
3009                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3010                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3011                         continue;
3012
                if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
                    !phy_flashing_required(adapter))
                        continue;
3017
                hdr_size = filehdr_size + img_hdrs_size;
3020
3021                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3022                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3023                                        pflashcomp[i].size, hdr_size)))
3024                         continue;
3025
3026                 /* Flash the component */
3027                 p = fw->data;
3028                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029                 if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -EINVAL;
3031                 total_bytes = pflashcomp[i].size;
3032                 while (total_bytes) {
                        num_bytes = min_t(u32, total_bytes, 32 * 1024);
3037                         total_bytes -= num_bytes;
3038                         if (!total_bytes) {
3039                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040                                         flash_op = FLASHROM_OPER_PHY_FLASH;
3041                                 else
3042                                         flash_op = FLASHROM_OPER_FLASH;
3043                         } else {
3044                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3046                                 else
3047                                         flash_op = FLASHROM_OPER_SAVE;
3048                         }
3049                         memcpy(req->params.data_buf, p, num_bytes);
3050                         p += num_bytes;
3051                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3052                                 pflashcomp[i].optype, flash_op, num_bytes);
3053                         if (status) {
                                if (status == ILLEGAL_IOCTL_REQ &&
                                    pflashcomp[i].optype == OPTYPE_PHY_FW)
                                        break;
3058                                 dev_err(&adapter->pdev->dev,
3059                                         "cmd to write to flash rom failed.\n");
                                        return -EIO;
3061                         }
3062                 }
3063         }
3064         return 0;
3065 }
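
/*
 * The flashing loop above streams each component in 32KB chunks: every
 * chunk but the last is issued with a SAVE opcode and only the final
 * chunk triggers the FLASH (or PHY_FLASH) operation.  A minimal sketch
 * of that accounting; the callback name is an assumption:
 */
static int stream_in_chunks(const u8 *img, u32 total,
                            int (*send)(const u8 *buf, u32 len, bool last))
{
        u32 chunk;
        int status;

        while (total) {
                chunk = min_t(u32, total, 32 * 1024);
                total -= chunk;
                /* 'last' selects FLASH vs SAVE in the real command */
                status = send(img, chunk, total == 0);
                if (status)
                        return status;
                img += chunk;
        }
        return 0;
}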
3066
3067 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3068 {
        if (!fhdr)
3070                 return 0;
3071         if (fhdr->build[0] == '3')
3072                 return BE_GEN3;
3073         else if (fhdr->build[0] == '2')
3074                 return BE_GEN2;
3075         else
3076                 return 0;
3077 }
3078
3079 static int lancer_wait_idle(struct be_adapter *adapter)
3080 {
3081 #define SLIPORT_IDLE_TIMEOUT 30
3082         u32 reg_val;
3083         int status = 0, i;
3084
3085         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3086                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3087                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3088                         break;
3089
3090                 ssleep(1);
3091         }
3092
3093         if (i == SLIPORT_IDLE_TIMEOUT)
                status = -ETIMEDOUT;
3095
3096         return status;
3097 }
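
/*
 * lancer_wait_idle() is the standard bounded-poll idiom: sample a status
 * register once a second and give up after a fixed number of attempts.
 * The shape, reduced to a sketch with an assumed predicate callback:
 */
static int poll_until_idle(bool (*is_idle)(void *ctx), void *ctx, int max_secs)
{
        int i;

        for (i = 0; i < max_secs; i++) {
                if (is_idle(ctx))
                        return 0;       /* condition met */
                ssleep(1);              /* sleep between samples */
        }
        return -ETIMEDOUT;
}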
3098
3099 static int lancer_fw_reset(struct be_adapter *adapter)
3100 {
3101         int status = 0;
3102
3103         status = lancer_wait_idle(adapter);
3104         if (status)
3105                 return status;
3106
3107         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3108                   PHYSDEV_CONTROL_OFFSET);
3109
3110         return status;
3111 }
3112
3113 static int lancer_fw_download(struct be_adapter *adapter,
3114                                 const struct firmware *fw)
3115 {
3116 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3117 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3118         struct be_dma_mem flash_cmd;
3119         const u8 *data_ptr = NULL;
3120         u8 *dest_image_ptr = NULL;
3121         size_t image_size = 0;
3122         u32 chunk_size = 0;
3123         u32 data_written = 0;
3124         u32 offset = 0;
3125         int status = 0;
3126         u8 add_status = 0;
3127         u8 change_status;
3128
3129         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3130                 dev_err(&adapter->pdev->dev,
                        "FW image not properly aligned; "
                        "length must be 4-byte aligned\n");
3133                 status = -EINVAL;
3134                 goto lancer_fw_exit;
3135         }
3136
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) +
                         LANCER_FW_DOWNLOAD_CHUNK;
3139         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3140                                                 &flash_cmd.dma, GFP_KERNEL);
3141         if (!flash_cmd.va) {
3142                 status = -ENOMEM;
3143                 dev_err(&adapter->pdev->dev,
3144                         "Memory allocation failure while flashing\n");
3145                 goto lancer_fw_exit;
3146         }
3147
3148         dest_image_ptr = flash_cmd.va +
3149                                 sizeof(struct lancer_cmd_req_write_object);
3150         image_size = fw->size;
3151         data_ptr = fw->data;
3152
3153         while (image_size) {
3154                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3155
3156                 /* Copy the image chunk content. */
3157                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3158
3159                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3160                                                  chunk_size, offset,
3161                                                  LANCER_FW_DOWNLOAD_LOCATION,
3162                                                  &data_written, &change_status,
3163                                                  &add_status);
3164                 if (status)
3165                         break;
3166
3167                 offset += data_written;
3168                 data_ptr += data_written;
3169                 image_size -= data_written;
3170         }
3171
3172         if (!status) {
3173                 /* Commit the FW written */
3174                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3175                                                  0, offset,
3176                                                  LANCER_FW_DOWNLOAD_LOCATION,
3177                                                  &data_written, &change_status,
3178                                                  &add_status);
3179         }
3180
3181         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3182                                 flash_cmd.dma);
3183         if (status) {
3184                 dev_err(&adapter->pdev->dev,
3185                         "Firmware load error. "
3186                         "Status code: 0x%x Additional Status: 0x%x\n",
3187                         status, add_status);
3188                 goto lancer_fw_exit;
3189         }
3190
3191         if (change_status == LANCER_FW_RESET_NEEDED) {
3192                 status = lancer_fw_reset(adapter);
3193                 if (status) {
3194                         dev_err(&adapter->pdev->dev,
3195                                 "Adapter busy for FW reset.\n"
3196                                 "New FW will not be active.\n");
3197                         goto lancer_fw_exit;
3198                 }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
                dev_err(&adapter->pdev->dev,
                        "System reboot required for new FW to be active\n");
        }
3204
3205         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3206 lancer_fw_exit:
3207         return status;
3208 }
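
/*
 * The download loop above advances by what the device reports written
 * (data_written), not by the requested chunk size, and a final zero-length
 * write commits the image.  A condensed sketch; the callback is assumed:
 */
static int lancer_download_shape(const u8 *img, size_t size,
                                 int (*wr)(const u8 *buf, u32 len, u32 off,
                                           u32 *written))
{
        u32 offset = 0, written = 0;
        int status;

        while (size) {
                u32 chunk = min_t(u32, size, 32 * 1024);

                status = wr(img, chunk, offset, &written);
                if (status)
                        return status;
                /* advance by what the device actually accepted */
                offset += written;
                img += written;
                size -= written;
        }
        /* zero-length write at the final offset commits the image */
        return wr(img, 0, offset, &written);
}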
3209
static int be_fw_download(struct be_adapter *adapter,
                          const struct firmware *fw)
3211 {
3212         struct flash_file_hdr_g2 *fhdr;
3213         struct flash_file_hdr_g3 *fhdr3;
3214         struct image_hdr *img_hdr_ptr = NULL;
3215         struct be_dma_mem flash_cmd;
3216         const u8 *p;
3217         int status = 0, i = 0, num_imgs = 0;
3218
3219         p = fw->data;
3220         fhdr = (struct flash_file_hdr_g2 *) p;
3221
3222         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3223         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224                                           &flash_cmd.dma, GFP_KERNEL);
3225         if (!flash_cmd.va) {
3226                 status = -ENOMEM;
3227                 dev_err(&adapter->pdev->dev,
3228                         "Memory allocation failure while flashing\n");
3229                 goto be_fw_exit;
3230         }
3231
3232         if ((adapter->generation == BE_GEN3) &&
3233                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3234                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3235                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3236                 for (i = 0; i < num_imgs; i++) {
3237                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3238                                         (sizeof(struct flash_file_hdr_g3) +
3239                                          i * sizeof(struct image_hdr)));
3240                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3241                                 status = be_flash_data(adapter, fw, &flash_cmd,
3242                                                         num_imgs);
3243                 }
3244         } else if ((adapter->generation == BE_GEN2) &&
3245                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3246                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247         } else {
3248                 dev_err(&adapter->pdev->dev,
3249                         "UFI and Interface are not compatible for flashing\n");
                status = -EINVAL;
3251         }
3252
3253         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254                           flash_cmd.dma);
3255         if (status) {
3256                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3257                 goto be_fw_exit;
3258         }
3259
3260         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3261
3262 be_fw_exit:
3263         return status;
3264 }
3265
3266 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3267 {
3268         const struct firmware *fw;
3269         int status;
3270
3271         if (!netif_running(adapter->netdev)) {
3272                 dev_err(&adapter->pdev->dev,
3273                         "Firmware load not allowed (interface is down)\n");
                return -ENETDOWN;
3275         }
3276
3277         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3278         if (status)
3279                 goto fw_exit;
3280
3281         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3282
3283         if (lancer_chip(adapter))
3284                 status = lancer_fw_download(adapter, fw);
3285         else
3286                 status = be_fw_download(adapter, fw);
3287
3288 fw_exit:
3289         release_firmware(fw);
3290         return status;
3291 }
3292
3293 static const struct net_device_ops be_netdev_ops = {
3294         .ndo_open               = be_open,
3295         .ndo_stop               = be_close,
3296         .ndo_start_xmit         = be_xmit,
3297         .ndo_set_rx_mode        = be_set_rx_mode,
3298         .ndo_set_mac_address    = be_mac_addr_set,
3299         .ndo_change_mtu         = be_change_mtu,
3300         .ndo_get_stats64        = be_get_stats64,
3301         .ndo_validate_addr      = eth_validate_addr,
3302         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3303         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3304         .ndo_set_vf_mac         = be_set_vf_mac,
3305         .ndo_set_vf_vlan        = be_set_vf_vlan,
3306         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3307         .ndo_get_vf_config      = be_get_vf_config,
3308 #ifdef CONFIG_NET_POLL_CONTROLLER
3309         .ndo_poll_controller    = be_netpoll,
3310 #endif
3311 };
3312
3313 static void be_netdev_init(struct net_device *netdev)
3314 {
3315         struct be_adapter *adapter = netdev_priv(netdev);
3316         struct be_eq_obj *eqo;
3317         int i;
3318
3319         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3320                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3321                 NETIF_F_HW_VLAN_TX;
3322         if (be_multi_rxq(adapter))
3323                 netdev->hw_features |= NETIF_F_RXHASH;
3324
3325         netdev->features |= netdev->hw_features |
3326                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3327
3328         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3329                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3330
3331         netdev->priv_flags |= IFF_UNICAST_FLT;
3332
3333         netdev->flags |= IFF_MULTICAST;
3334
3335         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3336
3337         netdev->netdev_ops = &be_netdev_ops;
3338
3339         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3340
3341         for_all_evt_queues(adapter, eqo, i)
3342                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3343 }
3344
3345 static void be_unmap_pci_bars(struct be_adapter *adapter)
3346 {
3347         if (adapter->csr)
3348                 iounmap(adapter->csr);
3349         if (adapter->db)
3350                 iounmap(adapter->db);
3351         if (adapter->roce_db.base)
3352                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353 }
3354
3355 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3356 {
3357         struct pci_dev *pdev = adapter->pdev;
3358         u8 __iomem *addr;
3359
3360         addr = pci_iomap(pdev, 2, 0);
3361         if (addr == NULL)
3362                 return -ENOMEM;
3363
3364         adapter->roce_db.base = addr;
3365         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3366         adapter->roce_db.size = 8192;
3367         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3368         return 0;
3369 }
3370
3371 static int be_map_pci_bars(struct be_adapter *adapter)
3372 {
3373         u8 __iomem *addr;
3374         int db_reg;
3375
3376         if (lancer_chip(adapter)) {
3377                 if (be_type_2_3(adapter)) {
3378                         addr = ioremap_nocache(
3379                                         pci_resource_start(adapter->pdev, 0),
3380                                         pci_resource_len(adapter->pdev, 0));
3381                         if (addr == NULL)
3382                                 return -ENOMEM;
3383                         adapter->db = addr;
3384                 }
3385                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386                         if (lancer_roce_map_pci_bars(adapter))
3387                                 goto pci_map_err;
3388                 }
3389                 return 0;
3390         }
3391
3392         if (be_physfn(adapter)) {
3393                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394                                 pci_resource_len(adapter->pdev, 2));
3395                 if (addr == NULL)
3396                         return -ENOMEM;
3397                 adapter->csr = addr;
3398         }
3399
        if (adapter->generation == BE_GEN2 || be_physfn(adapter))
                db_reg = 4;
        else
                db_reg = 0;
3408         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409                                 pci_resource_len(adapter->pdev, db_reg));
3410         if (addr == NULL)
3411                 goto pci_map_err;
3412         adapter->db = addr;
3413         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3414                 adapter->roce_db.size = 4096;
3415                 adapter->roce_db.io_addr =
3416                                 pci_resource_start(adapter->pdev, db_reg);
3417                 adapter->roce_db.total_size =
3418                                 pci_resource_len(adapter->pdev, db_reg);
3419         }
3420         return 0;
3421 pci_map_err:
3422         be_unmap_pci_bars(adapter);
3423         return -ENOMEM;
3424 }
3425
3426 static void be_ctrl_cleanup(struct be_adapter *adapter)
3427 {
3428         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3429
3430         be_unmap_pci_bars(adapter);
3431
3432         if (mem->va)
3433                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3434                                   mem->dma);
3435
3436         mem = &adapter->rx_filter;
3437         if (mem->va)
3438                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439                                   mem->dma);
3440         kfree(adapter->pmac_id);
3441 }
3442
3443 static int be_ctrl_init(struct be_adapter *adapter)
3444 {
3445         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3447         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3448         int status;
3449
3450         status = be_map_pci_bars(adapter);
3451         if (status)
3452                 goto done;
3453
3454         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3455         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3456                                                 mbox_mem_alloc->size,
3457                                                 &mbox_mem_alloc->dma,
3458                                                 GFP_KERNEL);
3459         if (!mbox_mem_alloc->va) {
3460                 status = -ENOMEM;
3461                 goto unmap_pci_bars;
3462         }
3463         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3464         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3465         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3466         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3467
3468         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3469         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3470                                         &rx_filter->dma, GFP_KERNEL);
3471         if (rx_filter->va == NULL) {
3472                 status = -ENOMEM;
3473                 goto free_mbox;
3474         }
3475         memset(rx_filter->va, 0, rx_filter->size);
3476
3477         /* primary mac needs 1 pmac entry */
3478         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3479                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
        if (!adapter->pmac_id) {
                status = -ENOMEM;
                goto free_rx_filter;
        }
3482
3483         mutex_init(&adapter->mbox_lock);
3484         spin_lock_init(&adapter->mcc_lock);
3485         spin_lock_init(&adapter->mcc_cq_lock);
3486
3487         init_completion(&adapter->flash_compl);
3488         pci_save_state(adapter->pdev);
3489         return 0;
3490
free_rx_filter:
        dma_free_coherent(&adapter->pdev->dev, rx_filter->size,
                          rx_filter->va, rx_filter->dma);

free_mbox:
3492         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3493                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3494
3495 unmap_pci_bars:
3496         be_unmap_pci_bars(adapter);
3497
3498 done:
3499         return status;
3500 }
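
/*
 * The mailbox must sit on a 16-byte boundary, so be_ctrl_init() above
 * over-allocates by 16 bytes and rounds both the CPU and DMA addresses up
 * with PTR_ALIGN().  The same round-up in plain C (sketch only):
 */
static unsigned long align_up_16(unsigned long addr)
{
        /* round up to the next multiple of 16: add 15, clear the low bits */
        return (addr + 15) & ~15UL;
}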
3501
3502 static void be_stats_cleanup(struct be_adapter *adapter)
3503 {
3504         struct be_dma_mem *cmd = &adapter->stats_cmd;
3505
3506         if (cmd->va)
3507                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3508                                   cmd->va, cmd->dma);
3509 }
3510
3511 static int be_stats_init(struct be_adapter *adapter)
3512 {
3513         struct be_dma_mem *cmd = &adapter->stats_cmd;
3514
3515         if (adapter->generation == BE_GEN2) {
3516                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3517         } else {
3518                 if (lancer_chip(adapter))
3519                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3520                 else
3521                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3522         }
3523         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3524                                      GFP_KERNEL);
3525         if (cmd->va == NULL)
                return -ENOMEM;
3527         memset(cmd->va, 0, cmd->size);
3528         return 0;
3529 }
3530
3531 static void __devexit be_remove(struct pci_dev *pdev)
3532 {
3533         struct be_adapter *adapter = pci_get_drvdata(pdev);
3534
3535         if (!adapter)
3536                 return;
3537
3538         be_roce_dev_remove(adapter);
3539
3540         cancel_delayed_work_sync(&adapter->func_recovery_work);
3541
3542         unregister_netdev(adapter->netdev);
3543
3544         be_clear(adapter);
3545
3546         /* tell fw we're done with firing cmds */
3547         be_cmd_fw_clean(adapter);
3548
3549         be_stats_cleanup(adapter);
3550
3551         be_ctrl_cleanup(adapter);
3552
3553         pci_disable_pcie_error_reporting(pdev);
3554
3555         pci_set_drvdata(pdev, NULL);
3556         pci_release_regions(pdev);
3557         pci_disable_device(pdev);
3558
3559         free_netdev(adapter->netdev);
3560 }
3561
3562 bool be_is_wol_supported(struct be_adapter *adapter)
3563 {
        return (adapter->wol_cap & BE_WOL_CAP) &&
                !be_is_wol_excluded(adapter);
3566 }
3567
3568 u32 be_get_fw_log_level(struct be_adapter *adapter)
3569 {
3570         struct be_dma_mem extfat_cmd;
3571         struct be_fat_conf_params *cfgs;
3572         int status;
3573         u32 level = 0;
3574         int j;
3575
3576         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3577         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, extfat_cmd.size,
                                           &extfat_cmd.dma, GFP_KERNEL);
3580
3581         if (!extfat_cmd.va) {
3582                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3583                         __func__);
3584                 goto err;
3585         }
3586
3587         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3588         if (!status) {
3589                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3590                                                 sizeof(struct be_cmd_resp_hdr));
3591                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3592                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3593                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3594                 }
3595         }
        dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
                          extfat_cmd.dma);
3598 err:
3599         return level;
3600 }

static int be_get_initial_config(struct be_adapter *adapter)
3602 {
3603         int status;
3604         u32 level;
3605
3606         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3607                         &adapter->function_mode, &adapter->function_caps);
3608         if (status)
3609                 return status;
3610
3611         if (adapter->function_mode & FLEX10_MODE)
3612                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3613         else
3614                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3615
3616         if (be_physfn(adapter))
3617                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3618         else
3619                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3620
3621         status = be_cmd_get_cntl_attributes(adapter);
3622         if (status)
3623                 return status;
3624
3625         status = be_cmd_get_acpi_wol_cap(adapter);
3626         if (status) {
                /* in case of a failure to get WOL capabilities
                 * check the exclusion list to determine WOL capability
                 */
3629                 if (!be_is_wol_excluded(adapter))
3630                         adapter->wol_cap |= BE_WOL_CAP;
3631         }
3632
3633         if (be_is_wol_supported(adapter))
3634                 adapter->wol = true;
3635
3636         /* Must be a power of 2 or else MODULO will BUG_ON */
3637         adapter->be_get_temp_freq = 64;
3638
3639         level = be_get_fw_log_level(adapter);
3640         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3641
3642         return 0;
3643 }
3644
3645 static int be_dev_type_check(struct be_adapter *adapter)
3646 {
3647         struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0;
3649
3650         switch (pdev->device) {
3651         case BE_DEVICE_ID1:
3652         case OC_DEVICE_ID1:
3653                 adapter->generation = BE_GEN2;
3654                 break;
3655         case BE_DEVICE_ID2:
3656         case OC_DEVICE_ID2:
3657                 adapter->generation = BE_GEN3;
3658                 break;
3659         case OC_DEVICE_ID3:
3660         case OC_DEVICE_ID4:
3661                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3662                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663                                                 SLI_INTF_IF_TYPE_SHIFT;
3666                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3667                         !be_type_2_3(adapter)) {
3668                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3669                         return -EINVAL;
3670                 }
3671                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3672                                          SLI_INTF_FAMILY_SHIFT);
3673                 adapter->generation = BE_GEN3;
3674                 break;
3675         case OC_DEVICE_ID5:
3676                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3677                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3678                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3679                         return -EINVAL;
3680                 }
3681                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3682                                          SLI_INTF_FAMILY_SHIFT);
3683                 adapter->generation = BE_GEN3;
3684                 break;
3685         default:
3686                 adapter->generation = 0;
3687         }
3688
3689         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3690         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3691         return 0;
3692 }
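
/*
 * The SLI_INTF decoding above is plain mask-and-shift field extraction
 * from a single 32-bit config-space register.  A generic helper of the
 * same shape (sketch; not part of the driver):
 */
static u32 sli_get_field(u32 reg, u32 mask, u32 shift)
{
        /* e.g. sli_get_field(sli_intf, SLI_INTF_FAMILY_MASK,
         *                    SLI_INTF_FAMILY_SHIFT)
         */
        return (reg & mask) >> shift;
}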
3693
3694 static int lancer_recover_func(struct be_adapter *adapter)
3695 {
3696         int status;
3697
3698         status = lancer_test_and_set_rdy_state(adapter);
3699         if (status)
3700                 goto err;
3701
3702         if (netif_running(adapter->netdev))
3703                 be_close(adapter->netdev);
3704
3705         be_clear(adapter);
3706
3707         adapter->hw_error = false;
3708         adapter->fw_timeout = false;
3709
3710         status = be_setup(adapter);
3711         if (status)
3712                 goto err;
3713
3714         if (netif_running(adapter->netdev)) {
3715                 status = be_open(adapter->netdev);
3716                 if (status)
3717                         goto err;
3718         }
3719
        dev_info(&adapter->pdev->dev,
                 "Adapter SLIPORT recovery succeeded\n");
3722         return 0;
3723 err:
3724         dev_err(&adapter->pdev->dev,
3725                 "Adapter SLIPORT recovery failed\n");
3726
3727         return status;
3728 }
3729
3730 static void be_func_recovery_task(struct work_struct *work)
3731 {
3732         struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
3734         int status;
3735
3736         be_detect_error(adapter);
3737
3738         if (adapter->hw_error && lancer_chip(adapter)) {
3740                 if (adapter->eeh_error)
3741                         goto out;
3742
3743                 rtnl_lock();
3744                 netif_device_detach(adapter->netdev);
3745                 rtnl_unlock();
3746
3747                 status = lancer_recover_func(adapter);
3748
3749                 if (!status)
3750                         netif_device_attach(adapter->netdev);
3751         }
3752
3753 out:
3754         schedule_delayed_work(&adapter->func_recovery_work,
3755                               msecs_to_jiffies(1000));
3756 }
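
/*
 * be_func_recovery_task() uses the self-rearming delayed-work pattern:
 * run one round of checks, then schedule the same work item again so it
 * fires in another second.  A minimal sketch of the pattern:
 */
static void watchdog_task(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... detect errors and recover here ... */

        /* re-arm so the check runs again in one second */
        schedule_delayed_work(dwork, msecs_to_jiffies(1000));
}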
3757
3758 static void be_worker(struct work_struct *work)
3759 {
3760         struct be_adapter *adapter =
3761                 container_of(work, struct be_adapter, work.work);
3762         struct be_rx_obj *rxo;
3763         struct be_eq_obj *eqo;
3764         int i;
3765
        /* When interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
3768         if (!netif_running(adapter->netdev)) {
3769                 local_bh_disable();
3770                 be_process_mcc(adapter);
3771                 local_bh_enable();
3772                 goto reschedule;
3773         }
3774
3775         if (!adapter->stats_cmd_sent) {
3776                 if (lancer_chip(adapter))
3777                         lancer_cmd_get_pport_stats(adapter,
3778                                                 &adapter->stats_cmd);
3779                 else
3780                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3781         }
3782
3783         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3784                 be_cmd_get_die_temperature(adapter);
3785
3786         for_all_rx_queues(adapter, rxo, i) {
3787                 if (rxo->rx_post_starved) {
3788                         rxo->rx_post_starved = false;
3789                         be_post_rx_frags(rxo, GFP_KERNEL);
3790                 }
3791         }
3792
3793         for_all_evt_queues(adapter, eqo, i)
3794                 be_eqd_update(adapter, eqo);
3795
3796 reschedule:
3797         adapter->work_counter++;
3798         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3799 }
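
/*
 * The MODULO() test above relies on be_get_temp_freq being a power of two
 * (see the comment in be_get_initial_config).  For a power-of-two n the
 * remainder reduces to a mask, which is what such macros exploit:
 */
static u32 modulo_pow2(u32 x, u32 n)
{
        /* valid only when n is a power of two: x % n == x & (n - 1) */
        return x & (n - 1);
}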
3800
3801 static bool be_reset_required(struct be_adapter *adapter)
3802 {
        return be_find_vfs(adapter, ENABLED) <= 0;
3804 }
3805
3806 static char *mc_name(struct be_adapter *adapter)
3807 {
3808         if (adapter->function_mode & FLEX10_MODE)
3809                 return "FLEX10";
3810         else if (adapter->function_mode & VNIC_MODE)
3811                 return "vNIC";
3812         else if (adapter->function_mode & UMC_ENABLED)
3813                 return "UMC";
3814         else
3815                 return "";
3816 }
3817
3818 static inline char *func_name(struct be_adapter *adapter)
3819 {
3820         return be_physfn(adapter) ? "PF" : "VF";
3821 }
3822
3823 static int __devinit be_probe(struct pci_dev *pdev,
3824                         const struct pci_device_id *pdev_id)
3825 {
3826         int status = 0;
3827         struct be_adapter *adapter;
3828         struct net_device *netdev;
3829         char port_name;
3830
3831         status = pci_enable_device(pdev);
3832         if (status)
3833                 goto do_none;
3834
3835         status = pci_request_regions(pdev, DRV_NAME);
3836         if (status)
3837                 goto disable_dev;
3838         pci_set_master(pdev);
3839
3840         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3841         if (netdev == NULL) {
3842                 status = -ENOMEM;
3843                 goto rel_reg;
3844         }
3845         adapter = netdev_priv(netdev);
3846         adapter->pdev = pdev;
3847         pci_set_drvdata(pdev, adapter);
3848
3849         status = be_dev_type_check(adapter);
3850         if (status)
3851                 goto free_netdev;
3852
3853         adapter->netdev = netdev;
3854         SET_NETDEV_DEV(netdev, &pdev->dev);
3855
3856         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3857         if (!status) {
3858                 netdev->features |= NETIF_F_HIGHDMA;
3859         } else {
3860                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3861                 if (status) {
3862                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3863                         goto free_netdev;
3864                 }
3865         }
3866
3867         status = pci_enable_pcie_error_reporting(pdev);
3868         if (status)
3869                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
3870
3871         status = be_ctrl_init(adapter);
3872         if (status)
3873                 goto free_netdev;
3874
3875         /* sync up with fw's ready state */
3876         if (be_physfn(adapter)) {
3877                 status = be_fw_wait_ready(adapter);
3878                 if (status)
3879                         goto ctrl_clean;
3880         }
3881
3882         /* tell fw we're ready to fire cmds */
3883         status = be_cmd_fw_init(adapter);
3884         if (status)
3885                 goto ctrl_clean;
3886
3887         if (be_reset_required(adapter)) {
3888                 status = be_cmd_reset_function(adapter);
3889                 if (status)
3890                         goto ctrl_clean;
3891         }
3892
3893         /* The INTR bit may be set in the card when probed by a kdump kernel
3894          * after a crash.
3895          */
3896         if (!lancer_chip(adapter))
3897                 be_intr_set(adapter, false);
3898
3899         status = be_stats_init(adapter);
3900         if (status)
3901                 goto ctrl_clean;
3902
3903         status = be_get_initial_config(adapter);
3904         if (status)
3905                 goto stats_clean;
3906
3907         INIT_DELAYED_WORK(&adapter->work, be_worker);
3908         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
3909         adapter->rx_fc = adapter->tx_fc = true;
3910
3911         status = be_setup(adapter);
3912         if (status)
3913                 goto stats_clean;
3914
3915         be_netdev_init(netdev);
3916         status = register_netdev(netdev);
3917         if (status != 0)
3918                 goto unsetup;
3919
3920         be_roce_dev_add(adapter);
3921
3922         schedule_delayed_work(&adapter->func_recovery_work,
3923                               msecs_to_jiffies(1000));
3924
3925         be_cmd_query_port_name(adapter, &port_name);
3926
3927         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
3928                  func_name(adapter), mc_name(adapter), port_name);
3929
3930         return 0;
3931
3932 unsetup:
3933         be_clear(adapter);
3934 stats_clean:
3935         be_stats_cleanup(adapter);
3936 ctrl_clean:
3937         be_ctrl_cleanup(adapter);
3938 free_netdev:
3939         free_netdev(netdev);
3940         pci_set_drvdata(pdev, NULL);
3941 rel_reg:
3942         pci_release_regions(pdev);
3943 disable_dev:
3944         pci_disable_device(pdev);
3945 do_none:
3946         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3947         return status;
3948 }
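
/*
 * be_probe() follows the kernel's stacked-goto unwind idiom: each failure
 * jumps to a label that tears down everything set up so far, falling
 * through the earlier labels in reverse order.  The shape in miniature
 * (the callbacks are assumptions for illustration):
 */
static int probe_unwind_shape(int (*step_a)(void), int (*step_b)(void),
                              void (*undo_a)(void))
{
        int status;

        status = step_a();
        if (status)
                goto out;
        status = step_b();
        if (status)
                goto err_undo_a;        /* unwind in reverse order */
        return 0;

err_undo_a:
        undo_a();
out:
        return status;
}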
3949
3950 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3951 {
3952         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3954
3955         if (adapter->wol)
3956                 be_setup_wol(adapter, true);
3957
3958         cancel_delayed_work_sync(&adapter->func_recovery_work);
3959
3960         netif_device_detach(netdev);
3961         if (netif_running(netdev)) {
3962                 rtnl_lock();
3963                 be_close(netdev);
3964                 rtnl_unlock();
3965         }
3966         be_clear(adapter);
3967
3968         pci_save_state(pdev);
3969         pci_disable_device(pdev);
3970         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3971         return 0;
3972 }
3973
3974 static int be_resume(struct pci_dev *pdev)
3975 {
3976         int status = 0;
3977         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
3979
3980         netif_device_detach(netdev);
3981
3982         status = pci_enable_device(pdev);
3983         if (status)
3984                 return status;
3985
        pci_set_power_state(pdev, PCI_D0);
3987         pci_restore_state(pdev);
3988
3989         /* tell fw we're ready to fire cmds */
3990         status = be_cmd_fw_init(adapter);
3991         if (status)
3992                 return status;
3993
3994         be_setup(adapter);
3995         if (netif_running(netdev)) {
3996                 rtnl_lock();
3997                 be_open(netdev);
3998                 rtnl_unlock();
3999         }
4000
4001         schedule_delayed_work(&adapter->func_recovery_work,
4002                               msecs_to_jiffies(1000));
4003         netif_device_attach(netdev);
4004
4005         if (adapter->wol)
4006                 be_setup_wol(adapter, false);
4007
4008         return 0;
4009 }
4010
4011 /*
4012  * An FLR will stop BE from DMAing any data.
4013  */
4014 static void be_shutdown(struct pci_dev *pdev)
4015 {
4016         struct be_adapter *adapter = pci_get_drvdata(pdev);
4017
4018         if (!adapter)
4019                 return;
4020
4021         cancel_delayed_work_sync(&adapter->work);
4022         cancel_delayed_work_sync(&adapter->func_recovery_work);
4023
4024         netif_device_detach(adapter->netdev);
4025
4026         if (adapter->wol)
4027                 be_setup_wol(adapter, true);
4028
4029         be_cmd_reset_function(adapter);
4030
4031         pci_disable_device(pdev);
4032 }
4033
4034 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4035                                 pci_channel_state_t state)
4036 {
4037         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4039
4040         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4041
4042         adapter->eeh_error = true;
4043
4044         cancel_delayed_work_sync(&adapter->func_recovery_work);
4045
4046         rtnl_lock();
4047         netif_device_detach(netdev);
4048         rtnl_unlock();
4049
4050         if (netif_running(netdev)) {
4051                 rtnl_lock();
4052                 be_close(netdev);
4053                 rtnl_unlock();
4054         }
4055         be_clear(adapter);
4056
4057         if (state == pci_channel_io_perm_failure)
4058                 return PCI_ERS_RESULT_DISCONNECT;
4059
4060         pci_disable_device(pdev);
4061
4062         /* The error could cause the FW to trigger a flash debug dump.
4063          * Resetting the card while flash dump is in progress
4064          * can cause it not to recover; wait for it to finish
4065          */
4066         ssleep(30);
4067         return PCI_ERS_RESULT_NEED_RESET;
4068 }
4069
4070 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4071 {
4072         struct be_adapter *adapter = pci_get_drvdata(pdev);
4073         int status;
4074
4075         dev_info(&adapter->pdev->dev, "EEH reset\n");
4076         be_clear_all_error(adapter);
4077
4078         status = pci_enable_device(pdev);
4079         if (status)
4080                 return PCI_ERS_RESULT_DISCONNECT;
4081
4082         pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
4084         pci_restore_state(pdev);
4085
4086         /* Check if card is ok and fw is ready */
4087         status = be_fw_wait_ready(adapter);
4088         if (status)
4089                 return PCI_ERS_RESULT_DISCONNECT;
4090
4091         pci_cleanup_aer_uncorrect_error_status(pdev);
4092         return PCI_ERS_RESULT_RECOVERED;
4093 }
4094
4095 static void be_eeh_resume(struct pci_dev *pdev)
4096 {
4097         int status = 0;
4098         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4100
4101         dev_info(&adapter->pdev->dev, "EEH resume\n");
4102
4103         pci_save_state(pdev);
4104
4105         /* tell fw we're ready to fire cmds */
4106         status = be_cmd_fw_init(adapter);
4107         if (status)
4108                 goto err;
4109
4110         status = be_cmd_reset_function(adapter);
4111         if (status)
4112                 goto err;
4113
4114         status = be_setup(adapter);
4115         if (status)
4116                 goto err;
4117
4118         if (netif_running(netdev)) {
4119                 status = be_open(netdev);
4120                 if (status)
4121                         goto err;
4122         }
4123
4124         schedule_delayed_work(&adapter->func_recovery_work,
4125                               msecs_to_jiffies(1000));
4126         netif_device_attach(netdev);
4127         return;
4128 err:
4129         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4130 }
4131
4132 static const struct pci_error_handlers be_eeh_handlers = {
4133         .error_detected = be_eeh_err_detected,
4134         .slot_reset = be_eeh_reset,
4135         .resume = be_eeh_resume,
4136 };
4137
4138 static struct pci_driver be_driver = {
4139         .name = DRV_NAME,
4140         .id_table = be_dev_ids,
4141         .probe = be_probe,
4142         .remove = be_remove,
4143         .suspend = be_suspend,
4144         .resume = be_resume,
4145         .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers,
4147 };
4148
4149 static int __init be_init_module(void)
4150 {
4151         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4152             rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be "
                        "2048/4096/8192; using 2048\n");
4156                 rx_frag_size = 2048;
4157         }
4158
4159         return pci_register_driver(&be_driver);
4160 }
4161 module_init(be_init_module);
4162
4163 static void __exit be_exit_module(void)
4164 {
4165         pci_unregister_driver(&be_driver);
4166 }
4167 module_exit(be_exit_module);