/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2010-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "vfdi.h"

/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)

/* Maximum number of RX queues supported */
#define VF_MAX_RX_QUEUES 63
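
/* VF_MAX_RX_QUEUES must not exceed the number of queue IDs that fit in
 * a single MC_CMD_FLUSH_RX_QUEUES request; the BUILD_BUG_ON() in
 * efx_vfdi_fini_all_queues() enforces this.
 */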

/**
 * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
 * @VF_TX_FILTER_OFF: Disabled
 * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
 *	2 TX queues allowed per VF.
 * @VF_TX_FILTER_ON: Enabled
 */
enum efx_vf_tx_filter_mode {
	VF_TX_FILTER_OFF,
	VF_TX_FILTER_AUTO,
	VF_TX_FILTER_ON,
};

/**
 * struct efx_vf - Back-end resource and protocol state for a PCI VF
 * @efx: The Efx NIC owning this VF
 * @pci_rid: The PCI requester ID for this VF
 * @pci_name: The PCI name (formatted address) of this VF
 * @index: Index of VF within its port and PF.
 * @req: VFDI incoming request work item. Incoming USR_EV events are received
 *	by the NAPI handler, but must be handled by executing MCDI requests
 *	inside a work item.
 * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
 * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
 * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
 * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
 *	@status_lock
 * @busy: VFDI request queued to be processed or being processed. Receiving
 *	a VFDI request when @busy is set is an error condition.
 * @buf: Incoming VFDI requests are DMA from the VF into this buffer.
 * @buftbl_base: Buffer table entries for this VF start at this index.
 * @rx_filtering: Receive filtering has been requested by the VF driver.
 * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
 * @rx_filter_qid: VF relative qid for RX filter requested by VF.
 * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
 * @tx_filter_mode: Transmit MAC filtering mode.
 * @tx_filter_id: Transmit MAC filter ID.
 * @addr: The MAC address and outer vlan tag of the VF.
 * @status_addr: VF DMA address of page for &struct vfdi_status updates.
 * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
 *	@peer_page_addrs and @peer_page_count from simultaneous
 *	updates by the VM and consumption by
 *	efx_sriov_update_vf_addr()
 * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
 * @peer_page_count: Number of entries in @peer_page_addrs.
 * @evq0_addrs: Array of guest pages backing evq0.
 * @evq0_count: Number of entries in @evq0_addrs.
 * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
 *	to wait for flush completions.
 * @txq_lock: Mutex for TX queue allocation.
 * @txq_mask: Mask of initialized transmit queues.
 * @txq_count: Number of initialized transmit queues.
 * @rxq_mask: Mask of initialized receive queues.
 * @rxq_count: Number of initialized receive queues.
 * @rxq_retry_mask: Mask of receive queues that need to be flushed again
 *	due to flush failure.
 * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
 * @reset_work: Work item to schedule a VF reset.
 */
struct efx_vf {
	struct efx_nic *efx;
	unsigned int pci_rid;
	char pci_name[13]; /* dddd:bb:dd.f */
	unsigned int index;
	struct work_struct req;
	u64 req_addr;
	int req_type;
	unsigned req_seqno;
	unsigned msg_seqno;
	bool busy;
	struct efx_buffer buf;
	unsigned buftbl_base;
	bool rx_filtering;
	enum efx_filter_flags rx_filter_flags;
	unsigned rx_filter_qid;
	int rx_filter_id;
	enum efx_vf_tx_filter_mode tx_filter_mode;
	int tx_filter_id;
	struct vfdi_endpoint addr;
	u64 status_addr;
	struct mutex status_lock;
	u64 *peer_page_addrs;
	unsigned peer_page_count;
	u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
		       EFX_BUF_SIZE];
	unsigned evq0_count;
	wait_queue_head_t flush_waitq;
	struct mutex txq_lock;
	unsigned long txq_mask[VI_MASK_LENGTH];
	unsigned txq_count;
	unsigned long rxq_mask[VI_MASK_LENGTH];
	unsigned rxq_count;
	unsigned long rxq_retry_mask[VI_MASK_LENGTH];
	atomic_t rxq_retry_count;
	struct work_struct reset_work;
};

struct efx_memcpy_req {
	unsigned int from_rid;
	void *from_buf;
	u64 from_addr;
	unsigned int to_rid;
	u64 to_addr;
	unsigned length;
};

/**
 * struct efx_local_addr - A MAC address on the vswitch without a VF.
 *
 * Siena does not have a switch, so VFs can't transmit data to each
 * other. Instead the VFs must be made aware of the local addresses
 * on the vswitch, so that they can arrange for an alternative
 * software datapath to be used.
 *
 * @link: List head for insertion into efx->local_addr_list.
 * @addr: Ethernet address
 */
struct efx_local_addr {
	struct list_head link;
	u8 addr[ETH_ALEN];
};

/**
 * struct efx_endpoint_page - Page of vfdi_endpoint structures
 *
 * @link: List head for insertion into efx->local_page_list.
 * @ptr: Pointer to page.
 * @addr: DMA address of page.
 */
struct efx_endpoint_page {
	struct list_head link;
	void *ptr;
	dma_addr_t addr;
};

/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid)					\
	((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid)					\
	(EFX_BUFTBL_TXQ_BASE(_vf, _qid) +				\
	 (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
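
/* Each VI's slice of the buffer table is therefore laid out as one
 * maximum-sized TX queue, then one maximum-sized RX queue, then the
 * event queue: the RXQ base sits one DMA queue's worth of buffer
 * entries (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)
 * above the TXQ base, and the EVQ base sits two above it.
 */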

#define EFX_FIELD_MASK(_field)			\
	((1 << _field ## _WIDTH) - 1)

/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
		 "Limit the number of TX channels VFs can use");

static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
		 "Reduce the number of VFs initialized by the driver");

/* Workqueue used by VFDI communication. We can't use the global
 * workqueue because it may be running the VF driver's probe()
 * routine, which will be blocked there waiting for a VFDI response.
 */
static struct workqueue_struct *vfdi_workqueue;
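
/* VIs are allocated to VFs as a single contiguous block starting at
 * EFX_VI_BASE, efx_vf_size() VIs per VF; abs_index() below converts a
 * VF-relative queue index into the absolute per-port VI number.
 */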
static unsigned abs_index(struct efx_vf *vf, unsigned index)
{
	return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}

static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
			 unsigned *vi_scale_out, unsigned *vf_total_out)
{
	u8 inbuf[MC_CMD_SRIOV_IN_LEN];
	u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
	unsigned vi_scale, vf_total;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
	MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);

	rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
			  outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SRIOV_OUT_LEN)
		return -EIO;

	vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
	vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
	if (vi_scale > EFX_VI_SCALE_MAX)
		return -EOPNOTSUPP;

	if (vi_scale_out)
		*vi_scale_out = vi_scale;
	if (vf_total_out)
		*vf_total_out = vf_total;

	return 0;
}

static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}

static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
			    unsigned int count)
{
	u8 *inbuf, *record;
	unsigned int used;
	u32 from_rid, from_hi, from_lo;
	int rc;

	mb();	/* Finish writing source/reading dest before DMA starts */

	used = MC_CMD_MEMCPY_IN_LEN(count);
	if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
		return -ENOBUFS;

	/* Allocate room for the largest request */
	inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
	if (inbuf == NULL)
		return -ENOMEM;

	record = inbuf;
	MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
	while (count-- > 0) {
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
			       req->to_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
			       (u32)req->to_addr);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
			       (u32)(req->to_addr >> 32));
		if (req->from_buf == NULL) {
			from_rid = req->from_rid;
			from_lo = (u32)req->from_addr;
			from_hi = (u32)(req->from_addr >> 32);
		} else {
			if (WARN_ON(used + req->length >
				    MCDI_CTL_SDU_LEN_MAX)) {
				rc = -ENOBUFS;
				goto out;
			}

			from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
			from_lo = used;
			from_hi = 0;
			memcpy(inbuf + used, req->from_buf, req->length);
			used += req->length;
		}

		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID,
			       from_rid);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
			       from_lo);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
			       from_hi);
		MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
			       req->length);

		++req;
		record += MC_CMD_MEMCPY_IN_RECORD_LEN;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
	kfree(inbuf);

	mb();	/* Don't write source/read dest before DMA is complete */

	return rc;
}

/* The TX filter is entirely controlled by this driver, and is modified
 * underneath the feet of the VF
 */
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->tx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->tx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
			  vf->pci_name, vf->tx_filter_id);
		vf->tx_filter_id = -1;
	}

	if (is_zero_ether_addr(vf->addr.mac_addr))
		return;

	/* Turn on TX filtering automatically if not explicitly
	 * enabled or disabled.
	 */
	if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
		vf->tx_filter_mode = VF_TX_FILTER_ON;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_tx(&filter, abs_index(vf, 0));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to migrate tx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
			  vf->pci_name, rc);
		vf->tx_filter_id = rc;
	}
}

/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct efx_filter_spec filter;
	u16 vlan;
	int rc;

	if (vf->rx_filter_id != -1) {
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  vf->rx_filter_id);
		netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
			  vf->pci_name, vf->rx_filter_id);
		vf->rx_filter_id = -1;
	}

	if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
		return;

	vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
	efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
			   vf->rx_filter_flags,
			   abs_index(vf, vf->rx_filter_qid));
	rc = efx_filter_set_eth_local(&filter,
				      vlan ? vlan : EFX_FILTER_VID_UNSPEC,
				      vf->addr.mac_addr);
	BUG_ON(rc);

	rc = efx_filter_insert_filter(efx, &filter, true);
	if (rc < 0) {
		netif_warn(efx, hw, efx->net_dev,
			   "Unable to insert rx filter for vf %s\n",
			   vf->pci_name);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
			  vf->pci_name, rc);
		vf->rx_filter_id = rc;
	}
}

static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
{
	efx_sriov_reset_tx_filter(vf);
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);
}

/* Push the peer list to this VF. The caller must hold status_lock to interlock
 * with VFDI requests, and they must be serialised against manipulation of
 * local_page_list, either by acquiring local_lock or by running from
 * efx_sriov_peer_work()
 */
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_status *status = efx->vfdi_status.addr;
	struct efx_memcpy_req copy[4];
	struct efx_endpoint_page *epp;
	unsigned int pos, count;
	unsigned data_offset;
	efx_qword_t event;

	WARN_ON(!mutex_is_locked(&vf->status_lock));
	WARN_ON(!vf->status_addr);

	status->local = vf->addr;
	status->generation_end = ++status->generation_start;
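
	/* The VF driver watches for generation_start == generation_end
	 * to detect a torn update: generation_start is pushed first,
	 * then the body of the status page, then generation_end, so
	 * the two counts only match once a copy has completed.
	 */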
	memset(copy, '\0', sizeof(copy));
	/* Write generation_start */
	copy[0].from_buf = &status->generation_start;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						     generation_start);
	copy[0].length = sizeof(status->generation_start);
	/* DMA the rest of the structure (excluding the generations). This
	 * assumes that the non-generation portion of vfdi_status is in
	 * one chunk starting at the version member.
	 */
	data_offset = offsetof(struct vfdi_status, version);
	copy[1].from_rid = efx->pci_dev->devfn;
	copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->status_addr + data_offset;
	copy[1].length = status->length - data_offset;

	/* Copy the peer pages */
	pos = 2;
	count = 0;
	list_for_each_entry(epp, &efx->local_page_list, link) {
		if (count == vf->peer_page_count) {
			/* The VF driver will know they need to provide more
			 * pages because peer_addr_count is too large.
			 */
			break;
		}
		copy[pos].from_buf = NULL;
		copy[pos].from_rid = efx->pci_dev->devfn;
		copy[pos].from_addr = epp->addr;
		copy[pos].to_rid = vf->pci_rid;
		copy[pos].to_addr = vf->peer_page_addrs[count];
		copy[pos].length = EFX_PAGE_SIZE;

		if (++pos == ARRAY_SIZE(copy)) {
			efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
			pos = 2;
		}
		++count;
	}

	/* Write generation_end */
	copy[pos].from_buf = &status->generation_end;
	copy[pos].to_rid = vf->pci_rid;
	copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
						       generation_end);
	copy[pos].length = sizeof(status->generation_end);
	efx_sriov_memcpy(efx, copy, pos + 1);

	/* Notify the guest */
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
			     VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
	++vf->msg_seqno;
	efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
			   &event);
}

static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
			   u64 *addr, unsigned count)
{
	efx_qword_t buf;
	unsigned pos;

	for (pos = 0; pos < count; ++pos) {
		EFX_POPULATE_QWORD_3(buf,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF,
				     addr ? addr[pos] >> 12 : 0,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
				&buf, offset + pos);
	}
}

static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
	return index >= efx_vf_size(efx);
}
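
/* A queue's buffer count must be a non-zero power of two (the EVQ and
 * DMAQ size fields are programmed as __ffs(buf_count)) and must not
 * exceed the buffer table space reserved for a maximum-sized queue.
 */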
static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
	unsigned max_buf_count = max_entry_count *
		sizeof(efx_qword_t) / EFX_BUF_SIZE;

	return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}

/* Check that VI specified by per-port index belongs to a VF.
 * Optionally set VF index and VI index within the VF.
 */
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
			 struct efx_vf **vf_out, unsigned *rel_index_out)
{
	unsigned vf_i;

	if (abs_index < EFX_VI_BASE)
		return true;
	vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
	if (vf_i >= efx->vf_init_count)
		return true;

	if (vf_out)
		*vf_out = efx->vf + vf_i;
	if (rel_index_out)
		*rel_index_out = abs_index % efx_vf_size(efx);
	return false;
}

static int efx_vfdi_init_evq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_evq = req->u.init_evq.index;
	unsigned buf_count = req->u.init_evq.buf_count;
	unsigned abs_evq = abs_index(vf, vf_evq);
	unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) ||
	    bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
				  vf->pci_name, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(buf_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
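
	/* The guest pages backing evq0 are remembered so that the queue
	 * can be refilled with reset events and re-created by
	 * efx_sriov_reset_vf() after a VF reset.
	 */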
	if (vf_evq == 0) {
		memcpy(vf->evq0_addrs, req->u.init_evq.addr,
		       buf_count * sizeof(u64));
		vf->evq0_count = buf_count;
	}

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.init_rxq.index;
	unsigned vf_evq = req->u.init_rxq.evq;
	unsigned buf_count = req->u.init_rxq.buf_count;
	unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
	unsigned label;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
	    vf_rxq >= VF_MAX_RX_QUEUES ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d buf_count %d\n",
				  vf->pci_name, vf_rxq, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}
	if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
		++vf->rxq_count;
	efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);

	label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_RX_DESCQ_LABEL, label,
			     FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_AZ_RX_DESCQ_JUMBO,
			     !!(req->u.init_rxq.flags &
				VFDI_RXQ_FLAG_SCATTER_EN),
			     FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
			 abs_index(vf, vf_rxq));

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_init_txq(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_txq = req->u.init_txq.index;
	unsigned vf_evq = req->u.init_txq.evq;
	unsigned buf_count = req->u.init_txq.buf_count;
	unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
	unsigned label, eth_filt_en;
	efx_oword_t reg;

	if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
	    vf_txq >= vf_max_tx_channels ||
	    bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d buf_count %d\n",
				  vf->pci_name, vf_txq, vf_evq, buf_count);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&vf->txq_lock);
	if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
		++vf->txq_count;
	mutex_unlock(&vf->txq_lock);
	efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);

	eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;

	label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
	EFX_POPULATE_OWORD_8(reg,
			     FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
			     FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
			     FRF_AZ_TX_DESCQ_EN, 1,
			     FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
			     FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
			     FRF_AZ_TX_DESCQ_LABEL, label,
			     FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
			     FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
			 abs_index(vf, vf_txq));

	return VFDI_RC_SUCCESS;
}

/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
	/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
	smp_mb();

	return (!vf->txq_count && !vf->rxq_count) ||
	       atomic_read(&vf->rxq_retry_count);
}

static void efx_vfdi_flush_clear(struct efx_vf *vf)
{
	memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
	vf->txq_count = 0;
	memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
	vf->rxq_count = 0;
	memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
	atomic_set(&vf->rxq_retry_count, 0);
}

static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	efx_oword_t reg;
	unsigned count = efx_vf_size(efx);
	unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
	unsigned timeout = HZ;
	unsigned index, rxqs_count;
	__le32 *rxqs;
	int rc;

	BUILD_BUG_ON(VF_MAX_RX_QUEUES >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
	if (rxqs == NULL)
		return VFDI_RC_ENOMEM;

	rtnl_lock();
	if (efx->fc_disable++ == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Flush all the initialized queues */
	rxqs_count = 0;
	for (index = 0; index < count; ++index) {
		if (test_bit(index, vf->txq_mask)) {
			EFX_POPULATE_OWORD_2(reg,
					     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
					     FRF_AZ_TX_FLUSH_DESCQ,
					     vf_offset + index);
			efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
		}
		if (test_bit(index, vf->rxq_mask))
			rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
	}

	atomic_set(&vf->rxq_retry_count, 0);
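	/* An RX flush may fail (e.g. while the queue is still fetching
	 * descriptors); failed queues are reported via rxq_retry_mask by
	 * efx_sriov_rx_flush_done() and are simply flushed again until
	 * they succeed or the overall timeout expires.
	 */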
	while (timeout && (vf->rxq_count || vf->txq_count)) {
		rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
				  rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
		WARN_ON(rc < 0);

		timeout = wait_event_timeout(vf->flush_waitq,
					     efx_vfdi_flush_wake(vf),
					     timeout);
		rxqs_count = 0;
		for (index = 0; index < count; ++index) {
			if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
				atomic_dec(&vf->rxq_retry_count);
				rxqs[rxqs_count++] =
					cpu_to_le32(vf_offset + index);
			}
		}
	}

	rtnl_lock();
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
	rtnl_unlock();

	/* Irrespective of success/failure, fini the queues */
	EFX_ZERO_OWORD(reg);
	for (index = 0; index < count; ++index) {
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
				 vf_offset + index);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
				 vf_offset + index);
	}
	efx_sriov_bufs(efx, vf->buftbl_base, NULL,
		       EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
	kfree(rxqs);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;

	return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}

static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	unsigned vf_rxq = req->u.mac_filter.rxq;
	unsigned flags;

	if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid INSERT_FILTER from %s: rxq %d flags 0x%x\n",
				  vf->pci_name, vf_rxq,
				  req->u.mac_filter.flags);
		return VFDI_RC_EINVAL;
	}

	flags = 0;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
		flags |= EFX_FILTER_FLAG_RX_RSS;
	if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
		flags |= EFX_FILTER_FLAG_RX_SCATTER;
	vf->rx_filter_flags = flags;
	vf->rx_filter_qid = vf_rxq;
	vf->rx_filtering = true;

	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &efx->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
	vf->rx_filtering = false;
	efx_sriov_reset_rx_filter(vf);
	queue_work(vfdi_workqueue, &vf->efx->peer_work);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	u64 page_count = req->u.set_status_page.peer_page_count;
	u64 max_page_count =
		(EFX_PAGE_SIZE -
		 offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
		/ sizeof(req->u.set_status_page.peer_page_addr[0]);

	if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Invalid SET_STATUS_PAGE from %s\n",
				  vf->pci_name);
		return VFDI_RC_EINVAL;
	}

	mutex_lock(&efx->local_lock);
	mutex_lock(&vf->status_lock);
	vf->status_addr = req->u.set_status_page.dma_addr;

	kfree(vf->peer_page_addrs);
	vf->peer_page_addrs = NULL;
	vf->peer_page_count = 0;

	if (page_count) {
		vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
					      GFP_KERNEL);
		if (vf->peer_page_addrs) {
			memcpy(vf->peer_page_addrs,
			       req->u.set_status_page.peer_page_addr,
			       page_count * sizeof(u64));
			vf->peer_page_count = page_count;
		}
	}

	__efx_sriov_push_vf_status(vf);
	mutex_unlock(&vf->status_lock);
	mutex_unlock(&efx->local_lock);

	return VFDI_RC_SUCCESS;
}

static int efx_vfdi_clear_status_page(struct efx_vf *vf)
{
	mutex_lock(&vf->status_lock);
	vf->status_addr = 0;
	mutex_unlock(&vf->status_lock);

	return VFDI_RC_SUCCESS;
}

typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);

static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
	[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
	[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
	[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
	[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
	[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
	[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
	[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
	[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
static void efx_sriov_vfdi(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, req);
	struct efx_nic *efx = vf->efx;
	struct vfdi_req *req = vf->buf.addr;
	struct efx_memcpy_req copy[2];
	int rc;

	/* Copy this page into the local address space */
	memset(copy, '\0', sizeof(copy));
	copy[0].from_rid = vf->pci_rid;
	copy[0].from_addr = vf->req_addr;
	copy[0].to_rid = efx->pci_dev->devfn;
	copy[0].to_addr = vf->buf.dma_addr;
	copy[0].length = EFX_PAGE_SIZE;
	rc = efx_sriov_memcpy(efx, copy, 1);
	if (rc) {
		/* If we can't get the request, we can't reply to the caller */
		if (net_ratelimit())
			netif_err(efx, hw, efx->net_dev,
				  "ERROR: Unable to fetch VFDI request from %s rc %d\n",
				  vf->pci_name, -rc);
		vf->busy = false;
		return;
	}

	if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
		rc = vfdi_ops[req->op](vf);
		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "vfdi request %d from %s ok\n",
				  req->op, vf->pci_name);
		}
	} else {
		netif_dbg(efx, hw, efx->net_dev,
			  "ERROR: Unrecognised request %d from VF %s addr %llx\n",
			  req->op, vf->pci_name,
			  (unsigned long long)vf->req_addr);
		rc = VFDI_RC_EOPNOTSUPP;
	}

	/* Allow subsequent VF requests */
	vf->busy = false;
	smp_wmb();

	/* Respond to the request */
	req->rc = rc;
	req->op = VFDI_OP_RESPONSE;

	memset(copy, '\0', sizeof(copy));
	copy[0].from_buf = &req->rc;
	copy[0].to_rid = vf->pci_rid;
	copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
	copy[0].length = sizeof(req->rc);
	copy[1].from_buf = &req->op;
	copy[1].to_rid = vf->pci_rid;
	copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
	copy[1].length = sizeof(req->op);

	(void)efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}

/* After a reset the event queues inside the guests no longer exist. Fill the
 * event ring in guest memory with VFDI reset events, then (re-)initialise the
 * event queue to raise an interrupt. The guest driver will then recover.
 */
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
	struct efx_nic *efx = vf->efx;
	struct efx_memcpy_req copy_req[4];
	efx_qword_t event;
	unsigned int pos, count, k, buftbl, abs_evq;
	efx_oword_t reg;
	efx_dword_t ptr;
	int rc;

	BUG_ON(buffer->len != EFX_PAGE_SIZE);

	if (!vf->evq0_count)
		return;
	BUG_ON(vf->evq0_count & (vf->evq0_count - 1));

	mutex_lock(&vf->status_lock);
	EFX_POPULATE_QWORD_3(event,
			     FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
			     VFDI_EV_SEQ, vf->msg_seqno,
			     VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
	++vf->msg_seqno;
	for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
		memcpy(buffer->addr + pos, &event, sizeof(event));

	for (pos = 0; pos < vf->evq0_count; pos += count) {
		count = min_t(unsigned, vf->evq0_count - pos,
			      ARRAY_SIZE(copy_req));
		for (k = 0; k < count; k++) {
			copy_req[k].from_buf = NULL;
			copy_req[k].from_rid = efx->pci_dev->devfn;
			copy_req[k].from_addr = buffer->dma_addr;
			copy_req[k].to_rid = vf->pci_rid;
			copy_req[k].to_addr = vf->evq0_addrs[pos + k];
			copy_req[k].length = EFX_PAGE_SIZE;
		}
		rc = efx_sriov_memcpy(efx, copy_req, count);
		if (rc) {
			if (net_ratelimit())
				netif_err(efx, hw, efx->net_dev,
					  "ERROR: Unable to notify %s of reset: %d\n",
					  vf->pci_name, -rc);
			break;
		}
	}

	/* Reinitialise, arm and trigger evq0 */
	abs_evq = abs_index(vf, 0);
	buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
	efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);

	EFX_POPULATE_OWORD_3(reg,
			     FRF_CZ_TIMER_Q_EN, 1,
			     FRF_CZ_HOST_NOTIFY_MODE, 0,
			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
	efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
	mutex_unlock(&vf->status_lock);
}

static void efx_sriov_reset_vf_work(struct work_struct *work)
{
	struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
	struct efx_nic *efx = vf->efx;
	struct efx_buffer buf;

	if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
		efx_sriov_reset_vf(vf, &buf);
		efx_nic_free_buffer(efx, &buf);
	}
}

static void efx_sriov_handle_no_channel(struct efx_nic *efx)
{
	netif_err(efx, drv, efx->net_dev,
		  "ERROR: IOV requires MSI-X and 1 additional interrupt vector. IOV disabled\n");
	efx->vf_count = 0;
}

static int efx_sriov_probe_channel(struct efx_channel *channel)
{
	channel->efx->vfdi_channel = channel;
	return 0;
}

static void
efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	snprintf(buf, len, "%s-iov", channel->efx->name);
}

static const struct efx_channel_type efx_sriov_channel_type = {
	.handle_no_channel	= efx_sriov_handle_no_channel,
	.pre_probe		= efx_sriov_probe_channel,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_sriov_get_channel_name,
	/* no copy operation; channel must not be reallocated */
	.keep_eventq		= true,
};

void efx_sriov_probe(struct efx_nic *efx)
{
	unsigned count;

	if (!max_vfs)
		return;

	if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
		return;
	if (count > 0 && count > max_vfs)
		count = max_vfs;

	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
	efx->vf_count = count;

	efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}

/* Copy the list of individual addresses into the vfdi_status.peers
 * array and auxiliary pages, protected by %local_lock. Drop that lock
 * and then broadcast the address list to every VF.
 */
static void efx_sriov_peer_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
	struct efx_vf *vf;
	struct efx_local_addr *local_addr;
	struct vfdi_endpoint *peer;
	struct efx_endpoint_page *epp;
	struct list_head pages;
	unsigned int peer_space;
	unsigned int peer_count;
	unsigned int pos;

	mutex_lock(&efx->local_lock);

	/* Move the existing peer pages off %local_page_list */
	INIT_LIST_HEAD(&pages);
	list_splice_tail_init(&efx->local_page_list, &pages);

	/* Populate the VF addresses starting from entry 1 (entry 0 is
	 * the PF) */
	peer = vfdi_status->peers + 1;
	peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
	peer_count = 1;
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
			*peer++ = vf->addr;
			++peer_count;
			--peer_space;
			BUG_ON(peer_space == 0);
		}
		mutex_unlock(&vf->status_lock);
	}

	/* Fill the remaining addresses */
	list_for_each_entry(local_addr, &efx->local_addr_list, link) {
		memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
		peer->tci = 0;
		++peer;
		++peer_count;
		if (--peer_space == 0) {
			if (list_empty(&pages)) {
				epp = kmalloc(sizeof(*epp), GFP_KERNEL);
				if (!epp)
					break;
				epp->ptr = dma_alloc_coherent(
					&efx->pci_dev->dev, EFX_PAGE_SIZE,
					&epp->addr, GFP_KERNEL);
				if (!epp->ptr) {
					kfree(epp);
					break;
				}
			} else {
				epp = list_first_entry(
					&pages, struct efx_endpoint_page, link);
				list_del(&epp->link);
			}

			list_add_tail(&epp->link, &efx->local_page_list);
			peer = (struct vfdi_endpoint *)epp->ptr;
			peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
		}
	}
	vfdi_status->peer_count = peer_count;
	mutex_unlock(&efx->local_lock);

	/* Free any now unused endpoint pages */
	while (!list_empty(&pages)) {
		epp = list_first_entry(
			&pages, struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}

	/* Finally, push the pages */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		mutex_lock(&vf->status_lock);
		if (vf->status_addr)
			__efx_sriov_push_vf_status(vf);
		mutex_unlock(&vf->status_lock);
	}
}

static void efx_sriov_free_local(struct efx_nic *efx)
{
	struct efx_local_addr *local_addr;
	struct efx_endpoint_page *epp;

	while (!list_empty(&efx->local_addr_list)) {
		local_addr = list_first_entry(&efx->local_addr_list,
					      struct efx_local_addr, link);
		list_del(&local_addr->link);
		kfree(local_addr);
	}

	while (!list_empty(&efx->local_page_list)) {
		epp = list_first_entry(&efx->local_page_list,
				       struct efx_endpoint_page, link);
		list_del(&epp->link);
		dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
				  epp->ptr, epp->addr);
		kfree(epp);
	}
}
static int efx_sriov_vf_alloc(struct efx_nic *efx)
{
	unsigned index;
	struct efx_vf *vf;

	efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
	if (!efx->vf)
		return -ENOMEM;

	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		vf->efx = efx;
		vf->index = index;
		vf->rx_filter_id = -1;
		vf->tx_filter_mode = VF_TX_FILTER_AUTO;
		vf->tx_filter_id = -1;
		INIT_WORK(&vf->req, efx_sriov_vfdi);
		INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
		init_waitqueue_head(&vf->flush_waitq);
		mutex_init(&vf->status_lock);
		mutex_init(&vf->txq_lock);
	}

	return 0;
}

static void efx_sriov_vfs_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;

		efx_nic_free_buffer(efx, &vf->buf);
		kfree(vf->peer_page_addrs);
		vf->peer_page_addrs = NULL;
		vf->peer_page_count = 0;

		vf->evq0_count = 0;
	}
}

static int efx_sriov_vfs_init(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	unsigned index, devfn, sriov, buftbl_base;
	u16 offset, stride;
	struct efx_vf *vf;
	int rc;

	sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
	if (!sriov)
		return -ENOENT;

	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

	buftbl_base = efx->vf_buftbl_base;
	devfn = pci_dev->devfn + offset;
	for (index = 0; index < efx->vf_count; ++index) {
		vf = efx->vf + index;

		/* Reserve buffer entries */
		vf->buftbl_base = buftbl_base;
		buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);

		vf->pci_rid = devfn;
		snprintf(vf->pci_name, sizeof(vf->pci_name),
			 "%04x:%02x:%02x.%d",
			 pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
			 PCI_SLOT(devfn), PCI_FUNC(devfn));

		rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
		if (rc)
			goto fail;

		devfn += stride;
	}

	return 0;

fail:
	efx_sriov_vfs_fini(efx);
	return rc;
}

int efx_sriov_init(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct vfdi_status *vfdi_status;
	int rc;

	/* Ensure there's room for vf_channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
	/* Ensure that VI_BASE is aligned on VI_SCALE */
	BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));

	if (efx->vf_count == 0)
		return 0;

	rc = efx_sriov_cmd(efx, true, NULL, NULL);
	if (rc)
		goto fail_cmd;

	rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
	if (rc)
		goto fail_status;
	vfdi_status = efx->vfdi_status.addr;
	memset(vfdi_status, 0, sizeof(*vfdi_status));
	vfdi_status->version = 1;
	vfdi_status->length = sizeof(*vfdi_status);
	vfdi_status->max_tx_channels = vf_max_tx_channels;
	vfdi_status->vi_scale = efx->vi_scale;
	vfdi_status->rss_rxq_count = efx->rss_spread;
	vfdi_status->peer_count = 1 + efx->vf_count;
	vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
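
	/* The non-generation portion of this structure, from the version
	 * member onwards, is copied into each VF's status page by
	 * __efx_sriov_push_vf_status(), bracketed by the generation
	 * counts.
	 */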
	rc = efx_sriov_vf_alloc(efx);
	if (rc)
		goto fail_alloc;

	mutex_init(&efx->local_lock);
	INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
	INIT_LIST_HEAD(&efx->local_addr_list);
	INIT_LIST_HEAD(&efx->local_page_list);

	rc = efx_sriov_vfs_init(efx);
	if (rc)
		goto fail_vfs;

	rtnl_lock();
	memcpy(vfdi_status->peers[0].mac_addr,
	       net_dev->dev_addr, ETH_ALEN);
	efx->vf_init_count = efx->vf_count;
	rtnl_unlock();

	efx_sriov_usrev(efx, true);

	/* At this point we must be ready to accept VFDI requests */

	rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
	if (rc)
		goto fail_pci;

	netif_info(efx, probe, net_dev,
		   "enabled SR-IOV for %d VFs, %d VI per VF\n",
		   efx->vf_count, efx_vf_size(efx));
	return 0;

fail_pci:
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();
	efx_sriov_vfs_fini(efx);
fail_vfs:
	cancel_work_sync(&efx->peer_work);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
fail_alloc:
	efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
	efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
	return rc;
}

void efx_sriov_fini(struct efx_nic *efx)
{
	struct efx_vf *vf;
	unsigned int pos;

	if (efx->vf_init_count == 0)
		return;

	/* Disable all interfaces to reconfiguration */
	BUG_ON(efx->vfdi_channel->enabled);
	efx_sriov_usrev(efx, false);
	rtnl_lock();
	efx->vf_init_count = 0;
	rtnl_unlock();

	/* Flush all reconfiguration work */
	for (pos = 0; pos < efx->vf_count; ++pos) {
		vf = efx->vf + pos;
		cancel_work_sync(&vf->req);
		cancel_work_sync(&vf->reset_work);
	}
	cancel_work_sync(&efx->peer_work);

	pci_disable_sriov(efx->pci_dev);

	/* Tear down back-end state */
	efx_sriov_vfs_fini(efx);
	efx_sriov_free_local(efx);
	kfree(efx->vf);
	efx_nic_free_buffer(efx, &efx->vfdi_status);
	efx_sriov_cmd(efx, false, NULL, NULL);
}

void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_vf *vf;
	unsigned qid, seq, type, data;

	qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);

	/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
	BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
	seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
	type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
	data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);

	netif_vdbg(efx, hw, efx->net_dev,
		   "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
		   qid, seq, type, data);

	if (map_vi_index(efx, qid, &vf, NULL))
		return;
	if (vf->busy)
		goto error;

	if (type == VFDI_EV_TYPE_REQ_WORD0) {
		/* Resynchronise */
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->req_seqno = seq + 1;
		vf->req_addr = 0;
	} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
		goto error;
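
	/* The 64-bit request address is assembled from four 16-bit words
	 * delivered in consecutive USR_EV events (REQ_WORD0..REQ_WORD3);
	 * each word is shifted into place by 16 * req_type below, and
	 * the request is queued for processing only when the last word
	 * arrives.
	 */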
	switch (vf->req_type) {
	case VFDI_EV_TYPE_REQ_WORD0:
	case VFDI_EV_TYPE_REQ_WORD1:
	case VFDI_EV_TYPE_REQ_WORD2:
		vf->req_addr |= (u64)data << (vf->req_type << 4);
		++vf->req_type;
		break;

	case VFDI_EV_TYPE_REQ_WORD3:
		vf->req_addr |= (u64)data << 48;
		vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
		vf->busy = true;
		queue_work(vfdi_workqueue, &vf->req);
		break;
	}
	return;

error:
	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "ERROR: Screaming VFDI request from %s\n",
			  vf->pci_name);
	/* Reset the request and sequence number */
	vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
	vf->req_seqno = seq + 1;
}

void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
	struct efx_vf *vf;

	if (vf_i > efx->vf_init_count)
		return;
	vf = efx->vf + vf_i;
	netif_info(efx, hw, efx->net_dev,
		   "FLR on VF %s\n", vf->pci_name);

	vf->status_addr = 0;
	efx_vfdi_remove_all_filters(vf);
	efx_vfdi_flush_clear(vf);

	vf->evq0_count = 0;
}

void efx_sriov_mac_address_changed(struct efx_nic *efx)
{
	struct vfdi_status *vfdi_status = efx->vfdi_status.addr;

	if (!efx->vf_init_count)
		return;
	memcpy(vfdi_status->peers[0].mac_addr,
	       efx->net_dev->dev_addr, ETH_ALEN);
	queue_work(vfdi_workqueue, &efx->peer_work);
}

void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	/* Ignore flush completions triggered by an FLR */
	if (!test_bit(qid, vf->txq_mask))
		return;

	__clear_bit(qid, vf->txq_mask);
	--vf->txq_count;

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_vf *vf;
	unsigned ev_failed, queue, qid;

	queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	ev_failed = EFX_QWORD_FIELD(*event,
				    FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (map_vi_index(efx, queue, &vf, &qid))
		return;
	if (!test_bit(qid, vf->rxq_mask))
		return;

	if (ev_failed) {
		set_bit(qid, vf->rxq_retry_mask);
		atomic_inc(&vf->rxq_retry_count);
	} else {
		__clear_bit(qid, vf->rxq_mask);
		--vf->rxq_count;
	}

	if (efx_vfdi_flush_wake(vf))
		wake_up(&vf->flush_waitq);
}

/* Called from napi. Schedule the reset work item */
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
	struct efx_vf *vf;
	unsigned int rel;

	if (map_vi_index(efx, dmaq, &vf, &rel))
		return;

	if (net_ratelimit())
		netif_err(efx, hw, efx->net_dev,
			  "VF %d DMA Q %d reports descriptor fetch error.\n",
			  vf->index, rel);
	queue_work(vfdi_workqueue, &vf->reset_work);
}

void efx_sriov_reset(struct efx_nic *efx)
{
	unsigned int vf_i;
	struct efx_buffer buf;
	struct efx_vf *vf;

	ASSERT_RTNL();

	if (efx->vf_init_count == 0)
		return;

	efx_sriov_usrev(efx, true);
	(void)efx_sriov_cmd(efx, true, NULL, NULL);

	if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
		return;

	for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
		vf = efx->vf + vf_i;
		efx_sriov_reset_vf(vf, &buf);
	}

	efx_nic_free_buffer(efx, &buf);
}

int efx_init_sriov(void)
{
	/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
	 * efx_sriov_peer_work() spend almost all their time sleeping for
	 * MCDI to complete anyway
	 */
	vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
	if (!vfdi_workqueue)
		return -ENOMEM;

	return 0;
}

void efx_fini_sriov(void)
{
	destroy_workqueue(vfdi_workqueue);
}

int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
	memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
			  u16 vlan, u8 qos)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->status_lock);
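	/* The TCI is the 12-bit VLAN ID in the low bits plus the 3-bit
	 * priority (qos) field shifted to bits 13-15.
	 */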
	tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
	vf->addr.tci = htons(tci);
	__efx_sriov_update_vf_addr(vf);
	mutex_unlock(&vf->status_lock);

	return 0;
}

int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
			      bool spoofchk)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	int rc;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	mutex_lock(&vf->txq_lock);
	if (vf->txq_count == 0) {
		vf->tx_filter_mode =
			spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
		rc = 0;
	} else {
		/* This cannot be changed while TX queues are running */
		rc = -EBUSY;
	}
	mutex_unlock(&vf->txq_lock);
	return rc;
}

int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
			    struct ifla_vf_info *ivi)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_vf *vf;
	u16 tci;

	if (vf_i >= efx->vf_init_count)
		return -EINVAL;
	vf = efx->vf + vf_i;

	ivi->vf = vf_i;
	memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
	ivi->tx_rate = 0;
	tci = ntohs(vf->addr.tci);
	ivi->vlan = tci & VLAN_VID_MASK;
	ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
	ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;

	return 0;
}