/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"
/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
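/*
 * Note: the ack doorbell value is precomputed here so that the interrupt
 * path can acknowledge events and re-arm the IB with the current
 * coalescing timeout in a single doorbell write.
 */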
/* RXF */

#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
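/*
 * A "soft" reset only rewinds driver-side state: every VLAN block and
 * the RSS/VLAN-strip settings are marked pending again so they are
 * replayed to firmware the next time the RXF starts. No commands are
 * posted here.
 */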
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
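/*
 * RXF state machine overview:
 *   stopped        - filter config is idle; changes are only recorded.
 *   paused         - RXF is started but datapath filters are held clear.
 *   cfg_wait       - pending config is being applied, one firmware
 *                    command at a time, driven by RXF_E_FW_RESP.
 *   started        - all pending config has been applied.
 *   fltr_clr_wait  - CAM entries are being cleared on a pause request.
 *   last_resp_wait - stop requested; waiting for the final response.
 */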
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}
static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}
static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}
static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}
static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}
static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
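/*
 * All of the bna_bfi_*_req() helpers below follow the same pattern:
 * build a request in rxf->bfi_enet_cmd, then queue it through the
 * message queue with no completion callback (the two NULLs). The
 * firmware response is dispatched back as RXF_E_FW_RESP, and only one
 * RXF command is outstanding at a time.
 */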
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
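/*
 * The VLAN filter table is downloaded to firmware in blocks. With the
 * 8-bit pending bitmask (one bit per block) covering all 4096 VLAN IDs,
 * each block holds 512 filter bits, i.e. the BFI_ENET_VLAN_BLOCK_SIZE/32
 * words sent in req->bit_mask[] above. When VLAN filtering is disabled
 * the block is written as all ones so that every VLAN is accepted.
 */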
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}
static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
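/*
 * Multicast CAM handles are reference counted: several bna_mac entries
 * may map to the same firmware handle, so the delete request is posted
 * (hard cleanup only) when the last reference goes away. A return value
 * of 1 tells the caller that a command is now in flight.
 */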
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* Delete multicast entries previously added */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}
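/*
 * The loop above is a find-first-set scan: e.g. a pending bitmask of
 * 0x0c yields block_idx 2 on this pass and leaves bit 3 set for the
 * next RXF_E_FW_RESP round trip.
 */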
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}
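/*
 * Note the ordering above: each *_cfg_apply() posts at most one firmware
 * command and returns 1. bna_rxf_sm_cfg_wait() re-invokes
 * bna_rxf_cfg_apply() on every RXF_E_FW_RESP until everything has been
 * applied and 0 is returned, which moves the RXF to the started state.
 */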
/* Clear all CAM filters in hardware (hard cleanup); used on the pause path */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}
/* Only software reset; no commands are posted to firmware */
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
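/*
 * Cleanup semantics used throughout this file: BNA_HARD_CLEANUP posts
 * delete/disable commands so the hardware CAM is actually scrubbed,
 * while BNA_SOFT_CLEANUP (used here, when firmware is stopping or has
 * failed) only rewinds driver bookkeeping so the same configuration is
 * replayed on the next start.
 */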
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
		struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
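/*
 * Worked example for the index math above, assuming 32-bit table words
 * (BFI_VLAN_WORD_SHIFT 5, BFI_VLAN_WORD_MASK 0x1f) and 512-ID blocks
 * (BFI_VLAN_BLOCK_SHIFT 9): vlan_id 100 gives index 3 (word 100/32),
 * bit 1<<4 (100%32) and group_id 0 (100/512), so only block 0 is marked
 * for re-download.
 */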
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}
static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}
/* RX */

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
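/*
 * SIZE_TO_PAGES() rounds a byte count up to whole pages, e.g. with
 * 4 KiB pages SIZE_TO_PAGES(6000) = (6000 >> 12) + 1 = 2, while an
 * exact multiple such as 8192 also yields 2.
 */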
#define	call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)
#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
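/*
 * RX state machine overview: stopped -> start_wait (RX_CFG_SET posted
 * to firmware) -> rxf_start_wait -> started, with the reverse path
 * through rxf_stop_wait/stop_wait on RX_E_STOP. cleanup_wait parks the
 * RX until bnad finishes datapath cleanup (RX_E_CLEANUP_DONE); failed
 * and quiesce_wait handle an IOC failure, quiesce_wait restarting the
 * RX once cleanup of the failed instance completes.
 */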
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}
static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}
static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}
static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* no-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* no-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			q0->buffer_size =
				bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;

		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
				return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
				return 0;
	}

	return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);

	return rx;
}
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
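/*
 * rx_free_q is kept sorted by rid: bna_rx_get() dequeues regular RXs
 * from the head and loopback RXs from the tail, so keeping the order on
 * free means the lowest rids are always handed out first for regular
 * traffic.
 */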
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int	i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
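/*
 * The queue page table (QPT) built above has two views: the entries at
 * kv_qpt_ptr hold the DMA address of every queue page for the hardware,
 * while rcb->sw_qpt mirrors the kernel virtual address of each page so
 * the driver can walk the queue without translating addresses.
 */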
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int	i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int	index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->bna = NULL;
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}
void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;
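	/*
	 * Example of the sizing above for a two-queue (BNA_RXP_SLR)
	 * config with q_depth 2048: dq_depth = hq_depth = 2048 and
	 * cq_depth = 4096, already powers of two; each queue size is
	 * depth * work-item size, rounded up to a page multiple, and
	 * SIZE_TO_PAGES() then gives the per-path page counts requested
	 * below.
	 */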
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
2331 bna_rx_create(struct bna *bna, struct bnad *bnad,
2332 struct bna_rx_config *rx_cfg,
2333 const struct bna_rx_event_cbfn *rx_cbfn,
2334 struct bna_res_info *res_info,
2337 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2339 struct bna_rxp *rxp;
2342 struct bna_intr_info *intr_info;
2344 struct bna_mem_descr *ccb_mem;
2345 struct bna_mem_descr *rcb_mem;
2346 struct bna_mem_descr *unmapq_mem;
2347 struct bna_mem_descr *cqpt_mem;
2348 struct bna_mem_descr *cswqpt_mem;
2349 struct bna_mem_descr *cpage_mem;
2350 struct bna_mem_descr *hqpt_mem;
2351 struct bna_mem_descr *dqpt_mem;
2352 struct bna_mem_descr *hsqpt_mem;
2353 struct bna_mem_descr *dsqpt_mem;
2354 struct bna_mem_descr *hpage_mem;
2355 struct bna_mem_descr *dpage_mem;
2357 int dpage_count, hpage_count, rcb_idx;
2359 if (!bna_rx_res_check(rx_mod, rx_cfg))
2362 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2363 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2364 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2365 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2366 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2367 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2368 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2369 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2370 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2371 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2372 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2373 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2374 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2376 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2379 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2382 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2385 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2388 INIT_LIST_HEAD(&rx->rxp_q);
2389 rx->stop_cbfn = NULL;
2390 rx->stop_cbarg = NULL;
2393 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2394 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2395 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2396 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2397 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2398 /* Following callbacks are mandatory */
2399 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2400 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2402 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2404 case BNA_RX_T_REGULAR:
2405 if (!(rx->bna->rx_mod.flags &
2406 BNA_RX_MOD_F_ENET_LOOPBACK))
2407 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2409 case BNA_RX_T_LOOPBACK:
2410 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2411 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2416 rx->num_paths = rx_cfg->num_paths;
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}

void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}
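/*
 * Editor's note: the enable path above relies on the bfa FSM convention of
 * storing the current state as a function pointer, so "is this object
 * stopped?" is a plain pointer comparison. A minimal sketch of the idiom,
 * with hypothetical names (obj and obj_sm_stopped are stand-ins, not
 * driver symbols):
 *
 *	if (obj->fsm != (bfa_sm_t)obj_sm_stopped)
 *		return;		// only act from a known quiescent state
 *	bfa_fsm_send_event(obj, EVENT_START);
 */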
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid))
			goto err_return;

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
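/*
 * Editor's note: a sketch of how a caller might drive bna_rx_mode_set(),
 * assuming a bnad-style completion callback; the callback fires either
 * immediately (no h/w change needed) or from the RXF state machine once
 * the filter update completes. Names below are illustrative only.
 *
 *	static void mode_set_done(struct bnad *bnad, struct bna_rx *rx)
 *	{
 *		// e.g. complete a waiting ioctl/ndo context here
 *	}
 *
 *	enum bna_cb_status err;
 *	err = bna_rx_mode_set(rx, BNA_RXMODE_PROMISC,
 *			BNA_RXMODE_PROMISC, mode_set_done);
 *	if (err != BNA_CB_SUCCESS)
 *		;	// promisc/default mode conflicts with another Rx
 */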
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = BNA_BIAS_T_SMALL;
	else
		bias = BNA_BIAS_T_LARGE;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};

/* TX */
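/*
 * Editor's note: bna_napi_dim_vector[load][bias] is the preconfigured
 * coalescing timeout handed to bna_ib_coalescing_timeo_set() for each of
 * the eight load buckets derived in bna_rx_dim_update() and the
 * small/large packet bias. Worked example, assuming the rate counters are
 * sampled once per polling interval: small_pkt_cnt = 30000 and
 * large_pkt_cnt = 5000 give pkt_rt = 35000, i.e. BNA_LOAD_T_LOW_1;
 * small_rt > (large_rt << 1) holds, so bias = BNA_BIAS_T_SMALL and the
 * new timeout is dim_vector[BNA_LOAD_T_LOW_1][BNA_BIAS_T_SMALL].
 */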
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)
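/*
 * Editor's note: both callback macros above use the do { ... } while (0)
 * wrapper so they expand to a single statement and remain safe inside
 * unbraced if/else arms, and they NULL the saved callback before invoking
 * it so a callback that re-arms itself cannot recurse. Minimal shape of
 * the idiom (illustrative only, not a driver macro):
 *
 *	#define call_cb(obj)						\
 *	do {								\
 *		if ((obj)->cbfn) {					\
 *			void (*cbfn)(void *) = (obj)->cbfn;		\
 *			(obj)->cbfn = NULL;				\
 *			cbfn((obj)->cbarg);				\
 *		}							\
 *	} while (0)
 */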
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_STARTED = 4,
	TX_E_STOPPED = 5,
	TX_E_PRIO_CHANGE = 6,
	TX_E_CLEANUP_DONE = 7,
	TX_E_BW_UPDATE = 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
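/*
 * Editor's note: bfa_fsm_state_decl() is the bfa/bna convention for
 * declaring one FSM state as a pair of functions; under the (assumed)
 * expansion in the bfa headers it produces roughly:
 *
 *	static void bna_tx_sm_stopped_entry(struct bna_tx *tx);
 *	static void bna_tx_sm_stopped(struct bna_tx *tx,
 *			enum bna_tx_event event);
 *
 * bfa_fsm_set_state() then runs the new state's _entry hook once, and
 * bfa_fsm_send_event() dispatches events to the current state function,
 * which is also why states can be compared by function pointer.
 */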
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/**
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
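/*
 * Editor's note: the normal lifecycles through the handlers above are
 *
 *	stopped --TX_E_START--> start_wait --TX_E_STARTED--> started
 *	started --TX_E_STOP--> stop_wait --TX_E_STOPPED--> cleanup_wait
 *	cleanup_wait --TX_E_CLEANUP_DONE--> stopped
 *
 * with prio_stop_wait/prio_cleanup_wait (below) forming a restart loop
 * for priority or bandwidth updates, and failed/quiesce_wait absorbing
 * IOC failures until cleanup completes.
 */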
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
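/*
 * Editor's note: the request above travels over the bfa message queue in
 * fixed-size command entries, so mh.num_entries is the request size
 * rounded up to whole entries (that rounding is what
 * bfi_msgq_num_cmd_entries() computes; the exact entry size is defined by
 * the msgq layer). The htons()/htonl() conversions are needed because the
 * BFI wire format is big-endian regardless of host byte order.
 */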
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
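/*
 * Editor's note: bna_txq_qpt_setup() builds two parallel page tables over
 * one contiguous allocation: a DMA-visible queue page table (qpt) holding
 * the lsb/msb of each page's bus address for the adapter, and a kernel
 * shadow (sw_qpt) holding the corresponding kva for the driver. A sketch
 * of the resulting layout for entry i:
 *
 *	qpt.kv_qpt_ptr[i] = { lsb/msb of (page_mem->dma + i * PAGE_SIZE) }
 *	tcb->sw_qpt[i]    = page_mem->kva + i * PAGE_SIZE
 *
 * so entry i on both sides names the same PAGE_SIZE chunk of the queue.
 */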
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
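/*
 * Editor's note: the insertion above keeps tx_free_q sorted by rid so
 * that bna_tx_get() can hand out the lowest rid for regular Tx (dequeue
 * from the head) and the highest for loopback (dequeue from the tail).
 * The manual bfa_q_next/bfa_q_prev splicing in the middle case is
 * equivalent to the standard list helper, assuming bfa queue elements
 * stay layout-compatible with struct list_head (the casts in this file
 * already rely on that):
 *
 *	list_add(&tx->qe, prev_qe);	// insert immediately after prev_qe
 */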
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}
void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
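/*
 * Editor's note: worked sizing example for the computation above,
 * assuming PAGE_SIZE = 4096 and a 64-byte work item (the real value is
 * BFI_TXQ_WI_SIZE): txq_depth = 2048 gives q_size = 2048 * 64 = 128 KiB,
 * already page aligned, so page_count = 32. Each TxQ then needs a
 * 32-entry DMA page table (32 * sizeof(struct bna_dma_addr)), a
 * 32-pointer shadow table, and 32 pages (128 KiB) of queue memory.
 */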
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
			PAGE_SIZE;

	/**
	 * Get tx
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */
	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}
	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}
	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}

void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
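/*
 * Editor's note: bfa_wc is the bfa "waiting counter" used above as a
 * completion barrier: bfa_wc_init() registers
 * bna_tx_mod_cb_tx_stopped_all() as the resume callback and takes an
 * initial reference, bfa_wc_up() is called once per Tx object being
 * stopped, each stop completion drops one via bfa_wc_down() (see
 * bna_tx_mod_cb_tx_stopped()), and bfa_wc_wait() drops the initial
 * reference, so the callback fires exactly when the last Tx reports
 * stopped, including immediately if no Tx matched the type.
 */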
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}