/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
                                   struct lpfc_sli_ring *pring,
                                   struct lpfc_iocbq *cmdiocb);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
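
/*
 * Illustrative sketch only, not part of the driver: on big-endian or
 * 32-bit builds the macro above falls back to lpfc_sli_pcimem_bcopy(),
 * defined elsewhere in this file, which converts each 32-bit word from
 * SLI (little-endian) to native byte order. A minimal model of that
 * per-word copy, assuming uint32_t-aligned buffers, looks like:
 *
 *      uint32_t *src = srcp, *dest = destp, ldata;
 *      int i;
 *
 *      for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
 *              ldata = *src++;
 *              ldata = le32_to_cpu(ldata);
 *              *dest++ = ldata;
 *      }
 */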
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = q->qe[q->host_index].wqe;

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
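
/*
 * Usage sketch (illustrative, not compiled into the driver): a typical
 * caller posts a WQE with the hbalock held and treats a non-zero return
 * as failure to post. The queue and wqe variables here are hypothetical:
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *      if (rc)
 *              handle_error(rc); (-EBUSY when full, -ENOMEM/-EINVAL
 *                                 on bad queue state; name hypothetical)
 */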
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}
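
/*
 * Worked example (illustrative): with entry_count = 256, hba_index = 250
 * and index = 2, the do/while above walks 251, 252, ..., 255, 0, 1, 2 and
 * returns released = 8; wrap-around is handled by the modulo step.
 */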
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = q->qe[q->host_index].mqe;

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = q->qe[q->host_index].eqe;

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
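
/*
 * Illustrative note: two hardware generations are handled above. On
 * non-eqav parts the host clears each EQE's valid bit as it consumes it;
 * on eqav (autovalid) parts the EQE is left untouched and the host
 * instead flips the polarity it looks for each time host_index wraps.
 * For example, with entry_count = 256 and qe_valid = 1, consuming entry
 * 255 moves host_index to 0 and qe_valid becomes 0 for the next pass.
 */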
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe;
        uint32_t count = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        eq->queue_claimed = 0;

rearm_and_exit:
        /* Always clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

        return count;
}
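
/*
 * Usage sketch (illustrative; the handler name is hypothetical): an
 * interrupt handler typically silences the EQ, lets the routine above
 * consume and dispatch entries, and relies on it to re-arm on exit:
 *
 *      static irqreturn_t lpfc_eq_intr_sketch(int irq, void *dev_id)
 *      {
 *              struct lpfc_queue *eq = dev_id;
 *              struct lpfc_hba *phba = eq->phba;
 *
 *              phba->sli4_hba.sli4_eq_clr_intr(eq);
 *              lpfc_sli4_process_eq(phba, eq);
 *              return IRQ_HANDLED;
 *      }
 */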
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = q->qe[q->host_index].cqe;

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before the valid bit was
         * checked, add a barrier here as well. The EQE case involved only a
         * single 32-bit entity, whereas the CQE is a multi-word structure.
         */
        mb();
        return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available it will return -EBUSY, and -ENOMEM
 * or -EINVAL on bad queue state.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = hq->qe[hq_put_index].rqe;
        temp_drqe = dq->qe[dq_put_index].rqe;

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
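
/*
 * Illustrative note: receive buffers are always posted in pairs, one
 * header RQE and one data RQE at the same put index. A caller sketch,
 * with hypothetical dma buffer names hdr_buf and dat_buf:
 *
 *      hrqe.address_lo = putPaddrLow(hdr_buf->phys);
 *      hrqe.address_hi = putPaddrHigh(hdr_buf->phys);
 *      drqe.address_lo = putPaddrLow(dat_buf->phys);
 *      drqe.address_hi = putPaddrHigh(dat_buf->phys);
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * rc is the put index on success, -EINVAL on a type or index mismatch,
 * and -EBUSY when the queue pair is full.
 */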
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
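
/*
 * Worked example (illustrative): with iocb_cmd_size = 128 bytes and
 * cmdidx = 3, lpfc_cmd_iocb() above resolves to cmdringaddr + 384. The
 * rings are plain arrays indexed by entry size, which is why SLI-2 and
 * SLI-3 can share these helpers despite using different sized iocbs.
 */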
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap);
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function acquires the hbalock. It checks whether stop_time
 * (ratov from setting the rrq active) has been reached; if it has
 * and the send_rrq flag is set then it will call lpfc_send_rrq. If
 * the send_rrq flag is not set then it will just call the routine
 * to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}
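
/*
 * Illustrative note: rrq_stop_time is stamped RATOV + 1 seconds into the
 * future (see lpfc_set_rrq_active() below). With the common fc_ratov of
 * 10 seconds and HZ = 250, that is jiffies + msecs_to_jiffies(11000) =
 * jiffies + 2750 ticks; the list walk above simply compares that stamp
 * against the current jiffies with time_after().
 */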
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * Returns NULL if the rrq is not found in the phba->active_rrq_list,
 * else the rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        lockdep_assert_held(&phba->hbalock);
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 if the rrq was activated for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the hbalock held. This function
 * gets a new driver sglq object from the els sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;

        lockdep_assert_held(&phba->hbalock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (!piocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, piocb);
                else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}
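
/*
 * Usage sketch (illustrative): callers collect in-flight iocbs on a
 * local list and then fail them all with one status, for example when
 * tearing down a ring (the list name here is hypothetical):
 *
 *      LIST_HEAD(completions);
 *      ... move iocbs from the ring onto &completions ...
 *      lpfc_sli_cancel_iocbs(phba, &completions,
 *                            IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */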
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return 0;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}
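
/*
 * Usage sketch (illustrative): the ring event handlers dispatch on the
 * returned type, along the lines of:
 *
 *      switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *      case LPFC_SOL_IOCB:
 *              ... match against the txcmplq and call iocb_cmpl ...
 *              break;
 *      case LPFC_UNSOL_IOCB:
 *              ... hand the sequence to the registered ULP ...
 *              break;
 *      default:
 *              break;
 *      }
 */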
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        lockdep_assert_held(&phba->hbalock);

        BUG_ON(!piocb);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!(piocb->vport->load_flag & FC_UNLOADING))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies +
                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }

        return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

        lockdep_assert_held(&phba->hbalock);

        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
                pring->sli.sli3.next_cmdidx = 0;

        if (unlikely(pring->sli.sli3.local_getidx ==
                     pring->sli.sli3.next_cmdidx)) {

                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->sli.sli3.local_getidx,
                                        max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                             - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will release the
 * iocb back to the pool.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
        lockdep_assert_held(&phba->hbalock);
        /*
         * Set up an iotag
         */
        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
                        *(((uint32_t *) &nextiocb->iocb) + 4),
                        *(((uint32_t *) &nextiocb->iocb) + 6),
                        *(((uint32_t *) &nextiocb->iocb) + 7));
        }

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
        wmb();
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else
                __lpfc_sli_release_iocbq(phba, nextiocb);

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
        writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
                wmb();
                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        IOCB_t *iocb;
        struct lpfc_iocbq *nextiocb;

        lockdep_assert_held(&phba->hbalock);

        /*
         * Check to see if:
         *  (a) there is anything on the txq to send
         *  (b) link is up
         *  (c) link attention events can be processed (fcp ring only)
         *  (d) IOCB processing is not blocked by the outstanding mbox command.
         */

        if (lpfc_is_link_up(phba) &&
            (!list_empty(&pring->txq)) &&
            (pring->ringno != LPFC_FCP_RING ||
             phba->sli.sli_flag & LPFC_PROCESS_LA)) {

                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

                if (iocb)
                        lpfc_sli_update_ring(phba, pring);
                else
                        lpfc_sli_update_full_ring(phba, pring);
        }

        return;
}
1909 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1910 * @phba: Pointer to HBA context object.
1911 * @hbqno: HBQ number.
1913 * This function is called with hbalock held to get the next
1914 * available slot for the given HBQ. If there is free slot
1915 * available for the HBQ it will return pointer to the next available
1916 * HBQ entry else it will return NULL.
1918 static struct lpfc_hbq_entry *
1919 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1921 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1923 lockdep_assert_held(&phba->hbalock);
1925 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1926 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1927 hbqp->next_hbqPutIdx = 0;
1929 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1930 uint32_t raw_index = phba->hbq_get[hbqno];
1931 uint32_t getidx = le32_to_cpu(raw_index);
1933 hbqp->local_hbqGetIdx = getidx;
1935 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1936 lpfc_printf_log(phba, KERN_ERR,
1937 LOG_SLI | LOG_VPORT,
1938 "1802 HBQ %d: local_hbqGetIdx "
1939 "%u is > than hbqp->entry_count %u\n",
hbqno, hbqp->local_hbqGetIdx,
hbqp->entry_count);

phba->link_state = LPFC_HBA_ERROR;
return NULL;
}

if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
return NULL;
}

return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
hbqp->hbqPutIdx;
}
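/*
* Illustration of the wrap logic above, assuming entry_count = 4: with
* hbqPutIdx = 3, next_hbqPutIdx wraps to 0, and that slot may only be
* handed out once the adapter's get index has moved past it.
*/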
/**
* lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held to free all the
* hbq buffers while uninitializing the SLI interface. It also
* frees the HBQ buffers returned by the firmware but not yet
* processed by the upper layers.
*/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf;
unsigned long flags;
int i, hbq_count;

hbq_count = lpfc_sli_hbq_count();
1973 /* Return all memory used by all HBQs */
1974 spin_lock_irqsave(&phba->hbalock, flags);
1975 for (i = 0; i < hbq_count; ++i) {
1976 list_for_each_entry_safe(dmabuf, next_dmabuf,
1977 &phba->hbqs[i].hbq_buffer_list, list) {
1978 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1979 list_del(&hbq_buf->dbuf.list);
(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
}
phba->hbqs[i].buffer_count = 0;
}
/* Mark the HBQs not in use */
1986 phba->hbq_in_use = 0;
1987 spin_unlock_irqrestore(&phba->hbalock, flags);
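/*
* Clearing hbq_in_use under hbalock is what makes a concurrent
* lpfc_sli_hbqbuf_fill_hbqs() back out instead of posting buffers to a
* queue that is being torn down (see the hbq_in_use check there).
*/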
}

/**
* lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post a
* hbq buffer to the firmware. If the function finds an empty
* slot in the HBQ, it will post the buffer. The function returns
* zero if it successfully posts the buffer; otherwise it returns
* an error.
*/
static int
2003 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
lockdep_assert_held(&phba->hbalock);
return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
* lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post a hbq buffer to the
* firmware. If the function finds an empty slot in the HBQ, it will post the
* buffer and place it on the hbq_buffer_list. The function returns zero if it
* successfully posts the buffer; otherwise it returns an error.
*/
static int
2022 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2023 struct hbq_dmabuf *hbq_buf)
2025 struct lpfc_hbq_entry *hbqe;
2026 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2028 lockdep_assert_held(&phba->hbalock);
2029 /* Get next HBQ entry slot to use */
hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
if (hbqe) {
struct hbq_s *hbqp = &phba->hbqs[hbqno];
2034 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2035 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2036 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2037 hbqe->bde.tus.f.bdeFlags = 0;
2038 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2039 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2041 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2042 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
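/*
* hbq_put is the adapter's HBQ doorbell; the readl() below flushes the
* posted write so the firmware sees the new put index before the buffer
* is tracked on hbq_buffer_list.
*/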
readl(phba->hbq_put + hbqno); /* flush */
list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
return 0;
} else
return -ENOMEM;
}
/**
* lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post an RQE to the SLI4
* firmware. If able to post the RQE to the RQ it will queue the hbq entry to
* the hbq_buffer_list and return zero, otherwise it will return an error.
*/
static int
2062 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
int rc;
struct lpfc_rqe hrqe;
2067 struct lpfc_rqe drqe;
2068 struct lpfc_queue *hrq;
2069 struct lpfc_queue *drq;
if (hbqno != LPFC_ELS_HBQ)
return 1;
hrq = phba->sli4_hba.hdr_rq;
2074 drq = phba->sli4_hba.dat_rq;
2076 lockdep_assert_held(&phba->hbalock);
2077 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2078 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2079 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2080 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
if (rc < 0)
return rc;
hbq_buf->tag = (rc | (hbqno << 16));
list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
return 0;
}
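/*
* Buffer tags pack the owning HBQ number into the upper 16 bits and the
* RQE index returned by lpfc_sli4_rq_put() into the lower 16 bits;
* lpfc_sli_hbqbuf_find() and lpfc_sli_free_hbq() below recover the
* queue number with (tag >> 16).
*/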
2089 /* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 256,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
.buffer_count = 0,
.init_count = 40,
.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
};
/**
* lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @count: Number of HBQ buffers to be posted.
*
* This function is called with no lock held to post more hbq buffers to the
* given HBQ. The function returns the number of HBQ buffers successfully
* posted.
*/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
2119 uint32_t i, posted = 0;
2120 unsigned long flags;
2121 struct hbq_dmabuf *hbq_buffer;
2122 LIST_HEAD(hbq_buf_list);
if (!phba->hbqs[hbqno].hbq_alloc_buffer)
return 0;

if ((phba->hbqs[hbqno].buffer_count + count) >
2127 lpfc_hbq_defs[hbqno]->entry_count)
2128 count = lpfc_hbq_defs[hbqno]->entry_count -
2129 phba->hbqs[hbqno].buffer_count;
2132 /* Allocate HBQ entries */
2133 for (i = 0; i < count; i++) {
hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (!hbq_buffer)
break;
list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
}
2139 /* Check whether HBQ is still in use */
2140 spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use)
goto err;
2143 while (!list_empty(&hbq_buf_list)) {
list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
dbuf.list);
hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
(hbqno << 16));
2148 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
phba->hbqs[hbqno].buffer_count++;
posted++;
} else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return posted;
err:
spin_unlock_irqrestore(&phba->hbalock, flags);
2158 while (!list_empty(&hbq_buf_list)) {
list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
dbuf.list);
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
return 0;
}
/**
* lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
* @phba: Pointer to HBA context object.
* @qno: HBQ number.
*
* This function posts more buffers to the HBQ. This function
* is called with no lock held. The function returns the number of HBQ entries
* successfully allocated.
*/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
if (phba->sli_rev == LPFC_SLI_REV4)
return 0;
else
return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->add_count);
}
/**
* lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
* @phba: Pointer to HBA context object.
* @qno: HBQ queue number.
*
* This function is called from the SLI initialization code path with
* no lock held to post initial HBQ buffers to firmware. The
* function returns the number of HBQ entries successfully allocated.
*/
int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
if (phba->sli_rev == LPFC_SLI_REV4)
return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->entry_count);
else
return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->init_count);
}
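/*
* Note the split: SLI4 seeds the queue with a full entry_count of
* buffers up front and lpfc_sli_hbqbuf_add_hbqs() is a no-op, while
* SLI3 starts with init_count and later grows in add_count steps.
*/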
/**
* lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
* @rb_list: Pointer to the HBQ buffer list to take the buffer from.
*
* This function removes the first hbq buffer on an hbq list and returns a
* pointer to that buffer. If it finds no buffers on the list it returns NULL.
*/
2213 static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
struct lpfc_dmabuf *d_buf;

list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
if (!d_buf)
return NULL;
return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
* lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
* @phba: Pointer to HBA context object.
* @hrq: Pointer to the driver receive queue.
*
* This function removes the first RQ buffer on an RQ buffer list and returns a
* pointer to that buffer. If it finds no buffers on the list it returns NULL.
*/
2232 static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
struct lpfc_dmabuf *h_buf;
struct lpfc_rqb *rqbp;

rqbp = hrq->rqbp;
list_remove_head(&rqbp->rqb_buffer_list, h_buf,
struct lpfc_dmabuf, list);
if (!h_buf)
return NULL;
rqbp->buffer_count--;
return container_of(h_buf, struct rqb_dmabuf, hbuf);
}
/**
* lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
* @phba: Pointer to HBA context object.
* @tag: Tag of the hbq buffer.
*
* This function searches for the hbq buffer associated with the given tag in
* the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
* otherwise it returns NULL.
*/
2256 static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;

hbqno = tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
2267 spin_lock_irq(&phba->hbalock);
2268 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2269 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2270 if (hbq_buf->tag == tag) {
spin_unlock_irq(&phba->hbalock);
return hbq_buf;
}
}
spin_unlock_irq(&phba->hbalock);
2276 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2277 "1803 Bad hbq tag. Data: x%x x%x\n",
tag, phba->hbqs[tag >> 16].buffer_count);
return NULL;
}
/**
* lpfc_sli_free_hbq - Give back the hbq buffer to firmware
* @phba: Pointer to HBA context object.
* @hbq_buffer: Pointer to HBQ buffer.
*
* This function is called with the hbalock held. This function gives back
* the hbq buffer to the firmware. If the HBQ does not have space to
* post the buffer, it will free the buffer.
*/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
uint32_t hbqno;

if (hbq_buffer) {
hbqno = hbq_buffer->tag >> 16;
if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
}
/**
* lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
* @mbxCommand: mailbox command code.
*
* This function is called by the mailbox event handler function to verify
* that the completed mailbox command is a legitimate mailbox command. If the
* completed mailbox is not known to the function, it will return MBX_SHUTDOWN
* and the mailbox event handler will take the HBA offline.
*/
static uint8_t
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
uint8_t ret;

switch (mbxCommand) {
2321 case MBX_WRITE_VPARMS:
2322 case MBX_RUN_BIU_DIAG:
2325 case MBX_CONFIG_LINK:
2326 case MBX_CONFIG_RING:
2327 case MBX_RESET_RING:
2328 case MBX_READ_CONFIG:
2329 case MBX_READ_RCONFIG:
2330 case MBX_READ_SPARM:
2331 case MBX_READ_STATUS:
2335 case MBX_READ_LNK_STAT:
2337 case MBX_UNREG_LOGIN:
2339 case MBX_DUMP_MEMORY:
2340 case MBX_DUMP_CONTEXT:
2343 case MBX_UPDATE_CFG:
2345 case MBX_DEL_LD_ENTRY:
2346 case MBX_RUN_PROGRAM:
2348 case MBX_SET_VARIABLE:
2349 case MBX_UNREG_D_ID:
2350 case MBX_KILL_BOARD:
2351 case MBX_CONFIG_FARP:
2354 case MBX_RUN_BIU_DIAG64:
2355 case MBX_CONFIG_PORT:
2356 case MBX_READ_SPARM64:
2357 case MBX_READ_RPI64:
2358 case MBX_REG_LOGIN64:
2359 case MBX_READ_TOPOLOGY:
2362 case MBX_LOAD_EXP_ROM:
2363 case MBX_ASYNCEVT_ENABLE:
2367 case MBX_PORT_CAPABILITIES:
2368 case MBX_PORT_IOV_CONTROL:
2369 case MBX_SLI4_CONFIG:
2370 case MBX_SLI4_REQ_FTRS:
2372 case MBX_UNREG_FCFI:
2377 case MBX_RESUME_RPI:
2378 case MBX_READ_EVENT_LOG_STATUS:
2379 case MBX_READ_EVENT_LOG:
2380 case MBX_SECURITY_MGMT:
case MBX_ACCESS_VDATA:
ret = mbxCommand;
break;
default:
ret = MBX_SHUTDOWN;
break;
}
return ret;
}
/**
* lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This is the completion handler function for mailbox commands issued from
* the lpfc_sli_issue_mbox_wait function. This function is called by the
* mailbox event handler function with no lock held. This function
* will wake up the thread waiting on the wait queue pointed to by context3
* of the mailbox.
*/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
unsigned long drvr_flag;
struct completion *pmbox_done;

/*
* If pmbox_done is empty, the driver thread gave up waiting and
* continued running.
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
pmbox_done = (struct completion *)pmboxq->context3;
if (pmbox_done)
complete(pmbox_done);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
}
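/*
* Taking hbalock around the context3 read closes the race with a waiter
* that timed out in lpfc_sli_issue_mbox_wait() and already cleared
* pmbox_done, as described in the comment above.
*/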
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
* @pmb: Pointer to mailbox object.
*
* This function is the default mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
* command. If the completed command is a REG_LOGIN mailbox command,
* this function will issue an UNREG_LOGIN to re-claim the RPI.
*/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint16_t rpi, vpi;
int rc;
mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}

/*
* If a REG_LOGIN succeeded after the node was destroyed or the node
* is in re-discovery, the driver needs to clean up the RPI.
*/
2454 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2455 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2456 !pmb->u.mb.mbxStatus) {
2457 rpi = pmb->u.mb.un.varWords[0];
2458 vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
2467 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2468 !(phba->pport->load_flag & FC_UNLOADING) &&
2469 !pmb->u.mb.mbxStatus) {
2470 shost = lpfc_shost_from_vport(vport);
2471 spin_lock_irq(shost->host_lock);
2472 vport->vpi_state |= LPFC_VPI_REGISTERED;
2473 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2474 spin_unlock_irq(shost->host_lock);
2477 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
}
2484 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
/* Check to see if there are any deferred events to process */
if (ndlp) {
lpfc_printf_vlog(
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2492 "1438 UNREG cmpl deferred mbox x%x "
2493 "on NPort x%x Data: x%x x%x %p\n",
2494 ndlp->nlp_rpi, ndlp->nlp_DID,
2495 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2497 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2498 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2499 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2500 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
pmb->ctx_ndlp = NULL;
}
2509 /* Check security permission status on INIT_LINK mailbox command */
2510 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2511 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2512 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2513 "2860 SLI authentication is required "
2514 "for INIT_LINK but has not done yet\n");
2516 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
mempool_free(pmb, phba->mbox_mem_pool);
}
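/*
* SLI4_CONFIG mailboxes may carry non-embedded DMA sub-buffers, so they
* go through lpfc_sli4_mbox_cmd_free() above; a plain mempool_free()
* would leak those resources.
*/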
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
* @phba: Pointer to HBA context object.
* @pmb: Pointer to mailbox object.
*
* This function is the unreg rpi mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
* command. An additional reference is put on the ndlp to prevent
* lpfc_nlp_release from freeing the rpi bit in the bitmask before
* the unreg mailbox command completes; this routine drops that
* reference once the completion has been handled.
*/
static void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_nodelist *ndlp;

ndlp = pmb->ctx_ndlp;
2541 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2542 if (phba->sli_rev == LPFC_SLI_REV4 &&
2543 (bf_get(lpfc_sli_intf_if_type,
2544 &phba->sli4_hba.sli_intf) >=
LPFC_SLI_INTF_IF_TYPE_2)) {
if (ndlp) {
lpfc_printf_vlog(
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2549 "0010 UNREG_LOGIN vpi:%x "
2550 "rpi:%x DID:%x defer x%x flg x%x "
2552 vport->vpi, ndlp->nlp_rpi,
2553 ndlp->nlp_DID, ndlp->nlp_defer_did,
2555 ndlp->nlp_usg_map, ndlp);
2556 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
/* Check to see if there are any deferred
* events to process
*/
2562 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2563 (ndlp->nlp_defer_did !=
2564 NLP_EVT_NOTHING_PENDING)) {
2566 vport, KERN_INFO, LOG_DISCOVERY,
2567 "4111 UNREG cmpl deferred "
2569 "NPort x%x Data: x%x %p\n",
2570 ndlp->nlp_rpi, ndlp->nlp_DID,
2571 ndlp->nlp_defer_did, ndlp);
2572 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2573 ndlp->nlp_defer_did =
2574 NLP_EVT_NOTHING_PENDING;
2575 lpfc_issue_els_plogi(
vport, ndlp->nlp_DID, 0);
} else {
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
}
}

mempool_free(pmb, phba->mbox_mem_pool);
}
/**
* lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held. This function processes all
* the completed mailbox commands and gives them to the upper layers. The
* interrupt service routine processes mailbox completion interrupts and
* adds completed mailbox commands to the mboxq_cmpl queue and signals the
* worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
* returns the completed mailbox commands on the mboxq_cmpl queue to the
* upper layer by calling the completion handler function of each mailbox.
*/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
MAILBOX_t *pmbox;
LPFC_MBOXQ_t *pmb;
int rc;
LIST_HEAD(cmplq);

phba->sli.slistat.mbox_event++;

/* Get all completed mailbox buffers into the cmplq */
2611 spin_lock_irq(&phba->hbalock);
2612 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2613 spin_unlock_irq(&phba->hbalock);
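/*
* Completions are spliced onto the local cmplq above so that each
* mbox_cmpl handler below can run without hbalock held.
*/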
2615 /* Get a Mailbox buffer to setup mailbox commands for callback */
do {
list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
if (pmb == NULL)
break;

pmbox = &pmb->u.mb;
2623 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
if (pmb->vport) {
lpfc_debugfs_disc_trc(pmb->vport,
2626 LPFC_DISC_TRC_MBOX_VPORT,
2627 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2628 (uint32_t)pmbox->mbxCommand,
2629 pmbox->un.varWords[0],
2630 pmbox->un.varWords[1]);
} else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
2635 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2636 (uint32_t)pmbox->mbxCommand,
2637 pmbox->un.varWords[0],
2638 pmbox->un.varWords[1]);
}
}

/*
* An unknown mbox command completion is a fatal error.
*/
if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
MBX_SHUTDOWN) {
/* Unknown mailbox command compl */
2648 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2649 "(%d):0323 Unknown Mailbox command "
2650 "x%x (x%x/x%x) Cmpl\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
lpfc_sli_config_mbox_opcode_get(phba,
pmb));
2657 phba->link_state = LPFC_HBA_ERROR;
2658 phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
continue;
}
2663 if (pmbox->mbxStatus) {
2664 phba->sli.slistat.mbox_stat_err++;
2665 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2666 /* Mbox cmd cmpl error - RETRYing */
2667 lpfc_printf_log(phba, KERN_INFO,
2669 "(%d):0305 Mbox cmd cmpl "
2670 "error - RETRYing Data: x%x "
2671 "(x%x/x%x) x%x x%x x%x\n",
2672 pmb->vport ? pmb->vport->vpi : 0,
2674 lpfc_sli_config_mbox_subsys_get(phba,
2676 lpfc_sli_config_mbox_opcode_get(phba,
2679 pmbox->un.varWords[0],
2680 pmb->vport->port_state);
2681 pmbox->mbxStatus = 0;
2682 pmbox->mbxOwner = OWN_HOST;
2683 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
continue;
}
}
2689 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2690 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2691 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2692 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2694 pmb->vport ? pmb->vport->vpi : 0,
2696 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2697 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2699 *((uint32_t *) pmbox),
2700 pmbox->un.varWords[0],
2701 pmbox->un.varWords[1],
2702 pmbox->un.varWords[2],
2703 pmbox->un.varWords[3],
2704 pmbox->un.varWords[4],
2705 pmbox->un.varWords[5],
2706 pmbox->un.varWords[6],
2707 pmbox->un.varWords[7],
2708 pmbox->un.varWords[8],
2709 pmbox->un.varWords[9],
2710 pmbox->un.varWords[10]);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba, pmb);
} while (1);

return 0;
}
/**
* lpfc_sli_get_buff - Get the buffer associated with the buffer tag
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @tag: buffer tag.
*
* This function is called with no lock held. When the QUE_BUFTAG_BIT bit
* is set in the tag, the buffer was posted for a particular exchange and
* the function returns the buffer without replacing it.
* If the buffer is for unsolicited ELS or CT traffic, this function
* returns the buffer and also posts another buffer to the firmware.
*/
2730 static struct lpfc_dmabuf *
2731 lpfc_sli_get_buff(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
uint32_t tag)
{
struct hbq_dmabuf *hbq_entry;

if (tag & QUE_BUFTAG_BIT)
return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (!hbq_entry)
return NULL;
return &hbq_entry->dbuf;
}
2746 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2747 * @phba: Pointer to HBA context object.
2748 * @pring: Pointer to driver SLI ring object.
2749 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2750 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2751 * @fch_type: the type for the first frame of the sequence.
2753 * This function is called with no lock held. This function uses the r_ctl and
2754 * type of the received sequence to find the correct callback function to call
2755 * to process the sequence.
2758 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
uint32_t fch_type)
{
int i;

switch (fch_type) {
case FC_TYPE_NVME:
lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
return 1;
default:
break;
}

/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
saveq);
return 1;
}
2779 /* We must search, based on rctl / type
2780 for the right routine */
2781 for (i = 0; i < pring->num_mask; i++) {
2782 if ((pring->prt[i].rctl == fch_r_ctl) &&
2783 (pring->prt[i].type == fch_type)) {
2784 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2785 (pring->prt[i].lpfc_sli_rcv_unsol_event)
(phba, pring, saveq);
return 1;
}
}
return 0;
}
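/*
* pring->prt[] is the per-ring dispatch table for unsolicited frames:
* a sequence is routed by matching its r_ctl/type pair against each
* entry, so ELS and CT traffic reach different handlers without any
* hardcoding here.
*/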
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @saveq: Pointer to the unsolicited iocb.
*
* This function is called with no lock held by the ring event handler
* when there is an unsolicited iocb posted to the response ring by the
* firmware. This function gets the buffer associated with the iocbs
* and calls the event handler for the ring. This function handles both
* qring buffers and hbq buffers.
* When the function returns 1 the caller can free the iocb object otherwise
* upper layer functions will free the iocb objects.
*/
static int
2808 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
IOCB_t *irsp;
WORD5 *w5p;
uint32_t Rctl, Type;
2814 struct lpfc_iocbq *iocbq;
2815 struct lpfc_dmabuf *dmzbuf;
2817 irsp = &(saveq->iocb);
2819 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2820 if (pring->lpfc_sli_rcv_async_status)
pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
else
lpfc_printf_log(phba,
KERN_WARNING,
LOG_SLI,
2826 "0316 Ring %d handler: unexpected "
2827 "ASYNC_STATUS iocb received evt_code "
irsp->un.asyncstat.evt_code);
return 1;
}
2834 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2835 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2836 if (irsp->ulpBdeCount > 0) {
2837 dmzbuf = lpfc_sli_get_buff(phba, pring,
2838 irsp->un.ulpWord[3]);
2839 lpfc_in_buf_free(phba, dmzbuf);
2842 if (irsp->ulpBdeCount > 1) {
2843 dmzbuf = lpfc_sli_get_buff(phba, pring,
2844 irsp->unsli3.sli3Words[3]);
2845 lpfc_in_buf_free(phba, dmzbuf);
2848 if (irsp->ulpBdeCount > 2) {
2849 dmzbuf = lpfc_sli_get_buff(phba, pring,
2850 irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
return 1;
}
2857 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2858 if (irsp->ulpBdeCount != 0) {
2859 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2860 irsp->un.ulpWord[3]);
2861 if (!saveq->context2)
2862 lpfc_printf_log(phba,
2865 "0341 Ring %d Cannot find buffer for "
2866 "an unsolicited iocb. tag 0x%x\n",
2868 irsp->un.ulpWord[3]);
2870 if (irsp->ulpBdeCount == 2) {
2871 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2872 irsp->unsli3.sli3Words[7]);
2873 if (!saveq->context3)
2874 lpfc_printf_log(phba,
2877 "0342 Ring %d Cannot find buffer for an"
2878 " unsolicited iocb. tag 0x%x\n",
2880 irsp->unsli3.sli3Words[7]);
2882 list_for_each_entry(iocbq, &saveq->list, list) {
2883 irsp = &(iocbq->iocb);
2884 if (irsp->ulpBdeCount != 0) {
2885 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2886 irsp->un.ulpWord[3]);
2887 if (!iocbq->context2)
2888 lpfc_printf_log(phba,
2891 "0343 Ring %d Cannot find "
2892 "buffer for an unsolicited iocb"
2893 ". tag 0x%x\n", pring->ringno,
2894 irsp->un.ulpWord[3]);
2896 if (irsp->ulpBdeCount == 2) {
2897 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2898 irsp->unsli3.sli3Words[7]);
2899 if (!iocbq->context3)
2900 lpfc_printf_log(phba,
2903 "0344 Ring %d Cannot find "
2904 "buffer for an unsolicited "
2907 irsp->unsli3.sli3Words[7]);
2911 if (irsp->ulpBdeCount != 0 &&
2912 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2913 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2916 /* search continue save q for same XRI */
2917 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2918 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2919 saveq->iocb.unsli3.rcvsli3.ox_id) {
2920 list_add_tail(&saveq->list, &iocbq->list);
2926 list_add_tail(&saveq->clist,
2927 &pring->iocb_continue_saveq);
2928 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2929 list_del_init(&iocbq->clist);
2931 irsp = &(saveq->iocb);
2935 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2936 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2937 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2938 Rctl = FC_RCTL_ELS_REQ;
2941 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2942 Rctl = w5p->hcsw.Rctl;
2943 Type = w5p->hcsw.Type;
2945 /* Firmware Workaround */
2946 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2947 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2948 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2949 Rctl = FC_RCTL_ELS_REQ;
2951 w5p->hcsw.Rctl = Rctl;
2952 w5p->hcsw.Type = Type;
2956 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2957 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2958 "0313 Ring %d handler: unexpected Rctl x%x "
2959 "Type x%x received\n",
2960 pring->ringno, Rctl, Type);
/**
* lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2967 * @phba: Pointer to HBA context object.
2968 * @pring: Pointer to driver SLI ring object.
2969 * @prspiocb: Pointer to response iocb object.
2971 * This function looks up the iocb_lookup table to get the command iocb
2972 * corresponding to the given response iocb using the iotag of the
2973 * response iocb. This function is called with the hbalock held
2974 * for sli3 devices or the ring_lock for sli4 devices.
2975 * This function returns the command iocb object if it finds the command
* iocb else returns NULL.
*/
2978 static struct lpfc_iocbq *
2979 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2980 struct lpfc_sli_ring *pring,
2981 struct lpfc_iocbq *prspiocb)
2983 struct lpfc_iocbq *cmd_iocb = NULL;
2985 lockdep_assert_held(&phba->hbalock);
2987 iotag = prspiocb->iocb.ulpIoTag;
2989 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2990 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2991 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2992 /* remove from txcmpl queue list */
2993 list_del_init(&cmd_iocb->list);
2994 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
return cmd_iocb;
}
}
3000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3001 "0317 iotag x%x is out of "
3002 "range: max iotag x%x wd0 x%x\n",
3003 iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
return NULL;
}
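/*
* The iotag handed out at submit time indexes directly into
* sli.iocbq_lookup, so matching a response to its originating command
* is an O(1) array lookup rather than a list search.
*/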
/**
* lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @iotag: IOCB tag.
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given iotag. This function is called with the
* hbalock held.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
*/
3020 static struct lpfc_iocbq *
3021 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3022 struct lpfc_sli_ring *pring, uint16_t iotag)
3024 struct lpfc_iocbq *cmd_iocb = NULL;
3026 lockdep_assert_held(&phba->hbalock);
3027 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3028 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3029 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3030 /* remove from txcmpl queue list */
3031 list_del_init(&cmd_iocb->list);
3032 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3033 pring->txcmplq_cnt--;
3038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3039 "0372 iotag x%x lookup error: max iotag (x%x) "
3041 iotag, phba->sli.last_iotag,
3042 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
return NULL;
}

/**
* lpfc_sli_process_sol_iocb - process solicited iocb completion
3048 * @phba: Pointer to HBA context object.
3049 * @pring: Pointer to driver SLI ring object.
3050 * @saveq: Pointer to the response iocb to be processed.
3052 * This function is called by the ring event handler for non-fcp
3053 * rings when there is a new response iocb in the response ring.
3054 * The caller is not required to hold any locks. This function
3055 * gets the command iocb associated with the response iocb and
3056 * calls the completion handler for the command iocb. If there
3057 * is no completion handler, the function will free the resources
3058 * associated with command iocb. If the response iocb is for
3059 * an already aborted command iocb, the status of the completion
3060 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
* This function always returns 1.
*/
static int
3064 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3065 struct lpfc_iocbq *saveq)
3067 struct lpfc_iocbq *cmdiocbp;
3069 unsigned long iflag;
3071 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
3072 if (phba->sli_rev == LPFC_SLI_REV4)
3073 spin_lock_irqsave(&pring->ring_lock, iflag);
3075 spin_lock_irqsave(&phba->hbalock, iflag);
3076 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3077 if (phba->sli_rev == LPFC_SLI_REV4)
3078 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3080 spin_unlock_irqrestore(&phba->hbalock, iflag);
3083 if (cmdiocbp->iocb_cmpl) {
3085 * If an ELS command failed send an event to mgmt
3088 if (saveq->iocb.ulpStatus &&
3089 (pring->ringno == LPFC_ELS_RING) &&
3090 (cmdiocbp->iocb.ulpCommand ==
3091 CMD_ELS_REQUEST64_CR))
3092 lpfc_send_els_failure_event(phba,
3096 * Post all ELS completions to the worker thread.
3097 * All other are passed to the completion callback.
3099 if (pring->ringno == LPFC_ELS_RING) {
3100 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3101 (cmdiocbp->iocb_flag &
3102 LPFC_DRIVER_ABORTED)) {
3103 spin_lock_irqsave(&phba->hbalock,
3105 cmdiocbp->iocb_flag &=
3106 ~LPFC_DRIVER_ABORTED;
3107 spin_unlock_irqrestore(&phba->hbalock,
3109 saveq->iocb.ulpStatus =
3110 IOSTAT_LOCAL_REJECT;
3111 saveq->iocb.un.ulpWord[4] =
3114 /* Firmware could still be in progress
3115 * of DMAing payload, so don't free data
3116 * buffer till after a hbeat.
3118 spin_lock_irqsave(&phba->hbalock,
3120 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3121 spin_unlock_irqrestore(&phba->hbalock,
3124 if (phba->sli_rev == LPFC_SLI_REV4) {
3125 if (saveq->iocb_flag &
3126 LPFC_EXCHANGE_BUSY) {
3127 /* Set cmdiocb flag for the
3128 * exchange busy so sgl (xri)
3129 * will not be released until
3130 * the abort xri is received
3134 &phba->hbalock, iflag);
3135 cmdiocbp->iocb_flag |=
3137 spin_unlock_irqrestore(
3138 &phba->hbalock, iflag);
3140 if (cmdiocbp->iocb_flag &
3141 LPFC_DRIVER_ABORTED) {
3143 * Clear LPFC_DRIVER_ABORTED
3144 * bit in case it was driver
3148 &phba->hbalock, iflag);
3149 cmdiocbp->iocb_flag &=
3150 ~LPFC_DRIVER_ABORTED;
3151 spin_unlock_irqrestore(
3152 &phba->hbalock, iflag);
3153 cmdiocbp->iocb.ulpStatus =
3154 IOSTAT_LOCAL_REJECT;
3155 cmdiocbp->iocb.un.ulpWord[4] =
3156 IOERR_ABORT_REQUESTED;
3158 * For SLI4, irsiocb contains
3159 * NO_XRI in sli_xritag, it
3160 * shall not affect releasing
3161 * sgl (xri) process.
3163 saveq->iocb.ulpStatus =
3164 IOSTAT_LOCAL_REJECT;
3165 saveq->iocb.un.ulpWord[4] =
3168 &phba->hbalock, iflag);
3170 LPFC_DELAY_MEM_FREE;
3171 spin_unlock_irqrestore(
3172 &phba->hbalock, iflag);
3176 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3178 lpfc_sli_release_iocbq(phba, cmdiocbp);
3181 * Unknown initiating command based on the response iotag.
3182 * This could be the case on the ELS ring because of
3185 if (pring->ringno != LPFC_ELS_RING) {
3187 * Ring <ringno> handler: unexpected completion IoTag
3190 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3191 "0322 Ring %d handler: "
3192 "unexpected completion IoTag x%x "
3193 "Data: x%x x%x x%x x%x\n",
3195 saveq->iocb.ulpIoTag,
3196 saveq->iocb.ulpStatus,
3197 saveq->iocb.un.ulpWord[4],
3198 saveq->iocb.ulpCommand,
3199 saveq->iocb.ulpContext);
/**
* lpfc_sli_rsp_pointers_error - Response ring pointer error handler
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function is called from the iocb ring event handlers when the
* put pointer is ahead of the get pointer for a ring. This function signals
* an error attention condition to the worker thread and the worker
* thread will transition the HBA to the offline state.
*/
static void
3217 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3219 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3221 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3222 * rsp ring <portRspMax>
3224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3225 "0312 Ring %d handler: portRspPut %d "
3226 "is bigger than rsp ring %d\n",
3227 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3228 pring->sli.sli3.numRiocb);
3230 phba->link_state = LPFC_HBA_ERROR;
3233 * All error attention handlers are posted to
3236 phba->work_ha |= HA_ERATT;
3237 phba->work_hs = HS_FFER3;
3239 lpfc_worker_wake_up(phba);
/**
* lpfc_poll_eratt - Error attention polling timer timeout handler
* @t: Pointer to the timer entry embedded in the HBA context object.
*
* This function is invoked by the Error Attention polling timer when the
* timer times out. It will check the SLI Error Attention register for
* possible attention events. If so, it will post an Error Attention event
* and wake up the worker thread to process it. Otherwise, it will set up
* the Error Attention polling timer for the next poll.
*/
3254 void lpfc_poll_eratt(struct timer_list *t)
3256 struct lpfc_hba *phba;
3258 uint64_t sli_intr, cnt;
3260 phba = from_timer(phba, t, eratt_poll);
3262 /* Here we will also keep track of interrupts per sec of the hba */
3263 sli_intr = phba->sli.slistat.sli_intr;
3265 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3266 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3269 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
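/*
* sli_intr is a free-running 64-bit counter; the branch above keeps the
* per-interval delta correct even across a counter wraparound.
*/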
3271 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3272 do_div(cnt, phba->eratt_poll_interval);
3273 phba->sli.slistat.sli_ips = cnt;
3275 phba->sli.slistat.sli_prev_intr = sli_intr;
3277 /* Check chip HA register for error event */
3278 eratt = lpfc_sli_check_eratt(phba);
3281 /* Tell the worker thread there is work to do */
3282 lpfc_worker_wake_up(phba);
3284 /* Restart the timer for next eratt poll */
3285 mod_timer(&phba->eratt_poll,
3287 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
return;
}

/**
* lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3294 * @phba: Pointer to HBA context object.
3295 * @pring: Pointer to driver SLI ring object.
3296 * @mask: Host attention register mask for this ring.
3298 * This function is called from the interrupt context when there is a ring
3299 * event for the fcp ring. The caller does not hold any lock.
3300 * The function processes each response iocb in the response ring until it
3301 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3302 * LE bit set. The function will call the completion handler of the command iocb
3303 * if the response iocb indicates a completion for a command iocb or it is
3304 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3305 * function if this is an unsolicited iocb.
3306 * This routine presumes LPFC_FCP_RING handling and doesn't bother
* to check it explicitly.
*/
int
3310 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3311 struct lpfc_sli_ring *pring, uint32_t mask)
3313 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3314 IOCB_t *irsp = NULL;
3315 IOCB_t *entry = NULL;
3316 struct lpfc_iocbq *cmdiocbq = NULL;
3317 struct lpfc_iocbq rspiocbq;
3319 uint32_t portRspPut, portRspMax;
3321 lpfc_iocb_type type;
3322 unsigned long iflag;
3323 uint32_t rsp_cmpl = 0;
3325 spin_lock_irqsave(&phba->hbalock, iflag);
3326 pring->stats.iocb_event++;
3329 * The next available response entry should never exceed the maximum
3330 * entries. If it does, treat it as an adapter hardware error.
3332 portRspMax = pring->sli.sli3.numRiocb;
3333 portRspPut = le32_to_cpu(pgp->rspPutInx);
3334 if (unlikely(portRspPut >= portRspMax)) {
3335 lpfc_sli_rsp_pointers_error(phba, pring);
3336 spin_unlock_irqrestore(&phba->hbalock, iflag);
3339 if (phba->fcp_ring_in_use) {
3340 spin_unlock_irqrestore(&phba->hbalock, iflag);
3343 phba->fcp_ring_in_use = 1;
3346 while (pring->sli.sli3.rspidx != portRspPut) {
3348 * Fetch an entry off the ring and copy it into a local data
3349 * structure. The copy involves a byte-swap since the
3350 * network byte order and pci byte orders are different.
3352 entry = lpfc_resp_iocb(phba, pring);
3353 phba->last_completion_time = jiffies;
3355 if (++pring->sli.sli3.rspidx >= portRspMax)
3356 pring->sli.sli3.rspidx = 0;
3358 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3359 (uint32_t *) &rspiocbq.iocb,
3360 phba->iocb_rsp_size);
3361 INIT_LIST_HEAD(&(rspiocbq.list));
3362 irsp = &rspiocbq.iocb;
3364 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3365 pring->stats.iocb_rsp++;
3368 if (unlikely(irsp->ulpStatus)) {
3370 * If resource errors reported from HBA, reduce
3371 * queuedepths of the SCSI device.
3373 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3374 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3375 IOERR_NO_RESOURCES)) {
3376 spin_unlock_irqrestore(&phba->hbalock, iflag);
3377 phba->lpfc_rampdown_queue_depth(phba);
3378 spin_lock_irqsave(&phba->hbalock, iflag);
3381 /* Rsp ring <ringno> error: IOCB */
3382 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3383 "0336 Rsp Ring %d error: IOCB Data: "
3384 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3386 irsp->un.ulpWord[0],
3387 irsp->un.ulpWord[1],
3388 irsp->un.ulpWord[2],
3389 irsp->un.ulpWord[3],
3390 irsp->un.ulpWord[4],
3391 irsp->un.ulpWord[5],
3392 *(uint32_t *)&irsp->un1,
3393 *((uint32_t *)&irsp->un1 + 1));
3397 case LPFC_ABORT_IOCB:
3400 * Idle exchange closed via ABTS from port. No iocb
3401 * resources need to be recovered.
3403 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3404 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3405 "0333 IOCB cmd 0x%x"
3406 " processed. Skipping"
3412 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3414 if (unlikely(!cmdiocbq))
3416 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3417 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3418 if (cmdiocbq->iocb_cmpl) {
3419 spin_unlock_irqrestore(&phba->hbalock, iflag);
3420 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3422 spin_lock_irqsave(&phba->hbalock, iflag);
3425 case LPFC_UNSOL_IOCB:
3426 spin_unlock_irqrestore(&phba->hbalock, iflag);
3427 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3428 spin_lock_irqsave(&phba->hbalock, iflag);
3431 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3432 char adaptermsg[LPFC_MAX_ADPTMSG];
3433 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3434 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3436 dev_warn(&((phba->pcidev)->dev),
3438 phba->brd_no, adaptermsg);
3440 /* Unknown IOCB command */
3441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3442 "0334 Unknown IOCB command "
3443 "Data: x%x, x%x x%x x%x x%x\n",
3444 type, irsp->ulpCommand,
3453 * The response IOCB has been processed. Update the ring
3454 * pointer in SLIM. If the port response put pointer has not
3455 * been updated, sync the pgp->rspPutInx and fetch the new port
3456 * response put pointer.
3458 writel(pring->sli.sli3.rspidx,
3459 &phba->host_gp[pring->ringno].rspGetInx);
3461 if (pring->sli.sli3.rspidx == portRspPut)
3462 portRspPut = le32_to_cpu(pgp->rspPutInx);
3465 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3466 pring->stats.iocb_rsp_full++;
3467 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3468 writel(status, phba->CAregaddr);
3469 readl(phba->CAregaddr);
3471 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3472 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3473 pring->stats.iocb_cmd_empty++;
3475 /* Force update of the local copy of cmdGetInx */
3476 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3477 lpfc_sli_resume_iocb(phba, pring);
3479 if ((pring->lpfc_sli_cmd_available))
3480 (pring->lpfc_sli_cmd_available) (phba, pring);
3484 phba->fcp_ring_in_use = 0;
3485 spin_unlock_irqrestore(&phba->hbalock, iflag);
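/*
* fcp_ring_in_use, toggled under hbalock above, keeps a second CPU from
* walking the same FCP response ring concurrently; a competing caller
* simply returns and lets the current owner drain the ring.
*/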
return rc;
}

/**
* lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3491 * @phba: Pointer to HBA context object.
3492 * @pring: Pointer to driver SLI ring object.
3493 * @rspiocbp: Pointer to driver response IOCB object.
3495 * This function is called from the worker thread when there is a slow-path
3496 * response IOCB to process. This function chains all the response iocbs until
3497 * seeing the iocb with the LE bit set. The function will call
3498 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3499 * completion of a command iocb. The function will call the
3500 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3501 * The function frees the resources or calls the completion handler if this
3502 * iocb is an abort completion. The function returns NULL when the response
3503 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3504 * this function shall chain the iocb on to the iocb_continueq and return the
* response iocb passed in.
*/
3507 static struct lpfc_iocbq *
3508 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3509 struct lpfc_iocbq *rspiocbp)
3511 struct lpfc_iocbq *saveq;
3512 struct lpfc_iocbq *cmdiocbp;
3513 struct lpfc_iocbq *next_iocb;
3514 IOCB_t *irsp = NULL;
3515 uint32_t free_saveq;
3516 uint8_t iocb_cmd_type;
3517 lpfc_iocb_type type;
3518 unsigned long iflag;
3521 spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
3523 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3524 pring->iocb_continueq_cnt++;
3526 /* Now, determine whether the list is completed for processing */
3527 irsp = &rspiocbp->iocb;
3530 * By default, the driver expects to free all resources
3531 * associated with this iocb completion.
3534 saveq = list_get_first(&pring->iocb_continueq,
3535 struct lpfc_iocbq, list);
3536 irsp = &(saveq->iocb);
3537 list_del_init(&pring->iocb_continueq);
3538 pring->iocb_continueq_cnt = 0;
3540 pring->stats.iocb_rsp++;
3543 * If resource errors reported from HBA, reduce
3544 * queuedepths of the SCSI device.
3546 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3547 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3548 IOERR_NO_RESOURCES)) {
3549 spin_unlock_irqrestore(&phba->hbalock, iflag);
3550 phba->lpfc_rampdown_queue_depth(phba);
3551 spin_lock_irqsave(&phba->hbalock, iflag);
3554 if (irsp->ulpStatus) {
3555 /* Rsp ring <ringno> error: IOCB */
3556 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3557 "0328 Rsp Ring %d error: "
3562 "x%x x%x x%x x%x\n",
3564 irsp->un.ulpWord[0],
3565 irsp->un.ulpWord[1],
3566 irsp->un.ulpWord[2],
3567 irsp->un.ulpWord[3],
3568 irsp->un.ulpWord[4],
3569 irsp->un.ulpWord[5],
3570 *(((uint32_t *) irsp) + 6),
3571 *(((uint32_t *) irsp) + 7),
3572 *(((uint32_t *) irsp) + 8),
3573 *(((uint32_t *) irsp) + 9),
3574 *(((uint32_t *) irsp) + 10),
3575 *(((uint32_t *) irsp) + 11),
3576 *(((uint32_t *) irsp) + 12),
3577 *(((uint32_t *) irsp) + 13),
3578 *(((uint32_t *) irsp) + 14),
3579 *(((uint32_t *) irsp) + 15));
3583 * Fetch the IOCB command type and call the correct completion
3584 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3585 * get freed back to the lpfc_iocb_list by the discovery
3588 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3589 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3592 spin_unlock_irqrestore(&phba->hbalock, iflag);
3593 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3594 spin_lock_irqsave(&phba->hbalock, iflag);
3597 case LPFC_UNSOL_IOCB:
3598 spin_unlock_irqrestore(&phba->hbalock, iflag);
3599 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3600 spin_lock_irqsave(&phba->hbalock, iflag);
3605 case LPFC_ABORT_IOCB:
3607 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3608 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3611 /* Call the specified completion routine */
3612 if (cmdiocbp->iocb_cmpl) {
3613 spin_unlock_irqrestore(&phba->hbalock,
3615 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3617 spin_lock_irqsave(&phba->hbalock,
3620 __lpfc_sli_release_iocbq(phba,
3625 case LPFC_UNKNOWN_IOCB:
3626 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3627 char adaptermsg[LPFC_MAX_ADPTMSG];
3628 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3629 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3631 dev_warn(&((phba->pcidev)->dev),
3633 phba->brd_no, adaptermsg);
3635 /* Unknown IOCB command */
3636 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3637 "0335 Unknown IOCB "
3638 "command Data: x%x "
3649 list_for_each_entry_safe(rspiocbp, next_iocb,
3650 &saveq->list, list) {
3651 list_del_init(&rspiocbp->list);
3652 __lpfc_sli_release_iocbq(phba, rspiocbp);
3654 __lpfc_sli_release_iocbq(phba, saveq);
3658 spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}

/**
* lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3664 * @phba: Pointer to HBA context object.
3665 * @pring: Pointer to driver SLI ring object.
3666 * @mask: Host attention register mask for this ring.
3668 * This routine wraps the actual slow_ring event process routine from the
* API jump table function pointer from the lpfc_hba struct.
*/
void
3672 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3673 struct lpfc_sli_ring *pring, uint32_t mask)
{
phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
* lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3680 * @phba: Pointer to HBA context object.
3681 * @pring: Pointer to driver SLI ring object.
3682 * @mask: Host attention register mask for this ring.
3684 * This function is called from the worker thread when there is a ring event
3685 * for non-fcp rings. The caller does not hold any lock. The function will
3686 * remove each response iocb in the response ring and calls the handle
* response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
*/
static void
3690 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3691 struct lpfc_sli_ring *pring, uint32_t mask)
3693 struct lpfc_pgp *pgp;
3695 IOCB_t *irsp = NULL;
3696 struct lpfc_iocbq *rspiocbp = NULL;
3697 uint32_t portRspPut, portRspMax;
3698 unsigned long iflag;
3701 pgp = &phba->port_gp[pring->ringno];
3702 spin_lock_irqsave(&phba->hbalock, iflag);
3703 pring->stats.iocb_event++;
3706 * The next available response entry should never exceed the maximum
3707 * entries. If it does, treat it as an adapter hardware error.
3709 portRspMax = pring->sli.sli3.numRiocb;
3710 portRspPut = le32_to_cpu(pgp->rspPutInx);
3711 if (portRspPut >= portRspMax) {
3713 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3714 * rsp ring <portRspMax>
3716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3717 "0303 Ring %d handler: portRspPut %d "
3718 "is bigger than rsp ring %d\n",
3719 pring->ringno, portRspPut, portRspMax);
3721 phba->link_state = LPFC_HBA_ERROR;
3722 spin_unlock_irqrestore(&phba->hbalock, iflag);
3724 phba->work_hs = HS_FFER3;
3725 lpfc_handle_eratt(phba);
3731 while (pring->sli.sli3.rspidx != portRspPut) {
3733 * Build a completion list and call the appropriate handler.
3734 * The process is to get the next available response iocb, get
3735 * a free iocb from the list, copy the response data into the
3736 * free iocb, insert to the continuation list, and update the
3737 * next response index to slim. This process makes response
3738 * iocb's in the ring available to DMA as fast as possible but
3739 * pays a penalty for a copy operation. Since the iocb is
3740 * only 32 bytes, this penalty is considered small relative to
3741 * the PCI reads for register values and a slim write. When
3742 * the ulpLe field is set, the entire Command has been
3745 entry = lpfc_resp_iocb(phba, pring);
3747 phba->last_completion_time = jiffies;
3748 rspiocbp = __lpfc_sli_get_iocbq(phba);
3749 if (rspiocbp == NULL) {
3750 printk(KERN_ERR "%s: out of buffers! Failing "
3751 "completion.\n", __func__);
3755 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3756 phba->iocb_rsp_size);
3757 irsp = &rspiocbp->iocb;
3759 if (++pring->sli.sli3.rspidx >= portRspMax)
3760 pring->sli.sli3.rspidx = 0;
3762 if (pring->ringno == LPFC_ELS_RING) {
3763 lpfc_debugfs_slow_ring_trc(phba,
3764 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3765 *(((uint32_t *) irsp) + 4),
3766 *(((uint32_t *) irsp) + 6),
3767 *(((uint32_t *) irsp) + 7));
3770 writel(pring->sli.sli3.rspidx,
3771 &phba->host_gp[pring->ringno].rspGetInx);
3773 spin_unlock_irqrestore(&phba->hbalock, iflag);
3774 /* Handle the response IOCB */
3775 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3776 spin_lock_irqsave(&phba->hbalock, iflag);
3779 * If the port response put pointer has not been updated, sync
* the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3781 * response put pointer.
3783 if (pring->sli.sli3.rspidx == portRspPut) {
3784 portRspPut = le32_to_cpu(pgp->rspPutInx);
3786 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3788 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3789 /* At least one response entry has been freed */
3790 pring->stats.iocb_rsp_full++;
3791 /* SET RxRE_RSP in Chip Att register */
3792 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3793 writel(status, phba->CAregaddr);
3794 readl(phba->CAregaddr); /* flush */
3796 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3797 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3798 pring->stats.iocb_cmd_empty++;
3800 /* Force update of the local copy of cmdGetInx */
3801 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3802 lpfc_sli_resume_iocb(phba, pring);
3804 if ((pring->lpfc_sli_cmd_available))
3805 (pring->lpfc_sli_cmd_available) (phba, pring);
3809 spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
* lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3815 * @phba: Pointer to HBA context object.
3816 * @pring: Pointer to driver SLI ring object.
3817 * @mask: Host attention register mask for this ring.
3819 * This function is called from the worker thread when there is a pending
3820 * ELS response iocb on the driver internal slow-path response iocb worker
3821 * queue. The caller does not hold any lock. The function will remove each
3822 * response iocb from the response worker queue and calls the handle
* response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
*/
static void
3826 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3827 struct lpfc_sli_ring *pring, uint32_t mask)
3829 struct lpfc_iocbq *irspiocbq;
3830 struct hbq_dmabuf *dmabuf;
3831 struct lpfc_cq_event *cq_event;
3832 unsigned long iflag;
3835 spin_lock_irqsave(&phba->hbalock, iflag);
3836 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3837 spin_unlock_irqrestore(&phba->hbalock, iflag);
3838 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3839 /* Get the response iocb from the head of work queue */
3840 spin_lock_irqsave(&phba->hbalock, iflag);
3841 list_remove_head(&phba->sli4_hba.sp_queue_event,
3842 cq_event, struct lpfc_cq_event, list);
3843 spin_unlock_irqrestore(&phba->hbalock, iflag);
3845 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3846 case CQE_CODE_COMPL_WQE:
3847 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3849 /* Translate ELS WCQE to response IOCBQ */
3850 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3853 lpfc_sli_sp_handle_rspiocb(phba, pring,
3857 case CQE_CODE_RECEIVE:
3858 case CQE_CODE_RECEIVE_V1:
3859 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3861 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3868 /* Limit the number of events to 64 to avoid soft lockups */
/**
* lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function aborts all iocbs in the given ring and frees all the iocb
* objects in txq. This function issues an abort iocb for all the iocb commands
* in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
* the return of this function. The caller is not required to hold any locks.
*/
void
3885 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3887 LIST_HEAD(completions);
3888 struct lpfc_iocbq *iocb, *next_iocb;
3890 if (pring->ringno == LPFC_ELS_RING) {
3891 lpfc_fabric_abort_hba(phba);
3894 /* Error everything on txq and txcmplq
3897 if (phba->sli_rev >= LPFC_SLI_REV4) {
3898 spin_lock_irq(&pring->ring_lock);
3899 list_splice_init(&pring->txq, &completions);
3901 spin_unlock_irq(&pring->ring_lock);
3903 spin_lock_irq(&phba->hbalock);
3904 /* Next issue ABTS for everything on the txcmplq */
3905 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3906 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3907 spin_unlock_irq(&phba->hbalock);
3909 spin_lock_irq(&phba->hbalock);
3910 list_splice_init(&pring->txq, &completions);
3913 /* Next issue ABTS for everything on the txcmplq */
3914 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3915 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3916 spin_unlock_irq(&phba->hbalock);
3919 /* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
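/*
* Note the split above: txq entries never reached the hardware, so they
* are failed locally here, while txcmplq entries are owned by the
* firmware and must be reclaimed through the ABTS requests issued
* earlier.
*/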
/**
* lpfc_sli_abort_wqe_ring - Abort all wqes in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function aborts all wqes in the given ring and frees all the iocb
* objects in txq. This function issues an abort wqe for all the iocb commands
* in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
* the return of this function. The caller is not required to hold any locks.
*/
void
3935 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3937 LIST_HEAD(completions);
3938 struct lpfc_iocbq *iocb, *next_iocb;
3940 if (pring->ringno == LPFC_ELS_RING)
3941 lpfc_fabric_abort_hba(phba);
3943 spin_lock_irq(&phba->hbalock);
3944 /* Next issue ABTS for everything on the txcmplq */
3945 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3946 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3947 spin_unlock_irq(&phba->hbalock);
3952 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3953 * @phba: Pointer to HBA context object.
3956 * This function aborts all iocbs in FCP rings and frees all the iocb
3957 * objects in txq. This function issues an abort iocb for all the iocb commands
3958 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3959 * the return of this function. The caller is not required to hold any locks.
3962 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3964 struct lpfc_sli *psli = &phba->sli;
3965 struct lpfc_sli_ring *pring;
3968 /* Look on all the FCP Rings for the iotag */
3969 if (phba->sli_rev >= LPFC_SLI_REV4) {
3970 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3971 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3972 lpfc_sli_abort_iocb_ring(phba, pring);
3975 pring = &psli->sli3_ring[LPFC_FCP_RING];
3976 lpfc_sli_abort_iocb_ring(phba, pring);
3981 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3982 * @phba: Pointer to HBA context object.
3984 * This function aborts all wqes in NVME rings. This function issues an
3985 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3986 * the txcmplq are not guaranteed to complete before the return of this
3987 * function. The caller is not required to hold any locks.
3990 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3992 struct lpfc_sli_ring *pring;
3995 if ((phba->sli_rev < LPFC_SLI_REV4) ||
3996 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3999 /* Abort all IO on each NVME ring. */
4000 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4001 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4002 lpfc_sli_abort_wqe_ring(phba, pring);
4008 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
4009 * @phba: Pointer to HBA context object.
4011 * This function flushes all iocbs in the fcp ring and frees all the iocb
4012 * objects in txq and txcmplq. This function does not issue abort iocbs
4013 * for the iocb commands in txcmplq; they are simply returned with
4014 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4015 * slot has been permanently disabled.
4018 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4022 struct lpfc_sli *psli = &phba->sli;
4023 struct lpfc_sli_ring *pring;
4025 struct lpfc_iocbq *piocb, *next_iocb;
4027 spin_lock_irq(&phba->hbalock);
4028 /* Indicate the I/O queues are flushed */
4029 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
4030 spin_unlock_irq(&phba->hbalock);
4032 /* Look on all the FCP Rings for the iotag */
4033 if (phba->sli_rev >= LPFC_SLI_REV4) {
4034 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4035 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
4037 spin_lock_irq(&pring->ring_lock);
4038 /* Retrieve everything on txq */
4039 list_splice_init(&pring->txq, &txq);
4040 list_for_each_entry_safe(piocb, next_iocb,
4041 &pring->txcmplq, list)
4042 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4043 /* Retrieve everything on the txcmplq */
4044 list_splice_init(&pring->txcmplq, &txcmplq);
4046 pring->txcmplq_cnt = 0;
4047 spin_unlock_irq(&pring->ring_lock);
4050 lpfc_sli_cancel_iocbs(phba, &txq,
4051 IOSTAT_LOCAL_REJECT,
4053 /* Flush the txcmplq */
4054 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4055 IOSTAT_LOCAL_REJECT,
4059 pring = &psli->sli3_ring[LPFC_FCP_RING];
4061 spin_lock_irq(&phba->hbalock);
4062 /* Retrieve everything on txq */
4063 list_splice_init(&pring->txq, &txq);
4064 list_for_each_entry_safe(piocb, next_iocb,
4065 &pring->txcmplq, list)
4066 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4067 /* Retrieve everything on the txcmplq */
4068 list_splice_init(&pring->txcmplq, &txcmplq);
4070 pring->txcmplq_cnt = 0;
4071 spin_unlock_irq(&phba->hbalock);
4074 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4076 /* Flush the txcmplq */
4077 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
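/*
 * Illustrative sketch (not part of the driver): a hypothetical EEH
 * permanent-failure handler invoking the flush above. Once the PCI slot is
 * permanently disabled no ABTS can reach the port, so outstanding commands
 * are simply completed back with IOERR_SLI_DOWN. The function name is an
 * assumption for illustration.
 */
#if 0
static void example_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	/* Clean up all outstanding FCP I/O; no aborts are attempted. */
	lpfc_sli_flush_fcp_rings(phba);
}
#endif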
4083 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4084 * @phba: Pointer to HBA context object.
4086 * This function flushes all wqes in the nvme rings and frees all resources
4087 * in the txcmplq. This function does not issue abort wqes for the IO
4088 * commands in txcmplq; they are simply returned with
4089 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4090 * slot has been permanently disabled.
4093 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4096 struct lpfc_sli_ring *pring;
4098 struct lpfc_iocbq *piocb, *next_iocb;
4100 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4101 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4104 /* Hint to other driver operations that a flush is in progress. */
4105 spin_lock_irq(&phba->hbalock);
4106 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4107 spin_unlock_irq(&phba->hbalock);
4109 /* Cycle through all NVME rings and complete each IO with
4110 * a local driver reason code. This is a flush, so no
4111 * abort exchange is sent to the FW.
4113 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4114 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4116 spin_lock_irq(&pring->ring_lock);
4117 list_for_each_entry_safe(piocb, next_iocb,
4118 &pring->txcmplq, list)
4119 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4120 /* Retrieve everything on the txcmplq */
4121 list_splice_init(&pring->txcmplq, &txcmplq);
4122 pring->txcmplq_cnt = 0;
4123 spin_unlock_irq(&pring->ring_lock);
4125 /* Flush the txcmplq */
4126 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4127 IOSTAT_LOCAL_REJECT,
4133 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4134 * @phba: Pointer to HBA context object.
4135 * @mask: Bit mask to be checked.
4137 * This function reads the host status register and compares
4138 * with the provided bit mask to check if HBA completed
4139 * the restart. This function will wait in a loop for the
4140 * HBA to complete restart. If the HBA does not restart within
4141 * 15 iterations, the function will reset the HBA again. The
4142 * function returns 1 when the HBA fails to restart, otherwise returns zero.
4146 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4152 /* Read the HBA Host Status Register */
4153 if (lpfc_readl(phba->HSregaddr, &status))
4157 * Check status register every 100ms for 5 retries, then every
4158 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4159 * every 2.5 sec for 4.
4160 * Break out of the loop if errors occurred during init.
4162 while (((status & mask) != mask) &&
4163 !(status & HS_FFERM) &&
4175 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4176 lpfc_sli_brdrestart(phba);
4178 /* Read the HBA Host Status Register */
4179 if (lpfc_readl(phba->HSregaddr, &status)) {
4185 /* Check to see if any errors occurred during init */
4186 if ((status & HS_FFERM) || (i >= 20)) {
4187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4188 "2751 Adapter failed to restart, "
4189 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4191 readl(phba->MBslimaddr + 0xa8),
4192 readl(phba->MBslimaddr + 0xac));
4193 phba->link_state = LPFC_HBA_ERROR;
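/*
 * Illustrative sketch (not part of the driver): the retry cadence the loop
 * above implements, written out as data. The driver encodes the same
 * schedule directly in its loop counter; this table is illustration only.
 */
#if 0
static const struct {
	unsigned int retries;
	unsigned int msec;
} example_brdready_s3_schedule[] = {
	{ 5,  100 },	/* every 100ms for 5 retries */
	{ 5,  500 },	/* then every 500ms for 5 */
	{ 5, 2500 },	/* then every 2.5 sec for 5 */
	{ 4, 2500 },	/* after resetting the board, every 2.5 sec for 4 */
};
#endif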
4201 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4202 * @phba: Pointer to HBA context object.
4203 * @mask: Bit mask to be checked.
4205 * This function reads the host status register to check if the HBA is
4206 * ready. This function will wait in a loop for the HBA to be ready.
4207 * If the HBA is not ready, the function will reset the HBA PCI
4208 * function again. The function returns 1 when the HBA fails to be ready,
4209 * otherwise returns zero.
4212 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4217 /* Read the HBA Host Status Register */
4218 status = lpfc_sli4_post_status_check(phba);
4221 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4222 lpfc_sli_brdrestart(phba);
4223 status = lpfc_sli4_post_status_check(phba);
4226 /* Check to see if any errors occurred during init */
4228 phba->link_state = LPFC_HBA_ERROR;
4231 phba->sli4_hba.intr_enable = 0;
4237 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4238 * @phba: Pointer to HBA context object.
4239 * @mask: Bit mask to be checked.
4241 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine,
4242 * invoked via the API jump table function pointer in the lpfc_hba struct.
4245 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4247 return phba->lpfc_sli_brdready(phba, mask);
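/*
 * Illustrative sketch (not part of the driver): how the jump-table pointer
 * consumed by the wrapper above could be wired up when the SLI revision is
 * selected. The setup function name is hypothetical; the driver performs
 * equivalent assignments in its API table initialization.
 */
#if 0
static void example_init_brdready_api(struct lpfc_hba *phba)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
}
#endif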
4250 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4253 * lpfc_reset_barrier - Make HBA ready for HBA reset
4254 * @phba: Pointer to HBA context object.
4256 * This function is called with hbalock held before resetting an HBA;
4257 * it requests the HBA to quiesce DMAs before the reset.
4259 void lpfc_reset_barrier(struct lpfc_hba *phba)
4261 uint32_t __iomem *resp_buf;
4262 uint32_t __iomem *mbox_buf;
4263 volatile uint32_t mbox;
4264 uint32_t hc_copy, ha_copy, resp_data;
4268 lockdep_assert_held(&phba->hbalock);
4270 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4271 if (hdrtype != 0x80 ||
4272 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4273 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4277 * Tell the other part of the chip to temporarily suspend all its DMA activity.
4280 resp_buf = phba->MBslimaddr;
4282 /* Disable the error attention */
4283 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4285 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4286 readl(phba->HCregaddr); /* flush */
4287 phba->link_flag |= LS_IGNORE_ERATT;
4289 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4291 if (ha_copy & HA_ERATT) {
4292 /* Clear Chip error bit */
4293 writel(HA_ERATT, phba->HAregaddr);
4294 phba->pport->stopped = 1;
4298 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4299 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4301 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4302 mbox_buf = phba->MBslimaddr;
4303 writel(mbox, mbox_buf);
4305 for (i = 0; i < 50; i++) {
4306 if (lpfc_readl((resp_buf + 1), &resp_data))
4308 if (resp_data != ~(BARRIER_TEST_PATTERN))
4314 if (lpfc_readl((resp_buf + 1), &resp_data))
4316 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4317 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4318 phba->pport->stopped)
4324 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4326 for (i = 0; i < 500; i++) {
4327 if (lpfc_readl(resp_buf, &resp_data))
4329 if (resp_data != mbox)
4338 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4340 if (!(ha_copy & HA_ERATT))
4346 if (readl(phba->HAregaddr) & HA_ERATT) {
4347 writel(HA_ERATT, phba->HAregaddr);
4348 phba->pport->stopped = 1;
4352 phba->link_flag &= ~LS_IGNORE_ERATT;
4353 writel(hc_copy, phba->HCregaddr);
4354 readl(phba->HCregaddr); /* flush */
4358 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4359 * @phba: Pointer to HBA context object.
4361 * This function issues a kill_board mailbox command and waits for
4362 * the error attention interrupt. This function is called for stopping
4363 * the firmware processing. The caller is not required to hold any
4364 * locks. This function calls lpfc_hba_down_post function to free
4365 * any pending commands after the kill. The function returns 1 if it
4366 * fails to kill the board, else 0.
4369 lpfc_sli_brdkill(struct lpfc_hba *phba)
4371 struct lpfc_sli *psli;
4381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4382 "0329 Kill HBA Data: x%x x%x\n",
4383 phba->pport->port_state, psli->sli_flag);
4385 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4389 /* Disable the error attention */
4390 spin_lock_irq(&phba->hbalock);
4391 if (lpfc_readl(phba->HCregaddr, &status)) {
4392 spin_unlock_irq(&phba->hbalock);
4393 mempool_free(pmb, phba->mbox_mem_pool);
4396 status &= ~HC_ERINT_ENA;
4397 writel(status, phba->HCregaddr);
4398 readl(phba->HCregaddr); /* flush */
4399 phba->link_flag |= LS_IGNORE_ERATT;
4400 spin_unlock_irq(&phba->hbalock);
4402 lpfc_kill_board(phba, pmb);
4403 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4404 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4406 if (retval != MBX_SUCCESS) {
4407 if (retval != MBX_BUSY)
4408 mempool_free(pmb, phba->mbox_mem_pool);
4409 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4410 "2752 KILL_BOARD command failed retval %d\n",
4412 spin_lock_irq(&phba->hbalock);
4413 phba->link_flag &= ~LS_IGNORE_ERATT;
4414 spin_unlock_irq(&phba->hbalock);
4418 spin_lock_irq(&phba->hbalock);
4419 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4420 spin_unlock_irq(&phba->hbalock);
4422 mempool_free(pmb, phba->mbox_mem_pool);
4424 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4425 * attention every 100ms for 3 seconds. If we don't get ERATT after
4426 * 3 seconds we still set HBA_ERROR state because the status of the
4427 * board is now undefined.
4429 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4431 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4433 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4437 del_timer_sync(&psli->mbox_tmo);
4438 if (ha_copy & HA_ERATT) {
4439 writel(HA_ERATT, phba->HAregaddr);
4440 phba->pport->stopped = 1;
4442 spin_lock_irq(&phba->hbalock);
4443 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4444 psli->mbox_active = NULL;
4445 phba->link_flag &= ~LS_IGNORE_ERATT;
4446 spin_unlock_irq(&phba->hbalock);
4448 lpfc_hba_down_post(phba);
4449 phba->link_state = LPFC_HBA_ERROR;
4451 return ha_copy & HA_ERATT ? 0 : 1;
4455 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4456 * @phba: Pointer to HBA context object.
4458 * This function resets the HBA by writing HC_INITFF to the control
4459 * register. After the HBA resets, this function resets all the iocb ring
4460 * indices. This function disables PCI layer parity checking during the reset.
4462 * This function returns 0 always.
4463 * The caller is not required to hold any locks.
4466 lpfc_sli_brdreset(struct lpfc_hba *phba)
4468 struct lpfc_sli *psli;
4469 struct lpfc_sli_ring *pring;
4476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4477 "0325 Reset HBA Data: x%x x%x\n",
4478 (phba->pport) ? phba->pport->port_state : 0,
4481 /* perform board reset */
4482 phba->fc_eventTag = 0;
4483 phba->link_events = 0;
4485 phba->pport->fc_myDID = 0;
4486 phba->pport->fc_prevDID = 0;
4489 /* Turn off parity checking and serr during the physical reset */
4490 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4491 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4493 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4495 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4497 /* Now toggle INITFF bit in the Host Control Register */
4498 writel(HC_INITFF, phba->HCregaddr);
4500 readl(phba->HCregaddr); /* flush */
4501 writel(0, phba->HCregaddr);
4502 readl(phba->HCregaddr); /* flush */
4504 /* Restore PCI cmd register */
4505 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4507 /* Initialize relevant SLI info */
4508 for (i = 0; i < psli->num_rings; i++) {
4509 pring = &psli->sli3_ring[i];
4511 pring->sli.sli3.rspidx = 0;
4512 pring->sli.sli3.next_cmdidx = 0;
4513 pring->sli.sli3.local_getidx = 0;
4514 pring->sli.sli3.cmdidx = 0;
4515 pring->missbufcnt = 0;
4518 phba->link_state = LPFC_WARM_START;
4523 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4524 * @phba: Pointer to HBA context object.
4526 * This function resets a SLI4 HBA. It disables PCI layer parity
4527 * checking while it resets the device. The caller is not required to hold any locks.
4530 * This function returns 0 always.
4533 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4535 struct lpfc_sli *psli = &phba->sli;
4540 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4541 "0295 Reset HBA Data: x%x x%x x%x\n",
4542 phba->pport->port_state, psli->sli_flag,
4545 /* perform board reset */
4546 phba->fc_eventTag = 0;
4547 phba->link_events = 0;
4548 phba->pport->fc_myDID = 0;
4549 phba->pport->fc_prevDID = 0;
4551 spin_lock_irq(&phba->hbalock);
4552 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4553 phba->fcf.fcf_flag = 0;
4554 spin_unlock_irq(&phba->hbalock);
4556 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4557 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4558 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4562 /* Now physically reset the device */
4563 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4564 "0389 Performing PCI function reset!\n");
4566 /* Turn off parity checking and serr during the physical reset */
4567 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4568 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4571 /* Perform FCoE PCI function reset before freeing queue memory */
4572 rc = lpfc_pci_function_reset(phba);
4574 /* Restore PCI cmd register */
4575 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4581 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4582 * @phba: Pointer to HBA context object.
4584 * This function is called in the SLI initialization code path to
4585 * restart the HBA. The caller is not required to hold any lock.
4586 * This function writes MBX_RESTART mailbox command to the SLIM and
4587 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4588 * function to free any pending commands. The function enables
4589 * POST only during the first initialization. The function returns zero.
4590 * The function does not guarantee completion of MBX_RESTART mailbox
4591 * command before the return of this function.
4594 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4597 struct lpfc_sli *psli;
4598 volatile uint32_t word0;
4599 void __iomem *to_slim;
4600 uint32_t hba_aer_enabled;
4602 spin_lock_irq(&phba->hbalock);
4604 /* Take PCIe device Advanced Error Reporting (AER) state */
4605 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4610 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4611 "0337 Restart HBA Data: x%x x%x\n",
4612 (phba->pport) ? phba->pport->port_state : 0,
4616 mb = (MAILBOX_t *) &word0;
4617 mb->mbxCommand = MBX_RESTART;
4620 lpfc_reset_barrier(phba);
4622 to_slim = phba->MBslimaddr;
4623 writel(*(uint32_t *) mb, to_slim);
4624 readl(to_slim); /* flush */
4626 /* Only skip post after fc_ffinit is completed */
4627 if (phba->pport && phba->pport->port_state)
4628 word0 = 1; /* This is really setting up word1 */
4630 word0 = 0; /* This is really setting up word1 */
4631 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4632 writel(*(uint32_t *) mb, to_slim);
4633 readl(to_slim); /* flush */
4635 lpfc_sli_brdreset(phba);
4637 phba->pport->stopped = 0;
4638 phba->link_state = LPFC_INIT_START;
4640 spin_unlock_irq(&phba->hbalock);
4642 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4643 psli->stats_start = ktime_get_seconds();
4645 /* Give the INITFF and Post time to settle. */
4648 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4649 if (hba_aer_enabled)
4650 pci_disable_pcie_error_reporting(phba->pcidev);
4652 lpfc_hba_down_post(phba);
4658 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4659 * @phba: Pointer to HBA context object.
4661 * This function is called in the SLI initialization code path to restart
4662 * a SLI4 HBA. The caller is not required to hold any lock.
4663 * At the end of the function, it calls lpfc_hba_down_post function to
4664 * free any pending commands.
4667 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4669 struct lpfc_sli *psli = &phba->sli;
4670 uint32_t hba_aer_enabled;
4674 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4675 "0296 Restart HBA Data: x%x x%x\n",
4676 phba->pport->port_state, psli->sli_flag);
4678 /* Take PCIe device Advanced Error Reporting (AER) state */
4679 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4681 rc = lpfc_sli4_brdreset(phba);
4685 spin_lock_irq(&phba->hbalock);
4686 phba->pport->stopped = 0;
4687 phba->link_state = LPFC_INIT_START;
4689 spin_unlock_irq(&phba->hbalock);
4691 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4692 psli->stats_start = ktime_get_seconds();
4694 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4695 if (hba_aer_enabled)
4696 pci_disable_pcie_error_reporting(phba->pcidev);
4698 lpfc_hba_down_post(phba);
4699 lpfc_sli4_queue_destroy(phba);
4705 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4706 * @phba: Pointer to HBA context object.
4708 * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked
4709 * via the API jump table function pointer in the lpfc_hba struct.
4712 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4714 return phba->lpfc_sli_brdrestart(phba);
4718 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4719 * @phba: Pointer to HBA context object.
4721 * This function is called after an HBA restart to wait for successful
4722 * restart of the HBA. Successful restart of the HBA is indicated by
4723 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4724 * iterations, the function will restart the HBA again. The function returns
4725 * zero if the HBA restarted successfully, else a negative error code.
4728 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4730 uint32_t status, i = 0;
4732 /* Read the HBA Host Status Register */
4733 if (lpfc_readl(phba->HSregaddr, &status))
4736 /* Check status register to see what current state is */
4738 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4740 /* Check every 10ms for 10 retries, then every 100ms for 90
4741 * retries, then every 1 sec for 50 retries, for a total of
4742 * ~60 seconds, before resetting the board again and checking
4743 * every 1 sec for 50 more retries. Up to 60 seconds before
4744 * board ready is required for the Falcon FIPS zeroization to
4745 * complete; any board reset in between restarts zeroization
4746 * and further delays board ready.
4749 /* Adapter failed to init, timeout, status reg
4751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4752 "0436 Adapter failed to init, "
4753 "timeout, status reg x%x, "
4754 "FW Data: A8 x%x AC x%x\n", status,
4755 readl(phba->MBslimaddr + 0xa8),
4756 readl(phba->MBslimaddr + 0xac));
4757 phba->link_state = LPFC_HBA_ERROR;
4761 /* Check to see if any errors occurred during init */
4762 if (status & HS_FFERM) {
4763 /* ERROR: During chipset initialization */
4764 /* Adapter failed to init, chipset, status reg
4766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4767 "0437 Adapter failed to init, "
4768 "chipset, status reg x%x, "
4769 "FW Data: A8 x%x AC x%x\n", status,
4770 readl(phba->MBslimaddr + 0xa8),
4771 readl(phba->MBslimaddr + 0xac));
4772 phba->link_state = LPFC_HBA_ERROR;
4785 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4786 lpfc_sli_brdrestart(phba);
4788 /* Read the HBA Host Status Register */
4789 if (lpfc_readl(phba->HSregaddr, &status))
4793 /* Check to see if any errors occurred during init */
4794 if (status & HS_FFERM) {
4795 /* ERROR: During chipset initialization */
4796 /* Adapter failed to init, chipset, status reg <status> */
4797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4798 "0438 Adapter failed to init, chipset, "
4800 "FW Data: A8 x%x AC x%x\n", status,
4801 readl(phba->MBslimaddr + 0xa8),
4802 readl(phba->MBslimaddr + 0xac));
4803 phba->link_state = LPFC_HBA_ERROR;
4807 /* Clear all interrupt enable conditions */
4808 writel(0, phba->HCregaddr);
4809 readl(phba->HCregaddr); /* flush */
4811 /* setup host attn register */
4812 writel(0xffffffff, phba->HAregaddr);
4813 readl(phba->HAregaddr); /* flush */
4818 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4820 * This function calculates and returns the number of HBQs required to be configured.
4824 lpfc_sli_hbq_count(void)
4826 return ARRAY_SIZE(lpfc_hbq_defs);
4830 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4832 * This function adds the number of hbq entries in every HBQ to get
4833 * the total number of hbq entries required for the HBA and returns the result.
4837 lpfc_sli_hbq_entry_count(void)
4839 int hbq_count = lpfc_sli_hbq_count();
4843 for (i = 0; i < hbq_count; ++i)
4844 count += lpfc_hbq_defs[i]->entry_count;
4849 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4851 * This function calculates amount of memory required for all hbq entries
4852 * to be configured and returns the total memory required.
4855 lpfc_sli_hbq_size(void)
4857 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
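/*
 * Illustrative sketch (not part of the driver): the sizing arithmetic the
 * two helpers above perform. Entry counts come from lpfc_hbq_defs[], so the
 * concrete byte total is configuration dependent.
 */
#if 0
static size_t example_hbq_bytes(void)
{
	/* total entries across all HBQs ... */
	int entries = lpfc_sli_hbq_entry_count();

	/* ... times the size of one entry is the memory required */
	return entries * sizeof(struct lpfc_hbq_entry);
}
#endif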
4861 * lpfc_sli_hbq_setup - configure and initialize HBQs
4862 * @phba: Pointer to HBA context object.
4864 * This function is called during the SLI initialization to configure
4865 * all the HBQs and post buffers to the HBQ. The caller is not
4866 * required to hold any locks. This function returns zero if successful,
4867 * else a negative error code.
4870 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4872 int hbq_count = lpfc_sli_hbq_count();
4876 uint32_t hbq_entry_index;
4878 /* Get a Mailbox buffer to set up mailbox
4879 * commands for HBA initialization
4881 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4888 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4889 phba->link_state = LPFC_INIT_MBX_CMDS;
4890 phba->hbq_in_use = 1;
4892 hbq_entry_index = 0;
4893 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4894 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4895 phba->hbqs[hbqno].hbqPutIdx = 0;
4896 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4897 phba->hbqs[hbqno].entry_count =
4898 lpfc_hbq_defs[hbqno]->entry_count;
4899 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4900 hbq_entry_index, pmb);
4901 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4903 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4904 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4905 mbxStatus <status>, ring <num> */
4907 lpfc_printf_log(phba, KERN_ERR,
4908 LOG_SLI | LOG_VPORT,
4909 "1805 Adapter failed to init. "
4910 "Data: x%x x%x x%x\n",
4912 pmbox->mbxStatus, hbqno);
4914 phba->link_state = LPFC_HBA_ERROR;
4915 mempool_free(pmb, phba->mbox_mem_pool);
4919 phba->hbq_count = hbq_count;
4921 mempool_free(pmb, phba->mbox_mem_pool);
4923 /* Initially populate or replenish the HBQs */
4924 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4925 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4930 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4931 * @phba: Pointer to HBA context object.
4933 * This function is called during the SLI initialization to configure
4934 * all the HBQs and post buffers to the HBQ. The caller is not
4935 * required to hold any locks. This function returns zero if successful,
4936 * else a negative error code.
4939 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4941 phba->hbq_in_use = 1;
4942 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4943 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4944 phba->hbq_count = 1;
4945 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4946 /* Initially populate or replenish the HBQs */
4951 * lpfc_sli_config_port - Issue config port mailbox command
4952 * @phba: Pointer to HBA context object.
4953 * @sli_mode: sli mode - 2/3
4955 * This function is called by the sli initialization code path
4956 * to issue config_port mailbox command. This function restarts the
4957 * HBA firmware and issues a config_port mailbox command to configure
4958 * the SLI interface in the sli mode specified by sli_mode
4959 * variable. The caller is not required to hold any locks.
4960 * The function returns 0 if successful, else returns a negative error code.
4964 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4967 uint32_t resetcount = 0, rc = 0, done = 0;
4969 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4971 phba->link_state = LPFC_HBA_ERROR;
4975 phba->sli_rev = sli_mode;
4976 while (resetcount < 2 && !done) {
4977 spin_lock_irq(&phba->hbalock);
4978 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4979 spin_unlock_irq(&phba->hbalock);
4980 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4981 lpfc_sli_brdrestart(phba);
4982 rc = lpfc_sli_chipset_init(phba);
4986 spin_lock_irq(&phba->hbalock);
4987 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4988 spin_unlock_irq(&phba->hbalock);
4991 /* Call pre CONFIG_PORT mailbox command initialization. A
4992 * value of 0 means the call was successful. Any other
4993 * nonzero value is a failure, but if ERESTART is returned,
4994 * the driver may reset the HBA and try again.
4996 rc = lpfc_config_port_prep(phba);
4997 if (rc == -ERESTART) {
4998 phba->link_state = LPFC_LINK_UNKNOWN;
5003 phba->link_state = LPFC_INIT_MBX_CMDS;
5004 lpfc_config_port(phba, pmb);
5005 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5006 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5007 LPFC_SLI3_HBQ_ENABLED |
5008 LPFC_SLI3_CRP_ENABLED |
5009 LPFC_SLI3_DSS_ENABLED);
5010 if (rc != MBX_SUCCESS) {
5011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5012 "0442 Adapter failed to init, mbxCmd x%x "
5013 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5014 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5015 spin_lock_irq(&phba->hbalock);
5016 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5017 spin_unlock_irq(&phba->hbalock);
5020 /* Allow asynchronous mailbox command to go through */
5021 spin_lock_irq(&phba->hbalock);
5022 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5023 spin_unlock_irq(&phba->hbalock);
5026 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5027 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5028 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5029 "3110 Port did not grant ASABT\n");
5034 goto do_prep_failed;
5036 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5037 if (!pmb->u.mb.un.varCfgPort.cMA) {
5039 goto do_prep_failed;
5041 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5042 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5043 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5044 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5045 phba->max_vpi : phba->max_vports;
5049 phba->fips_level = 0;
5050 phba->fips_spec_rev = 0;
5051 if (pmb->u.mb.un.varCfgPort.gdss) {
5052 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5053 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5054 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5056 "2850 Security Crypto Active. FIPS x%d "
5058 phba->fips_level, phba->fips_spec_rev);
5060 if (pmb->u.mb.un.varCfgPort.sec_err) {
5061 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5062 "2856 Config Port Security Crypto "
5064 pmb->u.mb.un.varCfgPort.sec_err);
5066 if (pmb->u.mb.un.varCfgPort.gerbm)
5067 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5068 if (pmb->u.mb.un.varCfgPort.gcrp)
5069 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5071 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5072 phba->port_gp = phba->mbox->us.s3_pgp.port;
5074 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5075 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5076 phba->cfg_enable_bg = 0;
5077 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5079 "0443 Adapter did not grant "
5084 phba->hbq_get = NULL;
5085 phba->port_gp = phba->mbox->us.s2.port;
5089 mempool_free(pmb, phba->mbox_mem_pool);
5095 * lpfc_sli_hba_setup - SLI initialization function
5096 * @phba: Pointer to HBA context object.
5098 * This function is the main SLI initialization function. This function
5099 * is called by the HBA initialization code, HBA reset code and HBA
5100 * error attention handler code. Caller is not required to hold any
5101 * locks. This function issues config_port mailbox command to configure
5102 * the SLI, setup iocb rings and HBQ rings. In the end the function
5103 * calls the config_port_post function to issue init_link mailbox
5104 * command and to start the discovery. The function will return zero
5105 * if successful, else it will return negative error code.
5108 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5114 switch (phba->cfg_sli_mode) {
5116 if (phba->cfg_enable_npiv) {
5117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5118 "1824 NPIV enabled: Override sli_mode "
5119 "parameter (%d) to auto (0).\n",
5120 phba->cfg_sli_mode);
5129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5130 "1819 Unrecognized sli_mode parameter: %d.\n",
5131 phba->cfg_sli_mode);
5135 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5137 rc = lpfc_sli_config_port(phba, mode);
5139 if (rc && phba->cfg_sli_mode == 3)
5140 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5141 "1820 Unable to select SLI-3. "
5142 "Not supported by adapter.\n");
5143 if (rc && mode != 2)
5144 rc = lpfc_sli_config_port(phba, 2);
5145 else if (rc && mode == 2)
5146 rc = lpfc_sli_config_port(phba, 3);
5148 goto lpfc_sli_hba_setup_error;
5150 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5151 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5152 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5155 "2709 This device supports "
5156 "Advanced Error Reporting (AER)\n");
5157 spin_lock_irq(&phba->hbalock);
5158 phba->hba_flag |= HBA_AER_ENABLED;
5159 spin_unlock_irq(&phba->hbalock);
5161 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5162 "2708 This device does not support "
5163 "Advanced Error Reporting (AER): %d\n",
5165 phba->cfg_aer_support = 0;
5169 if (phba->sli_rev == 3) {
5170 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5171 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5173 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5174 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5175 phba->sli3_options = 0;
5178 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5179 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5180 phba->sli_rev, phba->max_vpi);
5181 rc = lpfc_sli_ring_map(phba);
5184 goto lpfc_sli_hba_setup_error;
5186 /* Initialize VPIs. */
5187 if (phba->sli_rev == LPFC_SLI_REV3) {
5189 * The VPI bitmask and physical ID array are allocated
5190 * and initialized once only - at driver load. A port
5191 * reset doesn't need to reinitialize this memory.
5193 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5194 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5195 phba->vpi_bmask = kcalloc(longs,
5196 sizeof(unsigned long),
5198 if (!phba->vpi_bmask) {
5200 goto lpfc_sli_hba_setup_error;
5203 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5206 if (!phba->vpi_ids) {
5207 kfree(phba->vpi_bmask);
5209 goto lpfc_sli_hba_setup_error;
5211 for (i = 0; i < phba->max_vpi; i++)
5212 phba->vpi_ids[i] = i;
5217 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5218 rc = lpfc_sli_hbq_setup(phba);
5220 goto lpfc_sli_hba_setup_error;
5222 spin_lock_irq(&phba->hbalock);
5223 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5224 spin_unlock_irq(&phba->hbalock);
5226 rc = lpfc_config_port_post(phba);
5228 goto lpfc_sli_hba_setup_error;
5232 lpfc_sli_hba_setup_error:
5233 phba->link_state = LPFC_HBA_ERROR;
5234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5235 "0445 Firmware initialization failed\n");
5240 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5241 * @phba: Pointer to HBA context object.
5242 * @mboxq: mailbox pointer.
5243 * This function issues a dump mailbox command to read config region
5244 * 23, parses the records in the region, and populates the driver data structures.
5248 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5250 LPFC_MBOXQ_t *mboxq;
5251 struct lpfc_dmabuf *mp;
5252 struct lpfc_mqe *mqe;
5253 uint32_t data_length;
5256 /* Program the default value of vlan_id and fc_map */
5257 phba->valid_vlan = 0;
5258 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5259 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5260 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5262 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5266 mqe = &mboxq->u.mqe;
5267 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5269 goto out_free_mboxq;
5272 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5273 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5275 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5276 "(%d):2571 Mailbox cmd x%x Status x%x "
5277 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5278 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5279 "CQ: x%x x%x x%x x%x\n",
5280 mboxq->vport ? mboxq->vport->vpi : 0,
5281 bf_get(lpfc_mqe_command, mqe),
5282 bf_get(lpfc_mqe_status, mqe),
5283 mqe->un.mb_words[0], mqe->un.mb_words[1],
5284 mqe->un.mb_words[2], mqe->un.mb_words[3],
5285 mqe->un.mb_words[4], mqe->un.mb_words[5],
5286 mqe->un.mb_words[6], mqe->un.mb_words[7],
5287 mqe->un.mb_words[8], mqe->un.mb_words[9],
5288 mqe->un.mb_words[10], mqe->un.mb_words[11],
5289 mqe->un.mb_words[12], mqe->un.mb_words[13],
5290 mqe->un.mb_words[14], mqe->un.mb_words[15],
5291 mqe->un.mb_words[16], mqe->un.mb_words[50],
5293 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5294 mboxq->mcqe.trailer);
5297 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5300 goto out_free_mboxq;
5302 data_length = mqe->un.mb_words[5];
5303 if (data_length > DMP_RGN23_SIZE) {
5304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5307 goto out_free_mboxq;
5310 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5311 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5316 mempool_free(mboxq, phba->mbox_mem_pool);
5321 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5322 * @phba: pointer to lpfc hba data structure.
5323 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5324 * @vpd: pointer to the memory to hold resulting port vpd data.
5325 * @vpd_size: On input, the number of bytes allocated to @vpd.
5326 * On output, the number of data bytes in @vpd.
5328 * This routine executes a READ_REV SLI4 mailbox command. In
5329 * addition, this routine gets the port vpd data.
5333 * -ENOMEM - could not allocate memory.
5336 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5337 uint8_t *vpd, uint32_t *vpd_size)
5341 struct lpfc_dmabuf *dmabuf;
5342 struct lpfc_mqe *mqe;
5344 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5349 * Get a DMA buffer for the vpd data resulting from the READ_REV
5352 dma_size = *vpd_size;
5353 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5354 &dmabuf->phys, GFP_KERNEL);
5355 if (!dmabuf->virt) {
5361 * The SLI4 implementation of READ_REV conflicts at word1,
5362 * bits 31:16 and SLI4 adds vpd functionality not present
5363 * in SLI3. This code corrects the conflicts.
5365 lpfc_read_rev(phba, mboxq);
5366 mqe = &mboxq->u.mqe;
5367 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5368 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5369 mqe->un.read_rev.word1 &= 0x0000FFFF;
5370 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5371 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5373 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5375 dma_free_coherent(&phba->pcidev->dev, dma_size,
5376 dmabuf->virt, dmabuf->phys);
5382 * The available vpd length cannot be bigger than the
5383 * DMA buffer passed to the port. Catch the less than
5384 * case and update the caller's size.
5386 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5387 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5389 memcpy(vpd, dmabuf->virt, *vpd_size);
5391 dma_free_coherent(&phba->pcidev->dev, dma_size,
5392 dmabuf->virt, dmabuf->phys);
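/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * lpfc_sli4_read_rev() honoring the in/out contract of @vpd_size described
 * above. The buffer size and error handling are assumptions for
 * illustration.
 */
#if 0
static int example_fetch_vpd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t vpd_size = 1024;	/* in: bytes available in the buffer */
	uint8_t *vpd;
	int rc;

	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	/* out: on success vpd_size now holds the number of valid bytes */
	kfree(vpd);
	return rc;
}
#endif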
5398 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5399 * @phba: pointer to lpfc hba data structure.
5401 * This routine retrieves the SLI4 device physical port name this PCI function is attached to.
5406 * otherwise - failed to retrieve physical port name
5409 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5411 LPFC_MBOXQ_t *mboxq;
5412 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5413 struct lpfc_controller_attribute *cntl_attr;
5414 struct lpfc_mbx_get_port_name *get_port_name;
5415 void *virtaddr = NULL;
5416 uint32_t alloclen, reqlen;
5417 uint32_t shdr_status, shdr_add_status;
5418 union lpfc_sli4_cfg_shdr *shdr;
5419 char cport_name = 0;
5422 /* We assume nothing at this point */
5423 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5424 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5426 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5429 /* obtain link type and link number via READ_CONFIG */
5430 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5431 lpfc_sli4_read_config(phba);
5432 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5433 goto retrieve_ppname;
5435 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5436 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5437 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5438 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5439 LPFC_SLI4_MBX_NEMBED);
5440 if (alloclen < reqlen) {
5441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5442 "3084 Allocated DMA memory size (%d) is "
5443 "less than the requested DMA memory size "
5444 "(%d)\n", alloclen, reqlen);
5446 goto out_free_mboxq;
5448 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5449 virtaddr = mboxq->sge_array->addr[0];
5450 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5451 shdr = &mbx_cntl_attr->cfg_shdr;
5452 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5453 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5454 if (shdr_status || shdr_add_status || rc) {
5455 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5456 "3085 Mailbox x%x (x%x/x%x) failed, "
5457 "rc:x%x, status:x%x, add_status:x%x\n",
5458 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5459 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5460 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5461 rc, shdr_status, shdr_add_status);
5463 goto out_free_mboxq;
5465 cntl_attr = &mbx_cntl_attr->cntl_attr;
5466 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5467 phba->sli4_hba.lnk_info.lnk_tp =
5468 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5469 phba->sli4_hba.lnk_info.lnk_no =
5470 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5471 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5472 "3086 lnk_type:%d, lnk_numb:%d\n",
5473 phba->sli4_hba.lnk_info.lnk_tp,
5474 phba->sli4_hba.lnk_info.lnk_no);
5477 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5478 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5479 sizeof(struct lpfc_mbx_get_port_name) -
5480 sizeof(struct lpfc_sli4_cfg_mhdr),
5481 LPFC_SLI4_MBX_EMBED);
5482 get_port_name = &mboxq->u.mqe.un.get_port_name;
5483 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5484 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5485 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5486 phba->sli4_hba.lnk_info.lnk_tp);
5487 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5488 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5489 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5490 if (shdr_status || shdr_add_status || rc) {
5491 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5492 "3087 Mailbox x%x (x%x/x%x) failed: "
5493 "rc:x%x, status:x%x, add_status:x%x\n",
5494 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5495 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5496 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5497 rc, shdr_status, shdr_add_status);
5499 goto out_free_mboxq;
5501 switch (phba->sli4_hba.lnk_info.lnk_no) {
5502 case LPFC_LINK_NUMBER_0:
5503 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5504 &get_port_name->u.response);
5505 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5507 case LPFC_LINK_NUMBER_1:
5508 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5509 &get_port_name->u.response);
5510 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5512 case LPFC_LINK_NUMBER_2:
5513 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5514 &get_port_name->u.response);
5515 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5517 case LPFC_LINK_NUMBER_3:
5518 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5519 &get_port_name->u.response);
5520 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5526 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5527 phba->Port[0] = cport_name;
5528 phba->Port[1] = '\0';
5529 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5530 "3091 SLI get port name: %s\n", phba->Port);
5534 if (rc != MBX_TIMEOUT) {
5535 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5536 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5538 mempool_free(mboxq, phba->mbox_mem_pool);
5544 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5545 * @phba: pointer to lpfc hba data structure.
5547 * This routine is called to explicitly arm the SLI4 device's completion and event queues.
5551 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5554 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5555 struct lpfc_sli4_hdw_queue *qp;
5557 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5558 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5559 if (sli4_hba->nvmels_cq)
5560 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5563 qp = sli4_hba->hdwq;
5564 if (sli4_hba->hdwq) {
5565 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5566 sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0,
5568 sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0,
5572 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++)
5573 sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq,
5574 0, LPFC_QUEUE_REARM);
5577 if (phba->nvmet_support) {
5578 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5579 sli4_hba->sli4_write_cq_db(phba,
5580 sli4_hba->nvmet_cqset[qidx], 0,
5587 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5588 * @phba: Pointer to HBA context object.
5589 * @type: The resource extent type.
5590 * @extnt_count: buffer to hold port available extent count.
5591 * @extnt_size: buffer to hold element count per extent.
5593 * This function calls the port and retrieves the number of available
5594 * extents and their size for a particular extent type.
5596 * Returns: 0 if successful. Nonzero otherwise.
5599 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5600 uint16_t *extnt_count, uint16_t *extnt_size)
5605 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5608 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5612 /* Find out how many extents are available for this resource type */
5613 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5614 sizeof(struct lpfc_sli4_cfg_mhdr));
5615 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5616 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5617 length, LPFC_SLI4_MBX_EMBED);
5619 /* Send an extents count of 0 - the GET doesn't use it. */
5620 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5621 LPFC_SLI4_MBX_EMBED);
5627 if (!phba->sli4_hba.intr_enable)
5628 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5630 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5631 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5638 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5639 if (bf_get(lpfc_mbox_hdr_status,
5640 &rsrc_info->header.cfg_shdr.response)) {
5641 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5642 "2930 Failed to get resource extents "
5643 "Status 0x%x Add'l Status 0x%x\n",
5644 bf_get(lpfc_mbox_hdr_status,
5645 &rsrc_info->header.cfg_shdr.response),
5646 bf_get(lpfc_mbox_hdr_add_status,
5647 &rsrc_info->header.cfg_shdr.response));
5652 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5654 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5657 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5658 "3162 Retrieved extents type-%d from port: count:%d, "
5659 "size:%d\n", type, *extnt_count, *extnt_size);
5662 mempool_free(mbox, phba->mbox_mem_pool);
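/*
 * Illustrative sketch (not part of the driver): querying the port for the
 * available XRI extents and deriving the total id count, the same
 * count-times-size arithmetic lpfc_sli4_alloc_extent() uses further below.
 */
#if 0
static int example_count_xri_ids(struct lpfc_hba *phba)
{
	uint16_t cnt, size;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &cnt, &size);
	if (rc)
		return rc;
	/* each extent carries 'size' ids, so cnt * size ids in total */
	return cnt * size;
}
#endif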
5667 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5668 * @phba: Pointer to HBA context object.
5669 * @type: The extent type to check.
5671 * This function reads the current available extents from the port and checks
5672 * if the extent count or extent size has changed since the last access.
5673 * Callers use this routine post port reset to understand if there is an
5674 * extent reprovisioning requirement.
5677 * -Error: error indicates problem.
5678 * 1: Extent count or size has changed.
5682 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5684 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5685 uint16_t size_diff, rsrc_ext_size;
5687 struct lpfc_rsrc_blks *rsrc_entry;
5688 struct list_head *rsrc_blk_list = NULL;
5692 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5699 case LPFC_RSC_TYPE_FCOE_RPI:
5700 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5702 case LPFC_RSC_TYPE_FCOE_VPI:
5703 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5705 case LPFC_RSC_TYPE_FCOE_XRI:
5706 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5708 case LPFC_RSC_TYPE_FCOE_VFI:
5709 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5715 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5717 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5721 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5728 * lpfc_sli4_cfg_post_extnts - Post an allocate-extents mailbox command
5729 * @phba: Pointer to HBA context object.
5730 * @extnt_cnt: number of available extents.
5731 * @type: the extent type (rpi, xri, vfi, vpi).
5732 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5733 * @mbox: pointer to the caller's allocated mailbox structure.
5735 * This function executes the extents allocation request. It also
5736 * takes care of the amount of memory needed to allocate or get the
5737 * allocated extents. It is the caller's responsibility to evaluate the response.
5741 * -Error: Error value describes the condition found.
5745 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5746 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5751 uint32_t alloc_len, mbox_tmo;
5753 /* Calculate the total requested length of the dma memory */
5754 req_len = extnt_cnt * sizeof(uint16_t);
5757 * Calculate the size of an embedded mailbox. The uint32_t
5758 * accounts for the extents-specific word.
5760 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5764 * Presume the allocation and response will fit into an embedded
5765 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5767 *emb = LPFC_SLI4_MBX_EMBED;
5768 if (req_len > emb_len) {
5769 req_len = extnt_cnt * sizeof(uint16_t) +
5770 sizeof(union lpfc_sli4_cfg_shdr) +
5772 *emb = LPFC_SLI4_MBX_NEMBED;
5775 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5776 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5778 if (alloc_len < req_len) {
5779 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5780 "2982 Allocated DMA memory size (x%x) is "
5781 "less than the requested DMA memory "
5782 "size (x%x)\n", alloc_len, req_len);
5785 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5789 if (!phba->sli4_hba.intr_enable)
5790 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5792 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5793 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
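/*
 * Illustrative sketch (not part of the driver): the embedded-vs-non-embedded
 * decision made above. An embedded mailbox can carry only what fits in the
 * MAILBOX_t payload after the header and the extents-specific word; a
 * larger request must go non-embedded through external SGEs.
 */
#if 0
static bool example_fits_embedded(uint16_t extnt_cnt)
{
	size_t req_len = extnt_cnt * sizeof(uint16_t);
	size_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			 sizeof(uint32_t);

	return req_len <= emb_len;
}
#endif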
5802 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5803 * @phba: Pointer to HBA context object.
5804 * @type: The resource extent type to allocate.
5806 * This function allocates the number of elements for the specified
5810 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5813 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5814 uint16_t rsrc_id, rsrc_start, j, k;
5817 unsigned long longs;
5818 unsigned long *bmask;
5819 struct lpfc_rsrc_blks *rsrc_blks;
5822 struct lpfc_id_range *id_array = NULL;
5823 void *virtaddr = NULL;
5824 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5825 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5826 struct list_head *ext_blk_list;
5828 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5834 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5835 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5836 "3009 No available Resource Extents "
5837 "for resource type 0x%x: Count: 0x%x, "
5838 "Size 0x%x\n", type, rsrc_cnt,
5843 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5844 "2903 Post resource extents type-0x%x: "
5845 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5847 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5851 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5858 * Figure out where the response is located. Then get local pointers
5859 * to the response data. The port does not guarantee a response for
5860 * the full requested extent count, so update the local variable with
5861 * the count actually allocated by the port.
5863 if (emb == LPFC_SLI4_MBX_EMBED) {
5864 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5865 id_array = &rsrc_ext->u.rsp.id[0];
5866 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5868 virtaddr = mbox->sge_array->addr[0];
5869 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5870 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5871 id_array = &n_rsrc->id;
5874 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5875 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5878 * Based on the resource size and count, correct the base and max resource values.
5881 length = sizeof(struct lpfc_rsrc_blks);
5883 case LPFC_RSC_TYPE_FCOE_RPI:
5884 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5885 sizeof(unsigned long),
5887 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5891 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5894 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5895 kfree(phba->sli4_hba.rpi_bmask);
5901 * The next_rpi was initialized with the maximum available
5902 * count but the port may allocate a smaller number. Catch
5903 * that case and update the next_rpi.
5905 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5907 /* Initialize local ptrs for common extent processing later. */
5908 bmask = phba->sli4_hba.rpi_bmask;
5909 ids = phba->sli4_hba.rpi_ids;
5910 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5912 case LPFC_RSC_TYPE_FCOE_VPI:
5913 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5915 if (unlikely(!phba->vpi_bmask)) {
5919 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5921 if (unlikely(!phba->vpi_ids)) {
5922 kfree(phba->vpi_bmask);
5927 /* Initialize local ptrs for common extent processing later. */
5928 bmask = phba->vpi_bmask;
5929 ids = phba->vpi_ids;
5930 ext_blk_list = &phba->lpfc_vpi_blk_list;
5932 case LPFC_RSC_TYPE_FCOE_XRI:
5933 phba->sli4_hba.xri_bmask = kcalloc(longs,
5934 sizeof(unsigned long),
5936 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5940 phba->sli4_hba.max_cfg_param.xri_used = 0;
5941 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5944 if (unlikely(!phba->sli4_hba.xri_ids)) {
5945 kfree(phba->sli4_hba.xri_bmask);
5950 /* Initialize local ptrs for common extent processing later. */
5951 bmask = phba->sli4_hba.xri_bmask;
5952 ids = phba->sli4_hba.xri_ids;
5953 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5955 case LPFC_RSC_TYPE_FCOE_VFI:
5956 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5957 sizeof(unsigned long),
5959 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5963 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5966 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5967 kfree(phba->sli4_hba.vfi_bmask);
5972 /* Initialize local ptrs for common extent processing later. */
5973 bmask = phba->sli4_hba.vfi_bmask;
5974 ids = phba->sli4_hba.vfi_ids;
5975 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5978 /* Unsupported Opcode. Fail call. */
5982 ext_blk_list = NULL;
5987 * Complete initializing the extent configuration with the
5988 * allocated ids assigned to this function. The bitmask serves
5989 * as an index into the array and manages the available ids. The
5990 * array just stores the ids communicated to the port via the wqes.
5992 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5994 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5997 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6000 rsrc_blks = kzalloc(length, GFP_KERNEL);
6001 if (unlikely(!rsrc_blks)) {
6007 rsrc_blks->rsrc_start = rsrc_id;
6008 rsrc_blks->rsrc_size = rsrc_size;
6009 list_add_tail(&rsrc_blks->list, ext_blk_list);
6010 rsrc_start = rsrc_id;
6011 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6012 phba->sli4_hba.io_xri_start = rsrc_start +
6013 lpfc_sli4_get_iocb_cnt(phba);
6016 while (rsrc_id < (rsrc_start + rsrc_size)) {
6021 /* Entire word processed. Get next word.*/
6026 lpfc_sli4_mbox_cmd_free(phba, mbox);
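/*
 * Illustrative sketch (not part of the driver): the bitmask sizing used by
 * lpfc_sli4_alloc_extent() above. One bit tracks each resource id, so the
 * bitmask needs (count * size) bits rounded up to whole longs.
 */
#if 0
static unsigned long *example_alloc_id_bmask(uint16_t rsrc_cnt,
					     uint16_t rsrc_size)
{
	unsigned long longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) /
			      BITS_PER_LONG;

	return kcalloc(longs, sizeof(unsigned long), GFP_KERNEL);
}
#endif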
6033 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6034 * @phba: Pointer to HBA context object.
6035 * @type: the extent's type.
6037 * This function deallocates all extents of a particular resource type.
6038 * SLI4 does not allow for deallocating a particular extent range. It
6039 * is the caller's responsibility to release all kernel memory resources.
6042 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6045 uint32_t length, mbox_tmo = 0;
6047 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6048 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6050 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6055 * This function sends an embedded mailbox because it only sends the
6056 * resource type. All extents of this type are released by the
6059 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6060 sizeof(struct lpfc_sli4_cfg_mhdr));
6061 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6062 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6063 length, LPFC_SLI4_MBX_EMBED);
6065 /* Send an extents count of 0 - the dealloc doesn't use it. */
6066 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6067 LPFC_SLI4_MBX_EMBED);
6072 if (!phba->sli4_hba.intr_enable)
6073 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6075 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6076 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6083 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6084 if (bf_get(lpfc_mbox_hdr_status,
6085 &dealloc_rsrc->header.cfg_shdr.response)) {
6086 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6087 "2919 Failed to release resource extents "
6088 "for type %d - Status 0x%x Add'l Status 0x%x. "
6089 "Resource memory not released.\n",
6091 bf_get(lpfc_mbox_hdr_status,
6092 &dealloc_rsrc->header.cfg_shdr.response),
6093 bf_get(lpfc_mbox_hdr_add_status,
6094 &dealloc_rsrc->header.cfg_shdr.response));
6099 /* Release kernel memory resources for the specific type. */
6101 case LPFC_RSC_TYPE_FCOE_VPI:
6102 kfree(phba->vpi_bmask);
6103 kfree(phba->vpi_ids);
6104 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6105 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6106 &phba->lpfc_vpi_blk_list, list) {
6107 list_del_init(&rsrc_blk->list);
6110 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6112 case LPFC_RSC_TYPE_FCOE_XRI:
6113 kfree(phba->sli4_hba.xri_bmask);
6114 kfree(phba->sli4_hba.xri_ids);
6115 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6116 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6117 list_del_init(&rsrc_blk->list);
6121 case LPFC_RSC_TYPE_FCOE_VFI:
6122 kfree(phba->sli4_hba.vfi_bmask);
6123 kfree(phba->sli4_hba.vfi_ids);
6124 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6125 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6126 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6127 list_del_init(&rsrc_blk->list);
6131 case LPFC_RSC_TYPE_FCOE_RPI:
6132 /* RPI bitmask and physical id array are cleaned up earlier. */
6133 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6134 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6135 list_del_init(&rsrc_blk->list);
6143 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6146 mempool_free(mbox, phba->mbox_mem_pool);
6151 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6156 len = sizeof(struct lpfc_mbx_set_feature) -
6157 sizeof(struct lpfc_sli4_cfg_mhdr);
6158 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6159 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6160 LPFC_SLI4_MBX_EMBED);
6163 case LPFC_SET_UE_RECOVERY:
6164 bf_set(lpfc_mbx_set_feature_UER,
6165 &mbox->u.mqe.un.set_feature, 1);
6166 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6167 mbox->u.mqe.un.set_feature.param_len = 8;
6169 case LPFC_SET_MDS_DIAGS:
6170 bf_set(lpfc_mbx_set_feature_mds,
6171 &mbox->u.mqe.un.set_feature, 1);
6172 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6173 &mbox->u.mqe.un.set_feature, 1);
6174 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6175 mbox->u.mqe.un.set_feature.param_len = 8;
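/*
 * Example use of lpfc_set_features() (a sketch mirroring the MDS diagnostics
 * call made later in lpfc_sli4_hba_setup(); mboxq is assumed to be an
 * allocated mailbox):
 *
 *	lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		phba->mds_diags_support = 0;
 */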
6183 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6184 * @phba: Pointer to HBA context object.
6186 * Disable FW logging into host memory on the adapter. This must be
6187 * done before reading the logs from host memory.
6190 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6192 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6194 ras_fwlog->ras_active = false;
6196 /* Disable FW logging to host memory */
6197 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6198 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6202 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6203 * @phba: Pointer to HBA context object.
6205 * This function is called to free memory allocated for RAS FW logging
6206 * support in the driver.
6209 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6211 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6212 struct lpfc_dmabuf *dmabuf, *next;
6214 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6215 list_for_each_entry_safe(dmabuf, next,
6216 &ras_fwlog->fwlog_buff_list,
6218 list_del(&dmabuf->list);
6219 dma_free_coherent(&phba->pcidev->dev,
6220 LPFC_RAS_MAX_ENTRY_SIZE,
6221 dmabuf->virt, dmabuf->phys);
6226 if (ras_fwlog->lwpd.virt) {
6227 dma_free_coherent(&phba->pcidev->dev,
6228 sizeof(uint32_t) * 2,
6229 ras_fwlog->lwpd.virt,
6230 ras_fwlog->lwpd.phys);
6231 ras_fwlog->lwpd.virt = NULL;
6234 ras_fwlog->ras_active = false;
6238 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6239 * @phba: Pointer to HBA context object.
6240 * @fwlog_buff_count: Count of buffers to be created.
6242 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6243 * and for the buffers that are posted to the adapter for FW log updates.
6244 * The buffer count is calculated from the module param ras_fwlog_buffsize.
6245 * The size of each buffer posted to the FW is 64K.
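 *
 * A worked example of the sizing (assuming, per the driver headers at the
 * time of writing, LPFC_RAS_MIN_BUFF_POST_SIZE is 256K and
 * LPFC_RAS_MAX_ENTRY_SIZE is 64K): ras_fwlog_buffsize of 1 yields
 * fwlog_buffsize = 256K and fwlog_entry_count = 256K / 64K = 4 buffers.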
6249 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6250 uint32_t fwlog_buff_count)
6252 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6253 struct lpfc_dmabuf *dmabuf;
6256 /* Initialize List */
6257 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6259 /* Allocate memory for the LWPD */
6260 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6261 sizeof(uint32_t) * 2,
6262 &ras_fwlog->lwpd.phys,
6264 if (!ras_fwlog->lwpd.virt) {
6265 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6266 "6185 LWPD Memory Alloc Failed\n");
6271 ras_fwlog->fw_buffcount = fwlog_buff_count;
6272 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6273 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6277 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6278 "6186 Memory Alloc failed FW logging");
6282 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6283 LPFC_RAS_MAX_ENTRY_SIZE,
6284 &dmabuf->phys, GFP_KERNEL);
6285 if (!dmabuf->virt) {
6288 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6289 "6187 DMA Alloc Failed FW logging");
6292 dmabuf->buffer_tag = i;
6293 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6298 lpfc_sli4_ras_dma_free(phba);
6304 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6305 * @phba: pointer to lpfc hba data structure.
6306 * @pmb: pointer to the driver internal queue element for mailbox command.
6308 * Completion handler for driver's RAS MBX command to the device.
6311 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6314 union lpfc_sli4_cfg_shdr *shdr;
6315 uint32_t shdr_status, shdr_add_status;
6316 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6320 shdr = (union lpfc_sli4_cfg_shdr *)
6321 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6322 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6323 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6325 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6326 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6327 "6188 FW LOG mailbox "
6328 "completed with status x%x add_status x%x,"
6329 " mbx status x%x\n",
6330 shdr_status, shdr_add_status, mb->mbxStatus);
6332 ras_fwlog->ras_hwsupport = false;
6336 ras_fwlog->ras_active = true;
6337 mempool_free(pmb, phba->mbox_mem_pool);
6342 /* Free RAS DMA memory */
6343 lpfc_sli4_ras_dma_free(phba);
6344 mempool_free(pmb, phba->mbox_mem_pool);
6348 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6349 * @phba: pointer to lpfc hba data structure.
6350 * @fwlog_level: Logging verbosity level.
6351 * @fwlog_enable: Enable/Disable logging.
6353 * Initialize memory and post a mailbox command to enable FW logging in host memory.
6357 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6358 uint32_t fwlog_level,
6359 uint32_t fwlog_enable)
6361 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6362 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6363 struct lpfc_dmabuf *dmabuf;
6365 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6368 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6369 phba->cfg_ras_fwlog_buffsize);
6370 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6373 * If re-enabling FW logging support, use the earlier allocated
6374 * DMA buffers while posting the MBX command.
6376 if (!ras_fwlog->lwpd.virt) {
6377 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6379 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6380 "6189 FW Log Memory Allocation Failed");
6385 /* Setup Mailbox command */
6386 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6389 "6190 RAS MBX Alloc Failed");
6394 ras_fwlog->fw_loglevel = fwlog_level;
6395 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6396 sizeof(struct lpfc_sli4_cfg_mhdr));
6398 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6399 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6400 len, LPFC_SLI4_MBX_EMBED);
6402 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6403 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6405 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6406 ras_fwlog->fw_loglevel);
6407 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6408 ras_fwlog->fw_buffcount);
6409 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6410 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6412 /* Update DMA buffer address */
6413 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6414 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6416 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6417 putPaddrLow(dmabuf->phys);
6419 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6420 putPaddrHigh(dmabuf->phys);
6423 /* Update LWPD address */
6424 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6425 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6427 mbox->vport = phba->pport;
6428 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6430 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6432 if (rc == MBX_NOT_FINISHED) {
6433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6434 "6191 FW-Log Mailbox failed. "
6435 "status %d mbxStatus : x%x", rc,
6436 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6437 mempool_free(mbox, phba->mbox_mem_pool);
6444 lpfc_sli4_ras_dma_free(phba);
6450 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6451 * @phba: Pointer to HBA context object.
6453 * Check if RAS is supported on the adapter and initialize it.
6456 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6458 /* Check whether RAS FW logging needs to be enabled */
6459 if (lpfc_check_fwlog_support(phba))
6462 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6463 LPFC_RAS_ENABLE_LOGGING);
6467 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6468 * @phba: Pointer to HBA context object.
6470 * This function allocates all SLI4 resource identifiers.
6473 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6475 int i, rc, error = 0;
6476 uint16_t count, base;
6477 unsigned long longs;
6479 if (!phba->sli4_hba.rpi_hdrs_in_use)
6480 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6481 if (phba->sli4_hba.extents_in_use) {
6483 * The port supports resource extents. The XRI, VPI, VFI, RPI
6484 * resource extent count must be read and allocated before
6485 * provisioning the resource id arrays.
6487 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6488 LPFC_IDX_RSRC_RDY) {
6490 * Extent-based resources are set - the driver could
6491 * be in a port reset. Figure out if any corrective
6492 * actions need to be taken.
6494 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6495 LPFC_RSC_TYPE_FCOE_VFI);
6498 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6499 LPFC_RSC_TYPE_FCOE_VPI);
6502 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6503 LPFC_RSC_TYPE_FCOE_XRI);
6506 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6507 LPFC_RSC_TYPE_FCOE_RPI);
6512 * It's possible that the number of resources
6513 * provided to this port instance changed between
6514 * resets. Detect this condition and reallocate
6515 * resources. Otherwise, there is no action.
6518 lpfc_printf_log(phba, KERN_INFO,
6519 LOG_MBOX | LOG_INIT,
6520 "2931 Detected extent resource "
6521 "change. Reallocating all "
6523 rc = lpfc_sli4_dealloc_extent(phba,
6524 LPFC_RSC_TYPE_FCOE_VFI);
6525 rc = lpfc_sli4_dealloc_extent(phba,
6526 LPFC_RSC_TYPE_FCOE_VPI);
6527 rc = lpfc_sli4_dealloc_extent(phba,
6528 LPFC_RSC_TYPE_FCOE_XRI);
6529 rc = lpfc_sli4_dealloc_extent(phba,
6530 LPFC_RSC_TYPE_FCOE_RPI);
6535 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6539 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6543 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6547 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6550 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6555 * The port does not support resource extents. The XRI, VPI,
6556 * VFI, RPI resource ids were determined from READ_CONFIG.
6557 * Just allocate the bitmasks and provision the resource id
6558 * arrays. If a port reset is active, the resources don't
6559 * need any action - just exit.
6561 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6562 LPFC_IDX_RSRC_RDY) {
6563 lpfc_sli4_dealloc_resource_identifiers(phba);
6564 lpfc_sli4_remove_rpis(phba);
6567 count = phba->sli4_hba.max_cfg_param.max_rpi;
6569 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6570 "3279 Invalid provisioning of "
6575 base = phba->sli4_hba.max_cfg_param.rpi_base;
6576 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6577 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6578 sizeof(unsigned long),
6580 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6584 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6586 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6588 goto free_rpi_bmask;
6591 for (i = 0; i < count; i++)
6592 phba->sli4_hba.rpi_ids[i] = base + i;
6595 count = phba->sli4_hba.max_cfg_param.max_vpi;
6597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6598 "3280 Invalid provisioning of "
6603 base = phba->sli4_hba.max_cfg_param.vpi_base;
6604 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6605 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6607 if (unlikely(!phba->vpi_bmask)) {
6611 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6613 if (unlikely(!phba->vpi_ids)) {
6615 goto free_vpi_bmask;
6618 for (i = 0; i < count; i++)
6619 phba->vpi_ids[i] = base + i;
6622 count = phba->sli4_hba.max_cfg_param.max_xri;
6624 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6625 "3281 Invalid provisioning of "
6630 base = phba->sli4_hba.max_cfg_param.xri_base;
6631 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6632 phba->sli4_hba.xri_bmask = kcalloc(longs,
6633 sizeof(unsigned long),
6635 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6639 phba->sli4_hba.max_cfg_param.xri_used = 0;
6640 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6642 if (unlikely(!phba->sli4_hba.xri_ids)) {
6644 goto free_xri_bmask;
6647 for (i = 0; i < count; i++)
6648 phba->sli4_hba.xri_ids[i] = base + i;
6651 count = phba->sli4_hba.max_cfg_param.max_vfi;
6653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6654 "3282 Invalid provisioning of "
6659 base = phba->sli4_hba.max_cfg_param.vfi_base;
6660 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6661 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6662 sizeof(unsigned long),
6664 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6668 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6670 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6672 goto free_vfi_bmask;
6675 for (i = 0; i < count; i++)
6676 phba->sli4_hba.vfi_ids[i] = base + i;
6679 * Mark all resources ready. An HBA reset doesn't need
6680 * to reset the initialization.
6682 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6688 kfree(phba->sli4_hba.vfi_bmask);
6689 phba->sli4_hba.vfi_bmask = NULL;
6691 kfree(phba->sli4_hba.xri_ids);
6692 phba->sli4_hba.xri_ids = NULL;
6694 kfree(phba->sli4_hba.xri_bmask);
6695 phba->sli4_hba.xri_bmask = NULL;
6697 kfree(phba->vpi_ids);
6698 phba->vpi_ids = NULL;
6700 kfree(phba->vpi_bmask);
6701 phba->vpi_bmask = NULL;
6703 kfree(phba->sli4_hba.rpi_ids);
6704 phba->sli4_hba.rpi_ids = NULL;
6706 kfree(phba->sli4_hba.rpi_bmask);
6707 phba->sli4_hba.rpi_bmask = NULL;
6713 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6714 * @phba: Pointer to HBA context object.
6716 * This function releases all SLI4 resource identifiers and bitmasks previously allocated for the port.
6720 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6722 if (phba->sli4_hba.extents_in_use) {
6723 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6724 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6725 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6726 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6728 kfree(phba->vpi_bmask);
6729 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6730 kfree(phba->vpi_ids);
6731 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6732 kfree(phba->sli4_hba.xri_bmask);
6733 kfree(phba->sli4_hba.xri_ids);
6734 kfree(phba->sli4_hba.vfi_bmask);
6735 kfree(phba->sli4_hba.vfi_ids);
6736 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6737 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6744 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6745 * @phba: Pointer to HBA context object.
6746 * @type: The resource extent type.
6747 * @extnt_cnt: buffer to hold port extent count response.
6748 * @extnt_size: buffer to hold port extent size response.
6750 * This function calls the port to read the host allocated extents
6751 * for a particular type.
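 *
 * Example call (a sketch; cnt and size receive the port's response):
 *
 *	uint16_t cnt, size;
 *	int rc;
 *
 *	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &cnt, &size);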
6754 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6755 uint16_t *extnt_cnt, uint16_t *extnt_size)
6759 uint16_t curr_blks = 0;
6760 uint32_t req_len, emb_len;
6761 uint32_t alloc_len, mbox_tmo;
6762 struct list_head *blk_list_head;
6763 struct lpfc_rsrc_blks *rsrc_blk;
6765 void *virtaddr = NULL;
6766 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6767 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6768 union lpfc_sli4_cfg_shdr *shdr;
6771 case LPFC_RSC_TYPE_FCOE_VPI:
6772 blk_list_head = &phba->lpfc_vpi_blk_list;
6774 case LPFC_RSC_TYPE_FCOE_XRI:
6775 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6777 case LPFC_RSC_TYPE_FCOE_VFI:
6778 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6780 case LPFC_RSC_TYPE_FCOE_RPI:
6781 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6787 /* Count the number of extents currently allocated for this type. */
6788 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6789 if (curr_blks == 0) {
6791 * The GET_ALLOCATED mailbox does not return the size,
6792 * just the count. The size is just the size stored in the
6793 * currently allocated block; all sizes for an extent type are
6794 * the same, so set the return
6797 *extnt_size = rsrc_blk->rsrc_size;
6803 * Calculate the size of an embedded mailbox. The uint32_t
6804 * accounts for the extents-specific word.
6806 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6810 * Presume the allocation and response will fit into an embedded
6811 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6813 emb = LPFC_SLI4_MBX_EMBED;
6815 if (req_len > emb_len) {
6816 req_len = curr_blks * sizeof(uint16_t) +
6817 sizeof(union lpfc_sli4_cfg_shdr) +
6819 emb = LPFC_SLI4_MBX_NEMBED;
6822 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6825 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6827 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6828 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6830 if (alloc_len < req_len) {
6831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6832 "2983 Allocated DMA memory size (x%x) is "
6833 "less than the requested DMA memory "
6834 "size (x%x)\n", alloc_len, req_len);
6838 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6844 if (!phba->sli4_hba.intr_enable)
6845 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6847 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6848 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6857 * Figure out where the response is located. Then get local pointers
6858 * to the response data. The port is not guaranteed to respond to
6859 * all extent count requests, so update the local variable with the
6860 * allocated count from the port.
6862 if (emb == LPFC_SLI4_MBX_EMBED) {
6863 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6864 shdr = &rsrc_ext->header.cfg_shdr;
6865 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6867 virtaddr = mbox->sge_array->addr[0];
6868 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6869 shdr = &n_rsrc->cfg_shdr;
6870 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6873 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6875 "2984 Failed to read allocated resources "
6876 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6878 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6879 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6884 lpfc_sli4_mbox_cmd_free(phba, mbox);
6889 * lpfc_sli4_repost_sgl_list - Repost the buffer SGL pages as a block
6890 * @phba: pointer to lpfc hba data structure.
6892 * @sgl_list: linked list of sgl buffers to post
6893 * @cnt: number of linked list buffers
6895 * This routine walks the list of buffers that have been allocated and
6896 * reposts them to the port by using SGL block post. This is needed after a
6897 * pci_function_reset/warm_start or start. It attempts to construct blocks
6898 * of buffer sgls which contain contiguous xris and uses the non-embedded
6899 * SGL block post mailbox commands to post them to the port. For any single
6900 * buffer sgl with a non-contiguous xri, it uses the embedded SGL post
6901 * mailbox command for posting.
6903 * Returns: 0 = success, non-zero failure.
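 *
 * For example (illustrative only): buffers carrying XRIs 10, 11, 12, 20 and
 * 21 are posted as two blocks, {10-12} and {20-21}, because the hole
 * between 12 and 20 ends the first contiguous run.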
6906 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6907 struct list_head *sgl_list, int cnt)
6909 struct lpfc_sglq *sglq_entry = NULL;
6910 struct lpfc_sglq *sglq_entry_next = NULL;
6911 struct lpfc_sglq *sglq_entry_first = NULL;
6912 int status, total_cnt;
6913 int post_cnt = 0, num_posted = 0, block_cnt = 0;
6914 int last_xritag = NO_XRI;
6915 LIST_HEAD(prep_sgl_list);
6916 LIST_HEAD(blck_sgl_list);
6917 LIST_HEAD(allc_sgl_list);
6918 LIST_HEAD(post_sgl_list);
6919 LIST_HEAD(free_sgl_list);
6921 spin_lock_irq(&phba->hbalock);
6922 spin_lock(&phba->sli4_hba.sgl_list_lock);
6923 list_splice_init(sgl_list, &allc_sgl_list);
6924 spin_unlock(&phba->sli4_hba.sgl_list_lock);
6925 spin_unlock_irq(&phba->hbalock);
6928 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6929 &allc_sgl_list, list) {
6930 list_del_init(&sglq_entry->list);
6932 if ((last_xritag != NO_XRI) &&
6933 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6934 /* a hole in xri block, form a sgl posting block */
6935 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6936 post_cnt = block_cnt - 1;
6937 /* prepare list for next posting block */
6938 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6941 /* prepare list for next posting block */
6942 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6943 /* enough sgls for non-embed sgl mbox command */
6944 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6945 list_splice_init(&prep_sgl_list,
6947 post_cnt = block_cnt;
6953 /* keep track of last sgl's xritag */
6954 last_xritag = sglq_entry->sli4_xritag;
6956 /* end of repost sgl list condition for buffers */
6957 if (num_posted == total_cnt) {
6958 if (post_cnt == 0) {
6959 list_splice_init(&prep_sgl_list,
6961 post_cnt = block_cnt;
6962 } else if (block_cnt == 1) {
6963 status = lpfc_sli4_post_sgl(phba,
6964 sglq_entry->phys, 0,
6965 sglq_entry->sli4_xritag);
6967 /* successful, put sgl to posted list */
6968 list_add_tail(&sglq_entry->list,
6971 /* Failure, put sgl to free list */
6972 lpfc_printf_log(phba, KERN_WARNING,
6974 "3159 Failed to post "
6975 "sgl, xritag:x%x\n",
6976 sglq_entry->sli4_xritag);
6977 list_add_tail(&sglq_entry->list,
6984 /* continue until a nembed page worth of sgls */
6988 /* post the buffer list sgls as a block */
6989 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6993 /* success, put sgl list to posted sgl list */
6994 list_splice_init(&blck_sgl_list, &post_sgl_list);
6996 /* Failure, put sgl list to free sgl list */
6997 sglq_entry_first = list_first_entry(&blck_sgl_list,
7000 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7001 "3160 Failed to post sgl-list, "
7003 sglq_entry_first->sli4_xritag,
7004 (sglq_entry_first->sli4_xritag +
7006 list_splice_init(&blck_sgl_list, &free_sgl_list);
7007 total_cnt -= post_cnt;
7010 /* don't reset xritag due to hole in xri block */
7012 last_xritag = NO_XRI;
7014 /* reset sgl post count for next round of posting */
7018 /* free the sgls failed to post */
7019 lpfc_free_sgl_list(phba, &free_sgl_list);
7021 /* push sgls posted to the available list */
7022 if (!list_empty(&post_sgl_list)) {
7023 spin_lock_irq(&phba->hbalock);
7024 spin_lock(&phba->sli4_hba.sgl_list_lock);
7025 list_splice_init(&post_sgl_list, sgl_list);
7026 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7027 spin_unlock_irq(&phba->hbalock);
7029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7030 "3161 Failure to post sgl to port.\n");
7034 /* return the number of XRIs actually posted */
7039 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7040 * @phba: pointer to lpfc hba data structure.
7042 * This routine walks the list of nvme buffers that have been allocated and
7043 * reposts them to the port by using SGL block post. This is needed after a
7044 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7045 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7046 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7048 * Returns: 0 = success, non-zero failure.
7051 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7053 LIST_HEAD(post_nblist);
7054 int num_posted, rc = 0;
7056 /* gather all NVME buffers that need to be reposted onto a local list */
7057 lpfc_io_buf_flush(phba, &post_nblist);
7059 /* post the list of nvme buffer sgls to port if available */
7060 if (!list_empty(&post_nblist)) {
7061 num_posted = lpfc_sli4_post_io_sgl_list(
7062 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7063 /* failed to post any nvme buffer, return error */
7064 if (num_posted == 0)
7071 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7075 len = sizeof(struct lpfc_mbx_set_host_data) -
7076 sizeof(struct lpfc_sli4_cfg_mhdr);
7077 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7078 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7079 LPFC_SLI4_MBX_EMBED);
7081 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7082 mbox->u.mqe.un.set_host_data.param_len =
7083 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7084 snprintf(mbox->u.mqe.un.set_host_data.data,
7085 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7086 "Linux %s v"LPFC_DRIVER_VERSION,
7087 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7091 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7092 struct lpfc_queue *drq, int count, int idx)
7095 struct lpfc_rqe hrqe;
7096 struct lpfc_rqe drqe;
7097 struct lpfc_rqb *rqbp;
7098 unsigned long flags;
7099 struct rqb_dmabuf *rqb_buffer;
7100 LIST_HEAD(rqb_buf_list);
7102 spin_lock_irqsave(&phba->hbalock, flags);
7104 for (i = 0; i < count; i++) {
7105 /* If RQ is already full, don't bother */
7106 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7108 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7111 rqb_buffer->hrq = hrq;
7112 rqb_buffer->drq = drq;
7113 rqb_buffer->idx = idx;
7114 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7116 while (!list_empty(&rqb_buf_list)) {
7117 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7120 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7121 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7122 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7123 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7124 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7127 "6421 Cannot post to HRQ %d: %x %x %x "
7135 rqbp->rqb_free_buffer(phba, rqb_buffer);
7137 list_add_tail(&rqb_buffer->hbuf.list,
7138 &rqbp->rqb_buffer_list);
7139 rqbp->buffer_count++;
7142 spin_unlock_irqrestore(&phba->hbalock, flags);
7147 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7148 * @phba: Pointer to HBA context object.
7150 * This function is the main SLI4 device initialization PCI function. This
7151 * function is called by the HBA initialization code, HBA reset code and
7152 * HBA error attention handler code. Caller is not required to hold any
7156 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7158 int rc, i, cnt, len;
7159 LPFC_MBOXQ_t *mboxq;
7160 struct lpfc_mqe *mqe;
7163 uint32_t ftr_rsp = 0;
7164 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7165 struct lpfc_vport *vport = phba->pport;
7166 struct lpfc_dmabuf *mp;
7167 struct lpfc_rqb *rqbp;
7169 /* Perform a PCI function reset to start from clean */
7170 rc = lpfc_pci_function_reset(phba);
7174 /* Check the HBA Host Status Register for readiness */
7175 rc = lpfc_sli4_post_status_check(phba);
7179 spin_lock_irq(&phba->hbalock);
7180 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7181 spin_unlock_irq(&phba->hbalock);
7185 * Allocate a single mailbox container for initializing the port.
7188 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7192 /* Issue READ_REV to collect vpd and FW information. */
7193 vpd_size = SLI4_PAGE_SIZE;
7194 vpd = kzalloc(vpd_size, GFP_KERNEL);
7200 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7206 mqe = &mboxq->u.mqe;
7207 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7208 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7209 phba->hba_flag |= HBA_FCOE_MODE;
7210 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7212 phba->hba_flag &= ~HBA_FCOE_MODE;
7215 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7217 phba->hba_flag |= HBA_FIP_SUPPORT;
7219 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7221 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7223 if (phba->sli_rev != LPFC_SLI_REV4) {
7224 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7225 "0376 READ_REV Error. SLI Level %d "
7226 "FCoE enabled %d\n",
7227 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7234 * Continue initialization with default values even if driver failed
7235 * to read FCoE param config regions; only read parameters if the FCoE feature is enabled.
7238 if (phba->hba_flag & HBA_FCOE_MODE &&
7239 lpfc_sli4_read_fcoe_params(phba))
7240 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7241 "2570 Failed to read FCoE parameters\n");
7244 * Retrieve sli4 device physical port name; failure to do so
7245 * is considered non-fatal.
7247 rc = lpfc_sli4_retrieve_pport_name(phba);
7249 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7250 "3080 Successful retrieving SLI4 device "
7251 "physical port name: %s.\n", phba->Port);
7254 * Evaluate the read rev and vpd data. Populate the driver
7255 * state with the results. If this routine fails, the failure
7256 * is not fatal as the driver will use generic values.
7258 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7259 if (unlikely(!rc)) {
7260 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7261 "0377 Error %d parsing vpd. "
7262 "Using defaults.\n", rc);
7267 /* Save information as VPD data */
7268 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7269 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7272 * This is because the first G7 ASIC doesn't support the standard
7273 * 0x5a NVME cmd descriptor type/subtype
7275 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7276 LPFC_SLI_INTF_IF_TYPE_6) &&
7277 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7278 (phba->vpd.rev.smRev == 0) &&
7279 (phba->cfg_nvme_embed_cmd == 1))
7280 phba->cfg_nvme_embed_cmd = 0;
7282 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7283 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7285 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7287 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7289 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7291 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7292 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7293 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7294 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7295 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7296 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7297 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7298 "(%d):0380 READ_REV Status x%x "
7299 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7300 mboxq->vport ? mboxq->vport->vpi : 0,
7301 bf_get(lpfc_mqe_status, mqe),
7302 phba->vpd.rev.opFwName,
7303 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7304 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7306 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
7307 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
7308 if (phba->pport->cfg_lun_queue_depth > rc) {
7309 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7310 "3362 LUN queue depth changed from %d to %d\n",
7311 phba->pport->cfg_lun_queue_depth, rc);
7312 phba->pport->cfg_lun_queue_depth = rc;
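	/*
	 * Worked example (hypothetical values): with max_xri = 2048 the
	 * cap is 2048 >> 3 = 256, so a configured LUN queue depth of 512
	 * would be reduced to 256 here.
	 */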
7315 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7316 LPFC_SLI_INTF_IF_TYPE_0) {
7317 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7318 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7319 if (rc == MBX_SUCCESS) {
7320 phba->hba_flag |= HBA_RECOVERABLE_UE;
7321 /* Set 1Sec interval to detect UE */
7322 phba->eratt_poll_interval = 1;
7323 phba->sli4_hba.ue_to_sr = bf_get(
7324 lpfc_mbx_set_feature_UESR,
7325 &mboxq->u.mqe.un.set_feature);
7326 phba->sli4_hba.ue_to_rp = bf_get(
7327 lpfc_mbx_set_feature_UERP,
7328 &mboxq->u.mqe.un.set_feature);
7332 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7333 /* Enable MDS Diagnostics only if the SLI Port supports it */
7334 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7335 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7336 if (rc != MBX_SUCCESS)
7337 phba->mds_diags_support = 0;
7341 * Discover the port's supported feature set and match it against the host's requests.
7344 lpfc_request_features(phba, mboxq);
7345 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7352 * The port must support FCP initiator mode as this is the
7353 * only mode running in the host.
7355 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7356 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7357 "0378 No support for fcpi mode.\n");
7361 /* Performance Hints are ONLY for FCoE */
7362 if (phba->hba_flag & HBA_FCOE_MODE) {
7363 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7364 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7366 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7370 * If the port cannot support the host's requested features
7371 * then turn off the global config parameters to disable the
7372 * feature in the driver. This is not a fatal error.
7374 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7375 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7376 phba->cfg_enable_bg = 0;
7377 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7382 if (phba->max_vpi && phba->cfg_enable_npiv &&
7383 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7388 "0379 Feature Mismatch Data: x%08x %08x "
7389 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7390 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7391 phba->cfg_enable_npiv, phba->max_vpi);
7392 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7393 phba->cfg_enable_bg = 0;
7394 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7395 phba->cfg_enable_npiv = 0;
7398 /* These SLI3 features are assumed in SLI4 */
7399 spin_lock_irq(&phba->hbalock);
7400 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7401 spin_unlock_irq(&phba->hbalock);
7404 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
7405 * calls depend on these resources to complete port setup.
7407 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7410 "2920 Failed to alloc Resource IDs "
7415 lpfc_set_host_data(phba, mboxq);
7417 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7419 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7420 "2134 Failed to set host os driver version %x",
7424 /* Read the port's service parameters. */
7425 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7427 phba->link_state = LPFC_HBA_ERROR;
7432 mboxq->vport = vport;
7433 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7434 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7435 if (rc == MBX_SUCCESS) {
7436 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7441 * This memory was allocated by the lpfc_read_sparam routine. Release
7442 * it to the mbuf pool.
7444 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7446 mboxq->ctx_buf = NULL;
7448 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7449 "0382 READ_SPARAM command failed "
7450 "status %d, mbxStatus x%x\n",
7451 rc, bf_get(lpfc_mqe_status, mqe));
7452 phba->link_state = LPFC_HBA_ERROR;
7457 lpfc_update_vport_wwn(vport);
7459 /* Update the fc_host data structures with new wwn. */
7460 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7461 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7463 /* Create all the SLI4 queues */
7464 rc = lpfc_sli4_queue_create(phba);
7466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7467 "3089 Failed to allocate queues\n");
7471 /* Set up all the queues to the device */
7472 rc = lpfc_sli4_queue_setup(phba);
7474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7475 "0381 Error %d during queue setup.\n ", rc);
7476 goto out_stop_timers;
7478 /* Initialize the driver internal SLI layer lists. */
7479 lpfc_sli4_setup(phba);
7480 lpfc_sli4_queue_init(phba);
7482 /* update host els xri-sgl sizes and mappings */
7483 rc = lpfc_sli4_els_sgl_update(phba);
7485 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7486 "1400 Failed to update xri-sgl size and "
7487 "mapping: %d\n", rc);
7488 goto out_destroy_queue;
7491 /* register the els sgl pool to the port */
7492 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7493 phba->sli4_hba.els_xri_cnt);
7494 if (unlikely(rc < 0)) {
7495 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7496 "0582 Error %d during els sgl post "
7499 goto out_destroy_queue;
7501 phba->sli4_hba.els_xri_cnt = rc;
7503 if (phba->nvmet_support) {
7504 /* update host nvmet xri-sgl sizes and mappings */
7505 rc = lpfc_sli4_nvmet_sgl_update(phba);
7507 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7508 "6308 Failed to update nvmet-sgl size "
7509 "and mapping: %d\n", rc);
7510 goto out_destroy_queue;
7513 /* register the nvmet sgl pool to the port */
7514 rc = lpfc_sli4_repost_sgl_list(
7516 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7517 phba->sli4_hba.nvmet_xri_cnt);
7518 if (unlikely(rc < 0)) {
7519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7520 "3117 Error %d during nvmet "
7523 goto out_destroy_queue;
7525 phba->sli4_hba.nvmet_xri_cnt = rc;
7527 cnt = phba->cfg_iocb_cnt * 1024;
7528 /* We need 1 iocbq for every SGL, for IO processing */
7529 cnt += phba->sli4_hba.nvmet_xri_cnt;
7531 /* update host common xri-sgl sizes and mappings */
7532 rc = lpfc_sli4_io_sgl_update(phba);
7534 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7535 "6082 Failed to update nvme-sgl size "
7536 "and mapping: %d\n", rc);
7537 goto out_destroy_queue;
7540 /* register the allocated common sgl pool to the port */
7541 rc = lpfc_sli4_repost_io_sgl_list(phba);
7543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7544 "6116 Error %d during nvme sgl post "
7546 /* Some NVME buffers were moved to abort nvme list */
7547 /* A pci function reset will repost them */
7549 goto out_destroy_queue;
7551 cnt = phba->cfg_iocb_cnt * 1024;
7554 if (!phba->sli.iocbq_lookup) {
7555 /* Initialize and populate the iocb list per host */
7556 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7557 "2821 initialize iocb list %d total %d\n",
7558 phba->cfg_iocb_cnt, cnt);
7559 rc = lpfc_init_iocb_list(phba, cnt);
7561 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7562 "1413 Failed to init iocb list.\n");
7563 goto out_destroy_queue;
7567 if (phba->nvmet_support)
7568 lpfc_nvmet_create_targetport(phba);
7570 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7571 /* Post initial buffers to all RQs created */
7572 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7573 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7574 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7575 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7576 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7577 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7578 rqbp->buffer_count = 0;
7580 lpfc_post_rq_buffer(
7581 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7582 phba->sli4_hba.nvmet_mrq_data[i],
7583 phba->cfg_nvmet_mrq_post, i);
7587 /* Post the rpi header region to the device. */
7588 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7590 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7591 "0393 Error %d during rpi post operation\n",
7594 goto out_destroy_queue;
7596 lpfc_sli4_node_prep(phba);
7598 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7599 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7601 * The FC Port needs to register FCFI (index 0)
7603 lpfc_reg_fcfi(phba, mboxq);
7604 mboxq->vport = phba->pport;
7605 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7606 if (rc != MBX_SUCCESS)
7607 goto out_unset_queue;
7609 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7610 &mboxq->u.mqe.un.reg_fcfi);
7612 /* We are in NVME Target mode with MRQ > 1 */
7614 /* First register the FCFI */
7615 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7616 mboxq->vport = phba->pport;
7617 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7618 if (rc != MBX_SUCCESS)
7619 goto out_unset_queue;
7621 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7622 &mboxq->u.mqe.un.reg_fcfi_mrq);
7624 /* Next register the MRQs */
7625 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7626 mboxq->vport = phba->pport;
7627 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7628 if (rc != MBX_SUCCESS)
7629 goto out_unset_queue;
7632 /* Check if the port is configured to be disabled */
7633 lpfc_sli_read_link_ste(phba);
7636 /* Don't post more new bufs if repost already recovered
7639 if (phba->nvmet_support == 0) {
7640 if (phba->sli4_hba.io_xri_cnt == 0) {
7641 len = lpfc_new_io_buf(
7642 phba, phba->sli4_hba.io_xri_max);
7645 goto out_unset_queue;
7648 if (phba->cfg_xri_rebalancing)
7649 lpfc_create_multixri_pools(phba);
7652 phba->cfg_xri_rebalancing = 0;
7655 /* Arm the CQs and then EQs on device */
7656 lpfc_sli4_arm_cqeq_intr(phba);
7658 /* Indicate device interrupt mode */
7659 phba->sli4_hba.intr_enable = 1;
7661 /* Allow asynchronous mailbox command to go through */
7662 spin_lock_irq(&phba->hbalock);
7663 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7664 spin_unlock_irq(&phba->hbalock);
7666 /* Post receive buffers to the device */
7667 lpfc_sli4_rb_setup(phba);
7669 /* Reset HBA FCF states after HBA reset */
7670 phba->fcf.fcf_flag = 0;
7671 phba->fcf.current_rec.flag = 0;
7673 /* Start the ELS watchdog timer */
7674 mod_timer(&vport->els_tmofunc,
7675 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7677 /* Start heart beat timer */
7678 mod_timer(&phba->hb_tmofunc,
7679 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7680 phba->hb_outstanding = 0;
7681 phba->last_completion_time = jiffies;
7683 /* start eq_delay heartbeat */
7684 if (phba->cfg_auto_imax)
7685 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7686 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7688 /* Start error attention (ERATT) polling timer */
7689 mod_timer(&phba->eratt_poll,
7690 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7692 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7693 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7694 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7696 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7697 "2829 This device supports "
7698 "Advanced Error Reporting (AER)\n");
7699 spin_lock_irq(&phba->hbalock);
7700 phba->hba_flag |= HBA_AER_ENABLED;
7701 spin_unlock_irq(&phba->hbalock);
7703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7704 "2830 This device does not support "
7705 "Advanced Error Reporting (AER)\n");
7706 phba->cfg_aer_support = 0;
7712 * The port is ready, set the host's link state to LINK_DOWN
7713 * in preparation for link interrupts.
7715 spin_lock_irq(&phba->hbalock);
7716 phba->link_state = LPFC_LINK_DOWN;
7718 /* Check if physical ports are trunked */
7719 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7720 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7721 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7722 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7723 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7724 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7725 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7726 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7727 spin_unlock_irq(&phba->hbalock);
7729 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7730 (phba->hba_flag & LINK_DISABLED)) {
7731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7732 "3103 Adapter Link is disabled.\n");
7733 lpfc_down_link(phba, mboxq);
7734 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7735 if (rc != MBX_SUCCESS) {
7736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7737 "3104 Adapter failed to issue "
7738 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7739 goto out_io_buff_free;
7741 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7742 /* don't perform init_link on SLI4 FC port loopback test */
7743 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7744 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7746 goto out_io_buff_free;
7749 mempool_free(mboxq, phba->mbox_mem_pool);
7752 /* Free allocated IO Buffers */
7755 /* Unset all the queues set up in this routine on error exit */
7756 lpfc_sli4_queue_unset(phba);
7758 lpfc_free_iocb_list(phba);
7759 lpfc_sli4_queue_destroy(phba);
7761 lpfc_stop_hba_timers(phba);
7763 mempool_free(mboxq, phba->mbox_mem_pool);
7768 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7769 * @t: pointer to the timer embedded in the hba structure.
7771 * This is the callback function for the mailbox timer. The mailbox
7772 * timer is armed when a new mailbox command is issued and the timer
7773 * is deleted when the mailbox completes. The function is called by
7774 * the kernel timer code when a mailbox does not complete within the
7775 * expected time. This function wakes up the worker thread to
7776 * process the mailbox timeout and returns. All the processing is
7777 * done by the worker thread function lpfc_mbox_timeout_handler.
7780 lpfc_mbox_timeout(struct timer_list *t)
7782 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7783 unsigned long iflag;
7784 uint32_t tmo_posted;
7786 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7787 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7789 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7790 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7793 lpfc_worker_wake_up(phba);
7798 * lpfc_sli4_mbox_completions_pending - check if any mailbox completions are pending
7800 * @phba: Pointer to HBA context object.
7802 * This function checks if any mailbox completions are present on the mailbox completion queue.
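 *
 * The scan below relies on the CQE valid (phase) bit: the port toggles the
 * bit each time it wraps the queue, so an entry is new only while its valid
 * bit matches qe_valid. For example, with entry_count = 16 and qe_valid = 1,
 * entries written on the first pass carry valid = 1; once the scan index
 * wraps to 0 the expected value toggles to 0 for the next pass (on ports
 * that support cqav).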
7806 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7810 struct lpfc_queue *mcq;
7811 struct lpfc_mcqe *mcqe;
7812 bool pending_completions = false;
7815 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7818 /* Check for completions on mailbox completion queue */
7820 mcq = phba->sli4_hba.mbx_cq;
7821 idx = mcq->hba_index;
7822 qe_valid = mcq->qe_valid;
7823 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7824 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7825 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7826 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7827 pending_completions = true;
7830 idx = (idx + 1) % mcq->entry_count;
7831 if (mcq->hba_index == idx)
7834 /* if the index wrapped around, toggle the valid bit */
7835 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
7836 qe_valid = (qe_valid) ? 0 : 1;
7838 return pending_completions;
7843 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7845 * @phba: Pointer to HBA context object.
7847 * For sli4, it is possible to miss an interrupt. As such, mbox completions
7848 * may be missed, causing erroneous mailbox timeouts to occur. This function
7849 * checks to see if mbox completions are on the mailbox completion queue
7850 * and will process all the completions associated with the eq for the
7851 * mailbox completion queue.
7854 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7856 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7858 struct lpfc_queue *fpeq = NULL;
7861 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7864 /* Find the eq associated with the mcq */
7867 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++)
7868 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
7869 sli4_hba->mbx_cq->assoc_qid) {
7870 fpeq = sli4_hba->hdwq[eqidx].hba_eq;
7876 /* Turn off interrupts from this EQ */
7878 sli4_hba->sli4_eq_clr_intr(fpeq);
7880 /* Check to see if a mbox completion is pending */
7882 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7885 * If a mbox completion is pending, process all the events on EQ
7886 * associated with the mbox completion queue (this could include
7887 * mailbox commands, async events, els commands, receive queue data
7892 /* process and rearm the EQ */
7893 lpfc_sli4_process_eq(phba, fpeq);
7895 /* Always clear and re-arm the EQ */
7896 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7898 return mbox_pending;
7903 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7904 * @phba: Pointer to HBA context object.
7906 * This function is called from worker thread when a mailbox command times out.
7907 * The caller is not required to hold any locks. This function will reset the
7908 * HBA and recover all the pending commands.
7911 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7913 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7914 MAILBOX_t *mb = NULL;
7916 struct lpfc_sli *psli = &phba->sli;
7918 /* If the mailbox completed, process the completion and return */
7919 if (lpfc_sli4_process_missed_mbox_completions(phba))
7924 /* Check the pmbox pointer first. There is a race condition
7925 * between the mbox timeout handler getting executed in the
7926 * worklist and the mailbox actually completing. When this
7927 * race condition occurs, the mbox_active will be NULL.
7929 spin_lock_irq(&phba->hbalock);
7930 if (pmbox == NULL) {
7931 lpfc_printf_log(phba, KERN_WARNING,
7933 "0353 Active Mailbox cleared - mailbox timeout "
7935 spin_unlock_irq(&phba->hbalock);
7939 /* Mbox cmd <mbxCommand> timeout */
7940 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7941 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7943 phba->pport->port_state,
7945 phba->sli.mbox_active);
7946 spin_unlock_irq(&phba->hbalock);
7948 /* Setting state unknown so lpfc_sli_abort_iocb_ring
7949 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7950 * it to fail all outstanding SCSI IO.
7952 spin_lock_irq(&phba->pport->work_port_lock);
7953 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7954 spin_unlock_irq(&phba->pport->work_port_lock);
7955 spin_lock_irq(&phba->hbalock);
7956 phba->link_state = LPFC_LINK_UNKNOWN;
7957 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7958 spin_unlock_irq(&phba->hbalock);
7960 lpfc_sli_abort_fcp_rings(phba);
7962 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7963 "0345 Resetting board due to mailbox timeout\n");
7965 /* Reset the HBA device */
7966 lpfc_reset_hba(phba);
7970 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7971 * @phba: Pointer to HBA context object.
7972 * @pmbox: Pointer to mailbox object.
7973 * @flag: Flag indicating how the mailbox needs to be processed.
7975 * This function is called by discovery code and HBA management code
7976 * to submit a mailbox command to firmware with SLI-3 interface spec. This
7977 * function gets the hbalock to protect the data structures.
7978 * The mailbox command can be submitted in polling mode, in which case
7979 * this function will wait in a polling loop for the completion of the
7981 * If the mailbox is submitted in no_wait mode (not polling) the
7982 * function will submit the command and returns immediately without waiting
7983 * for the mailbox completion. The no_wait is supported only when HBA
7984 * is in SLI2/SLI3 mode - interrupts are enabled.
7985 * The SLI interface allows only one mailbox pending at a time. If the
7986 * mailbox is issued in polling mode and there is already a mailbox
7987 * pending, then the function will return an error. If the mailbox is issued
7988 * in NO_WAIT mode and there is a mailbox pending already, the function
7989 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7990 * The sli layer owns the mailbox object until the completion of mailbox
7991 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
7992 * return codes the caller owns the mailbox command after the return of
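 *
 * A no-wait submission sketch (mirroring lpfc_sli4_ras_fwlog_init() earlier
 * in this file; mbox is an allocated mailbox with mbox_cmpl set):
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);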
7996 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8000 struct lpfc_sli *psli = &phba->sli;
8001 uint32_t status, evtctr;
8002 uint32_t ha_copy, hc_copy;
8004 unsigned long timeout;
8005 unsigned long drvr_flag = 0;
8006 uint32_t word0, ldata;
8007 void __iomem *to_slim;
8008 int processing_queue = 0;
8010 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8012 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8013 /* processing mbox queue from intr_handler */
8014 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8015 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8018 processing_queue = 1;
8019 pmbox = lpfc_mbox_get(phba);
8021 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8026 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8027 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8029 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8030 lpfc_printf_log(phba, KERN_ERR,
8031 LOG_MBOX | LOG_VPORT,
8032 "1806 Mbox x%x failed. No vport\n",
8033 pmbox->u.mb.mbxCommand);
8035 goto out_not_finished;
8039 /* If the PCI channel is in offline state, do not post mbox. */
8040 if (unlikely(pci_channel_offline(phba->pcidev))) {
8041 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8042 goto out_not_finished;
8045 /* If HBA has a deferred error attention, fail the mailbox command. */
8046 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8047 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8048 goto out_not_finished;
8054 status = MBX_SUCCESS;
8056 if (phba->link_state == LPFC_HBA_ERROR) {
8057 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8059 /* Mbox command <mbxCommand> cannot issue */
8060 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8061 "(%d):0311 Mailbox command x%x cannot "
8062 "issue Data: x%x x%x\n",
8063 pmbox->vport ? pmbox->vport->vpi : 0,
8064 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8065 goto out_not_finished;
8068 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8069 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8070 !(hc_copy & HC_MBINT_ENA)) {
8071 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8072 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8073 "(%d):2528 Mailbox command x%x cannot "
8074 "issue Data: x%x x%x\n",
8075 pmbox->vport ? pmbox->vport->vpi : 0,
8076 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8077 goto out_not_finished;
8081 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8082 /* Polling for a mbox command when another one is already active
8083 * is not allowed in SLI. Also, the driver must have established
8084 * SLI2 mode to queue and process multiple mbox commands.
8087 if (flag & MBX_POLL) {
8088 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8090 /* Mbox command <mbxCommand> cannot issue */
8091 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8092 "(%d):2529 Mailbox command x%x "
8093 "cannot issue Data: x%x x%x\n",
8094 pmbox->vport ? pmbox->vport->vpi : 0,
8095 pmbox->u.mb.mbxCommand,
8096 psli->sli_flag, flag);
8097 goto out_not_finished;
8100 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8101 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8102 /* Mbox command <mbxCommand> cannot issue */
8103 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8104 "(%d):2530 Mailbox command x%x "
8105 "cannot issue Data: x%x x%x\n",
8106 pmbox->vport ? pmbox->vport->vpi : 0,
8107 pmbox->u.mb.mbxCommand,
8108 psli->sli_flag, flag);
8109 goto out_not_finished;
8112 /* Another mailbox command is still being processed, queue this
8113 * command to be processed later.
8115 lpfc_mbox_put(phba, pmbox);
8117 /* Mbox cmd issue - BUSY */
8118 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8119 "(%d):0308 Mbox cmd issue - BUSY Data: "
8120 "x%x x%x x%x x%x\n",
8121 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8123 phba->pport ? phba->pport->port_state : 0xff,
8124 psli->sli_flag, flag);
8126 psli->slistat.mbox_busy++;
8127 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8130 lpfc_debugfs_disc_trc(pmbox->vport,
8131 LPFC_DISC_TRC_MBOX_VPORT,
8132 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8133 (uint32_t)mbx->mbxCommand,
8134 mbx->un.varWords[0], mbx->un.varWords[1]);
8137 lpfc_debugfs_disc_trc(phba->pport,
8139 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8140 (uint32_t)mbx->mbxCommand,
8141 mbx->un.varWords[0], mbx->un.varWords[1]);
8147 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8149 /* If we are not polling, we MUST be in SLI2 mode */
8150 if (flag != MBX_POLL) {
8151 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8152 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8153 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8154 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8155 /* Mbox command <mbxCommand> cannot issue */
8156 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8157 "(%d):2531 Mailbox command x%x "
8158 "cannot issue Data: x%x x%x\n",
8159 pmbox->vport ? pmbox->vport->vpi : 0,
8160 pmbox->u.mb.mbxCommand,
8161 psli->sli_flag, flag);
8162 goto out_not_finished;
8164 /* timeout active mbox command */
8165 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8167 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8170 /* Mailbox cmd <cmd> issue */
8171 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8172 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8174 pmbox->vport ? pmbox->vport->vpi : 0,
8176 phba->pport ? phba->pport->port_state : 0xff,
8177 psli->sli_flag, flag);
8179 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8181 lpfc_debugfs_disc_trc(pmbox->vport,
8182 LPFC_DISC_TRC_MBOX_VPORT,
8183 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8184 (uint32_t)mbx->mbxCommand,
8185 mbx->un.varWords[0], mbx->un.varWords[1]);
8188 lpfc_debugfs_disc_trc(phba->pport,
8190 "MBOX Send: cmd:x%x mb:x%x x%x",
8191 (uint32_t)mbx->mbxCommand,
8192 mbx->un.varWords[0], mbx->un.varWords[1]);
8196 psli->slistat.mbox_cmd++;
8197 evtctr = psli->slistat.mbox_event;
8199 /* next set own bit for the adapter and copy over command word */
8200 mbx->mbxOwner = OWN_CHIP;
8202 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8203 /* Populate mbox extension offset word. */
8204 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8205 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8206 = (uint8_t *)phba->mbox_ext
8207 - (uint8_t *)phba->mbox;
8210 /* Copy the mailbox extension data */
8211 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8212 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8213 (uint8_t *)phba->mbox_ext,
8214 pmbox->in_ext_byte_len);
8216 /* Copy command data to host SLIM area */
8217 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8219 /* Populate mbox extension offset word. */
8220 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8221 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8222 = MAILBOX_HBA_EXT_OFFSET;
8224 /* Copy the mailbox extension data */
8225 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8226 lpfc_memcpy_to_slim(phba->MBslimaddr +
8227 MAILBOX_HBA_EXT_OFFSET,
8228 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8230 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8231 /* copy command data into host mbox for cmpl */
8232 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8235 /* First copy mbox command data to HBA SLIM, skip past first
8236 * word */
8237 to_slim = phba->MBslimaddr + sizeof(uint32_t);
8238 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8239 MAILBOX_CMD_SIZE - sizeof(uint32_t));
8241 /* Next copy over first word, with mbxOwner set */
8242 ldata = *((uint32_t *)mbx);
8243 to_slim = phba->MBslimaddr;
8244 writel(ldata, to_slim);
8245 readl(to_slim); /* flush */
8247 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8248 /* switch over to host mailbox */
8249 psli->sli_flag |= LPFC_SLI_ACTIVE;
8256 /* Set up reference to mailbox command */
8257 psli->mbox_active = pmbox;
8258 /* Interrupt board to do it */
8259 writel(CA_MBATT, phba->CAregaddr);
8260 readl(phba->CAregaddr); /* flush */
8261 /* Don't wait for it to finish, just return */
8265 /* Set up null reference to mailbox command */
8266 psli->mbox_active = NULL;
8267 /* Interrupt board to do it */
8268 writel(CA_MBATT, phba->CAregaddr);
8269 readl(phba->CAregaddr); /* flush */
8271 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8272 /* First read mbox status word */
8273 word0 = *((uint32_t *)phba->mbox);
8274 word0 = le32_to_cpu(word0);
8276 /* First read mbox status word */
8277 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8278 spin_unlock_irqrestore(&phba->hbalock,
8280 goto out_not_finished;
8284 /* Read the HBA Host Attention Register */
8285 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8286 spin_unlock_irqrestore(&phba->hbalock,
8288 goto out_not_finished;
8290 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8293 /* Wait for command to complete */
8294 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8295 (!(ha_copy & HA_MBATT) &&
8296 (phba->link_state > LPFC_WARM_START))) {
8297 if (time_after(jiffies, timeout)) {
8298 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8299 spin_unlock_irqrestore(&phba->hbalock,
8301 goto out_not_finished;
8304 /* Check if we took a mbox interrupt while we were
8305 * polling */
8306 if (((word0 & OWN_CHIP) != OWN_CHIP)
8307 && (evtctr != psli->slistat.mbox_event))
8311 spin_unlock_irqrestore(&phba->hbalock,
8314 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8317 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8318 /* First copy command data */
8319 word0 = *((uint32_t *)phba->mbox);
8320 word0 = le32_to_cpu(word0);
8321 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8324 /* Check real SLIM for any errors */
8325 slimword0 = readl(phba->MBslimaddr);
8326 slimmb = (MAILBOX_t *)&slimword0;
8327 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8328 && slimmb->mbxStatus) {
8335 /* First copy command data */
8336 word0 = readl(phba->MBslimaddr);
8338 /* Read the HBA Host Attention Register */
8339 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8340 spin_unlock_irqrestore(&phba->hbalock,
8342 goto out_not_finished;
8346 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8347 /* copy results back to user */
8348 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8350 /* Copy the mailbox extension data */
8351 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8352 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8354 pmbox->out_ext_byte_len);
8357 /* First copy command data */
8358 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8360 /* Copy the mailbox extension data */
8361 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8362 lpfc_memcpy_from_slim(
8365 MAILBOX_HBA_EXT_OFFSET,
8366 pmbox->out_ext_byte_len);
8370 writel(HA_MBATT, phba->HAregaddr);
8371 readl(phba->HAregaddr); /* flush */
8373 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8374 status = mbx->mbxStatus;
8377 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8381 if (processing_queue) {
8382 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8383 lpfc_mbox_cmpl_put(phba, pmbox);
8385 return MBX_NOT_FINISHED;
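/*
 * Illustrative sketch (not compiled into the driver): a typical polled
 * caller honoring the ownership contract documented above. The snippet
 * assumes a valid phba; lpfc_read_config() is just one example prep
 * helper from lpfc_mbox.c. The mailbox is freed on every outcome except
 * MBX_TIMEOUT, where the SLI layer may still reference it.
 */
#if 0
LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

if (mbox) {
	lpfc_read_config(phba, mbox);	/* prep the command */
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_POLL) != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
}
#endif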
8389 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8390 * @phba: Pointer to HBA context object.
8392 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8393 * the driver internal pending mailbox queue. It will then try to wait out the
8394 * possible outstanding mailbox command before returning.
8397 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8398 * the outstanding mailbox command timed out.
8401 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8403 struct lpfc_sli *psli = &phba->sli;
8405 unsigned long timeout = 0;
8407 /* Mark the asynchronous mailbox command posting as blocked */
8408 spin_lock_irq(&phba->hbalock);
8409 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8410 /* Determine how long we might wait for the active mailbox
8411 * command to be gracefully completed by firmware.
8413 if (phba->sli.mbox_active)
8414 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8415 phba->sli.mbox_active) *
8417 spin_unlock_irq(&phba->hbalock);
8419 /* Make sure the mailbox is really active */
8421 lpfc_sli4_process_missed_mbox_completions(phba);
8423 /* Wait for the outstanding mailbox command to complete */
8424 while (phba->sli.mbox_active) {
8425 /* Check active mailbox complete status every 2ms */
8427 if (time_after(jiffies, timeout)) {
8428 /* Timeout; mark the outstanding cmd as not complete */
8434 /* Cannot cleanly block the async mailbox command, fail it */
8436 spin_lock_irq(&phba->hbalock);
8437 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8438 spin_unlock_irq(&phba->hbalock);
8444 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
8445 * @phba: Pointer to HBA context object.
8447 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8448 * commands from the driver internal pending mailbox queue. It makes sure
8449 * that there is no outstanding mailbox command before resuming posting of
8450 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8451 * mailbox command, it will try to wait it out before resuming asynchronous
8452 * mailbox command posting.
8455 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8457 struct lpfc_sli *psli = &phba->sli;
8459 spin_lock_irq(&phba->hbalock);
8460 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8461 /* Asynchronous mailbox posting is not blocked, do nothing */
8462 spin_unlock_irq(&phba->hbalock);
8466 /* The outstanding synchronous mailbox command is guaranteed to be done,
8467 * either successfully or by timeout. Since a timed-out mailbox command is
8468 * always removed, it is safe to simply unblock posting of async mailbox
8469 * commands and resume.
8471 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8472 spin_unlock_irq(&phba->hbalock);
8474 /* wake up worker thread to post asynchronous mailbox commands */
8475 lpfc_worker_wake_up(phba);
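/*
 * Sketch of the intended block/unblock pairing (this is the shape of
 * the MBX_POLL path in lpfc_sli_issue_mbox_s4() below): block async
 * posting, issue the synchronous command, then unblock.
 */
#if 0
rc = lpfc_sli4_async_mbox_block(phba);
if (!rc) {
	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
	lpfc_sli4_async_mbox_unblock(phba);
}
#endif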
8479 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8480 * @phba: Pointer to HBA context object.
8481 * @mboxq: Pointer to mailbox object.
8483 * The function waits for the bootstrap mailbox register ready bit from the
8484 * port for up to twice the regular mailbox command timeout value.
8486 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8487 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8490 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8493 unsigned long timeout;
8494 struct lpfc_register bmbx_reg;
8496 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8500 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8501 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8505 if (time_after(jiffies, timeout))
8506 return MBXERR_ERROR;
8507 } while (!db_ready);
8513 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8514 * @phba: Pointer to HBA context object.
8515 * @mboxq: Pointer to mailbox object.
8517 * The function posts a mailbox to the port. The mailbox is expected
8518 * to be completely filled in and ready for the port to operate on it.
8519 * This routine executes a synchronous completion operation on the
8520 * mailbox by polling for its completion.
8522 * The caller must not be holding any locks when calling this routine.
8525 * MBX_SUCCESS - mailbox posted successfully
8526 * Any of the MBX error values.
8529 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8531 int rc = MBX_SUCCESS;
8532 unsigned long iflag;
8533 uint32_t mcqe_status;
8535 struct lpfc_sli *psli = &phba->sli;
8536 struct lpfc_mqe *mb = &mboxq->u.mqe;
8537 struct lpfc_bmbx_create *mbox_rgn;
8538 struct dma_address *dma_address;
8541 * Only one mailbox can be active to the bootstrap mailbox region
8542 * at a time and there is no queueing provided.
8544 spin_lock_irqsave(&phba->hbalock, iflag);
8545 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8546 spin_unlock_irqrestore(&phba->hbalock, iflag);
8547 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8548 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8549 "cannot issue Data: x%x x%x\n",
8550 mboxq->vport ? mboxq->vport->vpi : 0,
8551 mboxq->u.mb.mbxCommand,
8552 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8553 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8554 psli->sli_flag, MBX_POLL);
8555 return MBXERR_ERROR;
8557 /* The driver grabs the token and owns it until release */
8558 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8559 phba->sli.mbox_active = mboxq;
8560 spin_unlock_irqrestore(&phba->hbalock, iflag);
8562 /* Wait for the bootstrap mbox register to become ready */
8563 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8567 * Initialize the bootstrap memory region to avoid stale data areas
8568 * in the mailbox post. Then copy the caller's mailbox contents to
8569 * the bmbx mailbox region.
8571 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8572 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8573 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8574 sizeof(struct lpfc_mqe));
8576 /* Post the high mailbox dma address to the port and wait for ready. */
8577 dma_address = &phba->sli4_hba.bmbx.dma_address;
8578 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8580 /* Wait for the port to be ready again after the hi-address write */
8581 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8585 /* Post the low mailbox dma address to the port. */
8586 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8588 /* Wait for the port to be ready again after the low-address write */
8589 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8594 * Read the CQ to ensure the mailbox has completed.
8595 * If so, update the mailbox status so that the upper layers
8596 * can complete the request normally.
8598 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8599 sizeof(struct lpfc_mqe));
8600 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8601 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8602 sizeof(struct lpfc_mcqe));
8603 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8605 * When the CQE status indicates a failure and the mailbox status
8606 * indicates success, copy the CQE status into the mailbox status
8607 * (and prefix it with x4000): for example, an MCQE status of x2 is
8608 * reported upward as x4002.
8609 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8610 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8611 bf_set(lpfc_mqe_status, mb,
8612 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8615 lpfc_sli4_swap_str(phba, mboxq);
8617 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8618 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8619 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8620 " x%x x%x CQ: x%x x%x x%x x%x\n",
8621 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8622 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8623 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8624 bf_get(lpfc_mqe_status, mb),
8625 mb->un.mb_words[0], mb->un.mb_words[1],
8626 mb->un.mb_words[2], mb->un.mb_words[3],
8627 mb->un.mb_words[4], mb->un.mb_words[5],
8628 mb->un.mb_words[6], mb->un.mb_words[7],
8629 mb->un.mb_words[8], mb->un.mb_words[9],
8630 mb->un.mb_words[10], mb->un.mb_words[11],
8631 mb->un.mb_words[12], mboxq->mcqe.word0,
8632 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8633 mboxq->mcqe.trailer);
8635 /* We are holding the token; no lock is strictly needed to release it */
8636 spin_lock_irqsave(&phba->hbalock, iflag);
8637 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8638 phba->sli.mbox_active = NULL;
8639 spin_unlock_irqrestore(&phba->hbalock, iflag);
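/*
 * Recap of the bootstrap mailbox handshake above: wait for the BMBX
 * ready bit, write the high half of the bmbx DMA address, wait for
 * ready again, write the low half, wait once more, then copy the MQE
 * and MCQE back out of the bootstrap region to report status.
 */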
8644 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8645 * @phba: Pointer to HBA context object.
8646 * @pmbox: Pointer to mailbox object.
8647 * @flag: Flag indicating how the mailbox need to be processed.
8649 * This function is called by discovery code and HBA management code to submit
8650 * a mailbox command to firmware with SLI-4 interface spec.
8652 * Return codes: the caller owns the mailbox command after the return of the
8653 * function.
8656 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8659 struct lpfc_sli *psli = &phba->sli;
8660 unsigned long iflags;
8663 /* Dump the mailbox command at issue time, if idiag capture is set up */
8664 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8666 rc = lpfc_mbox_dev_check(phba);
8668 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8669 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8670 "cannot issue Data: x%x x%x\n",
8671 mboxq->vport ? mboxq->vport->vpi : 0,
8672 mboxq->u.mb.mbxCommand,
8673 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8674 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8675 psli->sli_flag, flag);
8676 goto out_not_finished;
8679 /* Detect polling mode and jump to a handler */
8680 if (!phba->sli4_hba.intr_enable) {
8681 if (flag == MBX_POLL)
8682 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8685 if (rc != MBX_SUCCESS)
8686 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8687 "(%d):2541 Mailbox command x%x "
8688 "(x%x/x%x) failure: "
8689 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8691 mboxq->vport ? mboxq->vport->vpi : 0,
8692 mboxq->u.mb.mbxCommand,
8693 lpfc_sli_config_mbox_subsys_get(phba,
8695 lpfc_sli_config_mbox_opcode_get(phba,
8697 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8698 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8699 bf_get(lpfc_mcqe_ext_status,
8701 psli->sli_flag, flag);
8703 } else if (flag == MBX_POLL) {
8704 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8705 "(%d):2542 Try to issue mailbox command "
8706 "x%x (x%x/x%x) synchronously ahead of async "
8707 "mailbox command queue: x%x x%x\n",
8708 mboxq->vport ? mboxq->vport->vpi : 0,
8709 mboxq->u.mb.mbxCommand,
8710 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8711 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8712 psli->sli_flag, flag);
8713 /* Try to block the asynchronous mailbox posting */
8714 rc = lpfc_sli4_async_mbox_block(phba);
8716 /* Successfully blocked, now issue sync mbox cmd */
8717 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8718 if (rc != MBX_SUCCESS)
8719 lpfc_printf_log(phba, KERN_WARNING,
8721 "(%d):2597 Sync Mailbox command "
8722 "x%x (x%x/x%x) failure: "
8723 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8725 mboxq->vport ? mboxq->vport->vpi : 0,
8726 mboxq->u.mb.mbxCommand,
8727 lpfc_sli_config_mbox_subsys_get(phba,
8729 lpfc_sli_config_mbox_opcode_get(phba,
8731 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8732 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8733 bf_get(lpfc_mcqe_ext_status,
8735 psli->sli_flag, flag);
8736 /* Unblock the async mailbox posting afterward */
8737 lpfc_sli4_async_mbox_unblock(phba);
8742 /* Now, handle the interrupt-mode asynchronous mailbox command */
8743 rc = lpfc_mbox_cmd_check(phba, mboxq);
8745 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8746 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8747 "cannot issue Data: x%x x%x\n",
8748 mboxq->vport ? mboxq->vport->vpi : 0,
8749 mboxq->u.mb.mbxCommand,
8750 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8751 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8752 psli->sli_flag, flag);
8753 goto out_not_finished;
8756 /* Put the mailbox command into the driver internal FIFO */
8757 psli->slistat.mbox_busy++;
8758 spin_lock_irqsave(&phba->hbalock, iflags);
8759 lpfc_mbox_put(phba, mboxq);
8760 spin_unlock_irqrestore(&phba->hbalock, iflags);
8761 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8762 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8763 "x%x (x%x/x%x) x%x x%x x%x\n",
8764 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8765 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8766 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8767 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8768 phba->pport->port_state,
8769 psli->sli_flag, MBX_NOWAIT);
8770 /* Wake up worker thread to transport mailbox command from head */
8771 lpfc_worker_wake_up(phba);
8776 return MBX_NOT_FINISHED;
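/*
 * Illustrative sketch (not compiled): the asynchronous MBX_NOWAIT
 * pattern. Once MBX_BUSY/MBX_SUCCESS is returned the SLI layer owns
 * the mailbox and the completion handler is responsible for it;
 * lpfc_sli_def_mbox_cmpl() is the stock handler that frees it.
 */
#if 0
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
	mempool_free(mboxq, phba->mbox_mem_pool);
#endif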
8780 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8781 * @phba: Pointer to HBA context object.
8783 * This function is called by the worker thread to send a mailbox command to
8784 * SLI4 HBA firmware.
8788 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8790 struct lpfc_sli *psli = &phba->sli;
8791 LPFC_MBOXQ_t *mboxq;
8792 int rc = MBX_SUCCESS;
8793 unsigned long iflags;
8794 struct lpfc_mqe *mqe;
8797 /* Check interrupt mode before posting an async mailbox command */
8798 if (unlikely(!phba->sli4_hba.intr_enable))
8799 return MBX_NOT_FINISHED;
8801 /* Check for mailbox command service token */
8802 spin_lock_irqsave(&phba->hbalock, iflags);
8803 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8804 spin_unlock_irqrestore(&phba->hbalock, iflags);
8805 return MBX_NOT_FINISHED;
8807 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8808 spin_unlock_irqrestore(&phba->hbalock, iflags);
8809 return MBX_NOT_FINISHED;
8811 if (unlikely(phba->sli.mbox_active)) {
8812 spin_unlock_irqrestore(&phba->hbalock, iflags);
8813 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8814 "0384 There is pending active mailbox cmd\n");
8815 return MBX_NOT_FINISHED;
8817 /* Take the mailbox command service token */
8818 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8820 /* Get the next mailbox command from head of queue */
8821 mboxq = lpfc_mbox_get(phba);
8823 /* If no more mailbox commands are waiting to be posted, we're done */
8825 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8826 spin_unlock_irqrestore(&phba->hbalock, iflags);
8829 phba->sli.mbox_active = mboxq;
8830 spin_unlock_irqrestore(&phba->hbalock, iflags);
8832 /* Check device readiness for posting mailbox command */
8833 rc = lpfc_mbox_dev_check(phba);
8835 /* Driver clean routine will clean up pending mailbox */
8836 goto out_not_finished;
8838 /* Prepare the mbox command to be posted */
8839 mqe = &mboxq->u.mqe;
8840 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8842 /* Start timer for the mbox_tmo and log some mailbox post messages */
8843 mod_timer(&psli->mbox_tmo, (jiffies +
8844 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8846 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8847 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8849 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8850 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8851 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8852 phba->pport->port_state, psli->sli_flag);
8854 if (mbx_cmnd != MBX_HEARTBEAT) {
8856 lpfc_debugfs_disc_trc(mboxq->vport,
8857 LPFC_DISC_TRC_MBOX_VPORT,
8858 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8859 mbx_cmnd, mqe->un.mb_words[0],
8860 mqe->un.mb_words[1]);
8862 lpfc_debugfs_disc_trc(phba->pport,
8864 "MBOX Send: cmd:x%x mb:x%x x%x",
8865 mbx_cmnd, mqe->un.mb_words[0],
8866 mqe->un.mb_words[1]);
8869 psli->slistat.mbox_cmd++;
8871 /* Post the mailbox command to the port */
8872 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8873 if (rc != MBX_SUCCESS) {
8874 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8875 "(%d):2533 Mailbox command x%x (x%x/x%x) "
8876 "cannot issue Data: x%x x%x\n",
8877 mboxq->vport ? mboxq->vport->vpi : 0,
8878 mboxq->u.mb.mbxCommand,
8879 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8880 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8881 psli->sli_flag, MBX_NOWAIT);
8882 goto out_not_finished;
8888 spin_lock_irqsave(&phba->hbalock, iflags);
8889 if (phba->sli.mbox_active) {
8890 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8891 __lpfc_mbox_cmpl_put(phba, mboxq);
8892 /* Release the token */
8893 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8894 phba->sli.mbox_active = NULL;
8896 spin_unlock_irqrestore(&phba->hbalock, iflags);
8898 return MBX_NOT_FINISHED;
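/*
 * Note: this routine is driven from the lpfc worker thread (woken via
 * lpfc_worker_wake_up() on the enqueue path above); each invocation
 * posts at most one queued mailbox command to the MQ.
 */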
8902 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8903 * @phba: Pointer to HBA context object.
8904 * @pmbox: Pointer to mailbox object.
8905 * @flag: Flag indicating how the mailbox need to be processed.
8907 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine via
8908 * the API jump table function pointer in the lpfc_hba struct.
8910 * Return codes: the caller owns the mailbox command after the return of the
8911 * function.
8914 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8916 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8920 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8921 * @phba: The hba struct for which this call is being executed.
8922 * @dev_grp: The HBA PCI-Device group number.
8924 * This routine sets up the mbox interface API function jump table in @phba
8925 * struct.
8926 * Returns: 0 - success, -ENODEV - failure.
8929 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8933 case LPFC_PCI_DEV_LP:
8934 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8935 phba->lpfc_sli_handle_slow_ring_event =
8936 lpfc_sli_handle_slow_ring_event_s3;
8937 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8938 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8939 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8941 case LPFC_PCI_DEV_OC:
8942 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8943 phba->lpfc_sli_handle_slow_ring_event =
8944 lpfc_sli_handle_slow_ring_event_s4;
8945 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8946 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8947 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8951 "1420 Invalid HBA PCI-device group: 0x%x\n",
8960 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8961 * @phba: Pointer to HBA context object.
8962 * @pring: Pointer to driver SLI ring object.
8963 * @piocb: Pointer to address of newly added command iocb.
8965 * This function is called with hbalock held to add a command
8966 * iocb to the txq when the SLI layer cannot submit the command iocb
8967 * to the ring.
8970 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8971 struct lpfc_iocbq *piocb)
8973 lockdep_assert_held(&phba->hbalock);
8974 /* Insert the caller's iocb in the txq tail for later processing. */
8975 list_add_tail(&piocb->list, &pring->txq);
8979 * lpfc_sli_next_iocb - Get the next iocb in the txq
8980 * @phba: Pointer to HBA context object.
8981 * @pring: Pointer to driver SLI ring object.
8982 * @piocb: Pointer to address of newly added command iocb.
8984 * This function is called with hbalock held before a new
8985 * iocb is submitted to the firmware. It checks the txq so that
8986 * iocbs waiting there are flushed to the firmware before any
8987 * new iocbs are submitted.
8988 * If there are iocbs in the txq which need to be submitted
8989 * to the firmware, lpfc_sli_next_iocb returns the first element
8990 * of the txq after dequeuing it from the txq.
8991 * If there is no iocb in the txq, the function returns
8992 * *piocb and sets *piocb to NULL. The caller needs to check
8993 * *piocb to find out whether there are more commands in the txq.
8995 static struct lpfc_iocbq *
8996 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8997 struct lpfc_iocbq **piocb)
8999 struct lpfc_iocbq * nextiocb;
9001 lockdep_assert_held(&phba->hbalock);
9003 nextiocb = lpfc_sli_ringtx_get(phba, pring);
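/*
 * Sketch: the drain loop built on this helper (it appears verbatim in
 * __lpfc_sli_issue_iocb_s3() below). Queued iocbs are flushed ahead of
 * the caller's new iocb; *piocb == NULL afterwards means the new iocb
 * itself has been handed out.
 */
#if 0
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
	lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
#endif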
9013 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9014 * @phba: Pointer to HBA context object.
9015 * @ring_number: SLI ring number to issue iocb on.
9016 * @piocb: Pointer to command iocb.
9017 * @flag: Flag indicating if this command can be put into txq.
9019 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9020 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9021 * recovering from an error state, if the HBA is resetting, or if the LPFC_STOP_IOCB_EVENT
9022 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9023 * this function allows only iocbs for posting buffers. This function finds
9024 * next available slot in the command ring and posts the command to the
9025 * available slot and writes the port attention register to request HBA start
9026 * processing new iocb. If there is no slot available in the ring and
9027 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9028 * the function returns IOCB_BUSY.
9030 * This function is called with hbalock held. The function will return success
9031 * after it successfully submits the iocb to the firmware or after adding it
9032 * to the txq.
9035 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9036 struct lpfc_iocbq *piocb, uint32_t flag)
9038 struct lpfc_iocbq *nextiocb;
9040 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9042 lockdep_assert_held(&phba->hbalock);
9044 if (piocb->iocb_cmpl && (!piocb->vport) &&
9045 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9046 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9047 lpfc_printf_log(phba, KERN_ERR,
9048 LOG_SLI | LOG_VPORT,
9049 "1807 IOCB x%x failed. No vport\n",
9050 piocb->iocb.ulpCommand);
9056 /* If the PCI channel is in offline state, do not post iocbs. */
9057 if (unlikely(pci_channel_offline(phba->pcidev)))
9060 /* If HBA has a deferred error attention, fail the iocb. */
9061 if (unlikely(phba->hba_flag & DEFER_ERATT))
9065 * We should never get an IOCB if we are in a < LINK_DOWN state
9067 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9071 * Check to see if we are blocking IOCB processing because of an
9072 * outstanding event.
9074 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9077 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9079 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9080 * can be issued if the link is not up.
9082 switch (piocb->iocb.ulpCommand) {
9083 case CMD_GEN_REQUEST64_CR:
9084 case CMD_GEN_REQUEST64_CX:
9085 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9086 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9087 FC_RCTL_DD_UNSOL_CMD) ||
9088 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9089 MENLO_TRANSPORT_TYPE))
9093 case CMD_QUE_RING_BUF_CN:
9094 case CMD_QUE_RING_BUF64_CN:
9096 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9097 * completion, iocb_cmpl MUST be 0.
9099 if (piocb->iocb_cmpl)
9100 piocb->iocb_cmpl = NULL;
9102 case CMD_CREATE_XRI_CR:
9103 case CMD_CLOSE_XRI_CN:
9104 case CMD_CLOSE_XRI_CX:
9111 * For FCP commands, we must be in a state where we can process link
9112 * attention events.
9114 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9115 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9119 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9120 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9121 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9124 lpfc_sli_update_ring(phba, pring);
9126 lpfc_sli_update_full_ring(phba, pring);
9129 return IOCB_SUCCESS;
9134 pring->stats.iocb_cmd_delay++;
9138 if (!(flag & SLI_IOCB_RET_IOCB)) {
9139 __lpfc_sli_ringtx_put(phba, pring, piocb);
9140 return IOCB_SUCCESS;
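/*
 * Illustrative sketch (not compiled): SLI_IOCB_RET_IOCB tells the
 * routine to hand a busy iocb back to the caller instead of parking
 * it on the txq.
 */
#if 0
rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
			      SLI_IOCB_RET_IOCB);
if (rc == IOCB_BUSY) {
	/* ring full: the caller still owns piocb and may retry */
}
#endif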
9147 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9148 * @phba: Pointer to HBA context object.
9149 * @piocb: Pointer to command iocb.
9150 * @sglq: Pointer to the scatter gather queue object.
9152 * This routine converts the bpl or bde that is in the IOCB
9153 * to a sgl list for the sli4 hardware. The physical address
9154 * of the bpl/bde is converted back to a virtual address.
9155 * If the IOCB contains a BPL then the list of BDE's is
9156 * converted to sli4_sge's. If the IOCB contains a single
9157 * BDE then it is converted to a single sli4_sge.
9158 * The IOCB is still in cpu endianness so the contents of
9159 * the bpl can be used without byte swapping.
9161 * Returns valid XRI = Success, NO_XRI = Failure.
9164 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9165 struct lpfc_sglq *sglq)
9167 uint16_t xritag = NO_XRI;
9168 struct ulp_bde64 *bpl = NULL;
9169 struct ulp_bde64 bde;
9170 struct sli4_sge *sgl = NULL;
9171 struct lpfc_dmabuf *dmabuf;
9175 uint32_t offset = 0; /* accumulated offset in the sg request list */
9176 int inbound = 0; /* number of sg reply entries inbound from firmware */
9178 if (!piocbq || !sglq)
9181 sgl = (struct sli4_sge *)sglq->sgl;
9182 icmd = &piocbq->iocb;
9183 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9184 return sglq->sli4_xritag;
9185 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9186 numBdes = icmd->un.genreq64.bdl.bdeSize /
9187 sizeof(struct ulp_bde64);
9188 /* The addrHigh and addrLow fields within the IOCB
9189 * have not been byteswapped yet so there is no
9190 * need to swap them back.
9192 if (piocbq->context3)
9193 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9197 bpl = (struct ulp_bde64 *)dmabuf->virt;
9201 for (i = 0; i < numBdes; i++) {
9202 /* Should already be byte swapped. */
9203 sgl->addr_hi = bpl->addrHigh;
9204 sgl->addr_lo = bpl->addrLow;
9206 sgl->word2 = le32_to_cpu(sgl->word2);
9207 if ((i+1) == numBdes)
9208 bf_set(lpfc_sli4_sge_last, sgl, 1);
9210 bf_set(lpfc_sli4_sge_last, sgl, 0);
9211 /* swap the size field back to the cpu so we
9212 * can assign it to the sgl.
9214 bde.tus.w = le32_to_cpu(bpl->tus.w);
9215 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9216 /* The offsets in the sgl need to be accumulated
9217 * separately for the request and reply lists.
9218 * The request is always first, the reply follows.
9220 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9221 /* add up the reply sg entries */
9222 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9224 /* first inbound? reset the offset */
9227 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9228 bf_set(lpfc_sli4_sge_type, sgl,
9229 LPFC_SGE_TYPE_DATA);
9230 offset += bde.tus.f.bdeSize;
9232 sgl->word2 = cpu_to_le32(sgl->word2);
9236 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9237 /* The addrHigh and addrLow fields of the BDE have not
9238 * been byteswapped yet so they need to be swapped
9239 * before putting them in the sgl.
9242 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9244 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9245 sgl->word2 = le32_to_cpu(sgl->word2);
9246 bf_set(lpfc_sli4_sge_last, sgl, 1);
9247 sgl->word2 = cpu_to_le32(sgl->word2);
9249 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9251 return sglq->sli4_xritag;
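/*
 * Worked example (sketch) of the offset bookkeeping above: a
 * GEN_REQUEST64 BPL with BDEs of 64, 64 and 2048 bytes, where the last
 * BDE is inbound (BUFF_TYPE_BDE_64I), yields request SGEs at offsets 0
 * and 64; the first inbound entry resets the accumulated offset to 0;
 * only the final SGE carries the "last" bit.
 */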
9255 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9256 * @phba: Pointer to HBA context object.
9257 * @piocb: Pointer to command iocb.
9258 * @wqe: Pointer to the work queue entry.
9260 * This routine converts the iocb command to its Work Queue Entry
9261 * equivalent. The wqe pointer should not have any fields set when
9262 * this routine is called because it will memcpy over them.
9263 * This routine does not set the CQ_ID or the WQEC bits in the
9264 * wqe.
9266 * Returns: 0 = Success, IOCB_ERROR = Failure.
9269 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9270 union lpfc_wqe128 *wqe)
9272 uint32_t xmit_len = 0, total_len = 0;
9276 uint8_t command_type = ELS_COMMAND_NON_FIP;
9279 uint16_t abrt_iotag;
9280 struct lpfc_iocbq *abrtiocbq;
9281 struct ulp_bde64 *bpl = NULL;
9282 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9284 struct ulp_bde64 bde;
9285 struct lpfc_nodelist *ndlp;
9289 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9290 /* The fcp commands will set command type */
9291 if (iocbq->iocb_flag & LPFC_IO_FCP)
9292 command_type = FCP_COMMAND;
9293 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9294 command_type = ELS_COMMAND_FIP;
9296 command_type = ELS_COMMAND_NON_FIP;
9298 if (phba->fcp_embed_io)
9299 memset(wqe, 0, sizeof(union lpfc_wqe128));
9300 /* Some of the fields are in the right position already */
9301 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9302 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9303 /* The ct field has moved so reset */
9304 wqe->generic.wqe_com.word7 = 0;
9305 wqe->generic.wqe_com.word10 = 0;
9308 abort_tag = (uint32_t) iocbq->iotag;
9309 xritag = iocbq->sli4_xritag;
9310 /* words0-2 bpl convert bde */
9311 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9312 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9313 sizeof(struct ulp_bde64);
9314 bpl = (struct ulp_bde64 *)
9315 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9319 /* Should already be byte swapped. */
9320 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9321 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9322 /* swap the size field back to the cpu so we
9323 * can assign it to the sgl.
9325 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9326 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9328 for (i = 0; i < numBdes; i++) {
9329 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9330 total_len += bde.tus.f.bdeSize;
9333 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9335 iocbq->iocb.ulpIoTag = iocbq->iotag;
9336 cmnd = iocbq->iocb.ulpCommand;
9338 switch (iocbq->iocb.ulpCommand) {
9339 case CMD_ELS_REQUEST64_CR:
9340 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9341 ndlp = iocbq->context_un.ndlp;
9343 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9344 if (!iocbq->iocb.ulpLe) {
9345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9346 "2007 Only Limited Edition cmd Format"
9347 " supported 0x%x\n",
9348 iocbq->iocb.ulpCommand);
9352 wqe->els_req.payload_len = xmit_len;
9353 /* Els_request64 has a TMO */
9354 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9355 iocbq->iocb.ulpTimeout);
9356 /* Need a VF for word 4; set the vf bit */
9357 bf_set(els_req64_vf, &wqe->els_req, 0);
9358 /* And a VFID for word 12 */
9359 bf_set(els_req64_vfid, &wqe->els_req, 0);
9360 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9361 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9362 iocbq->iocb.ulpContext);
9363 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9364 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9365 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9366 if (command_type == ELS_COMMAND_FIP)
9367 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9368 >> LPFC_FIP_ELS_ID_SHIFT);
9369 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9370 iocbq->context2)->virt);
9371 if_type = bf_get(lpfc_sli_intf_if_type,
9372 &phba->sli4_hba.sli_intf);
9373 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9374 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9375 *pcmd == ELS_CMD_SCR ||
9376 *pcmd == ELS_CMD_FDISC ||
9377 *pcmd == ELS_CMD_LOGO ||
9378 *pcmd == ELS_CMD_PLOGI)) {
9379 bf_set(els_req64_sp, &wqe->els_req, 1);
9380 bf_set(els_req64_sid, &wqe->els_req,
9381 iocbq->vport->fc_myDID);
9382 if ((*pcmd == ELS_CMD_FLOGI) &&
9383 !(phba->fc_topology ==
9384 LPFC_TOPOLOGY_LOOP))
9385 bf_set(els_req64_sid, &wqe->els_req, 0);
9386 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9387 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9388 phba->vpi_ids[iocbq->vport->vpi]);
9389 } else if (pcmd && iocbq->context1) {
9390 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9391 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9392 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9395 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9396 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9397 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9398 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9399 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9400 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9401 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9402 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9403 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9405 case CMD_XMIT_SEQUENCE64_CX:
9406 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9407 iocbq->iocb.un.ulpWord[3]);
9408 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9409 iocbq->iocb.unsli3.rcvsli3.ox_id);
9410 /* The entire sequence is transmitted for this IOCB */
9411 xmit_len = total_len;
9412 cmnd = CMD_XMIT_SEQUENCE64_CR;
9413 if (phba->link_flag & LS_LOOPBACK_MODE)
9414 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9416 case CMD_XMIT_SEQUENCE64_CR:
9417 /* word3 iocb=io_tag32 wqe=reserved */
9418 wqe->xmit_sequence.rsvd3 = 0;
9419 /* word4 relative_offset memcpy */
9420 /* word5 r_ctl/df_ctl memcpy */
9421 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9422 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9423 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9424 LPFC_WQE_IOD_WRITE);
9425 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9426 LPFC_WQE_LENLOC_WORD12);
9427 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9428 wqe->xmit_sequence.xmit_len = xmit_len;
9429 command_type = OTHER_COMMAND;
9431 case CMD_XMIT_BCAST64_CN:
9432 /* word3 iocb=iotag32 wqe=seq_payload_len */
9433 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9434 /* word4 iocb=rsvd wqe=rsvd */
9435 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9436 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9437 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9438 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9439 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9440 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9441 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9442 LPFC_WQE_LENLOC_WORD3);
9443 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9445 case CMD_FCP_IWRITE64_CR:
9446 command_type = FCP_COMMAND_DATA_OUT;
9447 /* word3 iocb=iotag wqe=payload_offset_len */
9448 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9449 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9450 xmit_len + sizeof(struct fcp_rsp));
9451 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9453 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9454 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9455 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9456 iocbq->iocb.ulpFCP2Rcvy);
9457 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9458 /* Always open the exchange */
9459 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9460 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9461 LPFC_WQE_LENLOC_WORD4);
9462 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9463 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9464 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9465 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9466 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9467 if (iocbq->priority) {
9468 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9469 (iocbq->priority << 1));
9471 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9472 (phba->cfg_XLanePriority << 1));
9475 /* Note, word 10 is already initialized to 0 */
9477 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9478 if (phba->cfg_enable_pbde)
9479 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9481 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9483 if (phba->fcp_embed_io) {
9484 struct lpfc_io_buf *lpfc_cmd;
9485 struct sli4_sge *sgl;
9486 struct fcp_cmnd *fcp_cmnd;
9489 /* 128 byte wqe support here */
9491 lpfc_cmd = iocbq->context1;
9492 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9493 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9495 /* Word 0-2 - FCP_CMND */
9496 wqe->generic.bde.tus.f.bdeFlags =
9497 BUFF_TYPE_BDE_IMMED;
9498 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9499 wqe->generic.bde.addrHigh = 0;
9500 wqe->generic.bde.addrLow = 88; /* Word 22 */
9502 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9503 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9505 /* Word 22-29 FCP CMND Payload */
9506 ptr = &wqe->words[22];
9507 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9510 case CMD_FCP_IREAD64_CR:
9511 /* word3 iocb=iotag wqe=payload_offset_len */
9512 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9513 bf_set(payload_offset_len, &wqe->fcp_iread,
9514 xmit_len + sizeof(struct fcp_rsp));
9515 bf_set(cmd_buff_len, &wqe->fcp_iread,
9517 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9518 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9519 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9520 iocbq->iocb.ulpFCP2Rcvy);
9521 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9522 /* Always open the exchange */
9523 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9524 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9525 LPFC_WQE_LENLOC_WORD4);
9526 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9527 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9528 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9529 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9530 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9531 if (iocbq->priority) {
9532 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9533 (iocbq->priority << 1));
9535 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9536 (phba->cfg_XLanePriority << 1));
9539 /* Note, word 10 is already initialized to 0 */
9541 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9542 if (phba->cfg_enable_pbde)
9543 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9545 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9547 if (phba->fcp_embed_io) {
9548 struct lpfc_io_buf *lpfc_cmd;
9549 struct sli4_sge *sgl;
9550 struct fcp_cmnd *fcp_cmnd;
9553 /* 128 byte wqe support here */
9555 lpfc_cmd = iocbq->context1;
9556 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9557 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9559 /* Word 0-2 - FCP_CMND */
9560 wqe->generic.bde.tus.f.bdeFlags =
9561 BUFF_TYPE_BDE_IMMED;
9562 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9563 wqe->generic.bde.addrHigh = 0;
9564 wqe->generic.bde.addrLow = 88; /* Word 22 */
9566 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9567 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9569 /* Word 22-29 FCP CMND Payload */
9570 ptr = &wqe->words[22];
9571 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9574 case CMD_FCP_ICMND64_CR:
9575 /* word3 iocb=iotag wqe=payload_offset_len */
9576 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9577 bf_set(payload_offset_len, &wqe->fcp_icmd,
9578 xmit_len + sizeof(struct fcp_rsp));
9579 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9581 /* word3 iocb=IO_TAG wqe=reserved */
9582 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9583 /* Always open the exchange */
9584 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9585 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9586 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9587 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9588 LPFC_WQE_LENLOC_NONE);
9589 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9590 iocbq->iocb.ulpFCP2Rcvy);
9591 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9592 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9593 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9594 if (iocbq->priority) {
9595 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9596 (iocbq->priority << 1));
9598 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9599 (phba->cfg_XLanePriority << 1));
9602 /* Note, word 10 is already initialized to 0 */
9604 if (phba->fcp_embed_io) {
9605 struct lpfc_io_buf *lpfc_cmd;
9606 struct sli4_sge *sgl;
9607 struct fcp_cmnd *fcp_cmnd;
9610 /* 128 byte wqe support here */
9612 lpfc_cmd = iocbq->context1;
9613 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9614 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9616 /* Word 0-2 - FCP_CMND */
9617 wqe->generic.bde.tus.f.bdeFlags =
9618 BUFF_TYPE_BDE_IMMED;
9619 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9620 wqe->generic.bde.addrHigh = 0;
9621 wqe->generic.bde.addrLow = 88; /* Word 22 */
9623 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9624 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9626 /* Word 22-29 FCP CMND Payload */
9627 ptr = &wqe->words[22];
9628 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9631 case CMD_GEN_REQUEST64_CR:
9632 /* For this command calculate the xmit length of the
9633 * request bde.
9634 */
9636 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9637 sizeof(struct ulp_bde64);
9638 for (i = 0; i < numBdes; i++) {
9639 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9640 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9642 xmit_len += bde.tus.f.bdeSize;
9644 /* word3 iocb=IO_TAG wqe=request_payload_len */
9645 wqe->gen_req.request_payload_len = xmit_len;
9646 /* word4 iocb=parameter wqe=relative_offset memcpy */
9647 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9648 /* word6 context tag copied in memcpy */
9649 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9650 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9651 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9652 "2015 Invalid CT %x command 0x%x\n",
9653 ct, iocbq->iocb.ulpCommand);
9656 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9657 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9658 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9659 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9660 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9661 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9662 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9663 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9664 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9665 command_type = OTHER_COMMAND;
9667 case CMD_XMIT_ELS_RSP64_CX:
9668 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9669 /* words0-2 BDE memcpy */
9670 /* word3 iocb=iotag32 wqe=response_payload_len */
9671 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9673 wqe->xmit_els_rsp.word4 = 0;
9674 /* word5 iocb=rsvd wqe=did */
9675 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9676 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9678 if_type = bf_get(lpfc_sli_intf_if_type,
9679 &phba->sli4_hba.sli_intf);
9680 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9681 if (iocbq->vport->fc_flag & FC_PT2PT) {
9682 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9683 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9684 iocbq->vport->fc_myDID);
9685 if (iocbq->vport->fc_myDID == Fabric_DID) {
9687 &wqe->xmit_els_rsp.wqe_dest, 0);
9691 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9692 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9693 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9694 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9695 iocbq->iocb.unsli3.rcvsli3.ox_id);
9696 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9697 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9698 phba->vpi_ids[iocbq->vport->vpi]);
9699 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9700 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9701 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9702 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9703 LPFC_WQE_LENLOC_WORD3);
9704 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9705 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9706 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9707 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9708 iocbq->context2)->virt);
9709 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9710 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9711 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9712 iocbq->vport->fc_myDID);
9713 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9714 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9715 phba->vpi_ids[phba->pport->vpi]);
9717 command_type = OTHER_COMMAND;
9719 case CMD_CLOSE_XRI_CN:
9720 case CMD_ABORT_XRI_CN:
9721 case CMD_ABORT_XRI_CX:
9722 /* words 0-2: the memcpy should leave them 0 (reserved) */
9723 /* port will send abts */
9724 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9725 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9726 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9727 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9731 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9733 * The link is down, or the command was ELS_FIP,
9734 * so the fw does not need to send an abts
9735 * on the wire.
9737 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9739 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9740 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9741 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9742 wqe->abort_cmd.rsrvd5 = 0;
9743 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9744 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9745 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9747 * The abort handler will send us CMD_ABORT_XRI_CN or
9748 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9750 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9751 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9752 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9753 LPFC_WQE_LENLOC_NONE);
9754 cmnd = CMD_ABORT_XRI_CX;
9755 command_type = OTHER_COMMAND;
9758 case CMD_XMIT_BLS_RSP64_CX:
9759 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9760 /* As BLS ABTS RSP WQE is very different from other WQEs,
9761 * we re-construct this WQE here based on information in
9762 * iocbq from scratch.
9764 memset(wqe, 0, sizeof(union lpfc_wqe));
9765 /* OX_ID is invariable to who sent ABTS to CT exchange */
9766 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9767 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9768 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9769 LPFC_ABTS_UNSOL_INT) {
9770 /* ABTS sent by initiator to CT exchange, the
9771 * RX_ID field will be filled with the newly
9772 * allocated responder XRI.
9774 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9775 iocbq->sli4_xritag);
9777 /* ABTS sent by responder to CT exchange, the
9778 * RX_ID field will be filled with the responder
9779 * RX_ID from ABTS.
9781 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9782 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9784 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9785 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9788 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9790 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9791 iocbq->iocb.ulpContext);
9792 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9793 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9794 phba->vpi_ids[phba->pport->vpi]);
9795 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9796 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9797 LPFC_WQE_LENLOC_NONE);
9798 /* Overwrite the pre-set command type with OTHER_COMMAND */
9799 command_type = OTHER_COMMAND;
9800 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9801 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9802 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9803 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9804 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9805 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9806 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9810 case CMD_SEND_FRAME:
9811 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9812 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9814 case CMD_XRI_ABORTED_CX:
9815 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9816 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9817 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9818 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9819 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9822 "2014 Invalid command 0x%x\n",
9823 iocbq->iocb.ulpCommand);
9828 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9829 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9830 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9831 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9832 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9833 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9834 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9835 LPFC_IO_DIF_INSERT);
9836 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9837 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9838 wqe->generic.wqe_com.abort_tag = abort_tag;
9839 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9840 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9841 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9842 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9847 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9848 * @phba: Pointer to HBA context object.
9849 * @ring_number: SLI ring number to issue iocb on.
9850 * @piocb: Pointer to command iocb.
9851 * @flag: Flag indicating if this command can be put into txq.
9853 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9854 * an iocb command to an HBA with SLI-4 interface spec.
9856 * This function is called with the ring_lock held. The function will return
9857 * success after it successfully submits the iocb to the firmware or after
9858 * adding it to the txq.
9861 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9862 struct lpfc_iocbq *piocb, uint32_t flag)
9864 struct lpfc_sglq *sglq;
9865 union lpfc_wqe128 wqe;
9866 struct lpfc_queue *wq;
9867 struct lpfc_sli_ring *pring;
9870 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9871 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9872 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9874 wq = phba->sli4_hba.els_wq;
9877 /* Get corresponding ring */
9881 * The WQE can be either 64 or 128 bytes,
9884 lockdep_assert_held(&pring->ring_lock);
9886 if (piocb->sli4_xritag == NO_XRI) {
9887 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9888 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9891 if (!list_empty(&pring->txq)) {
9892 if (!(flag & SLI_IOCB_RET_IOCB)) {
9893 __lpfc_sli_ringtx_put(phba,
9895 return IOCB_SUCCESS;
9900 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9902 if (!(flag & SLI_IOCB_RET_IOCB)) {
9903 __lpfc_sli_ringtx_put(phba,
9906 return IOCB_SUCCESS;
9912 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9913 /* These IOs already have an XRI and a mapped sgl. */
9917 * This is a continuation of a command (CX), so this
9918 * sglq is on the active list
9920 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9926 piocb->sli4_lxritag = sglq->sli4_lxritag;
9927 piocb->sli4_xritag = sglq->sli4_xritag;
9928 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9932 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9935 if (lpfc_sli4_wq_put(wq, &wqe))
9937 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
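/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * does not already hold the ring lock would wrap the lockless issue path
 * like this; lpfc_sli_issue_iocb() below implements exactly this pattern
 * for SLI4:
 *
 *	struct lpfc_sli_ring *pring = lpfc_sli4_calc_ring(phba, piocb);
 *	unsigned long iflags;
 *	int rc;
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, pring->ringno, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */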
9943 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9945 * This routine wraps the actual lockless IOCB issue routine, dispatching
9946 * through the function pointer in the lpfc_hba struct.
9949 * IOCB_ERROR - Error
9950 * IOCB_SUCCESS - Success
9954 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9955 struct lpfc_iocbq *piocb, uint32_t flag)
9957 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9961 * lpfc_sli_api_table_setup - Set up sli api function jump table
9962 * @phba: The hba struct for which this call is being executed.
9963 * @dev_grp: The HBA PCI-Device group number.
9965 * This routine sets up the SLI interface API function jump table in @phba
9966 * struct.
9967 * Returns: 0 - success, -ENODEV - failure.
9970 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9974 case LPFC_PCI_DEV_LP:
9975 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9976 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9978 case LPFC_PCI_DEV_OC:
9979 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9980 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9984 "1419 Invalid HBA PCI-device group: 0x%x\n",
9989 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
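/*
 * Usage sketch (illustrative only): once the jump table is set up at
 * attach time, callers dispatch through the per-device pointers without
 * re-checking the SLI revision, e.g.
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	...
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 */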
9994 * lpfc_sli4_calc_ring - Calculates which ring to use
9995 * @phba: Pointer to HBA context object.
9996 * @piocb: Pointer to command iocb.
9998 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
9999 * hba_wqidx, thus we need to calculate the corresponding ring.
10000 * Since ABORTS must go on the same WQ as the command they are
10001 * aborting, we use the command's hba_wqidx.
10003 struct lpfc_sli_ring *
10004 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10006 struct lpfc_io_buf *lpfc_cmd;
10008 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10009 if (unlikely(!phba->sli4_hba.hdwq))
10012 * for abort iocb hba_wqidx should already
10013 * be setup based on what work queue we used.
10015 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10016 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10017 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10019 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10021 if (unlikely(!phba->sli4_hba.els_wq))
10023 piocb->hba_wqidx = 0;
10024 return phba->sli4_hba.els_wq->pring;
10029 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10030 * @phba: Pointer to HBA context object.
10031 * @pring: Pointer to driver SLI ring object.
10032 * @piocb: Pointer to command iocb.
10033 * @flag: Flag indicating if this command can be put into txq.
10035 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10036 * function. This function gets the hbalock and calls
10037 * __lpfc_sli_issue_iocb function and will return the error returned
10038 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10039 * functions which do not hold hbalock.
10042 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10043 struct lpfc_iocbq *piocb, uint32_t flag)
10045 struct lpfc_sli_ring *pring;
10046 unsigned long iflags;
10049 if (phba->sli_rev == LPFC_SLI_REV4) {
10050 pring = lpfc_sli4_calc_ring(phba, piocb);
10051 if (unlikely(pring == NULL))
10054 spin_lock_irqsave(&pring->ring_lock, iflags);
10055 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10056 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10058 /* For now, SLI2/3 will still use hbalock */
10059 spin_lock_irqsave(&phba->hbalock, iflags);
10060 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10061 spin_unlock_irqrestore(&phba->hbalock, iflags);
10067 * lpfc_extra_ring_setup - Extra ring setup function
10068 * @phba: Pointer to HBA context object.
10070 * This function is called while driver attaches with the
10071 * HBA to setup the extra ring. The extra ring is used
10072 * only when the driver needs to support target mode functionality
10073 * or IP over FC functionality.
10075 * This function is called with no lock held. SLI3 only.
10078 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10080 struct lpfc_sli *psli;
10081 struct lpfc_sli_ring *pring;
10085 /* Adjust cmd/rsp ring iocb entries more evenly */
10087 /* Take some away from the FCP ring */
10088 pring = &psli->sli3_ring[LPFC_FCP_RING];
10089 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10090 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10091 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10092 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10094 /* and give them to the extra ring */
10095 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10097 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10098 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10099 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10100 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10102 /* Setup default profile for this ring */
10103 pring->iotag_max = 4096;
10104 pring->num_mask = 1;
10105 pring->prt[0].profile = 0; /* Mask 0 */
10106 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10107 pring->prt[0].type = phba->cfg_multi_ring_type;
10108 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10112 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10113 * @phba: Pointer to HBA context object.
10114 * @iocbq: Pointer to iocb object.
10116 * The async_event handler calls this routine when it receives
10117 * an ASYNC_STATUS_CN event from the port. The port generates
10118 * this event when an Abort Sequence request to an rport fails
10119 * twice in succession. The abort could be originated by the
10120 * driver or by the port. The ABTS could have been for an ELS
10121 * or FCP IO. The port only generates this event when an ABTS
10122 * fails to complete after one retry.
10125 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10126 struct lpfc_iocbq *iocbq)
10128 struct lpfc_nodelist *ndlp = NULL;
10129 uint16_t rpi = 0, vpi = 0;
10130 struct lpfc_vport *vport = NULL;
10132 /* The rpi in the ulpContext is vport-sensitive. */
10133 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10134 rpi = iocbq->iocb.ulpContext;
10136 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10137 "3092 Port generated ABTS async event "
10138 "on vpi %d rpi %d status 0x%x\n",
10139 vpi, rpi, iocbq->iocb.ulpStatus);
10141 vport = lpfc_find_vport_by_vpid(phba, vpi);
10144 ndlp = lpfc_findnode_rpi(vport, rpi);
10145 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10148 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10149 lpfc_sli_abts_recover_port(vport, ndlp);
10153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10154 "3095 Event Context not found, no "
10155 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10156 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10160 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10161 * @phba: pointer to HBA context object.
10162 * @ndlp: nodelist pointer for the impacted rport.
10163 * @axri: pointer to the wcqe containing the failed exchange.
10165 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10166 * port. The port generates this event when an abort exchange request to an
10167 * rport fails twice in succession with no reply. The abort could be originated
10168 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10171 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10172 struct lpfc_nodelist *ndlp,
10173 struct sli4_wcqe_xri_aborted *axri)
10175 struct lpfc_vport *vport;
10176 uint32_t ext_status = 0;
10178 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10179 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10180 "3115 Node Context not found, driver "
10181 "ignoring abts err event\n");
10185 vport = ndlp->vport;
10186 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10187 "3116 Port generated FCP XRI ABORT event on "
10188 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10189 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10190 bf_get(lpfc_wcqe_xa_xri, axri),
10191 bf_get(lpfc_wcqe_xa_status, axri),
10195 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10196 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10197 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10199 ext_status = axri->parameter & IOERR_PARAM_MASK;
10200 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10201 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10202 lpfc_sli_abts_recover_port(vport, ndlp);
10206 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10207 * @phba: Pointer to HBA context object.
10208 * @pring: Pointer to driver SLI ring object.
10209 * @iocbq: Pointer to iocb object.
10211 * This function is called by the slow ring event handler
10212 * function when there is an ASYNC event iocb in the ring.
10213 * This function is called with no lock held.
10214 * Currently this function handles only temperature related
10215 * ASYNC events. The function decodes the temperature sensor
10216 * event message and posts events for the management applications.
10219 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10220 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10224 struct temp_event temp_event_data;
10225 struct Scsi_Host *shost;
10228 icmd = &iocbq->iocb;
10229 evt_code = icmd->un.asyncstat.evt_code;
10231 switch (evt_code) {
10232 case ASYNC_TEMP_WARN:
10233 case ASYNC_TEMP_SAFE:
10234 temp_event_data.data = (uint32_t) icmd->ulpContext;
10235 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10236 if (evt_code == ASYNC_TEMP_WARN) {
10237 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10238 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10239 "0347 Adapter is very hot, please take "
10240 "corrective action. temperature : %d Celsius\n",
10241 (uint32_t) icmd->ulpContext);
10243 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10244 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10245 "0340 Adapter temperature is OK now. "
10246 "temperature : %d Celsius\n",
10247 (uint32_t) icmd->ulpContext);
10250 /* Send temperature change event to applications */
10251 shost = lpfc_shost_from_vport(phba->pport);
10252 fc_host_post_vendor_event(shost, fc_get_event_number(),
10253 sizeof(temp_event_data), (char *) &temp_event_data,
10254 LPFC_NL_VENDOR_ID);
10256 case ASYNC_STATUS_CN:
10257 lpfc_sli_abts_err_handler(phba, iocbq);
10260 iocb_w = (uint32_t *) icmd;
10261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10262 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10264 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10265 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10266 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10267 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10268 pring->ringno, icmd->un.asyncstat.evt_code,
10269 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10270 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10271 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10272 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10280 * lpfc_sli4_setup - SLI ring setup function
10281 * @phba: Pointer to HBA context object.
10283 * lpfc_sli4_setup sets up rings of the SLI interface with
10284 * number of iocbs per ring and iotags. This function is
10285 * called while the driver attaches to the HBA and before the
10286 * interrupts are enabled. So there is no need for locking.
10288 * This function always returns 0.
10291 lpfc_sli4_setup(struct lpfc_hba *phba)
10293 struct lpfc_sli_ring *pring;
10295 pring = phba->sli4_hba.els_wq->pring;
10296 pring->num_mask = LPFC_MAX_RING_MASK;
10297 pring->prt[0].profile = 0; /* Mask 0 */
10298 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10299 pring->prt[0].type = FC_TYPE_ELS;
10300 pring->prt[0].lpfc_sli_rcv_unsol_event =
10301 lpfc_els_unsol_event;
10302 pring->prt[1].profile = 0; /* Mask 1 */
10303 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10304 pring->prt[1].type = FC_TYPE_ELS;
10305 pring->prt[1].lpfc_sli_rcv_unsol_event =
10306 lpfc_els_unsol_event;
10307 pring->prt[2].profile = 0; /* Mask 2 */
10308 /* NameServer Inquiry */
10309 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10311 pring->prt[2].type = FC_TYPE_CT;
10312 pring->prt[2].lpfc_sli_rcv_unsol_event =
10313 lpfc_ct_unsol_event;
10314 pring->prt[3].profile = 0; /* Mask 3 */
10315 /* NameServer response */
10316 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10318 pring->prt[3].type = FC_TYPE_CT;
10319 pring->prt[3].lpfc_sli_rcv_unsol_event =
10320 lpfc_ct_unsol_event;
10325 * lpfc_sli_setup - SLI ring setup function
10326 * @phba: Pointer to HBA context object.
10328 * lpfc_sli_setup sets up rings of the SLI interface with
10329 * number of iocbs per ring and iotags. This function is
10330 * called while the driver attaches to the HBA and before the
10331 * interrupts are enabled. So there is no need for locking.
10333 * This function always returns 0. SLI3 only.
10336 lpfc_sli_setup(struct lpfc_hba *phba)
10338 int i, totiocbsize = 0;
10339 struct lpfc_sli *psli = &phba->sli;
10340 struct lpfc_sli_ring *pring;
10342 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10343 psli->sli_flag = 0;
10345 psli->iocbq_lookup = NULL;
10346 psli->iocbq_lookup_len = 0;
10347 psli->last_iotag = 0;
10349 for (i = 0; i < psli->num_rings; i++) {
10350 pring = &psli->sli3_ring[i];
10352 case LPFC_FCP_RING: /* ring 0 - FCP */
10353 /* numCiocb and numRiocb are used in config_port */
10354 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10355 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10356 pring->sli.sli3.numCiocb +=
10357 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10358 pring->sli.sli3.numRiocb +=
10359 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10360 pring->sli.sli3.numCiocb +=
10361 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10362 pring->sli.sli3.numRiocb +=
10363 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10364 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10365 SLI3_IOCB_CMD_SIZE :
10366 SLI2_IOCB_CMD_SIZE;
10367 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10368 SLI3_IOCB_RSP_SIZE :
10369 SLI2_IOCB_RSP_SIZE;
10370 pring->iotag_ctr = 0;
10371 pring->iotag_max =
10372 (phba->cfg_hba_queue_depth * 2);
10373 pring->fast_iotag = pring->iotag_max;
10374 pring->num_mask = 0;
10376 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10377 /* numCiocb and numRiocb are used in config_port */
10378 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10379 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10380 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10381 SLI3_IOCB_CMD_SIZE :
10382 SLI2_IOCB_CMD_SIZE;
10383 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10384 SLI3_IOCB_RSP_SIZE :
10385 SLI2_IOCB_RSP_SIZE;
10386 pring->iotag_max = phba->cfg_hba_queue_depth;
10387 pring->num_mask = 0;
10389 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10390 /* numCiocb and numRiocb are used in config_port */
10391 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10392 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10393 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10394 SLI3_IOCB_CMD_SIZE :
10395 SLI2_IOCB_CMD_SIZE;
10396 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10397 SLI3_IOCB_RSP_SIZE :
10398 SLI2_IOCB_RSP_SIZE;
10399 pring->fast_iotag = 0;
10400 pring->iotag_ctr = 0;
10401 pring->iotag_max = 4096;
10402 pring->lpfc_sli_rcv_async_status =
10403 lpfc_sli_async_event_handler;
10404 pring->num_mask = LPFC_MAX_RING_MASK;
10405 pring->prt[0].profile = 0; /* Mask 0 */
10406 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10407 pring->prt[0].type = FC_TYPE_ELS;
10408 pring->prt[0].lpfc_sli_rcv_unsol_event =
10409 lpfc_els_unsol_event;
10410 pring->prt[1].profile = 0; /* Mask 1 */
10411 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10412 pring->prt[1].type = FC_TYPE_ELS;
10413 pring->prt[1].lpfc_sli_rcv_unsol_event =
10414 lpfc_els_unsol_event;
10415 pring->prt[2].profile = 0; /* Mask 2 */
10416 /* NameServer Inquiry */
10417 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10419 pring->prt[2].type = FC_TYPE_CT;
10420 pring->prt[2].lpfc_sli_rcv_unsol_event =
10421 lpfc_ct_unsol_event;
10422 pring->prt[3].profile = 0; /* Mask 3 */
10423 /* NameServer response */
10424 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10426 pring->prt[3].type = FC_TYPE_CT;
10427 pring->prt[3].lpfc_sli_rcv_unsol_event =
10428 lpfc_ct_unsol_event;
10431 totiocbsize += (pring->sli.sli3.numCiocb *
10432 pring->sli.sli3.sizeCiocb) +
10433 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10435 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10436 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10437 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10438 "SLI2 SLIM Data: x%x x%lx\n",
10439 phba->brd_no, totiocbsize,
10440 (unsigned long) MAX_SLIM_IOCB_SIZE);
10442 if (phba->cfg_multi_ring_support == 2)
10443 lpfc_extra_ring_setup(phba);
10449 * lpfc_sli4_queue_init - Queue initialization function
10450 * @phba: Pointer to HBA context object.
10452 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10453 * ring. This function also initializes ring indices of each ring.
10454 * This function is called during the initialization of the SLI
10455 * interface of an HBA.
10456 * This function is called with no lock held and always returns
10460 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10462 struct lpfc_sli *psli;
10463 struct lpfc_sli_ring *pring;
10467 spin_lock_irq(&phba->hbalock);
10468 INIT_LIST_HEAD(&psli->mboxq);
10469 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10470 /* Initialize list headers for txq and txcmplq as double linked lists */
10471 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10472 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10474 pring->ringno = LPFC_FCP_RING;
10475 pring->txcmplq_cnt = 0;
10476 INIT_LIST_HEAD(&pring->txq);
10477 INIT_LIST_HEAD(&pring->txcmplq);
10478 INIT_LIST_HEAD(&pring->iocb_continueq);
10479 spin_lock_init(&pring->ring_lock);
10481 pring = phba->sli4_hba.els_wq->pring;
10483 pring->ringno = LPFC_ELS_RING;
10484 pring->txcmplq_cnt = 0;
10485 INIT_LIST_HEAD(&pring->txq);
10486 INIT_LIST_HEAD(&pring->txcmplq);
10487 INIT_LIST_HEAD(&pring->iocb_continueq);
10488 spin_lock_init(&pring->ring_lock);
10490 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10491 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10492 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10494 pring->ringno = LPFC_FCP_RING;
10495 pring->txcmplq_cnt = 0;
10496 INIT_LIST_HEAD(&pring->txq);
10497 INIT_LIST_HEAD(&pring->txcmplq);
10498 INIT_LIST_HEAD(&pring->iocb_continueq);
10499 spin_lock_init(&pring->ring_lock);
10501 pring = phba->sli4_hba.nvmels_wq->pring;
10503 pring->ringno = LPFC_ELS_RING;
10504 pring->txcmplq_cnt = 0;
10505 INIT_LIST_HEAD(&pring->txq);
10506 INIT_LIST_HEAD(&pring->txcmplq);
10507 INIT_LIST_HEAD(&pring->iocb_continueq);
10508 spin_lock_init(&pring->ring_lock);
10511 spin_unlock_irq(&phba->hbalock);
10515 * lpfc_sli_queue_init - Queue initialization function
10516 * @phba: Pointer to HBA context object.
10518 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10519 * ring. This function also initializes ring indices of each ring.
10520 * This function is called during the initialization of the SLI
10521 * interface of an HBA.
10522 * This function is called with no lock held and always returns
10526 lpfc_sli_queue_init(struct lpfc_hba *phba)
10528 struct lpfc_sli *psli;
10529 struct lpfc_sli_ring *pring;
10533 spin_lock_irq(&phba->hbalock);
10534 INIT_LIST_HEAD(&psli->mboxq);
10535 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10536 /* Initialize list headers for txq and txcmplq as double linked lists */
10537 for (i = 0; i < psli->num_rings; i++) {
10538 pring = &psli->sli3_ring[i];
10540 pring->sli.sli3.next_cmdidx = 0;
10541 pring->sli.sli3.local_getidx = 0;
10542 pring->sli.sli3.cmdidx = 0;
10543 INIT_LIST_HEAD(&pring->iocb_continueq);
10544 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10545 INIT_LIST_HEAD(&pring->postbufq);
10547 INIT_LIST_HEAD(&pring->txq);
10548 INIT_LIST_HEAD(&pring->txcmplq);
10549 spin_lock_init(&pring->ring_lock);
10551 spin_unlock_irq(&phba->hbalock);
10555 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10556 * @phba: Pointer to HBA context object.
10558 * This routine flushes the mailbox command subsystem. It will unconditionally
10559 * flush all the mailbox commands in the three possible stages in the mailbox
10560 * command sub-system: pending mailbox command queue; the outstanding mailbox
10561 * command; and completed mailbox command queue. It is the caller's responsibility
10562 * to make sure that the driver is in the proper state to flush the mailbox
10563 * command sub-system. Namely, the posting of mailbox commands into the
10564 * pending mailbox command queue from the various clients must be stopped;
10565 * either the HBA is in a state that it will never work on the outstanding
10566 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10567 * mailbox command has been completed.
10570 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10572 LIST_HEAD(completions);
10573 struct lpfc_sli *psli = &phba->sli;
10575 unsigned long iflag;
10577 /* Disable softirqs, including timers from obtaining phba->hbalock */
10578 local_bh_disable();
10580 /* Flush all the mailbox commands in the mbox system */
10581 spin_lock_irqsave(&phba->hbalock, iflag);
10583 /* The pending mailbox command queue */
10584 list_splice_init(&phba->sli.mboxq, &completions);
10585 /* The outstanding active mailbox command */
10586 if (psli->mbox_active) {
10587 list_add_tail(&psli->mbox_active->list, &completions);
10588 psli->mbox_active = NULL;
10589 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10591 /* The completed mailbox command queue */
10592 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10593 spin_unlock_irqrestore(&phba->hbalock, iflag);
10595 /* Enable softirqs again, done with phba->hbalock */
10598 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10599 while (!list_empty(&completions)) {
10600 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10601 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10602 if (pmb->mbox_cmpl)
10603 pmb->mbox_cmpl(phba, pmb);
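/*
 * Usage sketch (illustrative only): an error-recovery path would first
 * stop new mailbox posts and then flush; LPFC_SLI_ASYNC_MBX_BLK is the
 * flag the driver uses elsewhere to block async mailbox posting:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_mbox_sys_flush(phba);
 */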
10608 * lpfc_sli_host_down - Vport cleanup function
10609 * @vport: Pointer to virtual port object.
10611 * lpfc_sli_host_down is called to clean up the resources
10612 * associated with a vport before destroying virtual
10613 * port data structures.
10614 * This function does the following operations:
10615 * - Free discovery resources associated with this virtual
10616 *   port.
10617 * - Free iocbs associated with this virtual port in
10618 *   the txq.
10619 * - Send abort for all iocb commands associated with this
10620 *   vport in txcmplq.
10622 * This function is called with no lock held and always returns 1.
10625 lpfc_sli_host_down(struct lpfc_vport *vport)
10627 LIST_HEAD(completions);
10628 struct lpfc_hba *phba = vport->phba;
10629 struct lpfc_sli *psli = &phba->sli;
10630 struct lpfc_queue *qp = NULL;
10631 struct lpfc_sli_ring *pring;
10632 struct lpfc_iocbq *iocb, *next_iocb;
10634 unsigned long flags = 0;
10635 uint16_t prev_pring_flag;
10637 lpfc_cleanup_discovery_resources(vport);
10639 spin_lock_irqsave(&phba->hbalock, flags);
10642 * Error everything on the txq since these iocbs
10643 * have not been given to the FW yet.
10644 * Also issue ABTS for everything on the txcmplq
10646 if (phba->sli_rev != LPFC_SLI_REV4) {
10647 for (i = 0; i < psli->num_rings; i++) {
10648 pring = &psli->sli3_ring[i];
10649 prev_pring_flag = pring->flag;
10650 /* Only slow rings */
10651 if (pring->ringno == LPFC_ELS_RING) {
10652 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10653 /* Set the lpfc data pending flag */
10654 set_bit(LPFC_DATA_READY, &phba->data_flags);
10656 list_for_each_entry_safe(iocb, next_iocb,
10657 &pring->txq, list) {
10658 if (iocb->vport != vport)
10660 list_move_tail(&iocb->list, &completions);
10662 list_for_each_entry_safe(iocb, next_iocb,
10663 &pring->txcmplq, list) {
10664 if (iocb->vport != vport)
10666 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10668 pring->flag = prev_pring_flag;
10671 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10675 if (pring == phba->sli4_hba.els_wq->pring) {
10676 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10677 /* Set the lpfc data pending flag */
10678 set_bit(LPFC_DATA_READY, &phba->data_flags);
10680 prev_pring_flag = pring->flag;
10681 spin_lock_irq(&pring->ring_lock);
10682 list_for_each_entry_safe(iocb, next_iocb,
10683 &pring->txq, list) {
10684 if (iocb->vport != vport)
10686 list_move_tail(&iocb->list, &completions);
10688 spin_unlock_irq(&pring->ring_lock);
10689 list_for_each_entry_safe(iocb, next_iocb,
10690 &pring->txcmplq, list) {
10691 if (iocb->vport != vport)
10693 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10695 pring->flag = prev_pring_flag;
10698 spin_unlock_irqrestore(&phba->hbalock, flags);
10700 /* Cancel all the IOCBs from the completions list */
10701 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10707 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10708 * @phba: Pointer to HBA context object.
10710 * This function cleans up all iocbs, buffers, and mailbox commands
10711 * while shutting down the HBA. This function is called with no
10712 * lock held and always returns 1.
10713 * This function does the following to cleanup driver resources:
10714 * - Free discovery resources for each virtual port
10715 * - Cleanup any pending fabric iocbs
10716 * - Iterate through the iocb txq and free each entry
10718 * - Free up any buffer posted to the HBA
10719 * - Free mailbox commands in the mailbox queue.
10722 lpfc_sli_hba_down(struct lpfc_hba *phba)
10724 LIST_HEAD(completions);
10725 struct lpfc_sli *psli = &phba->sli;
10726 struct lpfc_queue *qp = NULL;
10727 struct lpfc_sli_ring *pring;
10728 struct lpfc_dmabuf *buf_ptr;
10729 unsigned long flags = 0;
10732 /* Shutdown the mailbox command sub-system */
10733 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10735 lpfc_hba_down_prep(phba);
10737 /* Disable softirqs, including timers from obtaining phba->hbalock */
10738 local_bh_disable();
10740 lpfc_fabric_abort_hba(phba);
10742 spin_lock_irqsave(&phba->hbalock, flags);
10745 * Error everything on the txq since these iocbs
10746 * have not been given to the FW yet.
10748 if (phba->sli_rev != LPFC_SLI_REV4) {
10749 for (i = 0; i < psli->num_rings; i++) {
10750 pring = &psli->sli3_ring[i];
10751 /* Only slow rings */
10752 if (pring->ringno == LPFC_ELS_RING) {
10753 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10754 /* Set the lpfc data pending flag */
10755 set_bit(LPFC_DATA_READY, &phba->data_flags);
10757 list_splice_init(&pring->txq, &completions);
10760 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10764 spin_lock_irq(&pring->ring_lock);
10765 list_splice_init(&pring->txq, &completions);
10766 spin_unlock_irq(&pring->ring_lock);
10767 if (pring == phba->sli4_hba.els_wq->pring) {
10768 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10769 /* Set the lpfc data pending flag */
10770 set_bit(LPFC_DATA_READY, &phba->data_flags);
10774 spin_unlock_irqrestore(&phba->hbalock, flags);
10776 /* Cancel all the IOCBs from the completions list */
10777 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10780 spin_lock_irqsave(&phba->hbalock, flags);
10781 list_splice_init(&phba->elsbuf, &completions);
10782 phba->elsbuf_cnt = 0;
10783 phba->elsbuf_prev_cnt = 0;
10784 spin_unlock_irqrestore(&phba->hbalock, flags);
10786 while (!list_empty(&completions)) {
10787 list_remove_head(&completions, buf_ptr,
10788 struct lpfc_dmabuf, list);
10789 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10793 /* Enable softirqs again, done with phba->hbalock */
10796 /* Return any active mbox cmds */
10797 del_timer_sync(&psli->mbox_tmo);
10799 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10800 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10801 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10807 * lpfc_sli_pcimem_bcopy - SLI memory copy function
10808 * @srcp: Source memory pointer.
10809 * @destp: Destination memory pointer.
10810 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
10812 * This function is used for copying data between driver memory
10813 * and the SLI memory. This function also changes the endianness
10814 * of each word if native endianness is different from SLI
10815 * endianness. This function can be called with or without a lock.
10819 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10821 uint32_t *src = srcp;
10822 uint32_t *dest = destp;
10826 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10827 ldata = *src;
10828 ldata = le32_to_cpu(ldata);
10829 *dest = ldata;
10830 src++;
10831 dest++;
10832 }
10833 }
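/*
 * Usage sketch (illustrative only; 'host_mb' is a hypothetical buffer):
 * copy a little-endian SLI region into a host-order buffer one 32-bit
 * word at a time. Note the count is in bytes:
 *
 *	uint32_t host_mb[MAILBOX_CMD_SIZE / sizeof(uint32_t)];
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, host_mb, MAILBOX_CMD_SIZE);
 */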
10837 * lpfc_sli_bemem_bcopy - SLI memory copy function
10838 * @srcp: Source memory pointer.
10839 * @destp: Destination memory pointer.
10840 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint32_t).
10842 * This function is used for copying data from a data structure
10843 * with big endian representation to local (host) endianness.
10844 * This function can be called with or without a lock.
10847 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10849 uint32_t *src = srcp;
10850 uint32_t *dest = destp;
10854 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10855 ldata = *src;
10856 ldata = be32_to_cpu(ldata);
10857 *dest = ldata;
10858 src++;
10859 dest++;
10860 }
10861 }
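/*
 * Usage sketch (illustrative only; 'be_rec' and 'host_rec' are
 * hypothetical buffers): convert an object the port produced in
 * big-endian layout, such as a read FCF record, into host endianness:
 *
 *	lpfc_sli_bemem_bcopy(be_rec, host_rec, sizeof(struct fcf_record));
 */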
10864 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10865 * @phba: Pointer to HBA context object.
10866 * @pring: Pointer to driver SLI ring object.
10867 * @mp: Pointer to driver buffer object.
10869 * This function is called with no lock held.
10870 * It always returns zero after adding the buffer to the postbufq
10874 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10875 struct lpfc_dmabuf *mp)
10877 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10878 later */
10879 spin_lock_irq(&phba->hbalock);
10880 list_add_tail(&mp->list, &pring->postbufq);
10881 pring->postbufq_cnt++;
10882 spin_unlock_irq(&phba->hbalock);
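/*
 * Usage sketch (illustrative only): the unsolicited-buffer flow pairs this
 * routine with lpfc_sli_ringpostbuf_get(): post the DMA-mapped buffer
 * before issuing the iocb, then look it up by its DMA address ('phys',
 * taken from the response iocb) on completion:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 */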
10887 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10888 * @phba: Pointer to HBA context object.
10890 * When HBQ is enabled, buffers are searched based on tags. This function
10891 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10892 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10893 * does not conflict with tags of buffers posted for unsolicited events.
10894 * The function returns the allocated tag. The function is called with
10895 * the hbalock held.
10898 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10900 spin_lock_irq(&phba->hbalock);
10901 phba->buffer_tag_count++;
10903 * Always set the QUE_BUFTAG_BIT to distinguish this tag
10904 * from a tag assigned by HBQ.
10906 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10907 spin_unlock_irq(&phba->hbalock);
10908 return phba->buffer_tag_count;
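/*
 * Usage sketch (illustrative only; 'tag_from_rsp' is hypothetical): a
 * buffer posted with CMD_QUE_XRI64_CX carries a tag with QUE_BUFTAG_BIT
 * set, which lpfc_sli_ring_taggedbuf_get() below matches on:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	... post mp with a CMD_QUE_XRI64_CX iocb ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_rsp);
 */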
10912 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10913 * @phba: Pointer to HBA context object.
10914 * @pring: Pointer to driver SLI ring object.
10915 * @tag: Buffer tag.
10917 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10918 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10919 * iocb is posted to the response ring with the tag of the buffer.
10920 * This function searches the pring->postbufq list using the tag
10921 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10922 * iocb. If the buffer is found then lpfc_dmabuf object of the
10923 * buffer is returned to the caller else NULL is returned.
10924 * This function is called with no lock held.
10926 struct lpfc_dmabuf *
10927 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10930 struct lpfc_dmabuf *mp, *next_mp;
10931 struct list_head *slp = &pring->postbufq;
10933 /* Search postbufq, from the beginning, looking for a match on tag */
10934 spin_lock_irq(&phba->hbalock);
10935 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10936 if (mp->buffer_tag == tag) {
10937 list_del_init(&mp->list);
10938 pring->postbufq_cnt--;
10939 spin_unlock_irq(&phba->hbalock);
10944 spin_unlock_irq(&phba->hbalock);
10945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10946 "0402 Cannot find virtual addr for buffer tag on "
10947 "ring %d Data x%lx x%p x%p x%x\n",
10948 pring->ringno, (unsigned long) tag,
10949 slp->next, slp->prev, pring->postbufq_cnt);
10955 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10956 * @phba: Pointer to HBA context object.
10957 * @pring: Pointer to driver SLI ring object.
10958 * @phys: DMA address of the buffer.
10960 * This function searches the buffer list using the dma_address
10961 * of unsolicited event to find the driver's lpfc_dmabuf object
10962 * corresponding to the dma_address. The function returns the
10963 * lpfc_dmabuf object if a buffer is found else it returns NULL.
10964 * This function is called by the ct and els unsolicited event
10965 * handlers to get the buffer associated with the unsolicited
10966 * event.
10968 * This function is called with no lock held.
10970 struct lpfc_dmabuf *
10971 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10974 struct lpfc_dmabuf *mp, *next_mp;
10975 struct list_head *slp = &pring->postbufq;
10977 /* Search postbufq, from the beginning, looking for a match on phys */
10978 spin_lock_irq(&phba->hbalock);
10979 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10980 if (mp->phys == phys) {
10981 list_del_init(&mp->list);
10982 pring->postbufq_cnt--;
10983 spin_unlock_irq(&phba->hbalock);
10988 spin_unlock_irq(&phba->hbalock);
10989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10990 "0410 Cannot find virtual addr for mapped buf on "
10991 "ring %d Data x%llx x%p x%p x%x\n",
10992 pring->ringno, (unsigned long long)phys,
10993 slp->next, slp->prev, pring->postbufq_cnt);
10998 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10999 * @phba: Pointer to HBA context object.
11000 * @cmdiocb: Pointer to driver command iocb object.
11001 * @rspiocb: Pointer to driver response iocb object.
11003 * This function is the completion handler for the abort iocbs for
11004 * ELS commands. This function is called from the ELS ring event
11005 * handler with no lock held. This function frees memory resources
11006 * associated with the abort iocb.
11009 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11010 struct lpfc_iocbq *rspiocb)
11012 IOCB_t *irsp = &rspiocb->iocb;
11013 uint16_t abort_iotag, abort_context;
11014 struct lpfc_iocbq *abort_iocb = NULL;
11016 if (irsp->ulpStatus) {
11019 * Assume that the port already completed and returned, or
11020 * will return the iocb. Just log the message.
11022 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11023 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11025 spin_lock_irq(&phba->hbalock);
11026 if (phba->sli_rev < LPFC_SLI_REV4) {
11027 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11028 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11029 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11030 spin_unlock_irq(&phba->hbalock);
11033 if (abort_iotag != 0 &&
11034 abort_iotag <= phba->sli.last_iotag)
11035 abort_iocb =
11036 phba->sli.iocbq_lookup[abort_iotag];
11038 /* For sli4 the abort_tag is the XRI,
11039 * so the abort routine puts the iotag of the iocb
11040 * being aborted in the context field of the abort
11041 * IOCB.
11043 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11045 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11046 "0327 Cannot abort els iocb %p "
11047 "with tag %x context %x, abort status %x, "
11049 abort_iocb, abort_iotag, abort_context,
11050 irsp->ulpStatus, irsp->un.ulpWord[4]);
11052 spin_unlock_irq(&phba->hbalock);
11055 lpfc_sli_release_iocbq(phba, cmdiocb);
11060 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11061 * @phba: Pointer to HBA context object.
11062 * @cmdiocb: Pointer to driver command iocb object.
11063 * @rspiocb: Pointer to driver response iocb object.
11065 * The function is called from SLI ring event handler with no
11066 * lock held. This function is the completion handler for ELS commands
11067 * which are aborted. The function frees memory resources used for
11068 * the aborted ELS commands.
11071 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11072 struct lpfc_iocbq *rspiocb)
11074 IOCB_t *irsp = &rspiocb->iocb;
11076 /* ELS cmd tag <ulpIoTag> completes */
11077 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11078 "0139 Ignoring ELS cmd tag x%x completion Data: "
11080 irsp->ulpIoTag, irsp->ulpStatus,
11081 irsp->un.ulpWord[4], irsp->ulpTimeout);
11082 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11083 lpfc_ct_free_iocb(phba, cmdiocb);
11085 lpfc_els_free_iocb(phba, cmdiocb);
11090 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11091 * @phba: Pointer to HBA context object.
11092 * @pring: Pointer to driver SLI ring object.
11093 * @cmdiocb: Pointer to driver command iocb object.
11095 * This function issues an abort iocb for the provided command iocb down to
11096 * the port. Unless the outstanding command iocb is itself an abort
11097 * request, this function issues the abort unconditionally. This function is
11098 * called with hbalock held. The function returns 0 when it fails due to
11099 * memory allocation failure or when the command iocb is an abort request.
11102 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11103 struct lpfc_iocbq *cmdiocb)
11105 struct lpfc_vport *vport = cmdiocb->vport;
11106 struct lpfc_iocbq *abtsiocbp;
11107 IOCB_t *icmd = NULL;
11108 IOCB_t *iabt = NULL;
11110 unsigned long iflags;
11111 struct lpfc_nodelist *ndlp;
11113 lockdep_assert_held(&phba->hbalock);
11116 * There are certain command types we don't want to abort. And we
11117 * don't want to abort commands that are already in the process of
11118 * being aborted.
11120 icmd = &cmdiocb->iocb;
11121 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11122 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11123 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11126 /* issue ABTS for this IOCB based on iotag */
11127 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11128 if (abtsiocbp == NULL)
11131 /* This signals the response to set the correct status
11132 * before calling the completion handler
11134 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11136 iabt = &abtsiocbp->iocb;
11137 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11138 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11139 if (phba->sli_rev == LPFC_SLI_REV4) {
11140 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11141 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11143 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11144 if (pring->ringno == LPFC_ELS_RING) {
11145 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11146 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11150 iabt->ulpClass = icmd->ulpClass;
11152 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11153 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11154 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11155 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11156 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11157 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11159 if (phba->link_state >= LPFC_LINK_UP)
11160 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11162 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11164 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11165 abtsiocbp->vport = vport;
11167 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11168 "0339 Abort xri x%x, original iotag x%x, "
11169 "abort cmd iotag x%x\n",
11170 iabt->un.acxri.abortIoTag,
11171 iabt->un.acxri.abortContextTag,
11174 if (phba->sli_rev == LPFC_SLI_REV4) {
11175 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11176 if (unlikely(pring == NULL))
11178 /* Note: both hbalock and ring_lock need to be set here */
11179 spin_lock_irqsave(&pring->ring_lock, iflags);
11180 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11182 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11184 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11189 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11192 * Caller to this routine should check for IOCB_ERROR
11193 * and handle it properly. This routine no longer removes
11194 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11200 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11201 * @phba: Pointer to HBA context object.
11202 * @pring: Pointer to driver SLI ring object.
11203 * @cmdiocb: Pointer to driver command iocb object.
11205 * This function issues an abort iocb for the provided command iocb. In case
11206 * of unloading, the abort iocb will not be issued to commands on the ELS
11207 * ring. Instead, the completion callback of those commands shall be
11208 * changed so that nothing happens when they finish. This function is called
11209 * with hbalock held. The function returns 0 when the command iocb is an
11210 * abort request.
11213 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11214 struct lpfc_iocbq *cmdiocb)
11216 struct lpfc_vport *vport = cmdiocb->vport;
11217 int retval = IOCB_ERROR;
11218 IOCB_t *icmd = NULL;
11220 lockdep_assert_held(&phba->hbalock);
11223 * There are certain command types we don't want to abort. And we
11224 * don't want to abort commands that are already in the process of
11225 * being aborted.
11227 icmd = &cmdiocb->iocb;
11228 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11229 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11230 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11234 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11235 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11237 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11238 goto abort_iotag_exit;
11242 * If we're unloading, don't abort iocb on the ELS ring, but change
11243 * the callback so that nothing happens when it finishes.
11245 if ((vport->load_flag & FC_UNLOADING) &&
11246 (pring->ringno == LPFC_ELS_RING)) {
11247 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11248 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11250 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11251 goto abort_iotag_exit;
11254 /* Now, we try to issue the abort to the cmdiocb out */
11255 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11259 * Caller to this routine should check for IOCB_ERROR
11260 * and handle it properly. This routine no longer removes
11261 * iocb off txcmplq and call compl in case of IOCB_ERROR.
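/*
 * Usage sketch (illustrative only): callers hold the hbalock across the
 * abort and must handle IOCB_ERROR themselves; on error the command stays
 * on the txcmplq for its normal completion or a later flush:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */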
11267 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
11268 * @phba: Pointer to HBA context object.
11269 * @pring: Pointer to driver SLI ring object.
11270 * @cmdiocb: Pointer to driver command iocb object.
11272 * This function issues an abort iocb for the provided command iocb down to
11273 * the port. Unless the outstanding command iocb is itself an abort
11274 * request, this function issues the abort unconditionally. This function is
11275 * called with hbalock held. The function returns 0 when it fails due to
11276 * memory allocation failure or when the command iocb is an abort request.
11279 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11280 struct lpfc_iocbq *cmdiocb)
11282 struct lpfc_vport *vport = cmdiocb->vport;
11283 struct lpfc_iocbq *abtsiocbp;
11284 union lpfc_wqe128 *abts_wqe;
11286 int idx = cmdiocb->hba_wqidx;
11289 * There are certain command types we don't want to abort. And we
11290 * don't want to abort commands that are already in the process of
11291 * being aborted.
11293 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
11294 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
11295 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11298 /* issue ABTS for this io based on iotag */
11299 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11300 if (abtsiocbp == NULL)
11303 /* This signals the response to set the correct status
11304 * before calling the completion handler
11306 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11308 /* Complete prepping the abort wqe and issue to the FW. */
11309 abts_wqe = &abtsiocbp->wqe;
11311 /* Clear any stale WQE contents */
11312 memset(abts_wqe, 0, sizeof(union lpfc_wqe));
11313 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
11316 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11317 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11318 cmdiocb->iocb.ulpClass);
11320 /* word 8 - tell the FW to abort the IO associated with this
11321 * outstanding exchange ID.
11323 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11325 /* word 9 - this is the iotag for the abts_wqe completion. */
11326 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11330 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11331 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11334 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11335 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11336 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11338 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11339 abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11340 abtsiocbp->vport = vport;
11341 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
11342 retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
11345 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11346 "6147 Failed abts issue_wqe with status x%x "
11348 retval, cmdiocb->sli4_xritag);
11349 lpfc_sli_release_iocbq(phba, abtsiocbp);
11353 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11354 "6148 Drv Abort NVME Request Issued for "
11355 "ox_id x%x on reqtag x%x\n",
11356 cmdiocb->sli4_xritag,
11363 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11364 * @phba: pointer to lpfc HBA data structure.
11366 * This routine will abort all pending and outstanding iocbs to an HBA.
11369 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11371 struct lpfc_sli *psli = &phba->sli;
11372 struct lpfc_sli_ring *pring;
11373 struct lpfc_queue *qp = NULL;
11376 if (phba->sli_rev != LPFC_SLI_REV4) {
11377 for (i = 0; i < psli->num_rings; i++) {
11378 pring = &psli->sli3_ring[i];
11379 lpfc_sli_abort_iocb_ring(phba, pring);
11383 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11387 lpfc_sli_abort_iocb_ring(phba, pring);
11392 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11393 * @iocbq: Pointer to driver iocb object.
11394 * @vport: Pointer to driver virtual port object.
11395 * @tgt_id: SCSI ID of the target.
11396 * @lun_id: LUN ID of the scsi device.
11397 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11399 * This function acts as an iocb filter for functions which abort or count
11400 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11401 * 0 if the filtering criteria are met for the given iocb and will return
11402 * 1 if the filtering criteria are not met.
11403 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11404 * given iocb is for the SCSI device specified by vport, tgt_id and
11405 * lun_id parameters.
11406 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11407 * given iocb is for the SCSI target specified by vport and tgt_id
11409 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11410 * given iocb is for the SCSI host associated with the given vport.
11411 * This function is called with no locks held.
11414 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11415 uint16_t tgt_id, uint64_t lun_id,
11416 lpfc_ctx_cmd ctx_cmd)
11418 struct lpfc_io_buf *lpfc_cmd;
11421 if (iocbq->vport != vport)
11424 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11425 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11428 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11430 if (lpfc_cmd->pCmd == NULL)
11435 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11436 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11437 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11441 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11442 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11445 case LPFC_CTX_HOST:
11449 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11450 __func__, ctx_cmd);
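/*
 * Usage sketch (illustrative only; 'act_on' is a hypothetical helper):
 * callers walk the iotag lookup table and apply this filter, exactly as
 * lpfc_sli_sum_iocb() does below:
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, LPFC_CTX_LUN) == 0)
 *			act_on(iocbq);
 *	}
 */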
11458 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11459 * @vport: Pointer to virtual port.
11460 * @tgt_id: SCSI ID of the target.
11461 * @lun_id: LUN ID of the scsi device.
11462 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11464 * This function returns the number of FCP commands pending for the vport.
11465 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11466 * commands pending on the vport associated with SCSI device specified
11467 * by tgt_id and lun_id parameters.
11468 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11469 * commands pending on the vport associated with SCSI target specified
11470 * by tgt_id parameter.
11471 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11472 * commands pending on the vport.
11473 * This function returns the number of iocbs which satisfy the filter.
11474 * This function is called without any lock held.
11477 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11478 lpfc_ctx_cmd ctx_cmd)
11480 struct lpfc_hba *phba = vport->phba;
11481 struct lpfc_iocbq *iocbq;
11484 spin_lock_irq(&phba->hbalock);
11485 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11486 iocbq = phba->sli.iocbq_lookup[i];
11488 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11489 ctx_cmd) == 0)
11490 sum++;
11491 }
11492 spin_unlock_irq(&phba->hbalock);
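/*
 * Usage sketch (illustrative only): the SCSI reset handlers poll this
 * count while waiting for outstanding commands to drain, e.g.
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, timeout))
 *		msleep(20);
 */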
11498 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11499 * @phba: Pointer to HBA context object
11500 * @cmdiocb: Pointer to command iocb object.
11501 * @rspiocb: Pointer to response iocb object.
11503 * This function is called when an aborted FCP iocb completes. This
11504 * function is called by the ring event handler with no lock held.
11505 * This function frees the iocb.
11508 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11509 struct lpfc_iocbq *rspiocb)
11511 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11512 "3096 ABORT_XRI_CN completing on rpi x%x "
11513 "original iotag x%x, abort cmd iotag x%x "
11514 "status 0x%x, reason 0x%x\n",
11515 cmdiocb->iocb.un.acxri.abortContextTag,
11516 cmdiocb->iocb.un.acxri.abortIoTag,
11517 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11518 rspiocb->iocb.un.ulpWord[4]);
11519 lpfc_sli_release_iocbq(phba, cmdiocb);
11524 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11525 * @vport: Pointer to virtual port.
11526 * @pring: Pointer to driver SLI ring object.
11527 * @tgt_id: SCSI ID of the target.
11528 * @lun_id: LUN ID of the scsi device.
11529 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11531 * This function sends an abort command for every SCSI command
11532 * associated with the given virtual port pending on the ring
11533 * filtered by the lpfc_sli_validate_fcp_iocb function.
11534 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11535 * FCP iocbs associated with the lun specified by tgt_id and lun_id
11536 * parameters.
11537 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11538 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11539 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11540 * FCP iocbs associated with virtual port.
11541 * This function returns the number of iocbs it failed to abort.
11542 * This function is called with no locks held.
11545 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11546 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11548 struct lpfc_hba *phba = vport->phba;
11549 struct lpfc_iocbq *iocbq;
11550 struct lpfc_iocbq *abtsiocb;
11551 struct lpfc_sli_ring *pring_s4;
11552 IOCB_t *cmd = NULL;
11553 int errcnt = 0, ret_val = 0;
11556 /* all I/Os are in process of being flushed */
11557 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11560 for (i = 1; i <= phba->sli.last_iotag; i++) {
11561 iocbq = phba->sli.iocbq_lookup[i];
11563 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11568 * If the iocbq is already being aborted, don't take a second
11569 * action, but do count it.
11571 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11574 /* issue ABTS for this IOCB based on iotag */
11575 abtsiocb = lpfc_sli_get_iocbq(phba);
11576 if (abtsiocb == NULL) {
11581 /* indicate the IO is being aborted by the driver. */
11582 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11584 cmd = &iocbq->iocb;
11585 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11586 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11587 if (phba->sli_rev == LPFC_SLI_REV4)
11588 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11590 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11591 abtsiocb->iocb.ulpLe = 1;
11592 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11593 abtsiocb->vport = vport;
11595 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11596 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11597 if (iocbq->iocb_flag & LPFC_IO_FCP)
11598 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11599 if (iocbq->iocb_flag & LPFC_IO_FOF)
11600 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11602 if (lpfc_is_link_up(phba))
11603 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11605 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11607 /* Setup callback routine and issue the command. */
11608 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11609 if (phba->sli_rev == LPFC_SLI_REV4) {
11610 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11613 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11616 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11618 if (ret_val == IOCB_ERROR) {
11619 lpfc_sli_release_iocbq(phba, abtsiocb);
11629 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11630 * @vport: Pointer to virtual port.
11631 * @pring: Pointer to driver SLI ring object.
11632 * @tgt_id: SCSI ID of the target.
11633 * @lun_id: LUN ID of the scsi device.
11634 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11636 * This function sends an abort command for every SCSI command
11637 * associated with the given virtual port pending on the ring
11638 * filtered by the lpfc_sli_validate_fcp_iocb function.
11639 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11640 * FCP iocbs associated with the lun specified by tgt_id and lun_id
11641 * parameters.
11642 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11643 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11644 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11645 * FCP iocbs associated with virtual port.
11646 * This function returns the number of iocbs it aborted.
11647 * This function is called with no locks held right after a taskmgmt
11648 * command is sent.
11651 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11652 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11654 struct lpfc_hba *phba = vport->phba;
11655 struct lpfc_io_buf *lpfc_cmd;
11656 struct lpfc_iocbq *abtsiocbq;
11657 struct lpfc_nodelist *ndlp;
11658 struct lpfc_iocbq *iocbq;
11660 int sum, i, ret_val;
11661 unsigned long iflags;
11662 struct lpfc_sli_ring *pring_s4 = NULL;
11664 spin_lock_irqsave(&phba->hbalock, iflags);
11666 /* all I/Os are in process of being flushed */
11667 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11668 spin_unlock_irqrestore(&phba->hbalock, iflags);
11673 for (i = 1; i <= phba->sli.last_iotag; i++) {
11674 iocbq = phba->sli.iocbq_lookup[i];
11676 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11680 /* Guard against IO completion being called at the same time */
11681 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11682 spin_lock(&lpfc_cmd->buf_lock);
11684 if (!lpfc_cmd->pCmd) {
11685 spin_unlock(&lpfc_cmd->buf_lock);
11689 if (phba->sli_rev == LPFC_SLI_REV4) {
11691 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
11693 spin_unlock(&lpfc_cmd->buf_lock);
11696 /* Note: both hbalock and ring_lock must be set here */
11697 spin_lock(&pring_s4->ring_lock);
11701 * If the iocbq is already being aborted, don't take a second
11702 * action, but do count it.
11704 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11705 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11706 if (phba->sli_rev == LPFC_SLI_REV4)
11707 spin_unlock(&pring_s4->ring_lock);
11708 spin_unlock(&lpfc_cmd->buf_lock);
11712 /* issue ABTS for this IOCB based on iotag */
11713 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11714 if (!abtsiocbq) {
11715 if (phba->sli_rev == LPFC_SLI_REV4)
11716 spin_unlock(&pring_s4->ring_lock);
11717 spin_unlock(&lpfc_cmd->buf_lock);
11718 continue;
11719 }
11721 icmd = &iocbq->iocb;
11722 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11723 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11724 if (phba->sli_rev == LPFC_SLI_REV4)
11725 abtsiocbq->iocb.un.acxri.abortIoTag =
11726 iocbq->sli4_xritag;
11727 else
11728 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11729 abtsiocbq->iocb.ulpLe = 1;
11730 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11731 abtsiocbq->vport = vport;
11733 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11734 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11735 if (iocbq->iocb_flag & LPFC_IO_FCP)
11736 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11737 if (iocbq->iocb_flag & LPFC_IO_FOF)
11738 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11740 ndlp = lpfc_cmd->rdata->pnode;
11742 if (lpfc_is_link_up(phba) &&
11743 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11744 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11745 else
11746 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11748 /* Setup callback routine and issue the command. */
11749 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11751 /*
11752 * Indicate the IO is being aborted by the driver and set
11753 * the caller's flag in the aborted IO.
11754 */
11755 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11757 if (phba->sli_rev == LPFC_SLI_REV4) {
11758 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11759 abtsiocbq, 0);
11760 spin_unlock(&pring_s4->ring_lock);
11761 } else {
11762 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11763 abtsiocbq, 0);
11764 }
11766 spin_unlock(&lpfc_cmd->buf_lock);
11768 if (ret_val == IOCB_ERROR)
11769 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11770 else
11771 sum++;
11772 }
11773 spin_unlock_irqrestore(&phba->hbalock, iflags);
11774 return sum;
11775 }
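/*
 * Usage sketch (illustrative only, not part of the driver): abort every
 * outstanding FCP I/O to one SCSI target after a target reset completes.
 * The wrapper function and its name are assumptions for this example; the
 * real callers of lpfc_sli_abort_taskmgmt() live in lpfc_scsi.c.
 */
#if 0
static int example_abort_target_ios(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[LPFC_FCP_RING];

	/* LPFC_CTX_TGT matches on tgt_id only; the lun_id (0) is ignored */
	return lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
}
#endif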
11778 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11779 * @phba: Pointer to HBA context object.
11780 * @cmdiocbq: Pointer to command iocb.
11781 * @rspiocbq: Pointer to response iocb.
11783 * This function is the completion handler for iocbs issued using
11784 * lpfc_sli_issue_iocb_wait function. This function is called by the
11785 * ring event handler function without any lock held. This function
11786 * can be called from both worker thread context and interrupt
11787 * context. This function can also be called from another thread which
11788 * cleans up the SLI layer objects.
11789 * This function copies the contents of the response iocb to the
11790 * response iocb memory object provided by the caller of
11791 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11792 * sleeps for the iocb completion.
11794 static void
11795 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11796 struct lpfc_iocbq *cmdiocbq,
11797 struct lpfc_iocbq *rspiocbq)
11798 {
11799 wait_queue_head_t *pdone_q;
11800 unsigned long iflags;
11801 struct lpfc_io_buf *lpfc_cmd;
11803 spin_lock_irqsave(&phba->hbalock, iflags);
11804 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11806 /*
11807 * A time out has occurred for the iocb. If a time out
11808 * completion handler has been supplied, call it. Otherwise,
11809 * just free the iocbq.
11810 */
11812 spin_unlock_irqrestore(&phba->hbalock, iflags);
11813 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11814 cmdiocbq->wait_iocb_cmpl = NULL;
11815 if (cmdiocbq->iocb_cmpl)
11816 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11817 else
11818 lpfc_sli_release_iocbq(phba, cmdiocbq);
11819 return;
11820 }
11822 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11823 if (cmdiocbq->context2 && rspiocbq)
11824 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11825 &rspiocbq->iocb, sizeof(IOCB_t));
11827 /* Set the exchange busy flag for task management commands */
11828 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11829 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11830 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11831 cur_iocbq);
11832 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11833 }
11835 pdone_q = cmdiocbq->context_un.wait_queue;
11836 if (pdone_q)
11837 wake_up(pdone_q);
11838 spin_unlock_irqrestore(&phba->hbalock, iflags);
11839 return;
11840 }
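/*
 * Note on the container_of() use above: for FCP I/Os the iocbq is embedded
 * in struct lpfc_io_buf as its cur_iocbq member, so the enclosing I/O
 * buffer can be recovered from the command iocb alone. A minimal sketch of
 * the same recovery (the helper name is hypothetical):
 */
#if 0
static struct lpfc_io_buf *example_iocbq_to_io_buf(struct lpfc_iocbq *piocbq)
{
	/* valid only when piocbq is the embedded cur_iocbq of an FCP I/O */
	return container_of(piocbq, struct lpfc_io_buf, cur_iocbq);
}
#endif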
11843 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11844 * @phba: Pointer to HBA context object.
11845 * @piocbq: Pointer to command iocb.
11846 * @flag: Flag to test.
11848 * This routine grabs the hbalock and then tests the iocb_flag to
11849 * see if the passed-in flag is set.
11850 * Returns
11851 * 1 if flag is set.
11852 * 0 if flag is not set.
11853 **/
11854 static int
11855 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11856 struct lpfc_iocbq *piocbq, uint32_t flag)
11857 {
11858 unsigned long iflags;
11859 int ret;
11861 spin_lock_irqsave(&phba->hbalock, iflags);
11862 ret = piocbq->iocb_flag & flag;
11863 spin_unlock_irqrestore(&phba->hbalock, iflags);
11864 return ret;
11865 }
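/*
 * lpfc_chk_iocb_flg() exists so that a wait_event_timeout() condition can
 * be evaluated under hbalock, closing the race with the completion handler
 * that sets LPFC_IO_WAKE. Pattern sketch of the intended use (this is the
 * shape used by lpfc_sli_issue_iocb_wait() below):
 */
#if 0
	timeleft = wait_event_timeout(done_q,
				      lpfc_chk_iocb_flg(phba, piocb,
							LPFC_IO_WAKE),
				      timeout_req);
#endif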
11869 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11870 * @phba: Pointer to HBA context object.
11871 * @ring_number: SLI ring number to issue the iocb on.
11872 * @piocb: Pointer to command iocb.
11873 * @prspiocbq: Pointer to response iocb.
11874 * @timeout: Timeout in number of seconds.
11876 * This function issues the iocb to firmware and waits for the
11877 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11878 * to handle iocbs which time out. If the field is NULL, the
11879 * function shall free the iocbq structure. If more clean up is
11880 * needed, the caller is expected to provide a completion function
11881 * that will provide the needed clean up. If the iocb command is
11882 * not completed within timeout seconds, the function will either
11883 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11884 * completion function set in the iocb_cmpl field and then return
11885 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11886 * resources if this function returns IOCB_TIMEDOUT.
11887 * The function waits for the iocb completion using a
11888 * non-interruptible wait.
11889 * This function will sleep while waiting for iocb completion.
11890 * So, this function should not be called from any context which
11891 * does not allow sleeping. Due to the same reason, this function
11892 * cannot be called with interrupt disabled.
11893 * This function assumes that the iocb completions occur while
11894 * this function sleeps. So, this function cannot be called from
11895 * the thread which processes iocb completion for this ring.
11896 * This function clears the iocb_flag of the iocb object before
11897 * issuing the iocb and the iocb completion handler sets this
11898 * flag and wakes this thread when the iocb completes.
11899 * The contents of the response iocb will be copied to prspiocbq
11900 * by the completion handler when the command completes.
11901 * This function returns IOCB_SUCCESS on success.
11902 * This function is called with no lock held.
11905 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11906 uint32_t ring_number,
11907 struct lpfc_iocbq *piocb,
11908 struct lpfc_iocbq *prspiocbq,
11909 uint32_t timeout)
11910 {
11911 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11912 long timeleft, timeout_req = 0;
11913 int retval = IOCB_SUCCESS;
11914 uint32_t creg_val;
11915 struct lpfc_iocbq *iocb;
11916 int txq_cnt = 0;
11917 int txcmplq_cnt = 0;
11918 struct lpfc_sli_ring *pring;
11919 unsigned long iflags;
11920 bool iocb_completed = true;
11922 if (phba->sli_rev >= LPFC_SLI_REV4)
11923 pring = lpfc_sli4_calc_ring(phba, piocb);
11924 else
11925 pring = &phba->sli.sli3_ring[ring_number];
11926 /*
11927 * If the caller has provided a response iocbq buffer, then context2
11928 * must be NULL; otherwise it is an error.
11929 */
11930 if (prspiocbq) {
11931 if (piocb->context2)
11932 return IOCB_ERROR;
11933 piocb->context2 = prspiocbq;
11934 }
11936 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11937 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11938 piocb->context_un.wait_queue = &done_q;
11939 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11941 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11942 if (lpfc_readl(phba->HCregaddr, &creg_val))
11943 return IOCB_ERROR;
11944 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11945 writel(creg_val, phba->HCregaddr);
11946 readl(phba->HCregaddr); /* flush */
11947 }
11949 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11950 SLI_IOCB_RET_IOCB);
11951 if (retval == IOCB_SUCCESS) {
11952 timeout_req = msecs_to_jiffies(timeout * 1000);
11953 timeleft = wait_event_timeout(done_q,
11954 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11955 timeout_req);
11956 spin_lock_irqsave(&phba->hbalock, iflags);
11957 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11959 /*
11960 * IOCB timed out. Inform the wake iocb wait
11961 * completion function and set local status
11962 */
11964 iocb_completed = false;
11965 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11966 }
11967 spin_unlock_irqrestore(&phba->hbalock, iflags);
11968 if (iocb_completed) {
11969 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11970 "0331 IOCB wake signaled\n");
11971 /* Note: we are not indicating if the IOCB has a success
11972 * status or not - that's for the caller to check.
11973 * IOCB_SUCCESS means just that the command was sent and
11974 * completed. Not that it completed successfully.
11975 */
11976 } else if (timeleft == 0) {
11977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11978 "0338 IOCB wait timeout error - no "
11979 "wake response Data x%x\n", timeout);
11980 retval = IOCB_TIMEDOUT;
11981 } else {
11982 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11983 "0330 IOCB wake NOT set, "
11984 "Data x%x x%lx\n",
11985 timeout, (timeleft / jiffies));
11986 retval = IOCB_TIMEDOUT;
11987 }
11988 } else if (retval == IOCB_BUSY) {
11989 if (phba->cfg_log_verbose & LOG_SLI) {
11990 list_for_each_entry(iocb, &pring->txq, list) {
11991 txq_cnt++;
11992 }
11993 list_for_each_entry(iocb, &pring->txcmplq, list) {
11994 txcmplq_cnt++;
11995 }
11996 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11997 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11998 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11999 }
12000 return IOCB_BUSY;
12001 } else {
12002 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12003 "0332 IOCB wait issue failed, Data x%x\n",
12004 retval);
12005 retval = IOCB_ERROR;
12006 }
12008 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12009 if (lpfc_readl(phba->HCregaddr, &creg_val))
12010 return IOCB_ERROR;
12011 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12012 writel(creg_val, phba->HCregaddr);
12013 readl(phba->HCregaddr); /* flush */
12014 }
12016 if (prspiocbq)
12017 piocb->context2 = NULL;
12019 piocb->context_un.wait_queue = NULL;
12020 piocb->iocb_cmpl = NULL;
12021 return retval;
12022 }
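/*
 * Usage sketch (illustrative only): issue a prepared iocb synchronously on
 * the ELS ring and honor the IOCB_TIMEDOUT ownership rule described above.
 * 'cmdiocbq', 'rspiocbq' and the 30 second timeout are assumptions for the
 * example.
 */
#if 0
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				      rspiocbq, 30);
	if (rc == IOCB_TIMEDOUT) {
		/* the iocb now belongs to the timeout path; do not free it */
		return -ETIMEDOUT;
	}
	if (rc == IOCB_SUCCESS) {
		/* sent and completed; check rspiocbq->iocb.ulpStatus */
	}
#endif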
12025 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12026 * @phba: Pointer to HBA context object.
12027 * @pmboxq: Pointer to driver mailbox object.
12028 * @timeout: Timeout in number of seconds.
12030 * This function issues the mailbox to firmware and waits for the
12031 * mailbox command to complete. If the mailbox command is not
12032 * completed within timeout seconds, it returns MBX_TIMEOUT.
12033 * The function waits for the mailbox completion using an
12034 * interruptible wait. If the thread is woken up due to a
12035 * signal, an MBX_TIMEOUT error is returned to the caller. The caller
12036 * should not free the mailbox resources if this function returns
12037 * MBX_TIMEOUT.
12038 * This function will sleep while waiting for mailbox completion.
12039 * So, this function should not be called from any context which
12040 * does not allow sleeping. Due to the same reason, this function
12041 * cannot be called with interrupt disabled.
12042 * This function assumes that the mailbox completion occurs while
12043 * this function sleeps. So, this function cannot be called from
12044 * the worker thread which processes mailbox completion.
12045 * This function is called in the context of HBA management
12046 * applications.
12047 * This function returns MBX_SUCCESS when successful.
12048 * This function is called with no lock held.
12049 **/
12050 int
12051 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12052 uint32_t timeout)
12053 {
12054 struct completion mbox_done;
12055 int retval;
12056 unsigned long flag;
12058 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12059 /* setup wake call as IOCB callback */
12060 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12062 /* setup context3 field to pass wait_queue pointer to wake function */
12063 init_completion(&mbox_done);
12064 pmboxq->context3 = &mbox_done;
12065 /* now issue the command */
12066 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12067 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12068 wait_for_completion_timeout(&mbox_done,
12069 msecs_to_jiffies(timeout * 1000));
12071 spin_lock_irqsave(&phba->hbalock, flag);
12072 pmboxq->context3 = NULL;
12074 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
12075 * otherwise do not free the resources.
12077 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12078 retval = MBX_SUCCESS;
12079 } else {
12080 retval = MBX_TIMEOUT;
12081 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12082 }
12083 spin_unlock_irqrestore(&phba->hbalock, flag);
12084 }
12086 return retval;
12087 }
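/*
 * Usage sketch (illustrative only): issue a mailbox command synchronously.
 * On MBX_TIMEOUT the completion handler still owns pmboxq, so the caller
 * must not free it, exactly as the comment above warns.
 */
#if 0
	lpfc_read_rev(phba, pmboxq);	/* prepare a READ_REV mailbox */
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* pmboxq still in use; do not free */
	if (rc == MBX_SUCCESS)
		mempool_free(pmboxq, phba->mbox_mem_pool);
#endif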
12089 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12090 * @phba: Pointer to HBA context.
12092 * This function is called to shutdown the driver's mailbox sub-system.
12093 * It first marks the mailbox sub-system as being in a blocked state to prevent
12094 * asynchronous mailbox commands from being issued off the pending mailbox
12095 * command queue. If the mailbox command sub-system shutdown is due to
12096 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12097 * the mailbox sub-system flush routine to forcefully bring down the
12098 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12099 * as with offline or HBA function reset), this routine will wait for the
12100 * outstanding mailbox command to complete before invoking the mailbox
12101 * sub-system flush routine to gracefully bring down mailbox sub-system.
12103 void
12104 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12105 {
12106 struct lpfc_sli *psli = &phba->sli;
12107 unsigned long timeout;
12109 if (mbx_action == LPFC_MBX_NO_WAIT) {
12110 /* delay 100ms for port state */
12111 msleep(100);
12112 lpfc_sli_mbox_sys_flush(phba);
12113 return;
12114 }
12115 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12117 /* Disable softirqs, including timers from obtaining phba->hbalock */
12118 local_bh_disable();
12120 spin_lock_irq(&phba->hbalock);
12121 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12123 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12124 /* Determine how long we might wait for the active mailbox
12125 * command to be gracefully completed by firmware.
12127 if (phba->sli.mbox_active)
12128 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12129 phba->sli.mbox_active) *
12130 1000) + jiffies;
12131 spin_unlock_irq(&phba->hbalock);
12133 /* Enable softirqs again, done with phba->hbalock */
12134 local_bh_enable();
12136 while (phba->sli.mbox_active) {
12137 /* Check active mailbox complete status every 2ms */
12138 msleep(2);
12139 if (time_after(jiffies, timeout))
12140 /* Timeout; let the mailbox flush routine
12141 * forcefully release the active mailbox command
12142 */
12143 break;
12144 }
12145 } else {
12146 spin_unlock_irq(&phba->hbalock);
12148 /* Enable softirqs again, done with phba->hbalock */
12149 local_bh_enable();
12150 }
12152 lpfc_sli_mbox_sys_flush(phba);
12153 }
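/*
 * The graceful path above is a standard jiffies deadline poll: compute an
 * absolute deadline, sleep in small slices, and give up once the deadline
 * passes. The same pattern in isolation (a sketch, not driver code):
 */
#if 0
	unsigned long deadline = jiffies +
				 msecs_to_jiffies(LPFC_MBOX_TMO * 1000);

	while (phba->sli.mbox_active) {
		msleep(2);
		if (time_after(jiffies, deadline))
			break;	/* give up; the flush routine cleans up */
	}
#endif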
12156 * lpfc_sli_eratt_read - read sli-3 error attention events
12157 * @phba: Pointer to HBA context.
12159 * This function is called to read the SLI3 device error attention registers
12160 * for possible error attention events. The caller must hold the hostlock
12161 * with spin_lock_irq().
12163 * This function returns 1 when there is Error Attention in the Host Attention
12164 * Register and returns 0 otherwise.
12166 static int
12167 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12168 {
12169 uint32_t ha_copy;
12171 /* Read chip Host Attention (HA) register */
12172 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12173 goto unplug_err;
12175 if (ha_copy & HA_ERATT) {
12176 /* Read host status register to retrieve error event */
12177 if (lpfc_sli_read_hs(phba))
12178 goto unplug_err;
12180 /* Check if a deferred error condition is active */
12181 if ((HS_FFER1 & phba->work_hs) &&
12182 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12183 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12184 phba->hba_flag |= DEFER_ERATT;
12185 /* Clear all interrupt enable conditions */
12186 writel(0, phba->HCregaddr);
12187 readl(phba->HCregaddr);
12188 }
12190 /* Set the driver HA work bitmap */
12191 phba->work_ha |= HA_ERATT;
12192 /* Indicate polling handles this ERATT */
12193 phba->hba_flag |= HBA_ERATT_HANDLED;
12194 return 1;
12195 }
12196 return 0;
12198 unplug_err:
12199 /* Set the driver HS work bitmap */
12200 phba->work_hs |= UNPLUG_ERR;
12201 /* Set the driver HA work bitmap */
12202 phba->work_ha |= HA_ERATT;
12203 /* Indicate polling handles this ERATT */
12204 phba->hba_flag |= HBA_ERATT_HANDLED;
12205 return 1;
12206 }
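/*
 * The deferred-error test above reduces to: HS_FFER1 must be set together
 * with at least one of HS_FFER2..HS_FFER8. A hypothetical helper expressing
 * the same predicate:
 */
#if 0
static bool example_is_deferred_eratt(uint32_t work_hs)
{
	return (work_hs & HS_FFER1) &&
	       (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			   HS_FFER6 | HS_FFER7 | HS_FFER8));
}
#endif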
12209 * lpfc_sli4_eratt_read - read sli-4 error attention events
12210 * @phba: Pointer to HBA context.
12212 * This function is called to read the SLI4 device error attention registers
12213 * for possible error attention events. The caller must hold the hostlock
12214 * with spin_lock_irq().
12216 * This function returns 1 when there is Error Attention in the Host Attention
12217 * Register and returns 0 otherwise.
12219 static int
12220 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12221 {
12222 uint32_t uerr_sta_hi, uerr_sta_lo;
12223 uint32_t if_type, portsmphr;
12224 struct lpfc_register portstat_reg;
12226 /*
12227 * For now, use the SLI4 device internal unrecoverable error
12228 * registers for error attention. This can be changed later.
12229 */
12230 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12231 switch (if_type) {
12232 case LPFC_SLI_INTF_IF_TYPE_0:
12233 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12234 &uerr_sta_lo) ||
12235 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12236 &uerr_sta_hi)) {
12237 phba->work_hs |= UNPLUG_ERR;
12238 phba->work_ha |= HA_ERATT;
12239 phba->hba_flag |= HBA_ERATT_HANDLED;
12240 return 1;
12241 }
12242 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12243 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12245 "1423 HBA Unrecoverable error: "
12246 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12247 "ue_mask_lo_reg=0x%x, "
12248 "ue_mask_hi_reg=0x%x\n",
12249 uerr_sta_lo, uerr_sta_hi,
12250 phba->sli4_hba.ue_mask_lo,
12251 phba->sli4_hba.ue_mask_hi);
12252 phba->work_status[0] = uerr_sta_lo;
12253 phba->work_status[1] = uerr_sta_hi;
12254 phba->work_ha |= HA_ERATT;
12255 phba->hba_flag |= HBA_ERATT_HANDLED;
12256 return 1;
12257 }
12258 break;
12259 case LPFC_SLI_INTF_IF_TYPE_2:
12260 case LPFC_SLI_INTF_IF_TYPE_6:
12261 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12262 &portstat_reg.word0) ||
12263 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12264 &portsmphr)) {
12265 phba->work_hs |= UNPLUG_ERR;
12266 phba->work_ha |= HA_ERATT;
12267 phba->hba_flag |= HBA_ERATT_HANDLED;
12268 return 1;
12269 }
12270 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12271 phba->work_status[0] =
12272 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12273 phba->work_status[1] =
12274 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12276 "2885 Port Status Event: "
12277 "port status reg 0x%x, "
12278 "port smphr reg 0x%x, "
12279 "error 1=0x%x, error 2=0x%x\n",
12280 portstat_reg.word0,
12281 portsmphr,
12282 phba->work_status[0],
12283 phba->work_status[1]);
12284 phba->work_ha |= HA_ERATT;
12285 phba->hba_flag |= HBA_ERATT_HANDLED;
12286 return 1;
12287 }
12288 break;
12289 case LPFC_SLI_INTF_IF_TYPE_1:
12290 default:
12291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12292 "2886 HBA Error Attention on unsupported "
12293 "if type %d.", if_type);
12294 phba->work_ha |= HA_ERATT;
12295 phba->hba_flag |= HBA_ERATT_HANDLED;
12296 return 1;
12297 }
12298 return 0;
12299 }
12301 * lpfc_sli_check_eratt - check error attention events
12302 * @phba: Pointer to HBA context.
12304 * This function is called from timer soft interrupt context to check HBA's
12305 * error attention register bit for error attention events.
12307 * This function returns 1 when there is Error Attention in the Host Attention
12308 * Register and returns 0 otherwise.
12310 int
12311 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12312 {
12313 uint32_t ha_copy;
12315 /* If somebody is waiting to handle an eratt, don't process it
12316 * here. The brdkill function will do this.
12317 */
12318 if (phba->link_flag & LS_IGNORE_ERATT)
12319 return 0;
12321 /* Check if interrupt handler handles this ERATT */
12322 spin_lock_irq(&phba->hbalock);
12323 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12324 /* Interrupt handler has handled ERATT */
12325 spin_unlock_irq(&phba->hbalock);
12326 return 0;
12327 }
12329 /*
12330 * If there is deferred error attention, do not check for error
12331 * attention
12332 */
12333 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12334 spin_unlock_irq(&phba->hbalock);
12335 return 0;
12336 }
12338 /* If PCI channel is offline, don't process it */
12339 if (unlikely(pci_channel_offline(phba->pcidev))) {
12340 spin_unlock_irq(&phba->hbalock);
12341 return 0;
12342 }
12344 switch (phba->sli_rev) {
12345 case LPFC_SLI_REV2:
12346 case LPFC_SLI_REV3:
12347 /* Read chip Host Attention (HA) register */
12348 ha_copy = lpfc_sli_eratt_read(phba);
12349 break;
12350 case LPFC_SLI_REV4:
12351 /* Read device Unrecoverable Error (UERR) registers */
12352 ha_copy = lpfc_sli4_eratt_read(phba);
12353 break;
12354 default:
12355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12356 "0299 Invalid SLI revision (%d)\n",
12357 phba->sli_rev);
12358 ha_copy = 0;
12359 break;
12360 }
12361 spin_unlock_irq(&phba->hbalock);
12363 return ha_copy;
12364 }
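/*
 * lpfc_sli_check_eratt() is written for timer (softirq) context. Sketch of
 * a poll-timer callback using it; this mirrors the driver's error-attention
 * polling, but the callback shown here is a simplified example:
 */
#if 0
static void example_eratt_poll(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, eratt_poll);

	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);	/* hand off to worker thread */
}
#endif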
12367 * lpfc_intr_state_check - Check device state for interrupt handling
12368 * @phba: Pointer to HBA context.
12370 * This inline routine checks whether a device or its PCI slot is in a state
12371 * in which the interrupt should be handled.
12373 * This function returns 0 if the device or the PCI slot is in a state that
12374 * interrupt should be handled, otherwise -EIO.
12376 static inline int
12377 lpfc_intr_state_check(struct lpfc_hba *phba)
12378 {
12379 /* If the pci channel is offline, ignore all the interrupts */
12380 if (unlikely(pci_channel_offline(phba->pcidev)))
12381 return -EIO;
12383 /* Update device level interrupt statistics */
12384 phba->sli.slistat.sli_intr++;
12386 /* Ignore all interrupts during initialization. */
12387 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12388 return -EIO;
12390 return 0;
12391 }
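/*
 * Every ISR entry point below opens with this check so that interrupts
 * arriving while the PCI slot is offline or the HBA is still initializing
 * are ignored. Sketch of the prologue pattern (the handler itself is a
 * hypothetical example):
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;	/* offline slot or not ready yet */
	/* ... handle and clear the attention conditions ... */
	return IRQ_HANDLED;
}
#endif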
12394 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12395 * @irq: Interrupt number.
12396 * @dev_id: The device context pointer.
12398 * This function is directly called from the PCI layer as an interrupt
12399 * service routine when device with SLI-3 interface spec is enabled with
12400 * MSI-X multi-message interrupt mode and there are slow-path events in
12401 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12402 * interrupt mode, this function is called as part of the device-level
12403 * interrupt handler. When the PCI slot is in error recovery or the HBA
12404 * is undergoing initialization, the interrupt handler will not process
12405 * the interrupt. The link attention and ELS ring attention events are
12406 * handled by the worker thread. The interrupt handler signals the worker
12407 * thread and returns for these events. This function is called without
12408 * any lock held. It gets the hbalock to access and update SLI data
12409 * structures.
12411 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12412 * returns IRQ_NONE.
12414 irqreturn_t
12415 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12416 {
12417 struct lpfc_hba *phba;
12418 uint32_t ha_copy, hc_copy;
12419 uint32_t work_ha_copy;
12420 unsigned long status;
12421 unsigned long iflag;
12422 uint32_t control;
12424 MAILBOX_t *mbox, *pmbox;
12425 struct lpfc_vport *vport;
12426 struct lpfc_nodelist *ndlp;
12427 struct lpfc_dmabuf *mp;
12428 LPFC_MBOXQ_t *pmb;
12429 int rc;
12431 /*
12432 * Get the driver's phba structure from the dev_id and
12433 * assume the HBA is not interrupting.
12434 */
12435 phba = (struct lpfc_hba *)dev_id;
12437 if (unlikely(!phba))
12438 return IRQ_NONE;
12440 /*
12441 * Stuff needs to be attended to when this function is invoked as an
12442 * individual interrupt handler in MSI-X multi-message interrupt mode
12443 */
12444 if (phba->intr_type == MSIX) {
12445 /* Check device state for handling interrupt */
12446 if (lpfc_intr_state_check(phba))
12447 return IRQ_NONE;
12448 /* Need to read HA REG for slow-path events */
12449 spin_lock_irqsave(&phba->hbalock, iflag);
12450 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12451 goto unplug_error;
12452 /* If somebody is waiting to handle an eratt don't process it
12453 * here. The brdkill function will do this.
12454 */
12455 if (phba->link_flag & LS_IGNORE_ERATT)
12456 ha_copy &= ~HA_ERATT;
12457 /* Check the need for handling ERATT in interrupt handler */
12458 if (ha_copy & HA_ERATT) {
12459 if (phba->hba_flag & HBA_ERATT_HANDLED)
12460 /* ERATT polling has handled ERATT */
12461 ha_copy &= ~HA_ERATT;
12462 else
12463 /* Indicate interrupt handler handles ERATT */
12464 phba->hba_flag |= HBA_ERATT_HANDLED;
12465 }
12467 /*
12468 * If there is deferred error attention, do not check for any
12469 * interrupt.
12470 */
12471 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12472 spin_unlock_irqrestore(&phba->hbalock, iflag);
12473 return IRQ_NONE;
12474 }
12476 /* Clear up only attention source related to slow-path */
12477 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12478 goto unplug_error;
12480 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12481 HC_LAINT_ENA | HC_ERINT_ENA),
12482 phba->HCregaddr);
12483 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12484 phba->HAregaddr);
12485 writel(hc_copy, phba->HCregaddr);
12486 readl(phba->HAregaddr); /* flush */
12487 spin_unlock_irqrestore(&phba->hbalock, iflag);
12488 } else
12489 ha_copy = phba->ha_copy;
12491 work_ha_copy = ha_copy & phba->work_ha_mask;
12493 if (work_ha_copy) {
12494 if (work_ha_copy & HA_LATT) {
12495 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12497 * Turn off Link Attention interrupts
12498 * until CLEAR_LA done
12500 spin_lock_irqsave(&phba->hbalock, iflag);
12501 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12502 if (lpfc_readl(phba->HCregaddr, &control))
12503 goto unplug_error;
12504 control &= ~HC_LAINT_ENA;
12505 writel(control, phba->HCregaddr);
12506 readl(phba->HCregaddr); /* flush */
12507 spin_unlock_irqrestore(&phba->hbalock, iflag);
12508 }
12509 else
12510 work_ha_copy &= ~HA_LATT;
12511 }
12513 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12514 /*
12515 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12516 * the only slow ring.
12517 */
12518 status = (work_ha_copy &
12519 (HA_RXMASK << (4*LPFC_ELS_RING)));
12520 status >>= (4*LPFC_ELS_RING);
12521 if (status & HA_RXMASK) {
12522 spin_lock_irqsave(&phba->hbalock, iflag);
12523 if (lpfc_readl(phba->HCregaddr, &control))
12524 goto unplug_error;
12526 lpfc_debugfs_slow_ring_trc(phba,
12527 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12528 control, status,
12529 (uint32_t)phba->sli.slistat.sli_intr);
12531 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12532 lpfc_debugfs_slow_ring_trc(phba,
12533 "ISR Disable ring:"
12534 "pwork:x%x hawork:x%x wait:x%x",
12535 phba->work_ha, work_ha_copy,
12536 (uint32_t)((unsigned long)
12537 &phba->work_waitq));
12539 control &=
12540 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12541 writel(control, phba->HCregaddr);
12542 readl(phba->HCregaddr); /* flush */
12543 }
12544 else {
12545 lpfc_debugfs_slow_ring_trc(phba,
12546 "ISR slow ring: pwork:"
12547 "x%x hawork:x%x wait:x%x",
12548 phba->work_ha, work_ha_copy,
12549 (uint32_t)((unsigned long)
12550 &phba->work_waitq));
12551 }
12552 spin_unlock_irqrestore(&phba->hbalock, iflag);
12553 }
12554 }
12555 spin_lock_irqsave(&phba->hbalock, iflag);
12556 if (work_ha_copy & HA_ERATT) {
12557 if (lpfc_sli_read_hs(phba))
12558 goto unplug_error;
12559 /*
12560 * Check if a deferred error condition
12561 * is active
12562 */
12563 if ((HS_FFER1 & phba->work_hs) &&
12564 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12565 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12566 phba->work_hs)) {
12567 phba->hba_flag |= DEFER_ERATT;
12568 /* Clear all interrupt enable conditions */
12569 writel(0, phba->HCregaddr);
12570 readl(phba->HCregaddr);
12571 }
12572 }
12574 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12575 pmb = phba->sli.mbox_active;
12576 pmbox = &pmb->u.mb;
12577 mbox = phba->mbox;
12578 vport = pmb->vport;
12580 /* First check out the status word */
12581 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12582 if (pmbox->mbxOwner != OWN_HOST) {
12583 spin_unlock_irqrestore(&phba->hbalock, iflag);
12585 * Stray Mailbox Interrupt, mbxCommand <cmd>
12586 * mbxStatus <status>
12588 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12589 LOG_SLI,
12590 "(%d):0304 Stray Mailbox "
12591 "Interrupt mbxCommand x%x "
12592 "mbxStatus x%x\n",
12593 (vport ? vport->vpi : 0),
12594 pmbox->mbxCommand,
12595 pmbox->mbxStatus);
12596 /* clear mailbox attention bit */
12597 work_ha_copy &= ~HA_MBATT;
12598 } else {
12599 phba->sli.mbox_active = NULL;
12600 spin_unlock_irqrestore(&phba->hbalock, iflag);
12601 phba->last_completion_time = jiffies;
12602 del_timer(&phba->sli.mbox_tmo);
12603 if (pmb->mbox_cmpl) {
12604 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12605 MAILBOX_CMD_SIZE);
12606 if (pmb->out_ext_byte_len &&
12607 pmb->ctx_buf)
12608 lpfc_sli_pcimem_bcopy(
12609 phba->mbox_ext,
12610 pmb->ctx_buf,
12611 pmb->out_ext_byte_len);
12612 }
12613 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12614 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12616 lpfc_debugfs_disc_trc(vport,
12617 LPFC_DISC_TRC_MBOX_VPORT,
12618 "MBOX dflt rpi: : "
12619 "status:x%x rpi:x%x",
12620 (uint32_t)pmbox->mbxStatus,
12621 pmbox->un.varWords[0], 0);
12623 if (!pmbox->mbxStatus) {
12624 mp = (struct lpfc_dmabuf *)
12625 (pmb->ctx_buf);
12626 ndlp = (struct lpfc_nodelist *)
12627 pmb->ctx_ndlp;
12629 /* Reg_LOGIN of dflt RPI was
12630 * successful. Now let's get
12631 * rid of the RPI using the
12632 * same mbox buffer.
12633 */
12634 lpfc_unreg_login(phba,
12635 vport->vpi,
12636 pmbox->un.varWords[0],
12637 pmb);
12638 pmb->mbox_cmpl =
12639 lpfc_mbx_cmpl_dflt_rpi;
12640 pmb->ctx_buf = mp;
12641 pmb->ctx_ndlp = ndlp;
12642 pmb->vport = vport;
12643 rc = lpfc_sli_issue_mbox(phba,
12644 pmb,
12645 MBX_NOWAIT);
12646 if (rc != MBX_BUSY)
12647 lpfc_printf_log(phba,
12648 KERN_ERR,
12649 LOG_MBOX | LOG_SLI,
12650 "0350 rc should have "
12651 "been MBX_BUSY\n");
12652 if (rc != MBX_NOT_FINISHED)
12653 goto send_current_mbox;
12654 }
12655 }
12656 spin_lock_irqsave(
12657 &phba->pport->work_port_lock,
12658 iflag);
12659 phba->pport->work_port_events &=
12660 ~WORKER_MBOX_TMO;
12661 spin_unlock_irqrestore(
12662 &phba->pport->work_port_lock,
12663 iflag);
12664 lpfc_mbox_cmpl_put(phba, pmb);
12665 }
12666 } else
12667 spin_unlock_irqrestore(&phba->hbalock, iflag);
12669 if ((work_ha_copy & HA_MBATT) &&
12670 (phba->sli.mbox_active == NULL)) {
12671 send_current_mbox:
12672 /* Process next mailbox command if there is one */
12673 do {
12674 rc = lpfc_sli_issue_mbox(phba, NULL,
12675 MBX_NOWAIT);
12676 } while (rc == MBX_NOT_FINISHED);
12677 if (rc != MBX_SUCCESS)
12678 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12679 LOG_SLI, "0349 rc should be "
12680 "MBX_SUCCESS\n");
12681 }
12683 spin_lock_irqsave(&phba->hbalock, iflag);
12684 phba->work_ha |= work_ha_copy;
12685 spin_unlock_irqrestore(&phba->hbalock, iflag);
12686 lpfc_worker_wake_up(phba);
12687 }
12688 return IRQ_HANDLED;
12689 unplug_error:
12690 spin_unlock_irqrestore(&phba->hbalock, iflag);
12691 return IRQ_HANDLED;
12693 } /* lpfc_sli_sp_intr_handler */
12696 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12697 * @irq: Interrupt number.
12698 * @dev_id: The device context pointer.
12700 * This function is directly called from the PCI layer as an interrupt
12701 * service routine when device with SLI-3 interface spec is enabled with
12702 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12703 * ring event in the HBA. However, when the device is enabled with either
12704 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12705 * device-level interrupt handler. When the PCI slot is in error recovery
12706 * or the HBA is undergoing initialization, the interrupt handler will not
12707 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12708 * the interrupt context. This function is called without any lock held.
12709 * It gets the hbalock to access and update SLI data structures.
12711 * This function returns IRQ_HANDLED when interrupt is handled else it
12712 * returns IRQ_NONE.
12714 irqreturn_t
12715 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12716 {
12717 struct lpfc_hba *phba;
12718 uint32_t ha_copy;
12719 unsigned long status;
12720 unsigned long iflag;
12721 struct lpfc_sli_ring *pring;
12723 /* Get the driver's phba structure from the dev_id and
12724 * assume the HBA is not interrupting.
12725 */
12726 phba = (struct lpfc_hba *) dev_id;
12728 if (unlikely(!phba))
12729 return IRQ_NONE;
12731 /*
12732 * Stuff needs to be attended to when this function is invoked as an
12733 * individual interrupt handler in MSI-X multi-message interrupt mode
12734 */
12735 if (phba->intr_type == MSIX) {
12736 /* Check device state for handling interrupt */
12737 if (lpfc_intr_state_check(phba))
12738 return IRQ_NONE;
12739 /* Need to read HA REG for FCP ring and other ring events */
12740 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12741 return IRQ_HANDLED;
12742 /* Clear up only attention source related to fast-path */
12743 spin_lock_irqsave(&phba->hbalock, iflag);
12744 /*
12745 * If there is deferred error attention, do not check for
12746 * any interrupt.
12747 */
12748 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12749 spin_unlock_irqrestore(&phba->hbalock, iflag);
12750 return IRQ_NONE;
12751 }
12752 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12753 phba->HAregaddr);
12754 readl(phba->HAregaddr); /* flush */
12755 spin_unlock_irqrestore(&phba->hbalock, iflag);
12756 } else
12757 ha_copy = phba->ha_copy;
12759 /*
12760 * Process all events on FCP ring. Take the optimized path for FCP IO.
12761 */
12762 ha_copy &= ~(phba->work_ha_mask);
12764 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12765 status >>= (4*LPFC_FCP_RING);
12766 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12767 if (status & HA_RXMASK)
12768 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12770 if (phba->cfg_multi_ring_support == 2) {
12772 * Process all events on extra ring. Take the optimized path
12773 * for extra ring IO.
12775 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12776 status >>= (4*LPFC_EXTRA_RING);
12777 if (status & HA_RXMASK) {
12778 lpfc_sli_handle_fast_ring_event(phba,
12779 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12780 status);
12781 }
12782 }
12783 return IRQ_HANDLED;
12784 } /* lpfc_sli_fp_intr_handler */
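/*
 * The HA register dedicates a 4-bit nibble to each ring, which is why both
 * handlers recover a ring's attention bits with a shift of 4*ring followed
 * by an HA_RXMASK mask. Hypothetical helper showing the arithmetic:
 */
#if 0
static unsigned long example_ring_status(uint32_t ha_copy, int ring)
{
	return (ha_copy >> (4 * ring)) & HA_RXMASK;
}
#endif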
12787 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12788 * @irq: Interrupt number.
12789 * @dev_id: The device context pointer.
12791 * This function is the HBA device-level interrupt handler to device with
12792 * SLI-3 interface spec, called from the PCI layer when either MSI or
12793 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12794 * requires driver attention. This function invokes the slow-path interrupt
12795 * attention handling function and fast-path interrupt attention handling
12796 * function in turn to process the relevant HBA attention events. This
12797 * function is called without any lock held. It gets the hbalock to access
12798 * and update SLI data structures.
12800 * This function returns IRQ_HANDLED when interrupt is handled, else it
12801 * returns IRQ_NONE.
12803 irqreturn_t
12804 lpfc_sli_intr_handler(int irq, void *dev_id)
12805 {
12806 struct lpfc_hba *phba;
12807 irqreturn_t sp_irq_rc, fp_irq_rc;
12808 unsigned long status1, status2;
12809 uint32_t hc_copy;
12811 /*
12812 * Get the driver's phba structure from the dev_id and
12813 * assume the HBA is not interrupting.
12814 */
12815 phba = (struct lpfc_hba *) dev_id;
12817 if (unlikely(!phba))
12818 return IRQ_NONE;
12820 /* Check device state for handling interrupt */
12821 if (lpfc_intr_state_check(phba))
12822 return IRQ_NONE;
12824 spin_lock(&phba->hbalock);
12825 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12826 spin_unlock(&phba->hbalock);
12827 return IRQ_HANDLED;
12828 }
12830 if (unlikely(!phba->ha_copy)) {
12831 spin_unlock(&phba->hbalock);
12832 return IRQ_NONE;
12833 } else if (phba->ha_copy & HA_ERATT) {
12834 if (phba->hba_flag & HBA_ERATT_HANDLED)
12835 /* ERATT polling has handled ERATT */
12836 phba->ha_copy &= ~HA_ERATT;
12837 else
12838 /* Indicate interrupt handler handles ERATT */
12839 phba->hba_flag |= HBA_ERATT_HANDLED;
12840 }
12842 /*
12843 * If there is deferred error attention, do not check for any interrupt.
12844 */
12845 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12846 spin_unlock(&phba->hbalock);
12847 return IRQ_NONE;
12848 }
12850 /* Clear attention sources except link and error attentions */
12851 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12852 spin_unlock(&phba->hbalock);
12853 return IRQ_HANDLED;
12854 }
12855 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12856 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12857 phba->HCregaddr);
12858 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12859 writel(hc_copy, phba->HCregaddr);
12860 readl(phba->HAregaddr); /* flush */
12861 spin_unlock(&phba->hbalock);
12863 /*
12864 * Invokes slow-path host attention interrupt handling as appropriate.
12865 */
12867 /* status of events with mailbox and link attention */
12868 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12870 /* status of events with ELS ring */
12871 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12872 status2 >>= (4*LPFC_ELS_RING);
12874 if (status1 || (status2 & HA_RXMASK))
12875 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12877 sp_irq_rc = IRQ_NONE;
12879 /*
12880 * Invoke fast-path host attention interrupt handling as appropriate.
12881 */
12883 /* status of events with FCP ring */
12884 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12885 status1 >>= (4*LPFC_FCP_RING);
12887 /* status of events with extra ring */
12888 if (phba->cfg_multi_ring_support == 2) {
12889 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12890 status2 >>= (4*LPFC_EXTRA_RING);
12891 } else
12892 status2 = 0;
12894 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12895 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12897 fp_irq_rc = IRQ_NONE;
12899 /* Return device-level interrupt handling status */
12900 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12901 } /* lpfc_sli_intr_handler */
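/*
 * Hookup sketch (illustrative only): in MSI or pin-IRQ mode this
 * device-level handler is what gets registered with the kernel; the real
 * registration is done in lpfc_init.c.
 */
#if 0
	retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (retval)
		return retval;
#endif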
12904 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12905 * @phba: pointer to lpfc hba data structure.
12907 * This routine is invoked by the worker thread to process all the pending
12908 * SLI4 els abort xri events.
12910 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12911 {
12912 struct lpfc_cq_event *cq_event;
12914 /* First, declare the els xri abort event has been handled */
12915 spin_lock_irq(&phba->hbalock);
12916 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12917 spin_unlock_irq(&phba->hbalock);
12918 /* Now, handle all the els xri abort events */
12919 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12920 /* Get the first event from the head of the event queue */
12921 spin_lock_irq(&phba->hbalock);
12922 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12923 cq_event, struct lpfc_cq_event, list);
12924 spin_unlock_irq(&phba->hbalock);
12925 /* Notify aborted XRI for ELS work queue */
12926 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12927 /* Free the event processed back to the free pool */
12928 lpfc_sli4_cq_event_release(phba, cq_event);
12929 }
12930 }
12933 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12934 * @phba: pointer to lpfc hba data structure
12935 * @pIocbIn: pointer to the rspiocbq
12936 * @pIocbOut: pointer to the cmdiocbq
12937 * @wcqe: pointer to the complete wcqe
12939 * This routine transfers the fields of a command iocbq to a response iocbq
12940 * by copying all the IOCB fields from the command iocbq and transferring the
12941 * completion status information from the completed wcqe.
12944 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12945 struct lpfc_iocbq *pIocbIn,
12946 struct lpfc_iocbq *pIocbOut,
12947 struct lpfc_wcqe_complete *wcqe)
12948 {
12949 int numBdes, i;
12950 unsigned long iflags;
12951 uint32_t status, max_response;
12952 struct lpfc_dmabuf *dmabuf;
12953 struct ulp_bde64 *bpl, bde;
12954 size_t offset = offsetof(struct lpfc_iocbq, iocb);
12956 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12957 sizeof(struct lpfc_iocbq) - offset);
12958 /* Map WCQE parameters into irspiocb parameters */
12959 status = bf_get(lpfc_wcqe_c_status, wcqe);
12960 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12961 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12962 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12963 pIocbIn->iocb.un.fcpi.fcpi_parm =
12964 pIocbOut->iocb.un.fcpi.fcpi_parm -
12965 wcqe->total_data_placed;
12966 else
12967 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12968 else {
12969 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12970 switch (pIocbOut->iocb.ulpCommand) {
12971 case CMD_ELS_REQUEST64_CR:
12972 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12973 bpl = (struct ulp_bde64 *)dmabuf->virt;
12974 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12975 max_response = bde.tus.f.bdeSize;
12976 break;
12977 case CMD_GEN_REQUEST64_CR:
12978 max_response = 0;
12979 if (!pIocbOut->context3)
12980 break;
12981 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12982 sizeof(struct ulp_bde64);
12983 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12984 bpl = (struct ulp_bde64 *)dmabuf->virt;
12985 for (i = 0; i < numBdes; i++) {
12986 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12987 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12988 max_response += bde.tus.f.bdeSize;
12989 }
12990 break;
12991 default:
12992 max_response = wcqe->total_data_placed;
12993 break;
12994 }
12995 if (max_response < wcqe->total_data_placed)
12996 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12997 else
12998 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12999 wcqe->total_data_placed;
13000 }
13002 /* Convert BG errors for completion status */
13003 if (status == CQE_STATUS_DI_ERROR) {
13004 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13006 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13007 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13008 else
13009 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13011 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13012 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13013 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13014 BGS_GUARD_ERR_MASK;
13015 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13016 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13017 BGS_APPTAG_ERR_MASK;
13018 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13019 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13020 BGS_REFTAG_ERR_MASK;
13022 /* Check to see if there was any good data before the error */
13023 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13024 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13025 BGS_HI_WATER_MARK_PRESENT_MASK;
13026 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13027 wcqe->total_data_placed;
13028 }
13030 /*
13031 * Set ALL the error bits to indicate we don't know what
13032 * type of error it is.
13033 */
13034 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13035 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13036 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13037 BGS_GUARD_ERR_MASK);
13038 }
13040 /* Pick up HBA exchange busy condition */
13041 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13042 spin_lock_irqsave(&phba->hbalock, iflags);
13043 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13044 spin_unlock_irqrestore(&phba->hbalock, iflags);
13045 }
13046 }
13049 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13050 * @phba: Pointer to HBA context object.
13051 * @wcqe: Pointer to work-queue completion queue entry.
13053 * This routine handles an ELS work-queue completion event and constructs
13054 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13055 * discovery engine to handle.
13057 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13059 static struct lpfc_iocbq *
13060 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13061 struct lpfc_iocbq *irspiocbq)
13062 {
13063 struct lpfc_sli_ring *pring;
13064 struct lpfc_iocbq *cmdiocbq;
13065 struct lpfc_wcqe_complete *wcqe;
13066 unsigned long iflags;
13068 pring = lpfc_phba_elsring(phba);
13069 if (unlikely(!pring))
13070 return NULL;
13072 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13073 spin_lock_irqsave(&pring->ring_lock, iflags);
13074 pring->stats.iocb_event++;
13075 /* Look up the ELS command IOCB and create pseudo response IOCB */
13076 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13077 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13078 if (unlikely(!cmdiocbq)) {
13079 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13080 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13081 "0386 ELS complete with no corresponding "
13082 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13083 wcqe->word0, wcqe->total_data_placed,
13084 wcqe->parameter, wcqe->word3);
13085 lpfc_sli_release_iocbq(phba, irspiocbq);
13086 return NULL;
13087 }
13089 /* Put the iocb back on the txcmplq */
13090 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13091 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13093 /* Fake the irspiocbq and copy necessary response information */
13094 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13096 return irspiocbq;
13097 }
13099 inline struct lpfc_cq_event *
13100 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13101 {
13102 struct lpfc_cq_event *cq_event;
13104 /* Allocate a new internal CQ_EVENT entry */
13105 cq_event = lpfc_sli4_cq_event_alloc(phba);
13106 if (!cq_event) {
13107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13108 "0602 Failed to alloc CQ_EVENT entry\n");
13109 return NULL;
13110 }
13112 /* Move the CQE into the event */
13113 memcpy(&cq_event->cqe, entry, size);
13114 return cq_event;
13115 }
13118 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13119 * @phba: Pointer to HBA context object.
13120 * @cqe: Pointer to mailbox completion queue entry.
13122 * This routine processes a mailbox completion queue entry with an
13123 * asynchronous event.
13125 * Return: true if work posted to worker thread, otherwise false.
13128 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13129 {
13130 struct lpfc_cq_event *cq_event;
13131 unsigned long iflags;
13133 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13134 "0392 Async Event: word0:x%x, word1:x%x, "
13135 "word2:x%x, word3:x%x\n", mcqe->word0,
13136 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13138 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13139 if (!cq_event)
13140 return false;
13141 spin_lock_irqsave(&phba->hbalock, iflags);
13142 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13143 /* Set the async event flag */
13144 phba->hba_flag |= ASYNC_EVENT;
13145 spin_unlock_irqrestore(&phba->hbalock, iflags);
13146 return true;
13147 }
13151 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13152 * @phba: Pointer to HBA context object.
13153 * @cqe: Pointer to mailbox completion queue entry.
13155 * This routine processes a mailbox completion queue entry with a mailbox
13156 * completion event.
13158 * Return: true if work posted to worker thread, otherwise false.
13161 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13162 {
13163 uint32_t mcqe_status;
13164 MAILBOX_t *mbox, *pmbox;
13165 struct lpfc_mqe *mqe;
13166 struct lpfc_vport *vport;
13167 struct lpfc_nodelist *ndlp;
13168 struct lpfc_dmabuf *mp;
13169 unsigned long iflags;
13170 LPFC_MBOXQ_t *pmb;
13171 bool workposted = false;
13172 int rc;
13174 /* If not a mailbox complete MCQE, out by checking mailbox consume */
13175 if (!bf_get(lpfc_trailer_completed, mcqe))
13176 goto out_no_mqe_complete;
13178 /* Get the reference to the active mbox command */
13179 spin_lock_irqsave(&phba->hbalock, iflags);
13180 pmb = phba->sli.mbox_active;
13181 if (unlikely(!pmb)) {
13182 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13183 "1832 No pending MBOX command to handle\n");
13184 spin_unlock_irqrestore(&phba->hbalock, iflags);
13185 goto out_no_mqe_complete;
13187 spin_unlock_irqrestore(&phba->hbalock, iflags);
13188 mqe = &pmb->u.mqe;
13189 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13190 mbox = phba->mbox;
13191 vport = pmb->vport;
13193 /* Reset heartbeat timer */
13194 phba->last_completion_time = jiffies;
13195 del_timer(&phba->sli.mbox_tmo);
13197 /* Move mbox data to caller's mailbox region, do endian swapping */
13198 if (pmb->mbox_cmpl && mbox)
13199 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13202 * For mcqe errors, conditionally move a modified error code to
13203 * the mbox so that the error will not be missed.
13205 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13206 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13207 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13208 bf_set(lpfc_mqe_status, mqe,
13209 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13210 }
13211 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13212 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13213 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13214 "MBOX dflt rpi: status:x%x rpi:x%x",
13216 pmbox->un.varWords[0], 0);
13217 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13218 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13219 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13220 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13221 * rid of the RPI using the same mbox buffer.
13222 */
13223 lpfc_unreg_login(phba, vport->vpi,
13224 pmbox->un.varWords[0], pmb);
13225 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13226 pmb->ctx_buf = mp;
13227 pmb->ctx_ndlp = ndlp;
13228 pmb->vport = vport;
13229 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13230 if (rc != MBX_BUSY)
13231 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13232 LOG_SLI, "0385 rc should "
13233 "have been MBX_BUSY\n");
13234 if (rc != MBX_NOT_FINISHED)
13235 goto send_current_mbox;
13236 }
13238 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13239 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13240 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13242 /* There is mailbox completion work to do */
13243 spin_lock_irqsave(&phba->hbalock, iflags);
13244 __lpfc_mbox_cmpl_put(phba, pmb);
13245 phba->work_ha |= HA_MBATT;
13246 spin_unlock_irqrestore(&phba->hbalock, iflags);
13247 workposted = true;
13249 send_current_mbox:
13250 spin_lock_irqsave(&phba->hbalock, iflags);
13251 /* Release the mailbox command posting token */
13252 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13253 /* Setting active mailbox pointer need to be in sync to flag clear */
13254 phba->sli.mbox_active = NULL;
13255 spin_unlock_irqrestore(&phba->hbalock, iflags);
13256 /* Wake up worker thread to post the next pending mailbox command */
13257 lpfc_worker_wake_up(phba);
13258 out_no_mqe_complete:
13259 if (bf_get(lpfc_trailer_consumed, mcqe))
13260 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13261 return workposted;
13262 }
13265 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13266 * @phba: Pointer to HBA context object.
13267 * @cqe: Pointer to mailbox completion queue entry.
13269 * This routine processes a mailbox completion queue entry; it invokes the
13270 * proper mailbox completion handling or asynchronous event handling routine
13271 * according to the MCQE's async bit.
13273 * Return: true if work posted to worker thread, otherwise false.
13276 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13277 struct lpfc_cqe *cqe)
13278 {
13279 struct lpfc_mcqe mcqe;
13280 bool workposted;
13282 cq->CQ_mbox++;
13284 /* Copy the mailbox MCQE and convert endian order as needed */
13285 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13287 /* Invoke the proper event handling routine */
13288 if (!bf_get(lpfc_trailer_async, &mcqe))
13289 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13291 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13293 return workposted;
13294 }
13296 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13297 * @phba: Pointer to HBA context object.
13298 * @cq: Pointer to associated CQ
13299 * @wcqe: Pointer to work-queue completion queue entry.
13301 * This routine handles an ELS work-queue completion event.
13303 * Return: true if work posted to worker thread, otherwise false.
13306 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13307 struct lpfc_wcqe_complete *wcqe)
13308 {
13309 struct lpfc_iocbq *irspiocbq;
13310 unsigned long iflags;
13311 struct lpfc_sli_ring *pring = cq->pring;
13312 int txq_cnt = 0;
13313 int txcmplq_cnt = 0;
13314 int fcp_txcmplq_cnt = 0;
13316 /* Check for response status */
13317 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13318 /* Log the error status */
13319 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13320 "0357 ELS CQE error: status=x%x: "
13321 "CQE: %08x %08x %08x %08x\n",
13322 bf_get(lpfc_wcqe_c_status, wcqe),
13323 wcqe->word0, wcqe->total_data_placed,
13324 wcqe->parameter, wcqe->word3);
13327 /* Get an irspiocbq for later ELS response processing use */
13328 irspiocbq = lpfc_sli_get_iocbq(phba);
13329 if (!irspiocbq) {
13330 if (!list_empty(&pring->txq))
13331 txq_cnt++;
13332 if (!list_empty(&pring->txcmplq))
13333 txcmplq_cnt++;
13334 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13335 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13336 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13337 txq_cnt, phba->iocb_cnt,
13338 fcp_txcmplq_cnt,
13339 txcmplq_cnt);
13340 return false;
13341 }
13343 /* Save off the slow-path queue event for work thread to process */
13344 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13345 spin_lock_irqsave(&phba->hbalock, iflags);
13346 list_add_tail(&irspiocbq->cq_event.list,
13347 &phba->sli4_hba.sp_queue_event);
13348 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13349 spin_unlock_irqrestore(&phba->hbalock, iflags);
13351 return true;
13352 }
13355 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13356 * @phba: Pointer to HBA context object.
13357 * @wcqe: Pointer to work-queue completion queue entry.
13359 * This routine handles a slow-path WQ entry consumed event by invoking the
13360 * proper WQ release routine to the slow-path WQ.
13363 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13364 struct lpfc_wcqe_release *wcqe)
13365 {
13366 /* sanity check on queue memory */
13367 if (unlikely(!phba->sli4_hba.els_wq))
13368 return;
13369 /* Check for the slow-path ELS work queue */
13370 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13371 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13372 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13373 else
13374 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13375 "2579 Slow-path wqe consume event carries "
13376 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13377 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13378 phba->sli4_hba.els_wq->queue_id);
13379 }
13382 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13383 * @phba: Pointer to HBA context object.
13384 * @cq: Pointer to a WQ completion queue.
13385 * @wcqe: Pointer to work-queue completion queue entry.
13387 * This routine handles an XRI abort event.
13389 * Return: true if work posted to worker thread, otherwise false.
13392 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13393 struct lpfc_queue *cq,
13394 struct sli4_wcqe_xri_aborted *wcqe)
13395 {
13396 bool workposted = false;
13397 struct lpfc_cq_event *cq_event;
13398 unsigned long iflags;
13400 switch (cq->subtype) {
13401 case LPFC_FCP:
13402 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
13403 workposted = false;
13404 break;
13405 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13406 case LPFC_ELS:
13407 cq_event = lpfc_cq_event_setup(
13408 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13409 if (!cq_event)
13410 return false;
13411 cq_event->hdwq = cq->hdwq;
13412 spin_lock_irqsave(&phba->hbalock, iflags);
13413 list_add_tail(&cq_event->list,
13414 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13415 /* Set the els xri abort event flag */
13416 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13417 spin_unlock_irqrestore(&phba->hbalock, iflags);
13418 workposted = true;
13419 break;
13420 case LPFC_NVME:
13421 /* Notify aborted XRI for NVME work queue */
13422 if (phba->nvmet_support)
13423 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13424 else
13425 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
13427 workposted = false;
13428 break;
13429 default:
13430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13431 "0603 Invalid CQ subtype %d: "
13432 "%08x %08x %08x %08x\n",
13433 cq->subtype, wcqe->word0, wcqe->parameter,
13434 wcqe->word2, wcqe->word3);
13435 workposted = false;
13436 break;
13437 }
13438 return workposted;
13439 }
13441 #define FC_RCTL_MDS_DIAGS 0xF4
13444 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13445 * @phba: Pointer to HBA context object.
13446 * @rcqe: Pointer to receive-queue completion queue entry.
13448 * This routine processes a receive-queue completion queue entry.
13450 * Return: true if work posted to worker thread, otherwise false.
13453 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13454 {
13455 bool workposted = false;
13456 struct fc_frame_header *fc_hdr;
13457 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13458 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13459 struct lpfc_nvmet_tgtport *tgtp;
13460 struct hbq_dmabuf *dma_buf;
13461 uint32_t status, rq_id;
13462 unsigned long iflags;
13464 /* sanity check on queue memory */
13465 if (unlikely(!hrq) || unlikely(!drq))
13466 return workposted;
13468 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13469 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13470 else
13471 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13472 if (rq_id != hrq->queue_id)
13473 goto out;
13475 status = bf_get(lpfc_rcqe_status, rcqe);
13476 switch (status) {
13477 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13479 "2537 Receive Frame Truncated!!\n");
13480 /* fall through */
13481 case FC_STATUS_RQ_SUCCESS:
13482 spin_lock_irqsave(&phba->hbalock, iflags);
13483 lpfc_sli4_rq_release(hrq, drq);
13484 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13485 if (!dma_buf) {
13486 hrq->RQ_no_buf_found++;
13487 spin_unlock_irqrestore(&phba->hbalock, iflags);
13488 goto out;
13489 }
13491 hrq->RQ_buf_posted--;
13492 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13494 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13496 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13497 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13498 spin_unlock_irqrestore(&phba->hbalock, iflags);
13499 /* Handle MDS Loopback frames */
13500 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13501 break;
13502 }
13504 /* save off the frame for the work thread to process */
13505 list_add_tail(&dma_buf->cq_event.list,
13506 &phba->sli4_hba.sp_queue_event);
13507 /* Frame received */
13508 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13509 spin_unlock_irqrestore(&phba->hbalock, iflags);
13510 workposted = true;
13511 break;
13512 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13513 if (phba->nvmet_support) {
13514 tgtp = phba->targetport->private;
13515 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13516 "6402 RQE Error x%x, posted %d err_cnt "
13518 status, hrq->RQ_buf_posted,
13519 hrq->RQ_no_posted_buf,
13520 atomic_read(&tgtp->rcv_fcp_cmd_in),
13521 atomic_read(&tgtp->rcv_fcp_cmd_out),
13522 atomic_read(&tgtp->xmt_fcp_release));
13526 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13527 hrq->RQ_no_posted_buf++;
13528 /* Post more buffers if possible */
13529 spin_lock_irqsave(&phba->hbalock, iflags);
13530 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13531 spin_unlock_irqrestore(&phba->hbalock, iflags);
13532 workposted = true;
13533 break;
13534 }
13535 out:
13536 return workposted;
13537 }
13540 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13541 * @phba: Pointer to HBA context object.
13542 * @cq: Pointer to the completion queue.
13543 * @cqe: Pointer to a completion queue entry.
13545 * This routine processes a slow-path work-queue or receive-queue completion
13546 * queue entry.
13548 * Return: true if work posted to worker thread, otherwise false.
13551 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13552 struct lpfc_cqe *cqe)
13553 {
13554 struct lpfc_cqe cqevt;
13555 bool workposted = false;
13557 /* Copy the work queue CQE and convert endian order if needed */
13558 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13560 /* Check and process for different type of WCQE and dispatch */
13561 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13562 case CQE_CODE_COMPL_WQE:
13563 /* Process the WQ/RQ complete event */
13564 phba->last_completion_time = jiffies;
13565 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13566 (struct lpfc_wcqe_complete *)&cqevt);
13567 break;
13568 case CQE_CODE_RELEASE_WQE:
13569 /* Process the WQ release event */
13570 lpfc_sli4_sp_handle_rel_wcqe(phba,
13571 (struct lpfc_wcqe_release *)&cqevt);
13572 break;
13573 case CQE_CODE_XRI_ABORTED:
13574 /* Process the WQ XRI abort event */
13575 phba->last_completion_time = jiffies;
13576 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13577 (struct sli4_wcqe_xri_aborted *)&cqevt);
13579 case CQE_CODE_RECEIVE:
13580 case CQE_CODE_RECEIVE_V1:
13581 /* Process the RQ event */
13582 phba->last_completion_time = jiffies;
13583 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13584 (struct lpfc_rcqe *)&cqevt);
13587 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13588 "0388 Not a valid WCQE code: x%x\n",
13589 bf_get(lpfc_cqe_code, &cqevt));
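
/*
 * lpfc_sli4_sp_handle_cqe dispatches on a stack copy of the CQE rather
 * than on the DMA-visible ring entry, so the hardware may overwrite the
 * ring slot while the driver parses a stable snapshot. A minimal sketch
 * of the same snapshot-then-parse idea follows; struct my_cqe and the
 * MY_CQE_CODE_* values are hypothetical stand-ins, not SLI-4 definitions.
 * Not built with the driver.
 */
#if 0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_cqe {
	__le32 word[4];
};

#define MY_CQE_CODE_SHIFT	24
#define MY_CQE_CODE_MASK	0xff

static u32 my_read_cqe_code(const volatile struct my_cqe *hw_cqe)
{
	struct my_cqe snap;

	/* copy out of the DMA ring first; parse only the snapshot */
	memcpy(&snap, (const void *)hw_cqe, sizeof(snap));
	return (le32_to_cpu(snap.word[0]) >> MY_CQE_CODE_SHIFT) &
		MY_CQE_CODE_MASK;	/* caller dispatches on this code */
}
#endif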
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It checks the MajorCode and MinorCode to determine whether this is
 * a completion event on a completion queue; if not, an error is logged and
 * the routine returns. Otherwise, it finds the corresponding completion
 * queue and schedules processing of all the entries on that completion
 * queue, after which the completion queue is rearmed.
 */
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns true if work was scheduled, false otherwise.
 */
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_cqe *), unsigned long *delay)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME)
		if (phba->ktime_on)
			cq->isr_timestamp = ktime_get_ns();
		else
			cq->isr_timestamp = 0;
#endif
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
						LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	cq->queue_claimed = 0;

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
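
/*
 * __lpfc_sli4_process_cq uses cmpxchg() on queue_claimed so that only one
 * context drains a given CQ at a time; a loser of the race simply rearms
 * and leaves. A stand-alone sketch of that claim/release discipline, with
 * hypothetical names (my_queue, my_drain_one); not built with the driver:
 */
#if 0
#include <linux/atomic.h>
#include <linux/types.h>

struct my_queue {
	u32 claimed;		/* 0 = free, 1 = owned by a processor */
};

static bool my_drain_one(struct my_queue *q)
{
	/* pop and handle one element; return false when the queue is empty */
	return false;
}

static void my_process_queue(struct my_queue *q)
{
	/* atomically claim the queue; back off if someone else owns it */
	if (cmpxchg(&q->claimed, 0, 1) != 0)
		return;

	while (my_drain_one(q))
		;

	/* release: a plain store is enough once draining is done */
	WRITE_ONCE(q->claimed, 0);
}
#endif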
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and generating yet another interrupt, the CQ handler should be
 * queued so that it is processed in a subsequent polling action. The
 * value of the delay indicates when to reschedule it.
 */
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_spwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
 * the worker thread
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the slow-path handler.
 */
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}
/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the slow-path handler.
 */
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}
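
/*
 * The two wrappers above recover the owning lpfc_queue from the work item
 * embedded in it: container_of() for a plain work_struct, and
 * to_delayed_work() first when the same handler is driven by a timer. A
 * minimal sketch of that pattern (struct my_ctx is hypothetical); kept
 * out of the build:
 */
#if 0
#include <linux/workqueue.h>

struct my_ctx {
	int id;
	struct work_struct work;
	struct delayed_work dwork;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* immediate invocation path */
	(void)ctx->id;
}

static void my_dly_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(to_delayed_work(work),
					  struct my_ctx, dwork);

	/* delayed invocation path: same object, different embedded member */
	(void)ctx->id;
}
#endif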
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 */
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		      IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}
	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 */
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 */
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf,
				cq->isr_timestamp);
			return false;
		}
drop:
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
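
/*
 * The NVMET receive path rebuilds the 24-bit F_CTL field from the three
 * bytes of the FC frame header and requires FIRST_SEQ, END_SEQ and SEQ_INIT
 * to all be set before treating the frame as a single-frame FCP command.
 * A self-contained sketch of that check (my_fcp_cmd_frame_ok is a
 * hypothetical name; the FC_FC_* masks and struct fc_frame_header come
 * from <scsi/fc/fc_fs.h>); not built with the driver:
 */
#if 0
#include <linux/types.h>
#include <scsi/fc/fc_fs.h>

static bool my_fcp_cmd_frame_ok(const struct fc_frame_header *fh)
{
	u32 fctl = fh->fh_f_ctl[0] << 16 |
		   fh->fh_f_ctl[1] << 8 |
		   fh->fh_f_ctl[2];
	u32 need = FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;

	/* the whole exchange must arrive in this one sequence/frame */
	if ((fctl & need) != need)
		return false;
	/* 0 is endian-neutral, so fh_seq_cnt can be tested unswapped */
	return fh->fh_seq_cnt == 0;
}
#endif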
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 */
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		if (cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the fast-path event queue.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It checks the MajorCode and MinorCode to determine whether this is
 * a completion event on a completion queue; if not, an error is logged and
 * the routine returns. Otherwise, it finds the corresponding completion
 * queue and schedules processing of all the entries on that completion
 * queue, after which the completion queue is rearmed.
 */
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast-path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and generating yet another interrupt, the CQ handler should be
 * queued so that it is processed in a subsequent polling action. The
 * value of the delay indicates when to reschedule it.
 */
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * the worker thread
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 */
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}
/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 */
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}
/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ to FCP CQ mapping is one-to-one, so the FCP EQ index is
 * equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 */
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = smp_processor_id();

	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    phba->cfg_irq_chann == 1 &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
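
/*
 * Each MSI-X vector above is registered with its own lpfc_hba_eq_hdl as
 * dev_id, so the handler can recover both the adapter and the EQ index
 * without any global lookup. A minimal sketch of that wiring follows;
 * my_hba, my_eq_hdl, my_isr and my_setup_vector are hypothetical names,
 * and the block is kept out of the build:
 */
#if 0
#include <linux/interrupt.h>

struct my_hba;

struct my_eq_hdl {
	int idx;		/* which EQ this vector serves */
	struct my_hba *hba;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_eq_hdl *hdl = dev_id;

	/* hdl->hba and hdl->idx identify adapter and EQ directly */
	(void)hdl;
	return IRQ_HANDLED;
}

static int my_setup_vector(struct my_hba *hba, struct my_eq_hdl *hdl,
			   int irq, int idx)
{
	hdl->idx = idx;
	hdl->hba = hba;
	/* one handle per vector: dev_id disambiguates inside the ISR */
	return request_irq(irq, my_isr, 0, "my-hba-eq", hdl);
}
#endif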
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler for a device with the
 * SLI-4 interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and the fast-path interrupt attention handling function
 * in turn to process the relevant HBA attention events. This function is
 * called without any lock held. It gets the hbalock to access and update
 * SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 */
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 */
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}
	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);
	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);
	kfree(queue);
}
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 */
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
	    (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now. If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
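
/*
 * lpfc_sli4_queue_alloc sizes the ring as ALIGN(entry_size * entry_count,
 * page) / page DMA pages and then threads entry pointers through each page.
 * Worked example of the arithmetic, assuming 4 KB pages: 256 entries of
 * 64 bytes = 16384 bytes -> ALIGN(16384, 4096) / 4096 = 4 pages. A tiny
 * restatement of the calculation (struct my_ring is hypothetical), kept
 * out of the build:
 */
#if 0
#include <linux/kernel.h>
#include <linux/types.h>

struct my_ring {
	u32 entry_size;
	u32 entry_count;
	u32 page_size;
	u32 page_count;
};

static void my_ring_pages(struct my_ring *r)
{
	/* round the byte footprint up to whole DMA pages */
	r->page_count = ALIGN(r->entry_size * r->entry_count,
			      r->page_size) / r->page_size;
}
#endif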
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}

	return NULL;
}
/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The
 * @startq value is the starting EQ index to change. The @numq value
 * specifies how many consecutive EQ indexes, starting at @startq, are to
 * be changed. This function is synchronous and waits for any mailbox
 * command to finish before returning.
 *
 * Failures (memory allocation or a failed MODIFY_EQ_DELAY mailbox command)
 * are logged; note that on a mailbox failure some EQs may already have had
 * their delay multiplier changed.
 */
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hdwq[qidx].hba_eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hdwq[qidx].hba_eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}
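
/*
 * The mailbox path converts the requested delay in microseconds into the
 * port's delay-multiplier encoding: dmult = usdelay * LPFC_DMULT_CONST /
 * LPFC_SEC_TO_USEC, decremented by one when nonzero and clamped to
 * LPFC_DMULT_MAX. A stand-alone restatement of that conversion follows
 * (my_eq_delay_to_dmult is a hypothetical name; the LPFC_* constants are
 * taken as given from the driver headers). Not built with the driver:
 */
#if 0
#include <linux/types.h>

static u32 my_eq_delay_to_dmult(u32 usdelay, u32 dmult_const,
				u32 sec_to_usec, u32 dmult_max)
{
	u32 dmult = (usdelay * dmult_const) / sec_to_usec;

	if (dmult)
		dmult--;		/* hardware encodes multiplier - 1 */
	if (dmult > dmult_max)
		dmult = dmult_max;	/* clamp to the register field */
	return dmult;
}
#endif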
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to set up the
 * event queue. This function is synchronous and waits for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 */
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
14761 memset(dmabuf->virt, 0, hw_page_size);
14762 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14763 putPaddrLow(dmabuf->phys);
14764 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14765 putPaddrHigh(dmabuf->phys);
14767 mbox->vport = phba->pport;
14768 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14769 mbox->ctx_buf = NULL;
14770 mbox->ctx_ndlp = NULL;
14771 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14772 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14773 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14774 if (shdr_status || shdr_add_status || rc) {
14775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14776 "2500 EQ_CREATE mailbox failed with "
14777 "status x%x add_status x%x, mbx status x%x\n",
14778 shdr_status, shdr_add_status, rc);
14781 eq->type = LPFC_EQ;
14782 eq->subtype = LPFC_NONE;
14783 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14784 if (eq->queue_id == 0xFFFF)
14786 eq->host_index = 0;
14787 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
14788 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
14790 mempool_free(mbox, phba->mbox_mem_pool);
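
/*
 * Every queue-create mailbox in this file follows the same verification
 * pattern: issue with MBX_POLL, then fold the generic return code together
 * with the status and add_status words of the embedded cfg_shdr. A compact
 * sketch of that check (my_mbx_status is a hypothetical helper); kept out
 * of the build:
 */
#if 0
#include <linux/errno.h>
#include <linux/types.h>

static int my_mbx_status(int rc, u32 shdr_status, u32 shdr_add_status)
{
	/* any nonzero field means the command did not take effect */
	if (rc || shdr_status || shdr_add_status)
		return -ENXIO;
	return 0;
}
#endif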
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * This function will send the CQ_CREATE mailbox command to the HBA to set up
 * the completion queue. This function is synchronous and waits for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 */
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* fall through */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind completion queues to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The
 * @hdwq EQs are used to indicate which event queues to bind these completion
 * queues to. This function will send the CREATE_CQ_SET mailbox command to
 * the HBA to set up the completion queue set. This function is synchronous
 * and waits for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 */
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}
		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides fallback (fb) functionality when the mq_create_ext
 * command fails on older FW generations. Its purpose is identical to
 * mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 */
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to set up the
 * mailbox queue. This function is synchronous and waits for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 */
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
15358 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15359 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15360 &mq_create_ext->u.response);
15361 if (rc != MBX_SUCCESS) {
15362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15363 "2795 MQ_CREATE_EXT failed with "
15364 "status x%x. Failback to MQ_CREATE.\n",
15366 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15367 mq_create = &mbox->u.mqe.un.mq_create;
15368 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15369 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15370 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15371 &mq_create->u.response);
15374 /* The IOCTL status is embedded in the mailbox subheader. */
15375 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15376 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15377 if (shdr_status || shdr_add_status || rc) {
15378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15379 "2502 MQ_CREATE mailbox failed with "
15380 "status x%x add_status x%x, mbx status x%x\n",
15381 shdr_status, shdr_add_status, rc);
15385 if (mq->queue_id == 0xFFFF) {
15389 mq->type = LPFC_MQ;
15390 mq->assoc_qid = cq->queue_id;
15391 mq->subtype = subtype;
15392 mq->host_index = 0;
15395 /* link the mq onto the parent cq child list */
15396 list_add_tail(&mq->list, &cq->child_list);
15398 mempool_free(mbox, phba->mbox_mem_pool);
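/*
 * A compiled-out sketch (not reached by any driver path) of the embedded
 * SLI4 config-mailbox pattern the create/destroy routines in this file
 * all follow: allocate a mailbox from the mempool, size the embedded
 * payload without the common config header, issue it by polling, then
 * decode the two cfg_shdr status words. The MQ_CREATE opcode and request
 * layout are used here only as a stand-in; each command exposes its own
 * header.cfg_shdr the same way.
 */
#if 0
static int lpfc_sli4_cfg_mbox_sketch(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status, length;
	int rc, status = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Embedded payload length never counts the common config header. */
	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE, length,
			 LPFC_SLI4_MBX_EMBED);
	/* ... command-specific bf_set() calls go here ... */
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc)
		status = -ENXIO;
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
#endif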
15403 * lpfc_wq_create - Create a Work Queue on the HBA
15404 * @phba: HBA structure that indicates port to create a queue on.
15405 * @wq: The queue structure to use to create the work queue.
15406 * @cq: The completion queue to bind this work queue to.
15407 * @subtype: The subtype of the work queue indicating its functionality.
15409 * This function creates a work queue, as detailed in @wq, on a port described
15410 * by @phba, by sending a WQ_CREATE mailbox command to the HBA.
15412 * The @phba struct is used to send mailbox commands to the HBA. The @wq struct
15413 * is used to get the entry count and entry size that are necessary to
15414 * determine the number of pages to allocate and use for this queue. The @cq
15415 * is used to indicate which completion queue to bind this work queue to. This
15416 * function will send the WQ_CREATE mailbox command to the HBA to set up the
15417 * work queue. This function is synchronous; it waits for the mailbox
15418 * command to finish before returning.
15420 * On success this function will return a zero. If unable to allocate enough
15421 * memory this function will return -ENOMEM. If the queue create mailbox command
15422 * fails this function will return -ENXIO.
15425 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15426 struct lpfc_queue *cq, uint32_t subtype)
15428 struct lpfc_mbx_wq_create *wq_create;
15429 struct lpfc_dmabuf *dmabuf;
15430 LPFC_MBOXQ_t *mbox;
15431 int rc, length, status = 0;
15432 uint32_t shdr_status, shdr_add_status;
15433 union lpfc_sli4_cfg_shdr *shdr;
15434 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15435 struct dma_address *page;
15436 void __iomem *bar_memmap_p;
15437 uint32_t db_offset;
15438 uint16_t pci_barset;
15439 uint8_t dpp_barset;
15440 uint32_t dpp_offset;
15441 unsigned long pg_addr;
15442 uint8_t wq_create_version;
15444 /* sanity check on queue memory */
15447 if (!phba->sli4_hba.pc_sli4_params.supported)
15448 hw_page_size = wq->page_size;
15450 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15453 length = (sizeof(struct lpfc_mbx_wq_create) -
15454 sizeof(struct lpfc_sli4_cfg_mhdr));
15455 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15456 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15457 length, LPFC_SLI4_MBX_EMBED);
15458 wq_create = &mbox->u.mqe.un.wq_create;
15459 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15460 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15461 wq->page_count);
15462 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15463 cq->queue_id);
15465 /* wqv is the earliest version supported, NOT the latest */
15466 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15467 phba->sli4_hba.pc_sli4_params.wqv);
15469 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15470 (wq->page_size > SLI4_PAGE_SIZE))
15471 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15473 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15476 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15477 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15479 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15481 switch (wq_create_version) {
15482 case LPFC_Q_CREATE_VERSION_1:
15483 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15484 wq->entry_count);
15485 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15486 LPFC_Q_CREATE_VERSION_1);
15488 switch (wq->entry_size) {
15489 default:
15490 case 64:
15491 bf_set(lpfc_mbx_wq_create_wqe_size,
15492 &wq_create->u.request_1,
15493 LPFC_WQ_WQE_SIZE_64);
15494 break;
15495 case 128:
15496 bf_set(lpfc_mbx_wq_create_wqe_size,
15497 &wq_create->u.request_1,
15498 LPFC_WQ_WQE_SIZE_128);
15499 break;
15500 }
15501 /* Request DPP by default */
15502 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15503 bf_set(lpfc_mbx_wq_create_page_size,
15504 &wq_create->u.request_1,
15505 (wq->page_size / SLI4_PAGE_SIZE));
15506 page = wq_create->u.request_1.page;
15509 page = wq_create->u.request.page;
15513 list_for_each_entry(dmabuf, &wq->page_list, list) {
15514 memset(dmabuf->virt, 0, hw_page_size);
15515 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15516 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15519 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15520 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15522 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15523 /* The IOCTL status is embedded in the mailbox subheader. */
15524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15526 if (shdr_status || shdr_add_status || rc) {
15527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15528 "2503 WQ_CREATE mailbox failed with "
15529 "status x%x add_status x%x, mbx status x%x\n",
15530 shdr_status, shdr_add_status, rc);
15535 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15536 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15537 &wq_create->u.response);
15539 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15540 &wq_create->u.response_1);
15542 if (wq->queue_id == 0xFFFF) {
15547 wq->db_format = LPFC_DB_LIST_FORMAT;
15548 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15549 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15550 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15551 &wq_create->u.response);
15552 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15553 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15555 "3265 WQ[%d] doorbell format "
15556 "not supported: x%x\n",
15557 wq->queue_id, wq->db_format);
15561 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15562 &wq_create->u.response);
15563 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15565 if (!bar_memmap_p) {
15566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15567 "3263 WQ[%d] failed to memmap "
15568 "pci barset:x%x\n",
15569 wq->queue_id, pci_barset);
15573 db_offset = wq_create->u.response.doorbell_offset;
15574 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15575 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15576 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15577 "3252 WQ[%d] doorbell offset "
15578 "not supported: x%x\n",
15579 wq->queue_id, db_offset);
15583 wq->db_regaddr = bar_memmap_p + db_offset;
15584 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15585 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15586 "format:x%x\n", wq->queue_id,
15587 pci_barset, db_offset, wq->db_format);
15589 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15591 /* Check if DPP was honored by the firmware */
15592 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15593 &wq_create->u.response_1);
15594 if (wq->dpp_enable) {
15595 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15596 &wq_create->u.response_1);
15597 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15599 if (!bar_memmap_p) {
15600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15601 "3267 WQ[%d] failed to memmap "
15602 "pci barset:x%x\n",
15603 wq->queue_id, pci_barset);
15607 db_offset = wq_create->u.response_1.doorbell_offset;
15608 wq->db_regaddr = bar_memmap_p + db_offset;
15609 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15610 &wq_create->u.response_1);
15611 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15612 &wq_create->u.response_1);
15613 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15615 if (!bar_memmap_p) {
15616 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15617 "3268 WQ[%d] failed to memmap "
15618 "pci barset:x%x\n",
15619 wq->queue_id, dpp_barset);
15623 dpp_offset = wq_create->u.response_1.dpp_offset;
15624 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15625 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15626 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15627 "dpp_id:x%x dpp_barset:x%x "
15628 "dpp_offset:x%x\n",
15629 wq->queue_id, pci_barset, db_offset,
15630 wq->dpp_id, dpp_barset, dpp_offset);
15632 /* Enable combined writes for DPP aperture */
15633 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15635 rc = set_memory_wc(pg_addr, 1);
15637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15638 "3272 Cannot setup Combined "
15639 "Write on WQ[%d] - disable DPP\n",
15641 phba->cfg_enable_dpp = 0;
15644 phba->cfg_enable_dpp = 0;
15647 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15649 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15650 if (wq->pring == NULL) {
15654 wq->type = LPFC_WQ;
15655 wq->assoc_qid = cq->queue_id;
15656 wq->subtype = subtype;
15657 wq->host_index = 0;
15659 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15661 /* link the wq onto the parent cq child list */
15662 list_add_tail(&wq->list, &cq->child_list);
15664 mempool_free(mbox, phba->mbox_mem_pool);
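/*
 * A compiled-out sketch of the page-array population step shared by the
 * MQ/WQ/RQ create commands above: each DMA page backing the queue is
 * zeroed and its bus address is split into the request's lo/hi dword
 * pair, indexed by that page's buffer_tag.
 */
#if 0
static void lpfc_q_fill_page_array_sketch(struct lpfc_queue *q,
					  struct dma_address *page,
					  uint32_t hw_page_size)
{
	struct lpfc_dmabuf *dmabuf;

	list_for_each_entry(dmabuf, &q->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}
}
#endif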
15669 * lpfc_rq_create - Create a Receive Queue on the HBA
15670 * @phba: HBA structure that indicates port to create a queue on.
15671 * @hrq: The queue structure to use to create the header receive queue.
15672 * @drq: The queue structure to use to create the data receive queue.
15673 * @cq: The completion queue to bind the receive queues to.
15675 * This function creates a receive buffer queue pair, as detailed in @hrq and
15676 * @drq, on a port described by @phba, by sending a RQ_CREATE mailbox command
15677 * to the HBA.
15679 * The @phba struct is used to send mailbox commands to the HBA. The @drq and
15680 * @hrq structs are used to get the entry count that is necessary to determine
15681 * the number of pages to use for this queue. The @cq is used to indicate which
15682 * completion queue to bind received buffers that are posted to these queues to.
15683 * This function will send the RQ_CREATE mailbox command to the HBA to set up
15684 * the receive queue pair. This function is synchronous; it waits for the
15685 * mailbox command to finish before returning.
15687 * On success this function will return a zero. If unable to allocate enough
15688 * memory this function will return -ENOMEM. If the queue create mailbox command
15689 * fails this function will return -ENXIO.
15692 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15693 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15695 struct lpfc_mbx_rq_create *rq_create;
15696 struct lpfc_dmabuf *dmabuf;
15697 LPFC_MBOXQ_t *mbox;
15698 int rc, length, status = 0;
15699 uint32_t shdr_status, shdr_add_status;
15700 union lpfc_sli4_cfg_shdr *shdr;
15701 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15702 void __iomem *bar_memmap_p;
15703 uint32_t db_offset;
15704 uint16_t pci_barset;
15706 /* sanity check on queue memory */
15707 if (!hrq || !drq || !cq)
15709 if (!phba->sli4_hba.pc_sli4_params.supported)
15710 hw_page_size = SLI4_PAGE_SIZE;
15712 if (hrq->entry_count != drq->entry_count)
15714 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15717 length = (sizeof(struct lpfc_mbx_rq_create) -
15718 sizeof(struct lpfc_sli4_cfg_mhdr));
15719 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15720 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15721 length, LPFC_SLI4_MBX_EMBED);
15722 rq_create = &mbox->u.mqe.un.rq_create;
15723 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15724 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15725 phba->sli4_hba.pc_sli4_params.rqv);
15726 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15727 bf_set(lpfc_rq_context_rqe_count_1,
15728 &rq_create->u.request.context,
15729 hrq->entry_count);
15730 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15731 bf_set(lpfc_rq_context_rqe_size,
15732 &rq_create->u.request.context,
15733 LPFC_RQE_SIZE_8);
15734 bf_set(lpfc_rq_context_page_size,
15735 &rq_create->u.request.context,
15736 LPFC_RQ_PAGE_SIZE_4096);
15738 switch (hrq->entry_count) {
15739 default:
15740 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15741 "2535 Unsupported RQ count. (%d)\n",
15742 hrq->entry_count);
15743 if (hrq->entry_count < 512) {
15744 status = -EINVAL;
15745 goto out;
15746 }
15747 /* fall through - otherwise default to smallest count */
15748 case 512:
15749 bf_set(lpfc_rq_context_rqe_count,
15750 &rq_create->u.request.context,
15751 LPFC_RQ_RING_SIZE_512);
15752 break;
15753 case 1024:
15754 bf_set(lpfc_rq_context_rqe_count,
15755 &rq_create->u.request.context,
15756 LPFC_RQ_RING_SIZE_1024);
15757 break;
15758 case 2048:
15759 bf_set(lpfc_rq_context_rqe_count,
15760 &rq_create->u.request.context,
15761 LPFC_RQ_RING_SIZE_2048);
15762 break;
15763 case 4096:
15764 bf_set(lpfc_rq_context_rqe_count,
15765 &rq_create->u.request.context,
15766 LPFC_RQ_RING_SIZE_4096);
15767 break;
15768 }
15769 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15770 LPFC_HDR_BUF_SIZE);
15772 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15773 cq->queue_id);
15774 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15775 hrq->page_count);
15776 list_for_each_entry(dmabuf, &hrq->page_list, list) {
15777 memset(dmabuf->virt, 0, hw_page_size);
15778 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15779 putPaddrLow(dmabuf->phys);
15780 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15781 putPaddrHigh(dmabuf->phys);
15783 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15784 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15786 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15787 /* The IOCTL status is embedded in the mailbox subheader. */
15788 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15789 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15790 if (shdr_status || shdr_add_status || rc) {
15791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15792 "2504 RQ_CREATE mailbox failed with "
15793 "status x%x add_status x%x, mbx status x%x\n",
15794 shdr_status, shdr_add_status, rc);
15798 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15799 if (hrq->queue_id == 0xFFFF) {
15804 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15805 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15806 &rq_create->u.response);
15807 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15808 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15810 "3262 RQ [%d] doorbell format not "
15811 "supported: x%x\n", hrq->queue_id,
15817 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15818 &rq_create->u.response);
15819 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15820 if (!bar_memmap_p) {
15821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15822 "3269 RQ[%d] failed to memmap pci "
15823 "barset:x%x\n", hrq->queue_id,
15829 db_offset = rq_create->u.response.doorbell_offset;
15830 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15831 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15833 "3270 RQ[%d] doorbell offset not "
15834 "supported: x%x\n", hrq->queue_id,
15839 hrq->db_regaddr = bar_memmap_p + db_offset;
15840 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15841 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15842 "format:x%x\n", hrq->queue_id, pci_barset,
15843 db_offset, hrq->db_format);
15845 hrq->db_format = LPFC_DB_RING_FORMAT;
15846 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15848 hrq->type = LPFC_HRQ;
15849 hrq->assoc_qid = cq->queue_id;
15850 hrq->subtype = subtype;
15851 hrq->host_index = 0;
15852 hrq->hba_index = 0;
15853 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15855 /* now create the data queue */
15856 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15857 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15858 length, LPFC_SLI4_MBX_EMBED);
15859 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15860 phba->sli4_hba.pc_sli4_params.rqv);
15861 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15862 bf_set(lpfc_rq_context_rqe_count_1,
15863 &rq_create->u.request.context, hrq->entry_count);
15864 if (subtype == LPFC_NVMET)
15865 rq_create->u.request.context.buffer_size =
15866 LPFC_NVMET_DATA_BUF_SIZE;
15868 rq_create->u.request.context.buffer_size =
15869 LPFC_DATA_BUF_SIZE;
15870 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15872 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15873 (PAGE_SIZE/SLI4_PAGE_SIZE));
15875 switch (drq->entry_count) {
15876 default:
15877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15878 "2536 Unsupported RQ count. (%d)\n",
15879 drq->entry_count);
15880 if (drq->entry_count < 512) {
15881 status = -EINVAL;
15882 goto out;
15883 }
15884 /* fall through - otherwise default to smallest count */
15885 case 512:
15886 bf_set(lpfc_rq_context_rqe_count,
15887 &rq_create->u.request.context,
15888 LPFC_RQ_RING_SIZE_512);
15889 break;
15890 case 1024:
15891 bf_set(lpfc_rq_context_rqe_count,
15892 &rq_create->u.request.context,
15893 LPFC_RQ_RING_SIZE_1024);
15894 break;
15895 case 2048:
15896 bf_set(lpfc_rq_context_rqe_count,
15897 &rq_create->u.request.context,
15898 LPFC_RQ_RING_SIZE_2048);
15899 break;
15900 case 4096:
15901 bf_set(lpfc_rq_context_rqe_count,
15902 &rq_create->u.request.context,
15903 LPFC_RQ_RING_SIZE_4096);
15904 break;
15905 }
15906 if (subtype == LPFC_NVMET)
15907 bf_set(lpfc_rq_context_buf_size,
15908 &rq_create->u.request.context,
15909 LPFC_NVMET_DATA_BUF_SIZE);
15911 bf_set(lpfc_rq_context_buf_size,
15912 &rq_create->u.request.context,
15913 LPFC_DATA_BUF_SIZE);
15915 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15916 cq->queue_id);
15917 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15918 drq->page_count);
15919 list_for_each_entry(dmabuf, &drq->page_list, list) {
15920 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15921 putPaddrLow(dmabuf->phys);
15922 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15923 putPaddrHigh(dmabuf->phys);
15925 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15926 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15927 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15928 /* The IOCTL status is embedded in the mailbox subheader. */
15929 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15930 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15931 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15932 if (shdr_status || shdr_add_status || rc) {
15936 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15937 if (drq->queue_id == 0xFFFF) {
15941 drq->type = LPFC_DRQ;
15942 drq->assoc_qid = cq->queue_id;
15943 drq->subtype = subtype;
15944 drq->host_index = 0;
15945 drq->hba_index = 0;
15946 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
15948 /* link the header and data RQs onto the parent cq child list */
15949 list_add_tail(&hrq->list, &cq->child_list);
15950 list_add_tail(&drq->list, &cq->child_list);
15953 mempool_free(mbox, phba->mbox_mem_pool);
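/*
 * A compiled-out sketch of the header/data RQ pairing rules enforced by
 * lpfc_rq_create() above: the two queues advance in lockstep (one header
 * buffer consumed per data buffer), so their entry counts must match and
 * only the posted buffer sizes differ.
 */
#if 0
static int lpfc_rq_pair_buf_size_sketch(struct lpfc_queue *hrq,
					struct lpfc_queue *drq,
					uint32_t subtype)
{
	if (!hrq || !drq || hrq->entry_count != drq->entry_count)
		return -EINVAL;
	/* The header RQ always takes LPFC_HDR_BUF_SIZE buffers; the data
	 * RQ buffer size depends on who consumes the pair. */
	return (subtype == LPFC_NVMET) ? LPFC_NVMET_DATA_BUF_SIZE :
					 LPFC_DATA_BUF_SIZE;
}
#endif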
15958 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15959 * @phba: HBA structure that indicates port to create a queue on.
15960 * @hrqp: The queue structure array to use to create the header receive queues.
15961 * @drqp: The queue structure array to use to create the data receive queues.
15962 * @cqp: The completion queue array to bind these receive queues to.
15964 * This function creates receive buffer queue pairs, as detailed in @hrqp and
15965 * @drqp, on a port described by @phba, by sending a single RQ_CREATE mailbox
15966 * command to the HBA.
15968 * The @phba struct is used to send mailbox commands to the HBA. The @drqp and
15969 * @hrqp arrays are used to get the entry counts that are necessary to determine
15970 * the number of pages to use for these queues. The @cqp array indicates which
15971 * completion queues to bind received buffers that are posted to these queues to.
15972 * This function will send the RQ_CREATE mailbox command to the HBA to set up
15973 * the receive queue pairs. This function is synchronous; it waits for the
15974 * mailbox command to finish before returning.
15976 * On success this function will return a zero. If unable to allocate enough
15977 * memory this function will return -ENOMEM. If the queue create mailbox command
15978 * fails this function will return -ENXIO.
15981 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15982 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15985 struct lpfc_queue *hrq, *drq, *cq;
15986 struct lpfc_mbx_rq_create_v2 *rq_create;
15987 struct lpfc_dmabuf *dmabuf;
15988 LPFC_MBOXQ_t *mbox;
15989 int rc, length, alloclen, status = 0;
15990 int cnt, idx, numrq, page_idx = 0;
15991 uint32_t shdr_status, shdr_add_status;
15992 union lpfc_sli4_cfg_shdr *shdr;
15993 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15995 numrq = phba->cfg_nvmet_mrq;
15996 /* sanity check on array memory */
15997 if (!hrqp || !drqp || !cqp || !numrq)
15999 if (!phba->sli4_hba.pc_sli4_params.supported)
16000 hw_page_size = SLI4_PAGE_SIZE;
16002 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16006 length = sizeof(struct lpfc_mbx_rq_create_v2);
16007 length += ((2 * numrq * hrqp[0]->page_count) *
16008 sizeof(struct dma_address));
16010 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16011 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16012 LPFC_SLI4_MBX_NEMBED);
16013 if (alloclen < length) {
16014 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16015 "3099 Allocated DMA memory size (%d) is "
16016 "less than the requested DMA memory size "
16017 "(%d)\n", alloclen, length);
16024 rq_create = mbox->sge_array->addr[0];
16025 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16027 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16030 for (idx = 0; idx < numrq; idx++) {
16035 /* sanity check on queue memory */
16036 if (!hrq || !drq || !cq) {
16041 if (hrq->entry_count != drq->entry_count) {
16047 bf_set(lpfc_mbx_rq_create_num_pages,
16048 &rq_create->u.request,
16050 bf_set(lpfc_mbx_rq_create_rq_cnt,
16051 &rq_create->u.request, (numrq * 2));
16052 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16054 bf_set(lpfc_rq_context_base_cq,
16055 &rq_create->u.request.context,
16057 bf_set(lpfc_rq_context_data_size,
16058 &rq_create->u.request.context,
16059 LPFC_NVMET_DATA_BUF_SIZE);
16060 bf_set(lpfc_rq_context_hdr_size,
16061 &rq_create->u.request.context,
16062 LPFC_HDR_BUF_SIZE);
16063 bf_set(lpfc_rq_context_rqe_count_1,
16064 &rq_create->u.request.context,
16066 bf_set(lpfc_rq_context_rqe_size,
16067 &rq_create->u.request.context,
16069 bf_set(lpfc_rq_context_page_size,
16070 &rq_create->u.request.context,
16071 (PAGE_SIZE/SLI4_PAGE_SIZE));
16074 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16075 memset(dmabuf->virt, 0, hw_page_size);
16076 cnt = page_idx + dmabuf->buffer_tag;
16077 rq_create->u.request.page[cnt].addr_lo =
16078 putPaddrLow(dmabuf->phys);
16079 rq_create->u.request.page[cnt].addr_hi =
16080 putPaddrHigh(dmabuf->phys);
16086 list_for_each_entry(dmabuf, &drq->page_list, list) {
16087 memset(dmabuf->virt, 0, hw_page_size);
16088 cnt = page_idx + dmabuf->buffer_tag;
16089 rq_create->u.request.page[cnt].addr_lo =
16090 putPaddrLow(dmabuf->phys);
16091 rq_create->u.request.page[cnt].addr_hi =
16092 putPaddrHigh(dmabuf->phys);
16097 hrq->db_format = LPFC_DB_RING_FORMAT;
16098 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16099 hrq->type = LPFC_HRQ;
16100 hrq->assoc_qid = cq->queue_id;
16101 hrq->subtype = subtype;
16102 hrq->host_index = 0;
16103 hrq->hba_index = 0;
16104 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16106 drq->db_format = LPFC_DB_RING_FORMAT;
16107 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16108 drq->type = LPFC_DRQ;
16109 drq->assoc_qid = cq->queue_id;
16110 drq->subtype = subtype;
16111 drq->host_index = 0;
16112 drq->hba_index = 0;
16113 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16115 list_add_tail(&hrq->list, &cq->child_list);
16116 list_add_tail(&drq->list, &cq->child_list);
16119 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16120 /* The IOCTL status is embedded in the mailbox subheader. */
16121 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16122 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16123 if (shdr_status || shdr_add_status || rc) {
16124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16125 "3120 RQ_CREATE mailbox failed with "
16126 "status x%x add_status x%x, mbx status x%x\n",
16127 shdr_status, shdr_add_status, rc);
16131 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16132 if (rc == 0xFFFF) {
16137 /* Initialize all RQs with associated queue id */
16138 for (idx = 0; idx < numrq; idx++) {
16140 hrq->queue_id = rc + (2 * idx);
16142 drq->queue_id = rc + (2 * idx) + 1;
16146 lpfc_sli4_mbox_cmd_free(phba, mbox);
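/*
 * A compiled-out sketch of how the MRQ ids handed back by the firmware
 * map onto the queue arrays: RQ_CREATE v2 returns only the base queue id,
 * and the 2 * numrq queues are numbered consecutively from it, header RQ
 * before data RQ, in the order the page arrays were loaded above.
 */
#if 0
static void lpfc_mrq_assign_ids_sketch(struct lpfc_queue **hrqp,
				       struct lpfc_queue **drqp,
				       uint16_t base_qid, int numrq)
{
	int idx;

	for (idx = 0; idx < numrq; idx++) {
		hrqp[idx]->queue_id = base_qid + (2 * idx);
		drqp[idx]->queue_id = base_qid + (2 * idx) + 1;
	}
}
#endif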
16151 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16152 * @eq: The queue structure associated with the queue to destroy.
16154 * This function destroys a queue, as detailed in @eq, by sending a mailbox
16155 * command, specific to the type of queue, to the HBA.
16157 * The @eq struct is used to get the queue ID of the queue to destroy.
16159 * On success this function will return a zero. If the queue destroy mailbox
16160 * command fails this function will return -ENXIO.
16163 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16165 LPFC_MBOXQ_t *mbox;
16166 int rc, length, status = 0;
16167 uint32_t shdr_status, shdr_add_status;
16168 union lpfc_sli4_cfg_shdr *shdr;
16170 /* sanity check on queue memory */
16174 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16177 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16178 sizeof(struct lpfc_sli4_cfg_mhdr));
16179 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16180 LPFC_MBOX_OPCODE_EQ_DESTROY,
16181 length, LPFC_SLI4_MBX_EMBED);
16182 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16184 mbox->vport = eq->phba->pport;
16185 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16187 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16188 /* The IOCTL status is embedded in the mailbox subheader. */
16189 shdr = (union lpfc_sli4_cfg_shdr *)
16190 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16191 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16192 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16193 if (shdr_status || shdr_add_status || rc) {
16194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16195 "2505 EQ_DESTROY mailbox failed with "
16196 "status x%x add_status x%x, mbx status x%x\n",
16197 shdr_status, shdr_add_status, rc);
16201 /* Remove eq from any list */
16202 list_del_init(&eq->list);
16203 mempool_free(mbox, eq->phba->mbox_mem_pool);
16208 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16209 * @cq: The queue structure associated with the queue to destroy.
16211 * This function destroys a queue, as detailed in @cq, by sending a mailbox
16212 * command, specific to the type of queue, to the HBA.
16214 * The @cq struct is used to get the queue ID of the queue to destroy.
16216 * On success this function will return a zero. If the queue destroy mailbox
16217 * command fails this function will return -ENXIO.
16220 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16222 LPFC_MBOXQ_t *mbox;
16223 int rc, length, status = 0;
16224 uint32_t shdr_status, shdr_add_status;
16225 union lpfc_sli4_cfg_shdr *shdr;
16227 /* sanity check on queue memory */
16230 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16233 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16234 sizeof(struct lpfc_sli4_cfg_mhdr));
16235 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16236 LPFC_MBOX_OPCODE_CQ_DESTROY,
16237 length, LPFC_SLI4_MBX_EMBED);
16238 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16240 mbox->vport = cq->phba->pport;
16241 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16242 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16243 /* The IOCTL status is embedded in the mailbox subheader. */
16244 shdr = (union lpfc_sli4_cfg_shdr *)
16245 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16246 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16247 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16248 if (shdr_status || shdr_add_status || rc) {
16249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16250 "2506 CQ_DESTROY mailbox failed with "
16251 "status x%x add_status x%x, mbx status x%x\n",
16252 shdr_status, shdr_add_status, rc);
16255 /* Remove cq from any list */
16256 list_del_init(&cq->list);
16257 mempool_free(mbox, cq->phba->mbox_mem_pool);
16262 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16263 * @mq: The queue structure associated with the queue to destroy.
16265 * This function destroys a queue, as detailed in @mq, by sending a mailbox
16266 * command, specific to the type of queue, to the HBA.
16266 * command, specific to the type of queue, to the HBA.
16268 * The @mq struct is used to get the queue ID of the queue to destroy.
16270 * On success this function will return a zero. If the queue destroy mailbox
16271 * command fails this function will return -ENXIO.
16274 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16276 LPFC_MBOXQ_t *mbox;
16277 int rc, length, status = 0;
16278 uint32_t shdr_status, shdr_add_status;
16279 union lpfc_sli4_cfg_shdr *shdr;
16281 /* sanity check on queue memory */
16284 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16287 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16288 sizeof(struct lpfc_sli4_cfg_mhdr));
16289 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16290 LPFC_MBOX_OPCODE_MQ_DESTROY,
16291 length, LPFC_SLI4_MBX_EMBED);
16292 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16294 mbox->vport = mq->phba->pport;
16295 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16296 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16297 /* The IOCTL status is embedded in the mailbox subheader. */
16298 shdr = (union lpfc_sli4_cfg_shdr *)
16299 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16300 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16301 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16302 if (shdr_status || shdr_add_status || rc) {
16303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16304 "2507 MQ_DESTROY mailbox failed with "
16305 "status x%x add_status x%x, mbx status x%x\n",
16306 shdr_status, shdr_add_status, rc);
16309 /* Remove mq from any list */
16310 list_del_init(&mq->list);
16311 mempool_free(mbox, mq->phba->mbox_mem_pool);
16316 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16317 * @wq: The queue structure associated with the queue to destroy.
16319 * This function destroys a queue, as detailed in @wq, by sending a mailbox
16320 * command, specific to the type of queue, to the HBA.
16322 * The @wq struct is used to get the queue ID of the queue to destroy.
16324 * On success this function will return a zero. If the queue destroy mailbox
16325 * command fails this function will return -ENXIO.
16328 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16330 LPFC_MBOXQ_t *mbox;
16331 int rc, length, status = 0;
16332 uint32_t shdr_status, shdr_add_status;
16333 union lpfc_sli4_cfg_shdr *shdr;
16335 /* sanity check on queue memory */
16338 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16341 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16342 sizeof(struct lpfc_sli4_cfg_mhdr));
16343 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16344 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16345 length, LPFC_SLI4_MBX_EMBED);
16346 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16348 mbox->vport = wq->phba->pport;
16349 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16350 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16351 shdr = (union lpfc_sli4_cfg_shdr *)
16352 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16353 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16354 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16355 if (shdr_status || shdr_add_status || rc) {
16356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16357 "2508 WQ_DESTROY mailbox failed with "
16358 "status x%x add_status x%x, mbx status x%x\n",
16359 shdr_status, shdr_add_status, rc);
16362 /* Remove wq from any list */
16363 list_del_init(&wq->list);
16366 mempool_free(mbox, wq->phba->mbox_mem_pool);
16371 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16372 * @hrq: The header receive queue structure to destroy.
16373 * @drq: The data receive queue structure to destroy.
16374 * This function destroys the queue pair, as detailed in @hrq and @drq, by
16375 * sending a mailbox command, specific to the type of queue, to the HBA.
16377 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
16379 * On success this function will return a zero. If the queue destroy mailbox
16380 * command fails this function will return -ENXIO.
16383 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16384 struct lpfc_queue *drq)
16386 LPFC_MBOXQ_t *mbox;
16387 int rc, length, status = 0;
16388 uint32_t shdr_status, shdr_add_status;
16389 union lpfc_sli4_cfg_shdr *shdr;
16391 /* sanity check on queue memory */
16394 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16397 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16398 sizeof(struct lpfc_sli4_cfg_mhdr));
16399 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16400 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16401 length, LPFC_SLI4_MBX_EMBED);
16402 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16404 mbox->vport = hrq->phba->pport;
16405 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16406 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16407 /* The IOCTL status is embedded in the mailbox subheader. */
16408 shdr = (union lpfc_sli4_cfg_shdr *)
16409 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16410 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16411 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16412 if (shdr_status || shdr_add_status || rc) {
16413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16414 "2509 RQ_DESTROY mailbox failed with "
16415 "status x%x add_status x%x, mbx status x%x\n",
16416 shdr_status, shdr_add_status, rc);
16417 if (rc != MBX_TIMEOUT)
16418 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16421 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16423 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16424 shdr = (union lpfc_sli4_cfg_shdr *)
16425 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16426 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16427 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16428 if (shdr_status || shdr_add_status || rc) {
16429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16430 "2510 RQ_DESTROY mailbox failed with "
16431 "status x%x add_status x%x, mbx status x%x\n",
16432 shdr_status, shdr_add_status, rc);
16435 list_del_init(&hrq->list);
16436 list_del_init(&drq->list);
16437 mempool_free(mbox, hrq->phba->mbox_mem_pool);
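/*
 * A cleanup rule worth noting, visible in the RQ_DESTROY error path above
 * and in the SGL post routines below: when lpfc_sli_issue_mbox() returns
 * MBX_TIMEOUT the mailbox job is still owned by the mailbox subsystem and
 * may complete later, so the caller must not return it to the mempool.
 * Compiled-out shape:
 */
#if 0
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
#endif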
16442 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16443 * @phba: pointer to lpfc hba data structure.
16444 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16445 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16446 * @xritag: the xritag that ties this io to the SGL pages.
16448 * This routine will post the sgl pages for the IO that has the xritag
16449 * that is in the iocbq structure. The xritag is assigned during iocbq
16450 * creation and persists for as long as the driver is loaded.
16451 * If the caller has fewer than 256 scatter gather segments to map, then
16452 * pdma_phys_addr1 should be 0.
16453 * If the caller needs to map more than 256 scatter gather segments, then
16454 * pdma_phys_addr1 should be a valid physical address.
16455 * Physical addresses for SGLs must be 64-byte aligned.
16456 * If two SGL pages are mapped, the first must hold 256 entries and the
16457 * second can hold between 1 and 256 entries.
16461 * -ENXIO, -ENOMEM - Failure
16464 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16465 dma_addr_t pdma_phys_addr0,
16466 dma_addr_t pdma_phys_addr1,
16469 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16470 LPFC_MBOXQ_t *mbox;
16472 uint32_t shdr_status, shdr_add_status;
16474 union lpfc_sli4_cfg_shdr *shdr;
16476 if (xritag == NO_XRI) {
16477 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16478 "0364 Invalid param:\n");
16482 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16486 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16487 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16488 sizeof(struct lpfc_mbx_post_sgl_pages) -
16489 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16491 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16492 &mbox->u.mqe.un.post_sgl_pages;
16493 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16494 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16496 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16497 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16498 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16499 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16501 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16502 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16503 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16504 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16505 if (!phba->sli4_hba.intr_enable)
16506 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16508 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16509 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16511 /* The IOCTL status is embedded in the mailbox subheader. */
16512 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16513 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16514 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16515 if (rc != MBX_TIMEOUT)
16516 mempool_free(mbox, phba->mbox_mem_pool);
16517 if (shdr_status || shdr_add_status || rc) {
16518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16519 "2511 POST_SGL mailbox failed with "
16520 "status x%x add_status x%x, mbx status x%x\n",
16521 shdr_status, shdr_add_status, rc);
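/*
 * A compiled-out usage sketch for lpfc_sli4_post_sgl() above: posting a
 * single buffer's SGL, supplying the second page address only when the
 * SGL spills past the first SGL_PAGE_SIZE page (more than 256 SGEs). The
 * lpfc_io_buf fields mirror lpfc_sli4_post_io_sgl_list() later in this
 * file; "buf" is assumed to be an already-allocated buffer.
 */
#if 0
	struct lpfc_io_buf *buf;
	dma_addr_t pdma_phys_sgl1;
	int rc;

	pdma_phys_sgl1 = (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) ?
			 buf->dma_phys_sgl + SGL_PAGE_SIZE : 0;
	rc = lpfc_sli4_post_sgl(phba, buf->dma_phys_sgl, pdma_phys_sgl1,
				buf->cur_iocbq.sli4_xritag);
#endif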
16527 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16528 * @phba: pointer to lpfc hba data structure.
16530 * This routine is invoked to allocate the next available xri from the
16531 * driver's xri bitmask, consistent with the SLI-4 interface spec. The
16532 * xri index is logical, so the search starts from bit 0 on each call,
16533 * and the first clear bit is claimed under the hbalock.
16536 * A free xri in the range 0 <= xri < max_xri if successful,
16537 * NO_XRI if no xris are available.
16540 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16545 * Fetch the next logical xri. Because this index is logical,
16546 * the driver starts at 0 each time.
16548 spin_lock_irq(&phba->hbalock);
16549 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16550 phba->sli4_hba.max_cfg_param.max_xri, 0);
16551 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16552 spin_unlock_irq(&phba->hbalock);
16555 set_bit(xri, phba->sli4_hba.xri_bmask);
16556 phba->sli4_hba.max_cfg_param.xri_used++;
16558 spin_unlock_irq(&phba->hbalock);
16563 * __lpfc_sli4_free_xri - Release an xri for reuse.
16564 * @phba: pointer to lpfc hba data structure.
16566 * This routine is invoked to release an xri to the pool of
16567 * available xris maintained by the driver.
16570 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16572 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16573 phba->sli4_hba.max_cfg_param.xri_used--;
16578 * lpfc_sli4_free_xri - Release an xri for reuse.
16579 * @phba: pointer to lpfc hba data structure.
16581 * This routine is invoked to release an xri to the pool of
16582 * available xris maintained by the driver.
16585 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16587 spin_lock_irq(&phba->hbalock);
16588 __lpfc_sli4_free_xri(phba, xri);
16589 spin_unlock_irq(&phba->hbalock);
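/*
 * A compiled-out lifecycle sketch for the xri allocator pair above: a
 * caller claims an xri from the bitmask, ties it to an io, and releases
 * it when the io completes; NO_XRI signals pool exhaustion.
 */
#if 0
	uint16_t xri;

	xri = lpfc_sli4_alloc_xri(phba);
	if (xri == NO_XRI)
		return -ENOMEM;		/* all max_xri entries are in use */
	/* ... tie the xri to an iocbq and post its SGL ... */
	lpfc_sli4_free_xri(phba, xri);	/* clears the bit under hbalock */
#endif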
16593 * lpfc_sli4_next_xritag - Get an xritag for the io
16594 * @phba: Pointer to HBA context object.
16596 * This function gets an xritag for the iocb. If there is no unused xritag
16597 * it will return NO_XRI (0xffff).
16598 * The function returns the allocated xritag if successful, else returns
16599 * NO_XRI, which is not a valid xritag.
16600 * The caller is not required to hold any lock.
16603 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16605 uint16_t xri_index;
16607 xri_index = lpfc_sli4_alloc_xri(phba);
16608 if (xri_index == NO_XRI)
16609 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16610 "2004 Failed to allocate XRI.last XRITAG is %d"
16611 " Max XRI is %d, Used XRI is %d\n",
16613 phba->sli4_hba.max_cfg_param.max_xri,
16614 phba->sli4_hba.max_cfg_param.xri_used);
16619 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16620 * @phba: pointer to lpfc hba data structure.
16621 * @post_sgl_list: pointer to els sgl entry list.
16622 * @count: number of els sgl entries on the list.
16624 * This routine is invoked to post a block of driver's sgl pages to the
16625 * HBA using non-embedded mailbox command. No lock is held. This routine
16626 * is only called when the driver is loading and after all IO has been stopped.
16630 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16631 struct list_head *post_sgl_list,
16634 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16635 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16636 struct sgl_page_pairs *sgl_pg_pairs;
16638 LPFC_MBOXQ_t *mbox;
16639 uint32_t reqlen, alloclen, pg_pairs;
16641 uint16_t xritag_start = 0;
16643 uint32_t shdr_status, shdr_add_status;
16644 union lpfc_sli4_cfg_shdr *shdr;
16646 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16647 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16648 if (reqlen > SLI4_PAGE_SIZE) {
16649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16650 "2559 Block sgl registration required DMA "
16651 "size (%d) great than a page\n", reqlen);
16655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16659 /* Allocate DMA memory and set up the non-embedded mailbox command */
16660 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16661 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16662 LPFC_SLI4_MBX_NEMBED);
16664 if (alloclen < reqlen) {
16665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16666 "0285 Allocated DMA memory size (%d) is "
16667 "less than the requested DMA memory "
16668 "size (%d)\n", alloclen, reqlen);
16669 lpfc_sli4_mbox_cmd_free(phba, mbox);
16672 /* Set up the SGL pages in the non-embedded DMA pages */
16673 viraddr = mbox->sge_array->addr[0];
16674 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16675 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16678 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16679 /* Set up the sge entry */
16680 sgl_pg_pairs->sgl_pg0_addr_lo =
16681 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16682 sgl_pg_pairs->sgl_pg0_addr_hi =
16683 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16684 sgl_pg_pairs->sgl_pg1_addr_lo =
16685 cpu_to_le32(putPaddrLow(0));
16686 sgl_pg_pairs->sgl_pg1_addr_hi =
16687 cpu_to_le32(putPaddrHigh(0));
16689 /* Keep the first xritag on the list */
16691 xritag_start = sglq_entry->sli4_xritag;
16696 /* Complete initialization and perform endian conversion. */
16697 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16698 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16699 sgl->word0 = cpu_to_le32(sgl->word0);
16701 if (!phba->sli4_hba.intr_enable)
16702 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16704 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16705 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16707 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16708 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16709 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16710 if (rc != MBX_TIMEOUT)
16711 lpfc_sli4_mbox_cmd_free(phba, mbox);
16712 if (shdr_status || shdr_add_status || rc) {
16713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16714 "2513 POST_SGL_BLOCK mailbox command failed "
16715 "status x%x add_status x%x mbx status x%x\n",
16716 shdr_status, shdr_add_status, rc);
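/*
 * A compiled-out sketch of the sizing rule checked above: a non-embedded
 * SGL block post must fit in one SLI4_PAGE_SIZE DMA page, which bounds
 * how many sgl page pairs a single mailbox command can register. This is
 * just the reqlen computation solved for the pair count.
 */
#if 0
	uint32_t max_pg_pairs;

	max_pg_pairs = (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
			sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
	/* post_cnt passed to this routine must not exceed max_pg_pairs */
#endif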
16723 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
16724 * @phba: pointer to lpfc hba data structure.
16725 * @nblist: pointer to nvme buffer list.
16726 * @count: number of nvme buffers on the list.
16728 * This routine is invoked to post a block of @count nvme sgl pages from an
16729 * nvme buffer list @nblist to the HBA using non-embedded mailbox command.
16734 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16737 struct lpfc_io_buf *lpfc_ncmd;
16738 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16739 struct sgl_page_pairs *sgl_pg_pairs;
16741 LPFC_MBOXQ_t *mbox;
16742 uint32_t reqlen, alloclen, pg_pairs;
16744 uint16_t xritag_start = 0;
16746 uint32_t shdr_status, shdr_add_status;
16747 dma_addr_t pdma_phys_bpl1;
16748 union lpfc_sli4_cfg_shdr *shdr;
16750 /* Calculate the requested length of the dma memory */
16751 reqlen = count * sizeof(struct sgl_page_pairs) +
16752 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16753 if (reqlen > SLI4_PAGE_SIZE) {
16754 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16755 "6118 Block sgl registration required DMA "
16756 "size (%d) great than a page\n", reqlen);
16759 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16762 "6119 Failed to allocate mbox cmd memory\n");
16766 /* Allocate DMA memory and set up the non-embedded mailbox command */
16767 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16768 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16769 reqlen, LPFC_SLI4_MBX_NEMBED);
16771 if (alloclen < reqlen) {
16772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16773 "6120 Allocated DMA memory size (%d) is "
16774 "less than the requested DMA memory "
16775 "size (%d)\n", alloclen, reqlen);
16776 lpfc_sli4_mbox_cmd_free(phba, mbox);
16780 /* Get the first SGE entry from the non-embedded DMA memory */
16781 viraddr = mbox->sge_array->addr[0];
16783 /* Set up the SGL pages in the non-embedded DMA pages */
16784 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16785 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16788 list_for_each_entry(lpfc_ncmd, nblist, list) {
16789 /* Set up the sge entry */
16790 sgl_pg_pairs->sgl_pg0_addr_lo =
16791 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
16792 sgl_pg_pairs->sgl_pg0_addr_hi =
16793 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
16794 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16795 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
16798 pdma_phys_bpl1 = 0;
16799 sgl_pg_pairs->sgl_pg1_addr_lo =
16800 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16801 sgl_pg_pairs->sgl_pg1_addr_hi =
16802 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16803 /* Keep the first xritag on the list */
16805 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
16809 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16810 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16811 /* Perform endian conversion if necessary */
16812 sgl->word0 = cpu_to_le32(sgl->word0);
16814 if (!phba->sli4_hba.intr_enable) {
16815 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16817 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16818 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16820 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
16821 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16822 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16823 if (rc != MBX_TIMEOUT)
16824 lpfc_sli4_mbox_cmd_free(phba, mbox);
16825 if (shdr_status || shdr_add_status || rc) {
16826 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16827 "6125 POST_SGL_BLOCK mailbox command failed "
16828 "status x%x add_status x%x mbx status x%x\n",
16829 shdr_status, shdr_add_status, rc);
16836 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
16837 * @phba: pointer to lpfc hba data structure.
16838 * @post_nblist: pointer to the nvme buffer list.
16840 * This routine walks a list of nvme buffers that was passed in. It attempts
16841 * to construct blocks of nvme buffer sgls which contain contiguous xris and
16842 * uses the non-embedded SGL block post mailbox commands to post to the port.
16843 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
16844 * embedded SGL post mailbox command for posting. The @post_nblist passed in
16845 * must be a local list, thus no lock is needed when manipulating the list.
16847 * Returns: 0 = failure, non-zero number of successfully posted buffers.
16850 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
16851 struct list_head *post_nblist, int sb_count)
16853 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
16854 int status, sgl_size;
16855 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
16856 dma_addr_t pdma_phys_sgl1;
16857 int last_xritag = NO_XRI;
16859 LIST_HEAD(prep_nblist);
16860 LIST_HEAD(blck_nblist);
16861 LIST_HEAD(nvme_nblist);
16867 sgl_size = phba->cfg_sg_dma_buf_size;
16868 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
16869 list_del_init(&lpfc_ncmd->list);
16871 if ((last_xritag != NO_XRI) &&
16872 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
16873 /* a hole in xri block, form a sgl posting block */
16874 list_splice_init(&prep_nblist, &blck_nblist);
16875 post_cnt = block_cnt - 1;
16876 /* prepare list for next posting block */
16877 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16880 /* prepare list for next posting block */
16881 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
16882 /* enough sgls for non-embed sgl mbox command */
16883 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
16884 list_splice_init(&prep_nblist, &blck_nblist);
16885 post_cnt = block_cnt;
16890 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16892 /* end of repost sgl list condition for NVME buffers */
16893 if (num_posting == sb_count) {
16894 if (post_cnt == 0) {
16895 /* last sgl posting block */
16896 list_splice_init(&prep_nblist, &blck_nblist);
16897 post_cnt = block_cnt;
16898 } else if (block_cnt == 1) {
16899 /* last single sgl with non-contiguous xri */
16900 if (sgl_size > SGL_PAGE_SIZE)
16902 lpfc_ncmd->dma_phys_sgl +
16905 pdma_phys_sgl1 = 0;
16906 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
16907 status = lpfc_sli4_post_sgl(
16908 phba, lpfc_ncmd->dma_phys_sgl,
16909 pdma_phys_sgl1, cur_xritag);
16911 /* Post error. Buffer unavailable. */
16912 lpfc_ncmd->flags |=
16913 LPFC_SBUF_NOT_POSTED;
16915 /* Post success. Buffer available. */
16916 lpfc_ncmd->flags &=
16917 ~LPFC_SBUF_NOT_POSTED;
16918 lpfc_ncmd->status = IOSTAT_SUCCESS;
16921 /* success, put on NVME buffer sgl list */
16922 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16926 /* continue until a nembed page worth of sgls */
16930 /* post block of NVME buffer list sgls */
16931 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
16934 /* don't reset xritag due to hole in xri block */
16935 if (block_cnt == 0)
16936 last_xritag = NO_XRI;
16938 /* reset NVME buffer post count for next round of posting */
16941 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
16942 while (!list_empty(&blck_nblist)) {
16943 list_remove_head(&blck_nblist, lpfc_ncmd,
16944 struct lpfc_io_buf, list);
16946 /* Post error. Mark buffer unavailable. */
16947 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
16949 /* Post success, Mark buffer available. */
16950 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
16951 lpfc_ncmd->status = IOSTAT_SUCCESS;
16954 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
16957 /* Push NVME buffers with sgl posted to the available list */
16958 lpfc_io_buf_replenish(phba, &nvme_nblist);
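/*
 * A compiled-out restatement of the batching rule implemented above:
 * buffers accumulate into one block only while their xris stay
 * consecutive; a hole in the xri sequence (or reaching
 * LPFC_NEMBED_MBOX_SGL_CNT) flushes the pending block to
 * lpfc_sli4_post_io_sgl_block(), and the buffer that broke the run
 * starts the next block.
 */
#if 0
	if (last_xritag != NO_XRI &&
	    lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1) {
		/* hole: post what is queued; this buffer opens a new block */
		list_splice_init(&prep_nblist, &blck_nblist);
		post_cnt = block_cnt - 1;
		list_add_tail(&lpfc_ncmd->list, &prep_nblist);
	}
#endif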
16964 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16965 * @phba: pointer to lpfc_hba struct that the frame was received on
16966 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16968 * This function checks the fields in the @fc_hdr to see if the FC frame is a
16969 * valid type of frame that the LPFC driver will handle. This function will
16970 * return a zero if the frame is a valid frame or a non zero value when the
16971 * frame does not pass the check.
16974 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16977 struct fc_vft_header *fc_vft_hdr;
16978 uint32_t *header = (uint32_t *) fc_hdr;
16980 switch (fc_hdr->fh_r_ctl) {
16981 case FC_RCTL_DD_UNCAT: /* uncategorized information */
16982 case FC_RCTL_DD_SOL_DATA: /* solicited data */
16983 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
16984 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
16985 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
16986 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
16987 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
16988 case FC_RCTL_DD_CMD_STATUS: /* command status */
16989 case FC_RCTL_ELS_REQ: /* extended link services request */
16990 case FC_RCTL_ELS_REP: /* extended link services reply */
16991 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
16992 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16993 case FC_RCTL_BA_NOP: /* basic link service NOP */
16994 case FC_RCTL_BA_ABTS: /* basic link service abort */
16995 case FC_RCTL_BA_RMC: /* remove connection */
16996 case FC_RCTL_BA_ACC: /* basic accept */
16997 case FC_RCTL_BA_RJT: /* basic reject */
16998 case FC_RCTL_BA_PRMT:
16999 case FC_RCTL_ACK_1: /* acknowledge_1 */
17000 case FC_RCTL_ACK_0: /* acknowledge_0 */
17001 case FC_RCTL_P_RJT: /* port reject */
17002 case FC_RCTL_F_RJT: /* fabric reject */
17003 case FC_RCTL_P_BSY: /* port busy */
17004 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17005 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17006 case FC_RCTL_LCR: /* link credit reset */
17007 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17008 case FC_RCTL_END: /* end */
17009 break;
17010 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17011 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17012 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17013 return lpfc_fc_frame_check(phba, fc_hdr);
17018 switch (fc_hdr->fh_type) {
17019 case FC_TYPE_BLS:
17020 case FC_TYPE_ELS:
17021 case FC_TYPE_FCP:
17022 case FC_TYPE_CT:
17023 case FC_TYPE_NVME:
17024 break;
17025 case FC_TYPE_IP:
17026 case FC_TYPE_ILS:
17027 default:
17028 goto drop;
17029 }
17031 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17032 "2538 Received frame rctl:x%x, type:x%x, "
17033 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17034 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17035 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17036 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17037 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17038 be32_to_cpu(header[6]));
17039 return 0;
17040 drop:
17041 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17042 "2539 Dropped frame rctl:x%x type:x%x\n",
17043 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17048 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17049 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17051 * This function processes the FC header to retrieve the VFI from the VF
17052 * header, if one exists. This function will return the VFI if one exists
17053 * or 0 if no Virtual Fabric Tagging header exists.
17056 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17058 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17060 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17062 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17066 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17067 * @phba: Pointer to the HBA structure to search for the vport on
17068 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17069 * @fcfi: The FC Fabric ID that the frame came from
17071 * This function searches the @phba for a vport that matches the content of the
17072 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17073 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17074 * returns the matching vport pointer or NULL if unable to match frame to a
17077 static struct lpfc_vport *
17078 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17079 uint16_t fcfi, uint32_t did)
17081 struct lpfc_vport **vports;
17082 struct lpfc_vport *vport = NULL;
17085 if (did == Fabric_DID)
17086 return phba->pport;
17087 if ((phba->pport->fc_flag & FC_PT2PT) &&
17088 !(phba->link_state == LPFC_HBA_READY))
17089 return phba->pport;
17091 vports = lpfc_create_vport_work_array(phba);
17092 if (vports != NULL) {
17093 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17094 if (phba->fcf.fcfi == fcfi &&
17095 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17096 vports[i]->fc_myDID == did) {
17102 lpfc_destroy_vport_work_array(phba, vports);
17107 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17108 * @vport: The vport to work on.
17110 * This function updates the receive sequence time stamp for this vport. The
17111 * receive sequence time stamp indicates the time that the last frame of the
17112 * sequence that has been idle for the longest amount of time was received.
17113 * The driver uses this time stamp to determine if any received sequences have timed out.
17117 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17119 struct lpfc_dmabuf *h_buf;
17120 struct hbq_dmabuf *dmabuf = NULL;
17122 /* get the oldest sequence on the rcv list */
17123 h_buf = list_get_first(&vport->rcv_buffer_list,
17124 struct lpfc_dmabuf, list);
17127 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17128 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17132 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17133 * @vport: The vport that the received sequences were sent to.
17135 * This function cleans up all outstanding received sequences. This is called
17136 * by the driver when a link event or user action invalidates all the received
17140 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17142 struct lpfc_dmabuf *h_buf, *hnext;
17143 struct lpfc_dmabuf *d_buf, *dnext;
17144 struct hbq_dmabuf *dmabuf = NULL;
17146 /* start with the oldest sequence on the rcv list */
17147 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17148 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17149 list_del_init(&dmabuf->hbuf.list);
17150 list_for_each_entry_safe(d_buf, dnext,
17151 &dmabuf->dbuf.list, list) {
17152 list_del_init(&d_buf->list);
17153 lpfc_in_buf_free(vport->phba, d_buf);
17155 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17160 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17161 * @vport: The vport that the received sequences were sent to.
17163 * This function determines whether any received sequences have timed out by
17164 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17165 * indicates that there is at least one timed out sequence this routine will
17166 * go through the received sequences one at a time from most inactive to most
17167 * active to determine which ones need to be cleaned up. Once it has determined
17168 * that a sequence needs to be cleaned up it will simply free up the resources
17169 * without sending an abort.
17172 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17174 struct lpfc_dmabuf *h_buf, *hnext;
17175 struct lpfc_dmabuf *d_buf, *dnext;
17176 struct hbq_dmabuf *dmabuf = NULL;
17177 unsigned long timeout;
17178 int abort_count = 0;
17180 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17181 vport->rcv_buffer_time_stamp);
17182 if (list_empty(&vport->rcv_buffer_list) ||
17183 time_before(jiffies, timeout))
17185 /* start with the oldest sequence on the rcv list */
17186 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17187 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17188 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17189 dmabuf->time_stamp);
17190 if (time_before(jiffies, timeout))
17193 list_del_init(&dmabuf->hbuf.list);
17194 list_for_each_entry_safe(d_buf, dnext,
17195 &dmabuf->dbuf.list, list) {
17196 list_del_init(&d_buf->list);
17197 lpfc_in_buf_free(vport->phba, d_buf);
17199 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17202 lpfc_update_rcv_time_stamp(vport);
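/*
 * Timeout math sketch (illustrative): with fc_edtov in milliseconds, a
 * sequence is considered timed out when
 *
 *	jiffies >= dmabuf->time_stamp + msecs_to_jiffies(fc_edtov)
 *
 * which is exactly the case in which the time_before(jiffies, timeout)
 * test above fails.
 */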
17206 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17207 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17209 * This function searches through the existing incomplete sequences that have
17210 * been sent to this @vport. If the frame matches one of the incomplete
17211 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17212 * make up that sequence. If no sequence is found that matches this frame then
17213 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
17214 * This function returns a pointer to the first dmabuf in the sequence list that
17215 * the frame was linked to.
17217 static struct hbq_dmabuf *
17218 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17220 struct fc_frame_header *new_hdr;
17221 struct fc_frame_header *temp_hdr;
17222 struct lpfc_dmabuf *d_buf;
17223 struct lpfc_dmabuf *h_buf;
17224 struct hbq_dmabuf *seq_dmabuf = NULL;
17225 struct hbq_dmabuf *temp_dmabuf = NULL;
17228 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17229 dmabuf->time_stamp = jiffies;
17230 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17232 /* Use the hdr_buf to find the sequence that this frame belongs to */
17233 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17234 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17235 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17236 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17237 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17239 /* found a pending sequence that matches this frame */
17240 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17245 * This indicates first frame received for this sequence.
17246 * Queue the buffer on the vport's rcv_buffer_list.
17248 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17249 lpfc_update_rcv_time_stamp(vport);
17252 temp_hdr = seq_dmabuf->hbuf.virt;
17253 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17254 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17255 list_del_init(&seq_dmabuf->hbuf.list);
17256 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17257 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17258 lpfc_update_rcv_time_stamp(vport);
17261 /* move this sequence to the tail to indicate a young sequence */
17262 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17263 seq_dmabuf->time_stamp = jiffies;
17264 lpfc_update_rcv_time_stamp(vport);
17265 if (list_empty(&seq_dmabuf->dbuf.list)) {
17266 temp_hdr = dmabuf->hbuf.virt;
17267 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17270 /* find the correct place in the sequence to insert this frame */
17271 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17273 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17274 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17276 * If the frame's sequence count is greater than the frame on
17277 * the list then insert the frame right after this frame
17279 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17280 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17281 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17286 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17288 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
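/*
 * Sequence-matching sketch (illustrative): two frames belong to the same
 * sequence when their (SEQ_ID, OX_ID, S_ID) triple matches, the test
 * used in the lookup loop above:
 *
 *	match = (a->fh_seq_id == b->fh_seq_id) &&
 *		(a->fh_ox_id == b->fh_ox_id) &&
 *		!memcmp(&a->fh_s_id, &b->fh_s_id, 3);	(S_ID is three bytes)
 */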
17297 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17298 * @vport: pointer to a virtual port
17299 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17301 * This function tries to abort the partially assembled sequence described
17302 * by the information from the basic abort @dmabuf. It checks whether such a
17303 * partially assembled sequence is held by the driver. If so, it shall free up
17304 * all the frames from the partially assembled sequence.
17307 * true -- if a matching partially assembled sequence is present and all
17308 * of its frames were freed with the sequence;
17309 * false -- if there is no matching partially assembled sequence present, so
17310 * nothing was aborted in the lower layer driver
17313 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17314 struct hbq_dmabuf *dmabuf)
17316 struct fc_frame_header *new_hdr;
17317 struct fc_frame_header *temp_hdr;
17318 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17319 struct hbq_dmabuf *seq_dmabuf = NULL;
17321 /* Use the hdr_buf to find the sequence that matches this frame */
17322 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17323 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17324 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17325 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17326 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17327 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17328 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17329 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17331 /* found a pending sequence that matches this frame */
17332 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17336 /* Free up all the frames from the partially assembled sequence */
17338 list_for_each_entry_safe(d_buf, n_buf,
17339 &seq_dmabuf->dbuf.list, list) {
17340 list_del_init(&d_buf->list);
17341 lpfc_in_buf_free(vport->phba, d_buf);
17349 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17350 * @vport: pointer to a virtual port
17351 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17353 * This function tries to abort the assembled sequence at the upper level
17354 * protocol, described by the information from the basic abort @dmabuf. It
17355 * checks whether such a pending context exists at the upper level protocol.
17356 * If so, it shall clean up the pending context.
17359 * true -- if a matching pending context of the sequence was cleaned
17361 * false -- if no matching pending context of the sequence is present
17365 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17367 struct lpfc_hba *phba = vport->phba;
17370 /* Accepting abort at ulp with SLI4 only */
17371 if (phba->sli_rev < LPFC_SLI_REV4)
17374 /* Give all interested upper level protocols a chance to handle the abort */
17375 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17383 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17384 * @phba: Pointer to HBA context object.
17385 * @cmd_iocbq: pointer to the command iocbq structure.
17386 * @rsp_iocbq: pointer to the response iocbq structure.
17388 * This function handles the sequence abort response iocb command complete
17389 * event. It properly releases the memory allocated to the sequence abort
17393 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17394 struct lpfc_iocbq *cmd_iocbq,
17395 struct lpfc_iocbq *rsp_iocbq)
17397 struct lpfc_nodelist *ndlp;
17400 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17401 lpfc_nlp_put(ndlp);
17402 lpfc_nlp_not_used(ndlp);
17403 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17406 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17407 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17408 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17409 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17410 rsp_iocbq->iocb.ulpStatus,
17411 rsp_iocbq->iocb.un.ulpWord[4]);
17415 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17416 * @phba: Pointer to HBA context object.
17417 * @xri: xri id in transaction.
17419 * This function validates that the xri maps to the known range of XRIs
17420 * allocated and used by the driver.
17423 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17428 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17429 if (xri == phba->sli4_hba.xri_ids[i])
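/*
 * Usage sketch (illustrative): callers translate a wire XRI into the
 * driver's logical index before acting on it, as the BA_ABTS path below
 * does:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri == NO_XRI)
 *		return;		(hypothetical caller bail-out)
 */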
17436 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17437 * @vport: Pointer to the vport on which the abort was received.
17438 * @fc_hdr: pointer to a FC frame header.
17440 * This function sends a basic response to a previous unsol sequence abort
17441 * event after aborting the sequence handling.
17444 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17445 struct fc_frame_header *fc_hdr, bool aborted)
17447 struct lpfc_hba *phba = vport->phba;
17448 struct lpfc_iocbq *ctiocb = NULL;
17449 struct lpfc_nodelist *ndlp;
17450 uint16_t oxid, rxid, xri, lxri;
17451 uint32_t sid, fctl;
17455 if (!lpfc_is_link_up(phba))
17458 sid = sli4_sid_from_fc_hdr(fc_hdr);
17459 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17460 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17462 ndlp = lpfc_findnode_did(vport, sid);
17464 ndlp = lpfc_nlp_init(vport, sid);
17466 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17467 "1268 Failed to allocate ndlp for "
17468 "oxid:x%x SID:x%x\n", oxid, sid);
17471 /* Put ndlp onto pport node list */
17472 lpfc_enqueue_node(vport, ndlp);
17473 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17474 /* re-setup ndlp without removing from node list */
17475 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17477 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17478 "3275 Failed to active ndlp found "
17479 "for oxid:x%x SID:x%x\n", oxid, sid);
17484 /* Allocate buffer for rsp iocb */
17485 ctiocb = lpfc_sli_get_iocbq(phba);
17489 /* Extract the F_CTL field from FC_HDR */
17490 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17492 icmd = &ctiocb->iocb;
17493 icmd->un.xseq64.bdl.bdeSize = 0;
17494 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17495 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17496 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17497 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17499 /* Fill in the rest of iocb fields */
17500 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17501 icmd->ulpBdeCount = 0;
17503 icmd->ulpClass = CLASS3;
17504 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17505 ctiocb->context1 = lpfc_nlp_get(ndlp);
17507 ctiocb->iocb_cmpl = NULL;
17508 ctiocb->vport = phba->pport;
17509 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17510 ctiocb->sli4_lxritag = NO_XRI;
17511 ctiocb->sli4_xritag = NO_XRI;
17513 if (fctl & FC_FC_EX_CTX)
17514 /* Exchange responder sent the abort so we
17520 lxri = lpfc_sli4_xri_inrange(phba, xri);
17521 if (lxri != NO_XRI)
17522 lpfc_set_rrq_active(phba, ndlp, lxri,
17523 (xri == oxid) ? rxid : oxid, 0);
17524 /* For BA_ABTS from exchange responder, if the logical xri with
17525 * the oxid maps to the FCP XRI range, the port no longer has
17526 * that exchange context, send a BLS_RJT. Override the IOCB for
17529 if ((fctl & FC_FC_EX_CTX) &&
17530 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17531 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17532 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17533 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17534 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17537 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17538 * the driver no longer has that exchange, send a BLS_RJT. Override
17539 * the IOCB for a BA_RJT.
17541 if (aborted == false) {
17542 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17543 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17544 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17545 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17548 if (fctl & FC_FC_EX_CTX) {
17549 /* ABTS sent by responder to CT exchange, construction
17550 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17551 * field and RX_ID from ABTS for RX_ID field.
17553 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17555 /* ABTS sent by initiator to CT exchange, construction
17556 * of BA_ACC will need to allocate a new XRI as for the
17559 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17561 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17562 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17564 /* Xmit CT abts response on exchange <xid> */
17565 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17566 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17567 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17569 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17570 if (rc == IOCB_ERROR) {
17571 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17572 "2925 Failed to issue CT ABTS RSP x%x on "
17573 "xri x%x, Data x%x\n",
17574 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17576 lpfc_nlp_put(ndlp);
17577 ctiocb->context1 = NULL;
17578 lpfc_sli_release_iocbq(phba, ctiocb);
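/*
 * Response selection sketch (illustrative summary of the logic above):
 *
 *	if (responder-sent ABTS maps outside the driver's exchange range,
 *	    or the partial-sequence abort did not succeed)
 *		send BA_RJT (FC_RCTL_BA_RJT, reason FC_BA_RJT_UNABLE)
 *	else
 *		send BA_ACC (FC_RCTL_BA_ACC)
 */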
17583 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
17584 * @vport: Pointer to the vport on which this sequence was received
17585 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17587 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
17588 * receive sequence is only partially assembled by the driver, it shall abort
17589 * the partially assembled frames for the sequence. Otherwise, if the
17590 * unsolicited receive sequence has been completely assembled and passed to
17591 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
17592 * unsolicited sequence as aborted. After that, it will issue a basic
17593 * accept to accept the abort.
17596 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17597 struct hbq_dmabuf *dmabuf)
17599 struct lpfc_hba *phba = vport->phba;
17600 struct fc_frame_header fc_hdr;
17604 /* Make a copy of fc_hdr before the dmabuf being released */
17605 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17606 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17608 if (fctl & FC_FC_EX_CTX) {
17609 /* ABTS by responder to exchange, no cleanup needed */
17612 /* ABTS by initiator to exchange, need to do cleanup */
17613 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17614 if (aborted == false)
17615 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17617 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17619 if (phba->nvmet_support) {
17620 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17624 /* Respond with BA_ACC or BA_RJT accordingly */
17625 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17629 * lpfc_seq_complete - Indicates if a sequence is complete
17630 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17632 * This function checks the sequence, starting with the frame described by
17633 * @dmabuf, to see if all the frames associated with this sequence are present.
17634 * The frames associated with this sequence are linked to the @dmabuf using
17635 * the dbuf list. This function looks for three major things. 1) That the
17636 * first frame has a sequence count of zero. 2) That there is a frame with the
17637 * last-frame-of-sequence bit set. 3) That there are no holes in the sequence
17638 * count. The function will return 1 when the sequence is complete, otherwise 0.
17641 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17643 struct fc_frame_header *hdr;
17644 struct lpfc_dmabuf *d_buf;
17645 struct hbq_dmabuf *seq_dmabuf;
17649 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17650 /* make sure the first frame of the sequence has a sequence count of zero */
17651 if (hdr->fh_seq_cnt != seq_count)
17653 fctl = (hdr->fh_f_ctl[0] << 16 |
17654 hdr->fh_f_ctl[1] << 8 |
17656 /* If last frame of sequence we can return success. */
17657 if (fctl & FC_FC_END_SEQ)
17659 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17660 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17661 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17662 /* If there is a hole in the sequence count then fail. */
17663 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17665 fctl = (hdr->fh_f_ctl[0] << 16 |
17666 hdr->fh_f_ctl[1] << 8 |
17668 /* If last frame of sequence we can return success. */
17669 if (fctl & FC_FC_END_SEQ)
17676 * lpfc_prep_seq - Prep sequence for ULP processing
17677 * @vport: Pointer to the vport on which this sequence was received
17678 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17680 * This function takes a sequence, described by a list of frames, and creates
17681 * a list of iocbq structures to describe the sequence. This iocbq list will be
17682 * used to issue to the generic unsolicited sequence handler. This routine
17683 * returns a pointer to the first iocbq in the list. If the function is unable
17684 * to allocate an iocbq then it throws out the received frames that were not
17685 * able to be described and returns a pointer to the first iocbq. If unable to
17686 * allocate any iocbqs (including the first) this function will return NULL.
17688 static struct lpfc_iocbq *
17689 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17691 struct hbq_dmabuf *hbq_buf;
17692 struct lpfc_dmabuf *d_buf, *n_buf;
17693 struct lpfc_iocbq *first_iocbq, *iocbq;
17694 struct fc_frame_header *fc_hdr;
17696 uint32_t len, tot_len;
17697 struct ulp_bde64 *pbde;
17699 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17700 /* remove from receive buffer list */
17701 list_del_init(&seq_dmabuf->hbuf.list);
17702 lpfc_update_rcv_time_stamp(vport);
17703 /* get the Remote Port's SID */
17704 sid = sli4_sid_from_fc_hdr(fc_hdr);
17706 /* Get an iocbq struct to fill in. */
17707 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17709 /* Initialize the first IOCB. */
17710 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17711 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17712 first_iocbq->vport = vport;
17714 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17715 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17716 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17717 first_iocbq->iocb.un.rcvels.parmRo =
17718 sli4_did_from_fc_hdr(fc_hdr);
17719 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17721 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17722 first_iocbq->iocb.ulpContext = NO_XRI;
17723 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17724 be16_to_cpu(fc_hdr->fh_ox_id);
17725 /* iocbq is prepped for internal consumption. Physical vpi. */
17726 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17727 vport->phba->vpi_ids[vport->vpi];
17728 /* put the first buffer into the first IOCBq */
17729 tot_len = bf_get(lpfc_rcqe_length,
17730 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17732 first_iocbq->context2 = &seq_dmabuf->dbuf;
17733 first_iocbq->context3 = NULL;
17734 first_iocbq->iocb.ulpBdeCount = 1;
17735 if (tot_len > LPFC_DATA_BUF_SIZE)
17736 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17737 LPFC_DATA_BUF_SIZE;
17739 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17741 first_iocbq->iocb.un.rcvels.remoteID = sid;
17743 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17745 iocbq = first_iocbq;
17747 * Each IOCBq can have two Buffers assigned, so go through the list
17748 * of buffers for this sequence and save two buffers in each IOCBq
17750 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17752 lpfc_in_buf_free(vport->phba, d_buf);
17755 if (!iocbq->context3) {
17756 iocbq->context3 = d_buf;
17757 iocbq->iocb.ulpBdeCount++;
17758 /* We need to get the size out of the right CQE */
17759 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17760 len = bf_get(lpfc_rcqe_length,
17761 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17762 pbde = (struct ulp_bde64 *)
17763 &iocbq->iocb.unsli3.sli3Words[4];
17764 if (len > LPFC_DATA_BUF_SIZE)
17765 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17767 pbde->tus.f.bdeSize = len;
17769 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
17772 iocbq = lpfc_sli_get_iocbq(vport->phba);
17775 first_iocbq->iocb.ulpStatus =
17776 IOSTAT_FCP_RSP_ERROR;
17777 first_iocbq->iocb.un.ulpWord[4] =
17778 IOERR_NO_RESOURCES;
17780 lpfc_in_buf_free(vport->phba, d_buf);
17783 /* We need to get the size out of the right CQE */
17784 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17785 len = bf_get(lpfc_rcqe_length,
17786 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17787 iocbq->context2 = d_buf;
17788 iocbq->context3 = NULL;
17789 iocbq->iocb.ulpBdeCount = 1;
17790 if (len > LPFC_DATA_BUF_SIZE)
17791 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17792 LPFC_DATA_BUF_SIZE;
17794 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17797 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17799 iocbq->iocb.un.rcvels.remoteID = sid;
17800 list_add_tail(&iocbq->list, &first_iocbq->list);
17803 return first_iocbq;
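/*
 * Note (illustrative): each IOCB carries at most two buffer descriptors;
 * context2 holds the first buffer, context3 the optional second, and
 * ulpBdeCount tracks how many are valid, so a sequence of N frames is
 * described by roughly N/2 iocbq entries.
 */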
17807 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17808 struct hbq_dmabuf *seq_dmabuf)
17810 struct fc_frame_header *fc_hdr;
17811 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17812 struct lpfc_hba *phba = vport->phba;
17814 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17815 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17817 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17818 "2707 Ring %d handler: Failed to allocate "
17819 "iocb Rctl x%x Type x%x received\n",
17821 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17824 if (!lpfc_complete_unsol_iocb(phba,
17825 phba->sli4_hba.els_wq->pring,
17826 iocbq, fc_hdr->fh_r_ctl,
17828 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17829 "2540 Ring %d handler: unexpected Rctl "
17830 "x%x Type x%x received\n",
17832 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17834 /* Free iocb created in lpfc_prep_seq */
17835 list_for_each_entry_safe(curr_iocb, next_iocb,
17836 &iocbq->list, list) {
17837 list_del_init(&curr_iocb->list);
17838 lpfc_sli_release_iocbq(phba, curr_iocb);
17840 lpfc_sli_release_iocbq(phba, iocbq);
17844 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17845 struct lpfc_iocbq *rspiocb)
17847 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17849 if (pcmd && pcmd->virt)
17850 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17852 lpfc_sli_release_iocbq(phba, cmdiocb);
17853 lpfc_drain_txq(phba);
17857 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17858 struct hbq_dmabuf *dmabuf)
17860 struct fc_frame_header *fc_hdr;
17861 struct lpfc_hba *phba = vport->phba;
17862 struct lpfc_iocbq *iocbq = NULL;
17863 union lpfc_wqe *wqe;
17864 struct lpfc_dmabuf *pcmd = NULL;
17865 uint32_t frame_len;
17867 unsigned long iflags;
17869 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17870 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17872 /* Send the received frame back */
17873 iocbq = lpfc_sli_get_iocbq(phba);
17875 /* Queue cq event and wakeup worker thread to process it */
17876 spin_lock_irqsave(&phba->hbalock, iflags);
17877 list_add_tail(&dmabuf->cq_event.list,
17878 &phba->sli4_hba.sp_queue_event);
17879 phba->hba_flag |= HBA_SP_QUEUE_EVT;
17880 spin_unlock_irqrestore(&phba->hbalock, iflags);
17881 lpfc_worker_wake_up(phba);
17885 /* Allocate buffer for command payload */
17886 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17888 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17890 if (!pcmd || !pcmd->virt)
17893 INIT_LIST_HEAD(&pcmd->list);
17895 /* copyin the payload */
17896 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17898 /* fill in BDE's for command */
17899 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17900 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17901 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17902 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17904 iocbq->context2 = pcmd;
17905 iocbq->vport = vport;
17906 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17907 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17910 * Setup rest of the iocb as though it were a WQE
17911 * Build the SEND_FRAME WQE
17913 wqe = (union lpfc_wqe *)&iocbq->iocb;
17915 wqe->send_frame.frame_len = frame_len;
17916 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17917 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17918 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17919 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17920 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17921 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17923 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17924 iocbq->iocb.ulpLe = 1;
17925 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17926 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17927 if (rc == IOCB_ERROR)
17930 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17934 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17935 "2023 Unable to process MDS loopback frame\n");
17936 if (pcmd && pcmd->virt)
17937 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17940 lpfc_sli_release_iocbq(phba, iocbq);
17941 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17945 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17946 * @phba: Pointer to HBA context object.
17948 * This function is called with no lock held. This function processes all
17949 * the received buffers and gives them to upper layers when a received buffer
17950 * indicates that it is the final frame in the sequence. The interrupt
17951 * service routine processes received buffers in interrupt context.
17952 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17953 * appropriate receive function when the final frame in a sequence is received.
17956 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17957 struct hbq_dmabuf *dmabuf)
17959 struct hbq_dmabuf *seq_dmabuf;
17960 struct fc_frame_header *fc_hdr;
17961 struct lpfc_vport *vport;
17965 /* Process each received buffer */
17966 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17968 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
17969 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
17970 vport = phba->pport;
17971 /* Handle MDS Loopback frames */
17972 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17977 /* check to see if this is a valid type of frame */
17977 if (lpfc_fc_frame_check(phba, fc_hdr)) {
17978 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17982 if ((bf_get(lpfc_cqe_code,
17983 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17984 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17985 &dmabuf->cq_event.cqe.rcqe_cmpl);
17987 fcfi = bf_get(lpfc_rcqe_fcf_id,
17988 &dmabuf->cq_event.cqe.rcqe_cmpl);
17990 /* d_id this frame is directed to */
17991 did = sli4_did_from_fc_hdr(fc_hdr);
17993 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17995 /* throw out the frame */
17996 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18000 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18001 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18002 (did != Fabric_DID)) {
18004 * Throw out the frame if we are not pt2pt.
18005 * The pt2pt protocol allows for discovery frames
18006 * to be received without a registered VPI.
18008 if (!(vport->fc_flag & FC_PT2PT) ||
18009 (phba->link_state == LPFC_HBA_READY)) {
18010 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18015 /* Handle the basic abort sequence (BA_ABTS) event */
18016 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18017 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18021 /* Link this frame */
18022 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18024 /* unable to add frame to vport - throw it out */
18025 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18028 /* If not last frame in sequence continue processing frames. */
18029 if (!lpfc_seq_complete(seq_dmabuf))
18032 /* Send the complete sequence to the upper layer protocol */
18033 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
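/*
 * Receive path sketch (illustrative summary of the function above):
 *
 *	frame -> lpfc_fc_frame_check()		validate the frame
 *	      -> lpfc_fc_frame_to_vport()	route by fcfi/vfi/did
 *	      -> lpfc_fc_frame_add()		link into a sequence
 *	      -> lpfc_seq_complete()		all frames present?
 *	      -> lpfc_sli4_send_seq_to_ulp()	hand off to the ULP
 */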
18037 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18038 * @phba: pointer to lpfc hba data structure.
18040 * This routine is invoked to post rpi header templates to the
18041 * HBA consistent with the SLI-4 interface spec. This routine
18042 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18043 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
18045 * This routine does not require any locks. Its usage is expected
18046 * to be driver load or reset recovery when the driver is
18051 * -EIO - The mailbox failed to complete successfully.
18052 * When this error occurs, the driver is not guaranteed
18053 * to have any rpi regions posted to the device and
18054 * must either attempt to repost the regions or take a
18058 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18060 struct lpfc_rpi_hdr *rpi_page;
18064 /* SLI4 ports that support extents do not require RPI headers. */
18065 if (!phba->sli4_hba.rpi_hdrs_in_use)
18067 if (phba->sli4_hba.extents_in_use)
18070 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18072 * Assign the rpi headers a physical rpi only if the driver
18073 * has not initialized those resources. A port reset only
18074 * needs the headers posted.
18076 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18078 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18080 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18081 if (rc != MBX_SUCCESS) {
18082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18083 "2008 Error %d posting all rpi "
18091 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18092 LPFC_RPI_RSRC_RDY);
18097 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18098 * @phba: pointer to lpfc hba data structure.
18099 * @rpi_page: pointer to the rpi memory region.
18101 * This routine is invoked to post a single rpi header to the
18102 * HBA consistent with the SLI-4 interface spec. This memory region
18103 * maps up to 64 rpi context regions.
18107 * -ENOMEM - No available memory
18108 * -EIO - The mailbox failed to complete successfully.
18111 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18113 LPFC_MBOXQ_t *mboxq;
18114 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18116 uint32_t shdr_status, shdr_add_status;
18117 union lpfc_sli4_cfg_shdr *shdr;
18119 /* SLI4 ports that support extents do not require RPI headers. */
18120 if (!phba->sli4_hba.rpi_hdrs_in_use)
18122 if (phba->sli4_hba.extents_in_use)
18125 /* The port is notified of the header region via a mailbox command. */
18126 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18128 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18129 "2001 Unable to allocate memory for issuing "
18130 "SLI_CONFIG_SPECIAL mailbox command\n");
18134 /* Post all rpi memory regions to the port. */
18135 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18136 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18137 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18138 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18139 sizeof(struct lpfc_sli4_cfg_mhdr),
18140 LPFC_SLI4_MBX_EMBED);
18143 /* Post the physical rpi to the port for this rpi header. */
18144 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18145 rpi_page->start_rpi);
18146 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18147 hdr_tmpl, rpi_page->page_count);
18149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18155 if (rc != MBX_TIMEOUT)
18156 mempool_free(mboxq, phba->mbox_mem_pool);
18157 if (shdr_status || shdr_add_status || rc) {
18158 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18159 "2514 POST_RPI_HDR mailbox failed with "
18160 "status x%x add_status x%x, mbx status x%x\n",
18161 shdr_status, shdr_add_status, rc);
18165 * The next_rpi stores the next logical modulo-64 rpi value used
18166 * to post physical rpis in subsequent rpi postings.
18168 spin_lock_irq(&phba->hbalock);
18169 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18170 spin_unlock_irq(&phba->hbalock);
18176 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18177 * @phba: pointer to lpfc hba data structure.
18179 * This routine is invoked to allocate an available rpi from the
18180 * driver's managed rpi range, consistent with the SLI-4 interface
18181 * spec. If the pool of remaining rpis runs low, it also posts
18182 * another rpi header page to the port.
18185 * A nonzero rpi, defined as rpi_base <= rpi < max_rpi, if successful
18186 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18189 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18192 uint16_t max_rpi, rpi_limit;
18193 uint16_t rpi_remaining, lrpi = 0;
18194 struct lpfc_rpi_hdr *rpi_hdr;
18195 unsigned long iflag;
18198 * Fetch the next logical rpi. Because this index is logical,
18199 * the driver starts at 0 each time.
18201 spin_lock_irqsave(&phba->hbalock, iflag);
18202 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18203 rpi_limit = phba->sli4_hba.next_rpi;
18205 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18206 if (rpi >= rpi_limit)
18207 rpi = LPFC_RPI_ALLOC_ERROR;
18209 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18210 phba->sli4_hba.max_cfg_param.rpi_used++;
18211 phba->sli4_hba.rpi_count++;
18213 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18214 "0001 rpi:%x max:%x lim:%x\n",
18215 (int) rpi, max_rpi, rpi_limit);
18218 * Don't try to allocate more rpi header regions if the device limit
18219 * has been exhausted.
18221 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18222 (phba->sli4_hba.rpi_count >= max_rpi)) {
18223 spin_unlock_irqrestore(&phba->hbalock, iflag);
18228 * RPI header postings are not required for SLI4 ports capable of
18231 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18232 spin_unlock_irqrestore(&phba->hbalock, iflag);
18237 * If the driver is running low on rpi resources, allocate another
18238 * page now. Note that the next_rpi value is used because
18239 * it represents how many are actually in use, whereas max_rpi notes
18240 * the maximum number supported by the device.
18242 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18243 spin_unlock_irqrestore(&phba->hbalock, iflag);
18244 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18245 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18247 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18248 "2002 Error Could not grow rpi "
18251 lrpi = rpi_hdr->start_rpi;
18252 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18253 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
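/*
 * Usage sketch (illustrative, hypothetical caller): rpi allocation and
 * release are paired, e.g. a discovery path might do
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 */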
18261 * __lpfc_sli4_free_rpi - Release an rpi for reuse (caller holds hbalock).
18262 * @phba: pointer to lpfc hba data structure.
18264 * This routine is invoked to release an rpi to the pool of
18265 * available rpis maintained by the driver.
18268 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18270 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18271 phba->sli4_hba.rpi_count--;
18272 phba->sli4_hba.max_cfg_param.rpi_used--;
18277 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18278 * @phba: pointer to lpfc hba data structure.
18280 * This routine is invoked to release an rpi to the pool of
18281 * available rpis maintained by the driver.
18284 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18286 spin_lock_irq(&phba->hbalock);
18287 __lpfc_sli4_free_rpi(phba, rpi);
18288 spin_unlock_irq(&phba->hbalock);
18292 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18293 * @phba: pointer to lpfc hba data structure.
18295 * This routine is invoked to remove the memory region that
18296 * provided rpis via a bitmask.
18299 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18301 kfree(phba->sli4_hba.rpi_bmask);
18302 kfree(phba->sli4_hba.rpi_ids);
18303 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18307 * lpfc_sli4_resume_rpi - Resume an rpi on the port
18308 * @ndlp: pointer to the node whose rpi is to be resumed.
18310 * This routine is invoked to resume a paused rpi on the port by
18311 * issuing a RESUME_RPI mailbox command to the HBA.
18314 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18315 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18317 LPFC_MBOXQ_t *mboxq;
18318 struct lpfc_hba *phba = ndlp->phba;
18321 /* The port is notified of the header region via a mailbox command. */
18322 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18326 /* Post all rpi memory regions to the port. */
18327 lpfc_resume_rpi(mboxq, ndlp);
18329 mboxq->mbox_cmpl = cmpl;
18330 mboxq->ctx_buf = arg;
18331 mboxq->ctx_ndlp = ndlp;
18333 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18334 mboxq->vport = ndlp->vport;
18335 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18336 if (rc == MBX_NOT_FINISHED) {
18337 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18338 "2010 Resume RPI Mailbox failed "
18339 "status %d, mbxStatus x%x\n", rc,
18340 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18341 mempool_free(mboxq, phba->mbox_mem_pool);
18348 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18349 * @vport: Pointer to the vport for which the vpi is being initialized
18351 * This routine is invoked to activate a vpi with the port.
18355 * -Evalue otherwise
18358 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18360 LPFC_MBOXQ_t *mboxq;
18362 int retval = MBX_SUCCESS;
18364 struct lpfc_hba *phba = vport->phba;
18365 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18368 lpfc_init_vpi(phba, mboxq, vport->vpi);
18369 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18370 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18371 if (rc != MBX_SUCCESS) {
18372 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18373 "2022 INIT VPI Mailbox failed "
18374 "status %d, mbxStatus x%x\n", rc,
18375 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18378 if (rc != MBX_TIMEOUT)
18379 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18385 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18386 * @phba: pointer to lpfc hba data structure.
18387 * @mboxq: Pointer to mailbox object.
18389 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
18390 * command. It checks the mailbox subheader status, logs any failure, and
18391 * then frees the nonembedded mailbox resources.
18394 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18397 union lpfc_sli4_cfg_shdr *shdr;
18398 uint32_t shdr_status, shdr_add_status;
18400 virt_addr = mboxq->sge_array->addr[0];
18401 /* The IOCTL status is embedded in the mailbox subheader. */
18402 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18403 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18404 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18406 if ((shdr_status || shdr_add_status) &&
18407 (shdr_status != STATUS_FCF_IN_USE))
18408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18409 "2558 ADD_FCF_RECORD mailbox failed with "
18410 "status x%x add_status x%x\n",
18411 shdr_status, shdr_add_status);
18413 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18417 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18418 * @phba: pointer to lpfc hba data structure.
18419 * @fcf_record: pointer to the initialized fcf record to add.
18421 * This routine is invoked to manually add a single FCF record. The caller
18422 * must pass a completely initialized FCF_Record. This routine takes
18423 * care of the nonembedded mailbox operations.
18426 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18429 LPFC_MBOXQ_t *mboxq;
18432 struct lpfc_mbx_sge sge;
18433 uint32_t alloc_len, req_len;
18436 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18438 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18439 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18443 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18446 /* Allocate DMA memory and set up the non-embedded mailbox command */
18447 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18448 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18449 req_len, LPFC_SLI4_MBX_NEMBED);
18450 if (alloc_len < req_len) {
18451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18452 "2523 Allocated DMA memory size (x%x) is "
18453 "less than the requested DMA memory "
18454 "size (x%x)\n", alloc_len, req_len);
18455 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18460 * Get the first SGE entry from the non-embedded DMA memory. This
18461 * routine only uses a single SGE.
18463 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18464 virt_addr = mboxq->sge_array->addr[0];
18466 * Configure the FCF record for FCFI 0. This is the driver's
18467 * hardcoded default and gets used in nonFIP mode.
18469 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18470 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18471 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18474 * Copy the fcf_index and the FCF Record Data. The data starts after
18475 * the FCoE header plus word10. The data copy needs to be endian
18478 bytep += sizeof(uint32_t);
18479 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18480 mboxq->vport = phba->pport;
18481 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18482 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18483 if (rc == MBX_NOT_FINISHED) {
18484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18485 "2515 ADD_FCF_RECORD mailbox failed with "
18486 "status 0x%x\n", rc);
18487 lpfc_sli4_mbox_cmd_free(phba, mboxq);
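/*
 * Payload layout sketch (illustrative): the non-embedded ADD_FCF buffer
 * is laid out as
 *
 *	[ union lpfc_sli4_cfg_shdr ][ fcf_index (32-bit) ][ struct fcf_record ]
 *
 * which is why bytep above advances past the shdr, copies fcfindex, and
 * then copies the record itself via lpfc_sli_pcimem_bcopy().
 */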
18496 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18497 * @phba: pointer to lpfc hba data structure.
18498 * @fcf_record: pointer to the fcf record to write the default data.
18499 * @fcf_index: FCF table entry index.
18501 * This routine is invoked to build the driver's default FCF record. The
18502 * values used are hardcoded. This routine handles memory initialization.
18506 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18507 struct fcf_record *fcf_record,
18508 uint16_t fcf_index)
18510 memset(fcf_record, 0, sizeof(struct fcf_record));
18511 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18512 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18513 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18514 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18515 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18516 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18517 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18518 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18519 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18520 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18521 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18522 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18523 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18524 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18525 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18526 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18527 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18528 /* Set the VLAN bit map */
18529 if (phba->valid_vlan) {
18530 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18531 = 1 << (phba->vlan_id % 8);
18536 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
18537 * @phba: pointer to lpfc hba data structure.
18538 * @fcf_index: FCF table entry offset.
18540 * This routine is invoked to scan the entire FCF table by reading FCF
18541 * record and processing it one at a time starting from the @fcf_index
18542 * for initial FCF discovery or fast FCF failover rediscovery.
18544 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
18548 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18551 LPFC_MBOXQ_t *mboxq;
18553 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18554 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18555 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18558 "2000 Failed to allocate mbox for "
18561 goto fail_fcf_scan;
18563 /* Construct the read FCF record mailbox command */
18564 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18567 goto fail_fcf_scan;
18569 /* Issue the mailbox command asynchronously */
18570 mboxq->vport = phba->pport;
18571 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18573 spin_lock_irq(&phba->hbalock);
18574 phba->hba_flag |= FCF_TS_INPROG;
18575 spin_unlock_irq(&phba->hbalock);
18577 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18578 if (rc == MBX_NOT_FINISHED)
18581 /* Reset eligible FCF count for new scan */
18582 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18583 phba->fcf.eligible_fcf_cnt = 0;
18589 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18590 /* FCF scan failed, clear FCF_TS_INPROG flag */
18591 spin_lock_irq(&phba->hbalock);
18592 phba->hba_flag &= ~FCF_TS_INPROG;
18593 spin_unlock_irq(&phba->hbalock);
18599 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
18600 * @phba: pointer to lpfc hba data structure.
18601 * @fcf_index: FCF table entry offset.
18603 * This routine is invoked to read an FCF record indicated by @fcf_index
18604 * and to use it for FLOGI roundrobin FCF failover.
18606 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
18610 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18613 LPFC_MBOXQ_t *mboxq;
18615 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18617 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18618 "2763 Failed to allocate mbox for "
18621 goto fail_fcf_read;
18623 /* Construct the read FCF record mailbox command */
18624 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18627 goto fail_fcf_read;
18629 /* Issue the mailbox command asynchronously */
18630 mboxq->vport = phba->pport;
18631 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18632 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18633 if (rc == MBX_NOT_FINISHED)
18639 if (error && mboxq)
18640 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18645 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
18646 * @phba: pointer to lpfc hba data structure.
18647 * @fcf_index: FCF table entry offset.
18649 * This routine is invoked to read an FCF record indicated by @fcf_index to
18650 * determine whether it's eligible for the FLOGI roundrobin failover list.
18652 * Return 0 if the mailbox command is submitted successfully, non-zero otherwise.
18656 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18659 LPFC_MBOXQ_t *mboxq;
18661 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18663 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18664 "2758 Failed to allocate mbox for "
18667 goto fail_fcf_read;
18669 /* Construct the read FCF record mailbox command */
18670 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18673 goto fail_fcf_read;
18675 /* Issue the mailbox command asynchronously */
18676 mboxq->vport = phba->pport;
18677 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18678 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18679 if (rc == MBX_NOT_FINISHED)
18685 if (error && mboxq)
18686 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18691 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from next priority group
18692 * @phba: pointer to the lpfc_hba struct for this port.
18693 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18694 * routine when the rr_bmask is empty. The FCF indices are put into the
18695 * rr_bmask based on their priority level, starting from the highest priority
18696 * down to the lowest. The most likely FCF candidate will be in the highest
18697 * priority group. When this routine is called it searches the fcf_pri list
18698 * for the next lowest priority group and repopulates the rr_bmask with only
18701 * those entries. Returns: 1=success 0=failure
18704 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18706 uint16_t next_fcf_pri;
18707 uint16_t last_index;
18708 struct lpfc_fcf_pri *fcf_pri;
18712 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18713 LPFC_SLI4_FCF_TBL_INDX_MAX);
18714 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18715 "3060 Last IDX %d\n", last_index);
18717 /* Verify the priority list has 2 or more entries */
18718 spin_lock_irq(&phba->hbalock);
18719 if (list_empty(&phba->fcf.fcf_pri_list) ||
18720 list_is_singular(&phba->fcf.fcf_pri_list)) {
18721 spin_unlock_irq(&phba->hbalock);
18722 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18723 "3061 Last IDX %d\n", last_index);
18724 return 0; /* Empty rr list */
18726 spin_unlock_irq(&phba->hbalock);
18730 * Clear the rr_bmask and set all of the bits that are at this
18733 memset(phba->fcf.fcf_rr_bmask, 0,
18734 sizeof(*phba->fcf.fcf_rr_bmask));
18735 spin_lock_irq(&phba->hbalock);
18736 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18737 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18740 * the first priority that has not failed FLOGI
18741 * will be the highest.
18744 next_fcf_pri = fcf_pri->fcf_rec.priority;
18745 spin_unlock_irq(&phba->hbalock);
18746 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18747 rc = lpfc_sli4_fcf_rr_index_set(phba,
18748 fcf_pri->fcf_rec.fcf_index);
18752 spin_lock_irq(&phba->hbalock);
18755 * If next_fcf_pri was not set above and the list is not empty, then
18756 * FLOGI has failed on all of them. So reset the FLOGI-failed flag
18757 * and start at the beginning.
18759 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
18760 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18761 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
18763 * the first priority that has not failed FLOGI
18764 * will be the highest.
18767 next_fcf_pri = fcf_pri->fcf_rec.priority;
18768 spin_unlock_irq(&phba->hbalock);
18769 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18770 rc = lpfc_sli4_fcf_rr_index_set(phba,
18771 fcf_pri->fcf_rec.fcf_index);
18775 spin_lock_irq(&phba->hbalock);
18779 spin_unlock_irq(&phba->hbalock);
18784 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
18785 * @phba: pointer to lpfc hba data structure.
18787 * This routine is to get the next eligible FCF record index in a round
18788 * robin fashion. If the next eligible FCF record index equals the
18789 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
18790 * shall be returned, otherwise, the next eligible FCF record's index
18791 * shall be returned.
18794 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
18796 uint16_t next_fcf_index;
18799 /* Search start from next bit of currently registered FCF index */
18800 next_fcf_index = phba->fcf.current_rec.fcf_indx;
18803 /* Determine the next fcf index to check */
18804 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18805 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18806 LPFC_SLI4_FCF_TBL_INDX_MAX,
18809 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
18810 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18812 * If we have wrapped then we need to clear the bits that
18813 * have been tested so that we can detect when we should
18814 * change the priority level.
18816 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18817 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18821 /* Check roundrobin failover list empty condition */
18822 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18823 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18825 * If the next fcf index is not found, check if there are lower
18826 * priority level fcf's in the fcf_priority list.
18827 * Set up the rr_bmask with all of the available fcf bits
18828 * at that level and continue the selection process.
18830 if (lpfc_check_next_fcf_pri_level(phba))
18831 goto initial_priority;
18832 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18833 "2844 No roundrobin failover FCF available\n");
18835 return LPFC_FCOE_FCF_NEXT_NONE;
18838 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18839 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18840 LPFC_FCF_FLOGI_FAILED) {
18841 if (list_is_singular(&phba->fcf.fcf_pri_list))
18842 return LPFC_FCOE_FCF_NEXT_NONE;
18844 goto next_priority;
18847 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18848 "2845 Get next roundrobin failover FCF (x%x)\n",
18851 return next_fcf_index;
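/*
 * Round-robin usage sketch (illustrative, hypothetical caller): a FLOGI
 * failover loop consumes indices until none remain, e.g.
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return;		(no eligible FCF left)
 */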
18855 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18856 * @phba: pointer to lpfc hba data structure.
18858 * This routine sets the FCF record index in to the eligible bmask for
18859 * roundrobin failover search. It checks to make sure that the index
18860 * does not go beyond the range of the driver allocated bmask dimension
18861 * before setting the bit.
18863 * Returns 0 if the index bit successfully set, otherwise, it returns
18867 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18869 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18870 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18871 "2610 FCF (x%x) reached driver's book "
18872 "keeping dimension:x%x\n",
18873 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18876 /* Set the eligible FCF record index bmask */
18877 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18879 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18880 "2790 Set FCF (x%x) to roundrobin FCF failover "
18881 "bmask\n", fcf_index);
18887 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18888 * @phba: pointer to lpfc hba data structure.
18890 * This routine clears the FCF record index from the eligible bmask for
18891 * roundrobin failover search. It checks to make sure that the index
18892 * does not go beyond the range of the driver allocated bmask dimension
18893 * before clearing the bit.
18896 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18898 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18899 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18900 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18901 "2762 FCF (x%x) reached driver's book "
18902 "keeping dimension:x%x\n",
18903 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18906 /* Clear the eligible FCF record index bmask */
18907 spin_lock_irq(&phba->hbalock);
18908 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18910 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18911 list_del_init(&fcf_pri->list);
18915 spin_unlock_irq(&phba->hbalock);
18916 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18918 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18919 "2791 Clear FCF (x%x) from roundrobin failover "
18920 "bmask\n", fcf_index);
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

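/*
 * Typical flow (an illustrative summary, not an additional code path):
 * lpfc_sli4_redisc_fcf_table() posts the REDISCOVER_FCF mailbox with a
 * count of 0 so the port invalidates its whole FCF database, and the
 * asynchronous completion lpfc_mbx_cmpl_redisc_fcf_table() then either
 * starts the quiescent wait timer (success) or falls back through the
 * CVL/DEAD recovery paths (failure) before the FCF table is rescanned.
 */
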
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to the config region 23 data buffer.
 *
 * This function gets SLI3 port config region 23 data through the memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to the config region 23 data buffer.
 *
 * This function gets SLI4 port config region 23 data through the memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}

	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/* Search for configured port state sub-TLV */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
}

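/*
 * Worked example of the TLV arithmetic above (values are illustrative):
 * each record is laid out as a 4-byte header { type, len, id, pad }
 * followed by a payload whose length is expressed in 4-byte words, so
 * the cursor advances by rgn23_data[offset + 1] * 4 + 4 per skipped
 * record. A record with len = 2 therefore occupies 12 bytes, and a
 * PORT_STE sub-TLV whose first payload byte is zero marks the port as
 * user-disabled.
 */
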
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

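/*
 * Illustrative numbers for the BDE loop above: writing a 10240-byte
 * object with a 4096-byte SLI4_PAGE_SIZE consumes three BDEs in one
 * pass (4096 + 4096 + 2048); on the third BDE written + SLI4_PAGE_SIZE
 * reaches size, so the BDE is trimmed to the remainder and the eof/eas
 * bits are set, arming the change-status check after completion.
 */
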
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
		wq = phba->sli4_hba.hdwq[0].fcp_wq;
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list)
		txq_cnt++;

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n ",
					txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i + 1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				/* Use the sge type carried in the BDE flags */
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

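/*
 * Example of the offset bookkeeping above for CMD_GEN_REQUEST64_WQE
 * (illustrative sizes): request BDEs of 64 and 64 bytes followed by one
 * inbound reply BDE (BUFF_TYPE_BDE_64I) of 1024 bytes produce sge
 * offsets 0, 64 and then 0 again, because the first inbound entry
 * resets the accumulator so the request and reply lists are offset
 * independently.
 */
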
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to the hardware queue to post on.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->nvme_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->nvme_wq;
		pring = wq->pring;

		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}

#ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
 * 15 seconds after a test case starts running.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a test
 * case to clear stat_snapshot_taken. Then the user starts a test case. While
 * the test case is running, stat_snapshot_taken is incremented by 1 each time
 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;

	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		pvt_pool = &qp->p_multixri_pool->pvt_pool;
		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
		if (qp->nvme_wq)
			txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;

		multixri_pool->stat_pbl_count = pbl_pool->count;
		multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif

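/*
 * Note on timing: the snapshot count is driven from the heartbeat timer,
 * so the "15 seconds" above assumes LPFC_MXP_SNAPSHOT_TAKEN heartbeat
 * ticks have elapsed; with a 5-second heartbeat interval that implies a
 * snapshot count of 3 (the tick interval is an assumption here, not a
 * value defined in this file).
 */
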
/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from private to public pool when the private
 * pool is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	u32 io_req_count;
	u32 prev_io_req_count;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	if (!multixri_pool)
		return;
	io_req_count = multixri_pool->io_req_count;
	prev_io_req_count = multixri_pool->prev_io_req_count;

	if (prev_io_req_count != io_req_count) {
		/* Private pool is busy */
		multixri_pool->prev_io_req_count = io_req_count;
	} else {
		/* Private pool is not busy.
		 * Move XRIs from private to public pool.
		 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value stays between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;

	watermark_max = xri_limit;
	watermark_min = xri_limit / 2;

	txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
	abts_io_bufs = qp->abts_scsi_io_bufs;
	if (qp->nvme_wq) {
		txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
		abts_io_bufs += qp->abts_nvme_io_bufs;
	}

	new_watermark = txcmplq_cnt + abts_io_bufs;
	new_watermark = min(watermark_max, new_watermark);
	new_watermark = max(watermark_min, new_watermark);
	multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}

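/*
 * Worked example of the clamp above (illustrative values): with
 * xri_limit = 512 the watermark floats between 256 and 512. A load of
 * 100 in-flight plus 20 aborted IOs yields 120, which is raised to the
 * floor of 256, while a load of 700 would be capped at 512.
 */
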
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid in 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of the XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}

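/*
 * Illustrative walk-through of the two branches above: with 40 free bufs
 * and low_watermark = 10, the first branch parks the first 10 bufs on
 * tmp_list, donates the remaining 30 to pbl_pool, and then restores the
 * 10 to pvt_pool. Once the count has dropped to the watermark or below,
 * the else branch donates everything and leaves the private pool empty.
 */
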
/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to the associated HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move less than count XRIs if there's not
 * enough in the public pool.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public pools
 * using a round-robin method. The search always starts from the local hwqid,
 * then the next HWQ which was found last time (rrb_next_hwqid). Once a public
 * pool is found, a batch of free common bufs is moved to the private pool on
 * hwqid. It might move less than count XRIs if there's not enough in the
 * public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}

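/*
 * Search-order sketch (illustrative): with cfg_hdw_queue = 4 and
 * rrb_next_hwqid = 2, a miss on the local pool probes HWQs 3, 0, 1, 2
 * in that order, stopping at the first pool whose trylock is won and
 * which holds free XRIs; rrb_next_hwqid then records where to resume
 * the next search.
 */
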
/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * the low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		if (qp->nvme_wq) {
			txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
			abts_io_bufs += qp->abts_nvme_io_bufs;
		}

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 *     based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}

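/*
 * Summary of the free-path policy above: an expedite buffer always goes
 * back to epd_pool; otherwise the XRI lands in pvt_pool while that pool
 * is under its low watermark, or while the total owned XRIs are under
 * xri_limit and the pool is under its high watermark. Everything else
 * goes to the public pbl_pool, where other HWQs can claim it through
 * the round-robin rebalancing path.
 */
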
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the associated HDW queue.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * removes an IO buffer from the head of the @hwqid io_buf_list and returns it
 * to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}