2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsi_bsg_fc.h>
14 #include <scsi/scsi_eh.h>
16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17 static void qla2x00_process_completed_request(struct scsi_qla_host *,
18 struct req_que *, uint32_t);
19 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
20 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
21 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
25 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
27 * @dev_id: SCSI driver HA context
29 * Called by system whenever the host adapter generates an interrupt.
31 * Returns handled flag.
34 qla2100_intr_handler(int irq, void *dev_id)
37 struct qla_hw_data *ha;
38 struct device_reg_2xxx __iomem *reg;
46 rsp = (struct rsp_que *) dev_id;
48 ql_log(ql_log_info, NULL, 0x505d,
49 "%s: NULL response queue pointer.\n", __func__);
54 reg = &ha->iobase->isp;
57 spin_lock_irqsave(&ha->hardware_lock, flags);
58 vha = pci_get_drvdata(ha->pdev);
59 for (iter = 50; iter--; ) {
60 hccr = RD_REG_WORD(&reg->hccr);
61 if (hccr & HCCR_RISC_PAUSE) {
62 if (pci_channel_offline(ha->pdev))
66 * Issue a "HARD" reset in order for the RISC interrupt
67 * bit to be cleared. Schedule a big hammer to get
68 * out of the RISC PAUSED state.
70 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
71 RD_REG_WORD(&reg->hccr);
73 ha->isp_ops->fw_dump(vha, 1);
74 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
76 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
79 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
80 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
81 RD_REG_WORD(&reg->hccr);
83 /* Get mailbox data. */
84 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
85 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
86 qla2x00_mbx_completion(vha, mb[0]);
87 status |= MBX_INTERRUPT;
88 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
89 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
90 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
91 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
92 qla2x00_async_event(vha, rsp, mb);
95 ql_dbg(ql_dbg_async, vha, 0x5025,
96 "Unrecognized interrupt type (%d).\n",
99 /* Release mailbox registers. */
100 WRT_REG_WORD(&reg->semaphore, 0);
101 RD_REG_WORD(&reg->semaphore);
103 qla2x00_process_response_queue(rsp);
105 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
106 RD_REG_WORD(&reg->hccr);
109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
111 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
112 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
113 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
114 complete(&ha->mbx_intr_comp);
117 return (IRQ_HANDLED);
121 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
123 * @dev_id: SCSI driver HA context
125 * Called by system whenever the host adapter generates an interrupt.
127 * Returns handled flag.
130 qla2300_intr_handler(int irq, void *dev_id)
132 scsi_qla_host_t *vha;
133 struct device_reg_2xxx __iomem *reg;
140 struct qla_hw_data *ha;
143 rsp = (struct rsp_que *) dev_id;
145 ql_log(ql_log_info, NULL, 0x5058,
146 "%s: NULL response queue pointer.\n", __func__);
151 reg = &ha->iobase->isp;
154 spin_lock_irqsave(&ha->hardware_lock, flags);
155 vha = pci_get_drvdata(ha->pdev);
156 for (iter = 50; iter--; ) {
157 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
158 if (stat & HSR_RISC_PAUSED) {
159 if (unlikely(pci_channel_offline(ha->pdev)))
162 hccr = RD_REG_WORD(&reg->hccr);
163 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
164 ql_log(ql_log_warn, vha, 0x5026,
165 "Parity error -- HCCR=%x, Dumping "
166 "firmware.\n", hccr);
168 ql_log(ql_log_warn, vha, 0x5027,
169 "RISC paused -- HCCR=%x, Dumping "
170 "firmware.\n", hccr);
173 * Issue a "HARD" reset in order for the RISC
174 * interrupt bit to be cleared. Schedule a big
175 * hammer to get out of the RISC PAUSED state.
177 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
178 RD_REG_WORD(&reg->hccr);
180 ha->isp_ops->fw_dump(vha, 1);
181 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
183 } else if ((stat & HSR_RISC_INT) == 0)
186 switch (stat & 0xff) {
191 qla2x00_mbx_completion(vha, MSW(stat));
192 status |= MBX_INTERRUPT;
194 /* Release mailbox registers. */
195 WRT_REG_WORD(&reg->semaphore, 0);
199 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
200 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
201 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
202 qla2x00_async_event(vha, rsp, mb);
205 qla2x00_process_response_queue(rsp);
208 mb[0] = MBA_CMPLT_1_16BIT;
210 qla2x00_async_event(vha, rsp, mb);
213 mb[0] = MBA_SCSI_COMPLETION;
215 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
216 qla2x00_async_event(vha, rsp, mb);
219 ql_dbg(ql_dbg_async, vha, 0x5028,
220 "Unrecognized interrupt type (%d).\n", stat & 0xff);
223 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
224 RD_REG_WORD_RELAXED(&reg->hccr);
226 spin_unlock_irqrestore(&ha->hardware_lock, flags);
228 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
229 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
230 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
231 complete(&ha->mbx_intr_comp);
234 return (IRQ_HANDLED);
238 * qla2x00_mbx_completion() - Process mailbox command completions.
239 * @ha: SCSI driver HA context
240 * @mb0: Mailbox0 register
243 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
247 uint16_t __iomem *wptr;
248 struct qla_hw_data *ha = vha->hw;
249 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
251 /* Read all mbox registers? */
252 mboxes = (1 << ha->mbx_count) - 1;
254 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
256 mboxes = ha->mcp->in_mb;
258 /* Load return mailbox registers. */
259 ha->flags.mbox_int = 1;
260 ha->mailbox_out[0] = mb0;
262 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
264 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
265 if (IS_QLA2200(ha) && cnt == 8)
266 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
267 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
268 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
269 else if (mboxes & BIT_0)
270 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
278 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
280 static char *event[] =
281 { "Complete", "Request Notification", "Time Extension" };
283 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
284 uint16_t __iomem *wptr;
285 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
287 /* Seed data -- mailbox1 -> mailbox7. */
288 wptr = (uint16_t __iomem *)&reg24->mailbox1;
289 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
290 mb[cnt] = RD_REG_WORD(wptr);
292 ql_dbg(ql_dbg_async, vha, 0x5021,
293 "Inter-Driver Communication %s -- "
294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 mb[4], mb[5], mb[6]);
297 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
298 vha->hw->flags.idc_compl_status = 1;
299 if (vha->hw->notify_dcbx_comp)
300 complete(&vha->hw->dcbx_comp);
303 /* Acknowledgement needed? [Notify && non-zero timeout]. */
304 timeout = (descr >> 8) & 0xf;
305 if (aen != MBA_IDC_NOTIFY || !timeout)
308 ql_dbg(ql_dbg_async, vha, 0x5022,
309 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
310 vha->host_no, event[aen & 0xff], timeout);
312 rval = qla2x00_post_idc_ack_work(vha, mb);
313 if (rval != QLA_SUCCESS)
314 ql_log(ql_log_warn, vha, 0x5023,
315 "IDC failed to post ACK.\n");
320 qla2x00_get_link_speed_str(struct qla_hw_data *ha)
322 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
324 int fw_speed = ha->link_data_rate;
326 if (IS_QLA2100(ha) || IS_QLA2200(ha))
327 link_speed = link_speeds[0];
328 else if (fw_speed == 0x13)
329 link_speed = link_speeds[6];
331 link_speed = link_speeds[LS_UNKNOWN];
334 link_speeds[fw_speed];
341 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
343 struct qla_hw_data *ha = vha->hw;
346 * 8200 AEN Interpretation:
348 * mb[1] = AEN Reason code
349 * mb[2] = LSW of Peg-Halt Status-1 Register
350 * mb[6] = MSW of Peg-Halt Status-1 Register
351 * mb[3] = LSW of Peg-Halt Status-2 register
352 * mb[7] = MSW of Peg-Halt Status-2 register
353 * mb[4] = IDC Device-State Register value
354 * mb[5] = IDC Driver-Presence Register value
356 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
357 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
358 mb[0], mb[1], mb[2], mb[6]);
359 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
360 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
361 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
363 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
364 IDC_HEARTBEAT_FAILURE)) {
365 ha->flags.nic_core_hung = 1;
366 ql_log(ql_log_warn, vha, 0x5060,
367 "83XX: F/W Error Reported: Check if reset required.\n");
369 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
370 uint32_t protocol_engine_id, fw_err_code, err_level;
373 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
374 * - PEG-Halt Status-1 Register:
375 * (LSW = mb[2], MSW = mb[6])
376 * Bits 0-7 = protocol-engine ID
377 * Bits 8-28 = f/w error code
378 * Bits 29-31 = Error-level
379 * Error-level 0x1 = Non-Fatal error
380 * Error-level 0x2 = Recoverable Fatal error
381 * Error-level 0x4 = UnRecoverable Fatal error
382 * - PEG-Halt Status-2 Register:
383 * (LSW = mb[3], MSW = mb[7])
385 protocol_engine_id = (mb[2] & 0xff);
386 fw_err_code = (((mb[2] & 0xff00) >> 8) |
387 ((mb[6] & 0x1fff) << 8));
388 err_level = ((mb[6] & 0xe000) >> 13);
389 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
390 "Register: protocol_engine_id=0x%x "
391 "fw_err_code=0x%x err_level=0x%x.\n",
392 protocol_engine_id, fw_err_code, err_level);
393 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
394 "Register: 0x%x%x.\n", mb[7], mb[3]);
395 if (err_level == ERR_LEVEL_NON_FATAL) {
396 ql_log(ql_log_warn, vha, 0x5063,
397 "Not a fatal error, f/w has recovered "
399 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
400 ql_log(ql_log_fatal, vha, 0x5064,
401 "Recoverable Fatal error: Chip reset "
403 qla83xx_schedule_work(vha,
404 QLA83XX_NIC_CORE_RESET);
405 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
406 ql_log(ql_log_fatal, vha, 0x5065,
407 "Unrecoverable Fatal error: Set FAILED "
408 "state, reboot required.\n");
409 qla83xx_schedule_work(vha,
410 QLA83XX_NIC_CORE_UNRECOVERABLE);
414 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
415 uint16_t peg_fw_state, nw_interface_link_up;
416 uint16_t nw_interface_signal_detect, sfp_status;
417 uint16_t htbt_counter, htbt_monitor_enable;
418 uint16_t sfp_additonal_info, sfp_multirate;
419 uint16_t sfp_tx_fault, link_speed, dcbx_status;
422 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
423 * - PEG-to-FC Status Register:
424 * (LSW = mb[2], MSW = mb[6])
425 * Bits 0-7 = Peg-Firmware state
426 * Bit 8 = N/W Interface Link-up
427 * Bit 9 = N/W Interface signal detected
428 * Bits 10-11 = SFP Status
429 * SFP Status 0x0 = SFP+ transceiver not expected
430 * SFP Status 0x1 = SFP+ transceiver not present
431 * SFP Status 0x2 = SFP+ transceiver invalid
432 * SFP Status 0x3 = SFP+ transceiver present and
434 * Bits 12-14 = Heartbeat Counter
435 * Bit 15 = Heartbeat Monitor Enable
436 * Bits 16-17 = SFP Additional Info
437 * SFP info 0x0 = Unrecognized transceiver for
439 * SFP info 0x1 = SFP+ brand validation failed
440 * SFP info 0x2 = SFP+ speed validation failed
441 * SFP info 0x3 = SFP+ access error
442 * Bit 18 = SFP Multirate
443 * Bit 19 = SFP Tx Fault
444 * Bits 20-22 = Link Speed
445 * Bits 23-27 = Reserved
446 * Bits 28-30 = DCBX Status
447 * DCBX Status 0x0 = DCBX Disabled
448 * DCBX Status 0x1 = DCBX Enabled
449 * DCBX Status 0x2 = DCBX Exchange error
452 peg_fw_state = (mb[2] & 0x00ff);
453 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
454 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
455 sfp_status = ((mb[2] & 0x0c00) >> 10);
456 htbt_counter = ((mb[2] & 0x7000) >> 12);
457 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
458 sfp_additonal_info = (mb[6] & 0x0003);
459 sfp_multirate = ((mb[6] & 0x0004) >> 2);
460 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
461 link_speed = ((mb[6] & 0x0070) >> 4);
462 dcbx_status = ((mb[6] & 0x7000) >> 12);
464 ql_log(ql_log_warn, vha, 0x5066,
465 "Peg-to-Fc Status Register:\n"
466 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
467 "nw_interface_signal_detect=0x%x"
468 "\nsfp_statis=0x%x.\n ", peg_fw_state,
469 nw_interface_link_up, nw_interface_signal_detect,
471 ql_log(ql_log_warn, vha, 0x5067,
472 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
473 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
474 htbt_counter, htbt_monitor_enable,
475 sfp_additonal_info, sfp_multirate);
476 ql_log(ql_log_warn, vha, 0x5068,
477 "sfp_tx_fault=0x%x, link_state=0x%x, "
478 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
481 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
484 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
485 ql_log(ql_log_warn, vha, 0x5069,
486 "Heartbeat Failure encountered, chip reset "
489 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
493 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
494 ql_log(ql_log_info, vha, 0x506a,
495 "IDC Device-State changed = 0x%x.\n", mb[4]);
496 qla83xx_schedule_work(vha, MBA_IDC_AEN);
501 * qla2x00_async_event() - Process asynchronous events.
502 * @ha: SCSI driver HA context
503 * @mb: Mailbox registers (0 - 3)
506 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
511 struct qla_hw_data *ha = vha->hw;
512 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
513 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
514 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
515 uint32_t rscn_entry, host_pid;
518 /* Setup to process RIO completion. */
520 if (IS_CNA_CAPABLE(ha))
523 case MBA_SCSI_COMPLETION:
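/* 32-bit completion handle: mb[1] carries the LSW, mb[2] the MSW. */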
524 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
527 case MBA_CMPLT_1_16BIT:
530 mb[0] = MBA_SCSI_COMPLETION;
532 case MBA_CMPLT_2_16BIT:
536 mb[0] = MBA_SCSI_COMPLETION;
538 case MBA_CMPLT_3_16BIT:
543 mb[0] = MBA_SCSI_COMPLETION;
545 case MBA_CMPLT_4_16BIT:
549 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
551 mb[0] = MBA_SCSI_COMPLETION;
553 case MBA_CMPLT_5_16BIT:
557 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
558 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
560 mb[0] = MBA_SCSI_COMPLETION;
562 case MBA_CMPLT_2_32BIT:
563 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
564 handles[1] = le32_to_cpu(
565 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
566 RD_MAILBOX_REG(ha, reg, 6));
568 mb[0] = MBA_SCSI_COMPLETION;
575 case MBA_SCSI_COMPLETION: /* Fast Post */
576 if (!vha->flags.online)
579 for (cnt = 0; cnt < handle_cnt; cnt++)
580 qla2x00_process_completed_request(vha, rsp->req,
584 case MBA_RESET: /* Reset */
585 ql_dbg(ql_dbg_async, vha, 0x5002,
586 "Asynchronous RESET.\n");
588 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
591 case MBA_SYSTEM_ERR: /* System Error */
592 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
593 RD_REG_WORD(&reg24->mailbox7) : 0;
594 ql_log(ql_log_warn, vha, 0x5003,
595 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
596 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
598 ha->isp_ops->fw_dump(vha, 1);
600 if (IS_FWI2_CAPABLE(ha)) {
601 if (mb[1] == 0 && mb[2] == 0) {
602 ql_log(ql_log_fatal, vha, 0x5004,
603 "Unrecoverable Hardware Error: adapter "
604 "marked OFFLINE!\n");
605 vha->flags.online = 0;
606 vha->device_flags |= DFLG_DEV_FAILED;
608 /* Check to see if MPI timeout occurred */
609 if ((mbx & MBX_3) && (ha->flags.port0))
610 set_bit(MPI_RESET_NEEDED,
613 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
615 } else if (mb[1] == 0) {
616 ql_log(ql_log_fatal, vha, 0x5005,
617 "Unrecoverable Hardware Error: adapter marked "
619 vha->flags.online = 0;
620 vha->device_flags |= DFLG_DEV_FAILED;
622 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
625 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
626 ql_log(ql_log_warn, vha, 0x5006,
627 "ISP Request Transfer Error (%x).\n", mb[1]);
629 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
632 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
633 ql_log(ql_log_warn, vha, 0x5007,
634 "ISP Response Transfer Error.\n");
636 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
639 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
640 ql_dbg(ql_dbg_async, vha, 0x5008,
641 "Asynchronous WAKEUP_THRES.\n");
644 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
645 ql_dbg(ql_dbg_async, vha, 0x5009,
646 "LIP occurred (%x).\n", mb[1]);
648 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
649 atomic_set(&vha->loop_state, LOOP_DOWN);
650 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
651 qla2x00_mark_all_devices_lost(vha, 1);
655 atomic_set(&vha->vp_state, VP_FAILED);
656 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
659 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
660 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
662 vha->flags.management_server_logged_in = 0;
663 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
666 case MBA_LOOP_UP: /* Loop Up Event */
667 if (IS_QLA2100(ha) || IS_QLA2200(ha))
668 ha->link_data_rate = PORT_SPEED_1GB;
670 ha->link_data_rate = mb[1];
672 ql_dbg(ql_dbg_async, vha, 0x500a,
673 "LOOP UP detected (%s Gbps).\n",
674 qla2x00_get_link_speed_str(ha));
676 vha->flags.management_server_logged_in = 0;
677 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
680 case MBA_LOOP_DOWN: /* Loop Down Event */
681 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
682 ? RD_REG_WORD(&reg24->mailbox4) : 0;
683 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
684 ql_dbg(ql_dbg_async, vha, 0x500b,
685 "LOOP DOWN detected (%x %x %x %x).\n",
686 mb[1], mb[2], mb[3], mbx);
688 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
689 atomic_set(&vha->loop_state, LOOP_DOWN);
690 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
691 vha->device_flags |= DFLG_NO_CABLE;
692 qla2x00_mark_all_devices_lost(vha, 1);
696 atomic_set(&vha->vp_state, VP_FAILED);
697 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
700 vha->flags.management_server_logged_in = 0;
701 ha->link_data_rate = PORT_SPEED_UNKNOWN;
702 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
705 case MBA_LIP_RESET: /* LIP reset occurred */
706 ql_dbg(ql_dbg_async, vha, 0x500c,
707 "LIP reset occurred (%x).\n", mb[1]);
709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
710 atomic_set(&vha->loop_state, LOOP_DOWN);
711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
712 qla2x00_mark_all_devices_lost(vha, 1);
716 atomic_set(&vha->vp_state, VP_FAILED);
717 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
720 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
722 ha->operating_mode = LOOP;
723 vha->flags.management_server_logged_in = 0;
724 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
727 /* case MBA_DCBX_COMPLETE: */
728 case MBA_POINT_TO_POINT: /* Point-to-Point */
732 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
733 ql_dbg(ql_dbg_async, vha, 0x500d,
734 "DCBX Completed -- %04x %04x %04x.\n",
735 mb[1], mb[2], mb[3]);
736 if (ha->notify_dcbx_comp)
737 complete(&ha->dcbx_comp);
740 ql_dbg(ql_dbg_async, vha, 0x500e,
741 "Asynchronous P2P MODE received.\n");
744 * Until there's a transition from loop down to loop up, treat
745 * this as loop down only.
747 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
748 atomic_set(&vha->loop_state, LOOP_DOWN);
749 if (!atomic_read(&vha->loop_down_timer))
750 atomic_set(&vha->loop_down_timer,
752 qla2x00_mark_all_devices_lost(vha, 1);
756 atomic_set(&vha->vp_state, VP_FAILED);
757 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
760 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
761 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
763 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
764 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
766 ha->flags.gpsc_supported = 1;
767 vha->flags.management_server_logged_in = 0;
770 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
774 ql_dbg(ql_dbg_async, vha, 0x500f,
775 "Configuration change detected: value=%x.\n", mb[1]);
777 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
778 atomic_set(&vha->loop_state, LOOP_DOWN);
779 if (!atomic_read(&vha->loop_down_timer))
780 atomic_set(&vha->loop_down_timer,
782 qla2x00_mark_all_devices_lost(vha, 1);
786 atomic_set(&vha->vp_state, VP_FAILED);
787 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
790 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
791 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
794 case MBA_PORT_UPDATE: /* Port database update */
796 * Handle only global and vn-port update events
799 * mb[1] = N_Port handle of changed port
800 * OR 0xffff for global event
801 * mb[2] = New login state
802 * 7 = Port logged out
803 * mb[3] = LSB is vp_idx, 0xff = all vps
805 * Skip processing if:
806 * Event is global, vp_idx is NOT all vps,
807 * vp_idx does not match
808 * Event is not global, vp_idx does not match
810 if (IS_QLA2XXX_MIDTYPE(ha) &&
811 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
812 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
815 /* Global event -- port logout or port unavailable. */
816 if (mb[1] == 0xffff && mb[2] == 0x7) {
817 ql_dbg(ql_dbg_async, vha, 0x5010,
818 "Port unavailable %04x %04x %04x.\n",
819 mb[1], mb[2], mb[3]);
820 ql_log(ql_log_warn, vha, 0x505e,
821 "Link is offline.\n");
823 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
824 atomic_set(&vha->loop_state, LOOP_DOWN);
825 atomic_set(&vha->loop_down_timer,
827 vha->device_flags |= DFLG_NO_CABLE;
828 qla2x00_mark_all_devices_lost(vha, 1);
832 atomic_set(&vha->vp_state, VP_FAILED);
833 fc_vport_set_state(vha->fc_vport,
835 qla2x00_mark_all_devices_lost(vha, 1);
838 vha->flags.management_server_logged_in = 0;
839 ha->link_data_rate = PORT_SPEED_UNKNOWN;
844 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
845 * event etc. earlier indicating loop is down) then process
846 * it. Otherwise ignore it and wait for RSCN to come in.
848 atomic_set(&vha->loop_down_timer, 0);
849 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
850 ql_dbg(ql_dbg_async, vha, 0x5011,
851 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
852 mb[1], mb[2], mb[3]);
854 qlt_async_event(mb[0], vha, mb);
858 ql_dbg(ql_dbg_async, vha, 0x5012,
859 "Port database changed %04x %04x %04x.\n",
860 mb[1], mb[2], mb[3]);
861 ql_log(ql_log_warn, vha, 0x505f,
862 "Link is operational (%s Gbps).\n",
863 qla2x00_get_link_speed_str(ha));
866 * Mark all devices as missing so we will login again.
868 atomic_set(&vha->loop_state, LOOP_UP);
870 qla2x00_mark_all_devices_lost(vha, 1);
872 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
873 set_bit(SCR_PENDING, &vha->dpc_flags);
875 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
876 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
878 qlt_async_event(mb[0], vha, mb);
881 case MBA_RSCN_UPDATE: /* State Change Registration */
882 /* Check if the Vport has issued a SCR */
883 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
885 /* Only handle SCNs for our Vport index. */
886 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
889 ql_dbg(ql_dbg_async, vha, 0x5013,
890 "RSCN database changed -- %04x %04x %04x.\n",
891 mb[1], mb[2], mb[3]);
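/* Affected port ID: domain from the low byte of mb[1], area/al_pa from mb[2]. */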
893 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
894 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
896 if (rscn_entry == host_pid) {
897 ql_dbg(ql_dbg_async, vha, 0x5014,
898 "Ignoring RSCN update to local host "
899 "port ID (%06x).\n", host_pid);
903 /* Ignore reserved bits from RSCN-payload. */
904 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
906 atomic_set(&vha->loop_down_timer, 0);
907 vha->flags.management_server_logged_in = 0;
909 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
910 set_bit(RSCN_UPDATE, &vha->dpc_flags);
911 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
914 /* case MBA_RIO_RESPONSE: */
915 case MBA_ZIO_RESPONSE:
916 ql_dbg(ql_dbg_async, vha, 0x5015,
917 "[R|Z]IO update completion.\n");
919 if (IS_FWI2_CAPABLE(ha))
920 qla24xx_process_response_queue(vha, rsp);
922 qla2x00_process_response_queue(rsp);
925 case MBA_DISCARD_RND_FRAME:
926 ql_dbg(ql_dbg_async, vha, 0x5016,
927 "Discard RND Frame -- %04x %04x %04x.\n",
928 mb[1], mb[2], mb[3]);
931 case MBA_TRACE_NOTIFICATION:
932 ql_dbg(ql_dbg_async, vha, 0x5017,
933 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
936 case MBA_ISP84XX_ALERT:
937 ql_dbg(ql_dbg_async, vha, 0x5018,
938 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
939 mb[1], mb[2], mb[3]);
941 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
943 case A84_PANIC_RECOVERY:
944 ql_log(ql_log_info, vha, 0x5019,
945 "Alert 84XX: panic recovery %04x %04x.\n",
948 case A84_OP_LOGIN_COMPLETE:
949 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
950 ql_log(ql_log_info, vha, 0x501a,
951 "Alert 84XX: firmware version %x.\n",
952 ha->cs84xx->op_fw_version);
954 case A84_DIAG_LOGIN_COMPLETE:
955 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
956 ql_log(ql_log_info, vha, 0x501b,
957 "Alert 84XX: diagnostic firmware version %x.\n",
958 ha->cs84xx->diag_fw_version);
960 case A84_GOLD_LOGIN_COMPLETE:
961 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
962 ha->cs84xx->fw_update = 1;
963 ql_log(ql_log_info, vha, 0x501c,
964 "Alert 84XX: gold firmware version %x.\n",
965 ha->cs84xx->gold_fw_version);
968 ql_log(ql_log_warn, vha, 0x501d,
969 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
970 mb[1], mb[2], mb[3]);
972 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
975 ql_dbg(ql_dbg_async, vha, 0x501e,
976 "DCBX Started -- %04x %04x %04x.\n",
977 mb[1], mb[2], mb[3]);
979 case MBA_DCBX_PARAM_UPDATE:
980 ql_dbg(ql_dbg_async, vha, 0x501f,
981 "DCBX Parameters Updated -- %04x %04x %04x.\n",
982 mb[1], mb[2], mb[3]);
984 case MBA_FCF_CONF_ERR:
985 ql_dbg(ql_dbg_async, vha, 0x5020,
986 "FCF Configuration Error -- %04x %04x %04x.\n",
987 mb[1], mb[2], mb[3]);
990 /* See if we need to quiesce any I/O */
991 if (IS_QLA8031(vha->hw))
992 if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
993 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
994 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
995 qla2xxx_wake_dpc(vha);
997 case MBA_IDC_COMPLETE:
998 case MBA_IDC_TIME_EXT:
999 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1000 qla81xx_idc_event(vha, mb[0], mb[1]);
1004 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1005 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1006 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1007 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1008 qla83xx_handle_8200_aen(vha, mb);
1012 ql_dbg(ql_dbg_async, vha, 0x5057,
1013 "Unknown AEN:%04x %04x %04x %04x\n",
1014 mb[0], mb[1], mb[2], mb[3]);
1017 qlt_async_event(mb[0], vha, mb);
1019 if (!vha->vp_idx && ha->num_vhosts)
1020 qla2x00_alert_all_vps(rsp, mb);
1024 * qla2x00_process_completed_request() - Process a Fast Post response.
1025 * @ha: SCSI driver HA context
1029 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1030 struct req_que *req, uint32_t index)
1033 struct qla_hw_data *ha = vha->hw;
1035 /* Validate handle. */
1036 if (index >= MAX_OUTSTANDING_COMMANDS) {
1037 ql_log(ql_log_warn, vha, 0x3014,
1038 "Invalid SCSI command index (%x).\n", index);
1041 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1043 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1047 sp = req->outstanding_cmds[index];
1049 /* Free outstanding command slot. */
1050 req->outstanding_cmds[index] = NULL;
1052 /* Save ISP completion status */
1053 sp->done(ha, sp, DID_OK << 16);
1055 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1058 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1060 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1065 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1066 struct req_que *req, void *iocb)
1068 struct qla_hw_data *ha = vha->hw;
1069 sts_entry_t *pkt = iocb;
1073 index = LSW(pkt->handle);
1074 if (index >= MAX_OUTSTANDING_COMMANDS) {
1075 ql_log(ql_log_warn, vha, 0x5031,
1076 "Invalid command index (%x).\n", index);
1078 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1080 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1083 sp = req->outstanding_cmds[index];
1085 ql_log(ql_log_warn, vha, 0x5032,
1086 "Invalid completion handle (%x) -- timed-out.\n", index);
1089 if (sp->handle != index) {
1090 ql_log(ql_log_warn, vha, 0x5033,
1091 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1095 req->outstanding_cmds[index] = NULL;
1102 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1103 struct mbx_entry *mbx)
1105 const char func[] = "MBX-IOCB";
1109 struct srb_iocb *lio;
1113 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1117 lio = &sp->u.iocb_cmd;
1119 fcport = sp->fcport;
1120 data = lio->u.logio.data;
1122 data[0] = MBS_COMMAND_ERROR;
1123 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1124 QLA_LOGIO_LOGIN_RETRIED : 0;
1125 if (mbx->entry_status) {
1126 ql_dbg(ql_dbg_async, vha, 0x5043,
1127 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1128 "entry-status=%x status=%x state-flag=%x "
1129 "status-flags=%x.\n", type, sp->handle,
1130 fcport->d_id.b.domain, fcport->d_id.b.area,
1131 fcport->d_id.b.al_pa, mbx->entry_status,
1132 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1133 le16_to_cpu(mbx->status_flags));
1135 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1136 (uint8_t *)mbx, sizeof(*mbx));
1141 status = le16_to_cpu(mbx->status);
1142 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1143 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1145 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1146 ql_dbg(ql_dbg_async, vha, 0x5045,
1147 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1148 type, sp->handle, fcport->d_id.b.domain,
1149 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1150 le16_to_cpu(mbx->mb1));
1152 data[0] = MBS_COMMAND_COMPLETE;
1153 if (sp->type == SRB_LOGIN_CMD) {
1154 fcport->port_type = FCT_TARGET;
1155 if (le16_to_cpu(mbx->mb1) & BIT_0)
1156 fcport->port_type = FCT_INITIATOR;
1157 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1158 fcport->flags |= FCF_FCP2_DEVICE;
1163 data[0] = le16_to_cpu(mbx->mb0);
1165 case MBS_PORT_ID_USED:
1166 data[1] = le16_to_cpu(mbx->mb1);
1168 case MBS_LOOP_ID_USED:
1171 data[0] = MBS_COMMAND_ERROR;
1175 ql_log(ql_log_warn, vha, 0x5046,
1176 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1177 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1178 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1179 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1180 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1181 le16_to_cpu(mbx->mb7));
1184 sp->done(vha, sp, 0);
1188 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1189 sts_entry_t *pkt, int iocb_type)
1191 const char func[] = "CT_IOCB";
1194 struct fc_bsg_job *bsg_job;
1195 uint16_t comp_status;
1198 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1202 bsg_job = sp->u.bsg_job;
1204 type = "ct pass-through";
1206 comp_status = le16_to_cpu(pkt->comp_status);
1208 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1209 * fc payload to the caller
1211 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1212 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1214 if (comp_status != CS_COMPLETE) {
1215 if (comp_status == CS_DATA_UNDERRUN) {
1217 bsg_job->reply->reply_payload_rcv_len =
1218 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1220 ql_log(ql_log_warn, vha, 0x5048,
1221 "CT pass-through-%s error "
1222 "comp_status-status=0x%x total_byte = 0x%x.\n",
1224 bsg_job->reply->reply_payload_rcv_len);
1226 ql_log(ql_log_warn, vha, 0x5049,
1227 "CT pass-through-%s error "
1228 "comp_status-status=0x%x.\n", type, comp_status);
1229 res = DID_ERROR << 16;
1230 bsg_job->reply->reply_payload_rcv_len = 0;
1232 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1233 (uint8_t *)pkt, sizeof(*pkt));
1236 bsg_job->reply->reply_payload_rcv_len =
1237 bsg_job->reply_payload.payload_len;
1238 bsg_job->reply_len = 0;
1241 sp->done(vha, sp, res);
1245 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1246 struct sts_entry_24xx *pkt, int iocb_type)
1248 const char func[] = "ELS_CT_IOCB";
1251 struct fc_bsg_job *bsg_job;
1252 uint16_t comp_status;
1253 uint32_t fw_status[3];
1254 uint8_t* fw_sts_ptr;
1257 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1260 bsg_job = sp->u.bsg_job;
1264 case SRB_ELS_CMD_RPT:
1265 case SRB_ELS_CMD_HST:
1269 type = "ct pass-through";
1272 ql_dbg(ql_dbg_user, vha, 0x503e,
1273 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1277 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1278 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1279 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1281 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1282 * fc payload to the caller
1284 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1285 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1287 if (comp_status != CS_COMPLETE) {
1288 if (comp_status == CS_DATA_UNDERRUN) {
1290 bsg_job->reply->reply_payload_rcv_len =
1291 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1293 ql_dbg(ql_dbg_user, vha, 0x503f,
1294 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1295 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1296 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1297 le16_to_cpu(((struct els_sts_entry_24xx *)
1298 pkt)->total_byte_count));
1299 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1300 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1303 ql_dbg(ql_dbg_user, vha, 0x5040,
1304 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1305 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1306 type, sp->handle, comp_status,
1307 le16_to_cpu(((struct els_sts_entry_24xx *)
1308 pkt)->error_subcode_1),
1309 le16_to_cpu(((struct els_sts_entry_24xx *)
1310 pkt)->error_subcode_2));
1311 res = DID_ERROR << 16;
1312 bsg_job->reply->reply_payload_rcv_len = 0;
1313 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1314 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1316 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1317 (uint8_t *)pkt, sizeof(*pkt));
1321 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1322 bsg_job->reply_len = 0;
1325 sp->done(vha, sp, res);
1329 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1330 struct logio_entry_24xx *logio)
1332 const char func[] = "LOGIO-IOCB";
1336 struct srb_iocb *lio;
1340 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1344 lio = &sp->u.iocb_cmd;
1346 fcport = sp->fcport;
1347 data = lio->u.logio.data;
1349 data[0] = MBS_COMMAND_ERROR;
1350 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1351 QLA_LOGIO_LOGIN_RETRIED : 0;
1352 if (logio->entry_status) {
1353 ql_log(ql_log_warn, fcport->vha, 0x5034,
1354 "Async-%s error entry - hdl=%x"
1355 "portid=%02x%02x%02x entry-status=%x.\n",
1356 type, sp->handle, fcport->d_id.b.domain,
1357 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1358 logio->entry_status);
1359 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1360 (uint8_t *)logio, sizeof(*logio));
1365 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1366 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1367 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1368 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1369 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1370 le32_to_cpu(logio->io_parameter[0]));
1372 data[0] = MBS_COMMAND_COMPLETE;
1373 if (sp->type != SRB_LOGIN_CMD)
1376 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1377 if (iop[0] & BIT_4) {
1378 fcport->port_type = FCT_TARGET;
1380 fcport->flags |= FCF_FCP2_DEVICE;
1381 } else if (iop[0] & BIT_5)
1382 fcport->port_type = FCT_INITIATOR;
1385 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1387 if (logio->io_parameter[7] || logio->io_parameter[8])
1388 fcport->supported_classes |= FC_COS_CLASS2;
1389 if (logio->io_parameter[9] || logio->io_parameter[10])
1390 fcport->supported_classes |= FC_COS_CLASS3;
1395 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1396 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1398 case LSC_SCODE_PORTID_USED:
1399 data[0] = MBS_PORT_ID_USED;
1400 data[1] = LSW(iop[1]);
1402 case LSC_SCODE_NPORT_USED:
1403 data[0] = MBS_LOOP_ID_USED;
1406 data[0] = MBS_COMMAND_ERROR;
1410 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1411 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1412 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1413 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1414 le16_to_cpu(logio->comp_status),
1415 le32_to_cpu(logio->io_parameter[0]),
1416 le32_to_cpu(logio->io_parameter[1]));
1419 sp->done(vha, sp, 0);
1423 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1424 struct tsk_mgmt_entry *tsk)
1426 const char func[] = "TMF-IOCB";
1430 struct srb_iocb *iocb;
1431 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1434 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1438 iocb = &sp->u.iocb_cmd;
1440 fcport = sp->fcport;
1442 if (sts->entry_status) {
1443 ql_log(ql_log_warn, fcport->vha, 0x5038,
1444 "Async-%s error - hdl=%x entry-status(%x).\n",
1445 type, sp->handle, sts->entry_status);
1446 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1447 ql_log(ql_log_warn, fcport->vha, 0x5039,
1448 "Async-%s error - hdl=%x completion status(%x).\n",
1449 type, sp->handle, sts->comp_status);
1450 } else if (!(le16_to_cpu(sts->scsi_status) &
1451 SS_RESPONSE_INFO_LEN_VALID)) {
1452 ql_log(ql_log_warn, fcport->vha, 0x503a,
1453 "Async-%s error - hdl=%x no response info(%x).\n",
1454 type, sp->handle, sts->scsi_status);
1455 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1456 ql_log(ql_log_warn, fcport->vha, 0x503b,
1457 "Async-%s error - hdl=%x not enough response(%d).\n",
1458 type, sp->handle, sts->rsp_data_len);
1459 } else if (sts->data[3]) {
1460 ql_log(ql_log_warn, fcport->vha, 0x503c,
1461 "Async-%s error - hdl=%x response(%x).\n",
1462 type, sp->handle, sts->data[3]);
1468 iocb->u.tmf.data = error;
1469 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1470 (uint8_t *)sts, sizeof(*sts));
1473 sp->done(vha, sp, 0);
1477 * qla2x00_process_response_queue() - Process response queue entries.
1478 * @ha: SCSI driver HA context
1481 qla2x00_process_response_queue(struct rsp_que *rsp)
1483 struct scsi_qla_host *vha;
1484 struct qla_hw_data *ha = rsp->hw;
1485 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1487 uint16_t handle_cnt;
1490 vha = pci_get_drvdata(ha->pdev);
1492 if (!vha->flags.online)
1495 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1496 pkt = (sts_entry_t *)rsp->ring_ptr;
1499 if (rsp->ring_index == rsp->length) {
1500 rsp->ring_index = 0;
1501 rsp->ring_ptr = rsp->ring;
1506 if (pkt->entry_status != 0) {
1507 qla2x00_error_entry(vha, rsp, pkt);
1508 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1513 switch (pkt->entry_type) {
1515 qla2x00_status_entry(vha, rsp, pkt);
1517 case STATUS_TYPE_21:
1518 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1519 for (cnt = 0; cnt < handle_cnt; cnt++) {
1520 qla2x00_process_completed_request(vha, rsp->req,
1521 ((sts21_entry_t *)pkt)->handle[cnt]);
1524 case STATUS_TYPE_22:
1525 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1526 for (cnt = 0; cnt < handle_cnt; cnt++) {
1527 qla2x00_process_completed_request(vha, rsp->req,
1528 ((sts22_entry_t *)pkt)->handle[cnt]);
1531 case STATUS_CONT_TYPE:
1532 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1535 qla2x00_mbx_iocb_entry(vha, rsp->req,
1536 (struct mbx_entry *)pkt);
1539 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1542 /* Type Not Supported. */
1543 ql_log(ql_log_warn, vha, 0x504a,
1544 "Received unknown response pkt type %x "
1545 "entry status=%x.\n",
1546 pkt->entry_type, pkt->entry_status);
1549 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1553 /* Adjust ring index */
1554 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1558 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1559 uint32_t sense_len, struct rsp_que *rsp, int res)
1561 struct scsi_qla_host *vha = sp->fcport->vha;
1562 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1563 uint32_t track_sense_len;
1565 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1566 sense_len = SCSI_SENSE_BUFFERSIZE;
1568 SET_CMD_SENSE_LEN(sp, sense_len);
1569 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1570 track_sense_len = sense_len;
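/* Sense bytes that do not fit here are delivered later via status-continuation IOCBs. */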
1572 if (sense_len > par_sense_len)
1573 sense_len = par_sense_len;
1575 memcpy(cp->sense_buffer, sense_data, sense_len);
1577 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1578 track_sense_len -= sense_len;
1579 SET_CMD_SENSE_LEN(sp, track_sense_len);
1581 if (track_sense_len != 0) {
1582 rsp->status_srb = sp;
1587 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1588 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1589 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1591 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1592 cp->sense_buffer, sense_len);
1596 struct scsi_dif_tuple {
1597 __be16 guard; /* Checksum */
1598 __be16 app_tag; /* APPL identifier */
1599 __be32 ref_tag; /* Target LBA or indirect LBA */
1603 * Checks the guard or meta-data for the type of error
1604 * detected by the HBA. In case of errors, we set the
1605 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1606 * to indicate to the kernel that the HBA detected an error.
1609 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1611 struct scsi_qla_host *vha = sp->fcport->vha;
1612 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1613 uint8_t *ap = &sts24->data[12];
1614 uint8_t *ep = &sts24->data[20];
1615 uint32_t e_ref_tag, a_ref_tag;
1616 uint16_t e_app_tag, a_app_tag;
1617 uint16_t e_guard, a_guard;
1620 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1621 * would make guard field appear at offset 2
1623 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1624 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1625 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1626 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1627 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1628 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1630 ql_dbg(ql_dbg_io, vha, 0x3023,
1631 "iocb(s) %p Returned STATUS.\n", sts24);
1633 ql_dbg(ql_dbg_io, vha, 0x3024,
1634 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1635 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1636 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1637 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1638 a_app_tag, e_app_tag, a_guard, e_guard);
1642 * For type 3: ref & app tag is all 'f's
1643 * For type 0,1,2: app tag is all 'f's
1645 if ((a_app_tag == 0xffff) &&
1646 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1647 (a_ref_tag == 0xffffffff))) {
1648 uint32_t blocks_done, resid;
1649 sector_t lba_s = scsi_get_lba(cmd);
1651 /* 2TB boundary case covered automatically with this */
1652 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1654 resid = scsi_bufflen(cmd) - (blocks_done *
1655 cmd->device->sector_size);
1657 scsi_set_resid(cmd, resid);
1658 cmd->result = DID_OK << 16;
1660 /* Update protection tag */
1661 if (scsi_prot_sg_count(cmd)) {
1662 uint32_t i, j = 0, k = 0, num_ent;
1663 struct scatterlist *sg;
1664 struct sd_dif_tuple *spt;
1666 /* Patch the corresponding protection tags */
1667 scsi_for_each_prot_sg(cmd, sg,
1668 scsi_prot_sg_count(cmd), i) {
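/* Each protection tuple (guard, app tag, ref tag) is 8 bytes. */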
1669 num_ent = sg_dma_len(sg) / 8;
1670 if (k + num_ent < blocks_done) {
1674 j = blocks_done - k - 1;
1679 if (k != blocks_done) {
1680 ql_log(ql_log_warn, vha, 0x302f,
1681 "unexpected tag values tag:lba=%x:%llx)\n",
1682 e_ref_tag, (unsigned long long)lba_s);
1686 spt = page_address(sg_page(sg)) + sg->offset;
1689 spt->app_tag = 0xffff;
1690 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1691 spt->ref_tag = 0xffffffff;
1698 if (e_guard != a_guard) {
1699 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1701 set_driver_byte(cmd, DRIVER_SENSE);
1702 set_host_byte(cmd, DID_ABORT);
1703 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1708 if (e_ref_tag != a_ref_tag) {
1709 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1711 set_driver_byte(cmd, DRIVER_SENSE);
1712 set_host_byte(cmd, DID_ABORT);
1713 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1717 /* check appl tag */
1718 if (e_app_tag != a_app_tag) {
1719 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1721 set_driver_byte(cmd, DRIVER_SENSE);
1722 set_host_byte(cmd, DID_ABORT);
1723 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1731 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1732 struct req_que *req, uint32_t index)
1734 struct qla_hw_data *ha = vha->hw;
1736 uint16_t comp_status;
1737 uint16_t scsi_status;
1739 uint32_t rval = EXT_STATUS_OK;
1740 struct fc_bsg_job *bsg_job = NULL;
1742 struct sts_entry_24xx *sts24;
1743 sts = (sts_entry_t *) pkt;
1744 sts24 = (struct sts_entry_24xx *) pkt;
1746 /* Validate handle. */
1747 if (index >= MAX_OUTSTANDING_COMMANDS) {
1748 ql_log(ql_log_warn, vha, 0x70af,
1749 "Invalid SCSI completion handle 0x%x.\n", index);
1750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1754 sp = req->outstanding_cmds[index];
1756 /* Free outstanding command slot. */
1757 req->outstanding_cmds[index] = NULL;
1758 bsg_job = sp->u.bsg_job;
1760 ql_log(ql_log_warn, vha, 0x70b0,
1761 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1764 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1768 if (IS_FWI2_CAPABLE(ha)) {
1769 comp_status = le16_to_cpu(sts24->comp_status);
1770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1772 comp_status = le16_to_cpu(sts->comp_status);
1773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1776 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1777 switch (comp_status) {
1779 if (scsi_status == 0) {
1780 bsg_job->reply->reply_payload_rcv_len =
1781 bsg_job->reply_payload.payload_len;
1782 rval = EXT_STATUS_OK;
1786 case CS_DATA_OVERRUN:
1787 ql_dbg(ql_dbg_user, vha, 0x70b1,
1788 "Command completed with date overrun thread_id=%d\n",
1790 rval = EXT_STATUS_DATA_OVERRUN;
1793 case CS_DATA_UNDERRUN:
1794 ql_dbg(ql_dbg_user, vha, 0x70b2,
1795 "Command completed with date underrun thread_id=%d\n",
1797 rval = EXT_STATUS_DATA_UNDERRUN;
1799 case CS_BIDIR_RD_OVERRUN:
1800 ql_dbg(ql_dbg_user, vha, 0x70b3,
1801 "Command completed with read data overrun thread_id=%d\n",
1803 rval = EXT_STATUS_DATA_OVERRUN;
1806 case CS_BIDIR_RD_WR_OVERRUN:
1807 ql_dbg(ql_dbg_user, vha, 0x70b4,
1808 "Command completed with read and write data overrun "
1809 "thread_id=%d\n", thread_id);
1810 rval = EXT_STATUS_DATA_OVERRUN;
1813 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1814 ql_dbg(ql_dbg_user, vha, 0x70b5,
1815 "Command completed with read data over and write data "
1816 "underrun thread_id=%d\n", thread_id);
1817 rval = EXT_STATUS_DATA_OVERRUN;
1820 case CS_BIDIR_RD_UNDERRUN:
1821 ql_dbg(ql_dbg_user, vha, 0x70b6,
1822 "Command completed with read data data underrun "
1823 "thread_id=%d\n", thread_id);
1824 rval = EXT_STATUS_DATA_UNDERRUN;
1827 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1828 ql_dbg(ql_dbg_user, vha, 0x70b7,
1829 "Command completed with read data under and write data "
1830 "overrun thread_id=%d\n", thread_id);
1831 rval = EXT_STATUS_DATA_UNDERRUN;
1834 case CS_BIDIR_RD_WR_UNDERRUN:
1835 ql_dbg(ql_dbg_user, vha, 0x70b8,
1836 "Command completed with read and write data underrun "
1837 "thread_id=%d\n", thread_id);
1838 rval = EXT_STATUS_DATA_UNDERRUN;
1842 ql_dbg(ql_dbg_user, vha, 0x70b9,
1843 "Command completed with data DMA error thread_id=%d\n",
1845 rval = EXT_STATUS_DMA_ERR;
1849 ql_dbg(ql_dbg_user, vha, 0x70ba,
1850 "Command completed with timeout thread_id=%d\n",
1852 rval = EXT_STATUS_TIMEOUT;
1855 ql_dbg(ql_dbg_user, vha, 0x70bb,
1856 "Command completed with completion status=0x%x "
1857 "thread_id=%d\n", comp_status, thread_id);
1858 rval = EXT_STATUS_ERR;
1861 bsg_job->reply->reply_payload_rcv_len = 0;
1864 /* Return the vendor specific reply to API */
1865 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1866 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1867 /* Always return DID_OK, bsg will send the vendor specific response
1868 * in this case only */
1869 sp->done(vha, sp, (DID_OK << 6));
1874 * qla2x00_status_entry() - Process a Status IOCB entry.
1875 * @ha: SCSI driver HA context
1876 * @pkt: Entry pointer
1879 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1883 struct scsi_cmnd *cp;
1885 struct sts_entry_24xx *sts24;
1886 uint16_t comp_status;
1887 uint16_t scsi_status;
1889 uint8_t lscsi_status;
1891 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1893 uint8_t *rsp_info, *sense_data;
1894 struct qla_hw_data *ha = vha->hw;
1897 struct req_que *req;
1900 uint16_t state_flags = 0;
1902 sts = (sts_entry_t *) pkt;
1903 sts24 = (struct sts_entry_24xx *) pkt;
1904 if (IS_FWI2_CAPABLE(ha)) {
1905 comp_status = le16_to_cpu(sts24->comp_status);
1906 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1907 state_flags = le16_to_cpu(sts24->state_flags);
1909 comp_status = le16_to_cpu(sts->comp_status);
1910 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1912 handle = (uint32_t) LSW(sts->handle);
1913 que = MSW(sts->handle);
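/* The MSW of the IOCB handle selects the request queue the command was issued on. */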
1914 req = ha->req_q_map[que];
1916 /* Validate handle. */
1917 if (handle < MAX_OUTSTANDING_COMMANDS) {
1918 sp = req->outstanding_cmds[handle];
1923 ql_dbg(ql_dbg_io, vha, 0x3017,
1924 "Invalid status handle (0x%x).\n", sts->handle);
1927 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1929 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1930 qla2xxx_wake_dpc(vha);
1934 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1935 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1939 /* Fast path completion. */
1940 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1941 qla2x00_process_completed_request(vha, req, handle);
1946 req->outstanding_cmds[handle] = NULL;
1947 cp = GET_CMD_SP(sp);
1949 ql_dbg(ql_dbg_io, vha, 0x3018,
1950 "Command already returned (0x%x/%p).\n",
1956 lscsi_status = scsi_status & STATUS_MASK;
1958 fcport = sp->fcport;
1961 sense_len = par_sense_len = rsp_info_len = resid_len =
1963 if (IS_FWI2_CAPABLE(ha)) {
1964 if (scsi_status & SS_SENSE_LEN_VALID)
1965 sense_len = le32_to_cpu(sts24->sense_len);
1966 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1967 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1968 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1969 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1970 if (comp_status == CS_DATA_UNDERRUN)
1971 fw_resid_len = le32_to_cpu(sts24->residual_len);
1972 rsp_info = sts24->data;
1973 sense_data = sts24->data;
1974 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1975 ox_id = le16_to_cpu(sts24->ox_id);
1976 par_sense_len = sizeof(sts24->data);
1978 if (scsi_status & SS_SENSE_LEN_VALID)
1979 sense_len = le16_to_cpu(sts->req_sense_length);
1980 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1981 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1982 resid_len = le32_to_cpu(sts->residual_length);
1983 rsp_info = sts->rsp_info;
1984 sense_data = sts->req_sense_data;
1985 par_sense_len = sizeof(sts->req_sense_data);
1988 /* Check for any FCP transport errors. */
1989 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1990 /* Sense data lies beyond any FCP RESPONSE data. */
1991 if (IS_FWI2_CAPABLE(ha)) {
1992 sense_data += rsp_info_len;
1993 par_sense_len -= rsp_info_len;
1995 if (rsp_info_len > 3 && rsp_info[3]) {
1996 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
1997 "FCP I/O protocol failure (0x%x/0x%x).\n",
1998 rsp_info_len, rsp_info[3]);
2000 res = DID_BUS_BUSY << 16;
2005 /* Check for overrun. */
2006 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2007 scsi_status & SS_RESIDUAL_OVER)
2008 comp_status = CS_DATA_OVERRUN;
2011 * Based on Host and SCSI status, generate a status code for Linux
2013 switch (comp_status) {
2016 if (scsi_status == 0) {
2020 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2022 scsi_set_resid(cp, resid);
2024 if (!lscsi_status &&
2025 ((unsigned)(scsi_bufflen(cp) - resid) <
2027 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2028 "Mid-layer underflow "
2029 "detected (0x%x of 0x%x bytes).\n",
2030 resid, scsi_bufflen(cp));
2032 res = DID_ERROR << 16;
2036 res = DID_OK << 16 | lscsi_status;
2038 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2039 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2040 "QUEUE FULL detected.\n");
2044 if (lscsi_status != SS_CHECK_CONDITION)
2047 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2048 if (!(scsi_status & SS_SENSE_LEN_VALID))
2051 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2055 case CS_DATA_UNDERRUN:
2056 /* Use F/W calculated residual length. */
2057 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2058 scsi_set_resid(cp, resid);
2059 if (scsi_status & SS_RESIDUAL_UNDER) {
2060 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2061 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2062 "Dropped frame(s) detected "
2063 "(0x%x of 0x%x bytes).\n",
2064 resid, scsi_bufflen(cp));
2066 res = DID_ERROR << 16 | lscsi_status;
2067 goto check_scsi_status;
2070 if (!lscsi_status &&
2071 ((unsigned)(scsi_bufflen(cp) - resid) <
2073 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2074 "Mid-layer underflow "
2075 "detected (0x%x of 0x%x bytes).\n",
2076 resid, scsi_bufflen(cp));
2078 res = DID_ERROR << 16;
2081 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2082 lscsi_status != SAM_STAT_BUSY) {
2084 * SCSI statuses of task set full and busy are treated as
2085 * the task not having completed.
2088 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2089 "Dropped frame(s) detected (0x%x "
2090 "of 0x%x bytes).\n", resid,
2093 res = DID_ERROR << 16 | lscsi_status;
2094 goto check_scsi_status;
2096 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2097 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2098 scsi_status, lscsi_status);
2101 res = DID_OK << 16 | lscsi_status;
2106 * Check to see if SCSI Status is non zero. If so report SCSI
2109 if (lscsi_status != 0) {
2110 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2111 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2112 "QUEUE FULL detected.\n");
2116 if (lscsi_status != SS_CHECK_CONDITION)
2119 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2120 if (!(scsi_status & SS_SENSE_LEN_VALID))
2123 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2124 sense_len, rsp, res);
2128 case CS_PORT_LOGGED_OUT:
2129 case CS_PORT_CONFIG_CHG:
2132 case CS_PORT_UNAVAILABLE:
2137 * We are going to have the fc class block the rport
2138 * while we try to recover so instruct the mid layer
2139 * to requeue until the class decides how to handle this.
2141 res = DID_TRANSPORT_DISRUPTED << 16;
2143 if (comp_status == CS_TIMEOUT) {
2144 if (IS_FWI2_CAPABLE(ha))
2146 else if ((le16_to_cpu(sts->status_flags) &
2147 SF_LOGOUT_SENT) == 0)
2151 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2152 "Port down status: port-state=0x%x.\n",
2153 atomic_read(&fcport->state));
2155 if (atomic_read(&fcport->state) == FCS_ONLINE)
2156 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2160 res = DID_RESET << 16;
2164 logit = qla2x00_handle_dif_error(sp, sts24);
2169 res = DID_ERROR << 16;
2171 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2174 if (state_flags & BIT_4)
2175 scmd_printk(KERN_WARNING, cp,
2176 "Unsupported device '%s' found.\n",
2177 cp->device->vendor);
2181 res = DID_ERROR << 16;
2187 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2188 "FCP command status: 0x%x-0x%x (0x%x) "
2189 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2190 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2191 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2192 comp_status, scsi_status, res, vha->host_no,
2193 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2194 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2195 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2196 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2197 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
2198 resid_len, fw_resid_len);
2200 if (rsp->status_srb == NULL)
2201 sp->done(ha, sp, res);
2205 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2206 * @ha: SCSI driver HA context
2207 * @pkt: Entry pointer
2209 * Extended sense data.
2212 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2214 uint8_t sense_sz = 0;
2215 struct qla_hw_data *ha = rsp->hw;
2216 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2217 srb_t *sp = rsp->status_srb;
2218 struct scsi_cmnd *cp;
2222 if (!sp || !GET_CMD_SENSE_LEN(sp))
2225 sense_len = GET_CMD_SENSE_LEN(sp);
2226 sense_ptr = GET_CMD_SENSE_PTR(sp);
2228 cp = GET_CMD_SP(sp);
2230 ql_log(ql_log_warn, vha, 0x3025,
2231 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2233 rsp->status_srb = NULL;
2237 if (sense_len > sizeof(pkt->data))
2238 sense_sz = sizeof(pkt->data);
2240 sense_sz = sense_len;
2242 /* Move sense data. */
2243 if (IS_FWI2_CAPABLE(ha))
2244 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2245 memcpy(sense_ptr, pkt->data, sense_sz);
2246 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2247 sense_ptr, sense_sz);
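/* Account for the chunk just copied and remember where the next
 * status-continuation entry should append its sense bytes. */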
2249 sense_len -= sense_sz;
2250 sense_ptr += sense_sz;
2252 SET_CMD_SENSE_PTR(sp, sense_ptr);
2253 SET_CMD_SENSE_LEN(sp, sense_len);
2255 /* Place command on done queue. */
2256 if (sense_len == 0) {
2257 rsp->status_srb = NULL;
2258 sp->done(ha, sp, cp->result);
2263 * qla2x00_error_entry() - Process an error entry.
2264 * @vha: SCSI driver HA context
2265 * @pkt: Entry pointer
2268 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2271 struct qla_hw_data *ha = vha->hw;
2272 const char func[] = "ERROR-IOCB";
2273 uint16_t que = MSW(pkt->handle);
2274 struct req_que *req = NULL;
2275 int res = DID_ERROR << 16;
2277 ql_dbg(ql_dbg_async, vha, 0x502a,
2278 "type of error status in response: 0x%x\n", pkt->entry_status);
2280 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2283 req = ha->req_q_map[que];
2285 if (pkt->entry_status & RF_BUSY)
2286 res = DID_BUS_BUSY << 16;
2288 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2290 sp->done(ha, sp, res);
2294 ql_log(ql_log_warn, vha, 0x5030,
2295 "Error entry - invalid handle/queue.\n");
2298 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2300 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2301 qla2xxx_wake_dpc(vha);
2305 * qla24xx_mbx_completion() - Process mailbox command completions.
2306 * @vha: SCSI driver HA context
2307 * @mb0: Mailbox0 register
2310 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2314 uint16_t __iomem *wptr;
2315 struct qla_hw_data *ha = vha->hw;
2316 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2318 /* Read all mbox registers? */
2319 mboxes = (1 << ha->mbx_count) - 1;
2321 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2323 mboxes = ha->mcp->in_mb;
2325 /* Load return mailbox registers. */
2326 ha->flags.mbox_int = 1;
2327 ha->mailbox_out[0] = mb0;
2329 wptr = (uint16_t __iomem *)&reg->mailbox1;
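/* Copy the return mailboxes out of the register window; 'mboxes'
 * selects which registers the caller actually asked for. */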
2331 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2333 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2341 * qla24xx_process_response_queue() - Process response queue entries.
2342 * @vha: SCSI driver HA context
2344 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2345 struct rsp_que *rsp)
2347 struct sts_entry_24xx *pkt;
2348 struct qla_hw_data *ha = vha->hw;
2350 if (!vha->flags.online)
2353 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2354 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2357 if (rsp->ring_index == rsp->length) {
2358 rsp->ring_index = 0;
2359 rsp->ring_ptr = rsp->ring;
2364 if (pkt->entry_status != 0) {
2365 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2367 (void)qlt_24xx_process_response_error(vha, pkt);
2369 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
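/* Dispatch the completed IOCB to its type-specific handler. */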
2374 switch (pkt->entry_type) {
2376 qla2x00_status_entry(vha, rsp, pkt);
2378 case STATUS_CONT_TYPE:
2379 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2381 case VP_RPT_ID_IOCB_TYPE:
2382 qla24xx_report_id_acquisition(vha,
2383 (struct vp_rpt_id_entry_24xx *)pkt);
2385 case LOGINOUT_PORT_IOCB_TYPE:
2386 qla24xx_logio_entry(vha, rsp->req,
2387 (struct logio_entry_24xx *)pkt);
2389 case TSK_MGMT_IOCB_TYPE:
2390 qla24xx_tm_iocb_entry(vha, rsp->req,
2391 (struct tsk_mgmt_entry *)pkt);
2394 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2397 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2399 case ABTS_RECV_24XX:
2400 /* ensure that the ATIO queue is empty */
2401 qlt_24xx_process_atio_queue(vha);
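/* Intentional fall-through: the ABTS packet itself is handed to the
 * target-mode code below along with the other target-mode IOCB types. */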
2402 case ABTS_RESP_24XX:
2404 case NOTIFY_ACK_TYPE:
2405 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2408 /* Do nothing here; this case exists only to keep the entry
2409 * from falling through to the default case.
2413 /* Type Not Supported. */
2414 ql_dbg(ql_dbg_async, vha, 0x5042,
2415 "Received unknown response pkt type %x "
2416 "entry status=%x.\n",
2417 pkt->entry_type, pkt->entry_status);
2420 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2424 /* Adjust ring index */
2425 if (IS_QLA82XX(ha)) {
2426 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2427 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2429 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2433 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2437 struct qla_hw_data *ha = vha->hw;
2438 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2440 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2444 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2445 RD_REG_DWORD(&reg->iobase_addr);
2446 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2447 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2448 rval == QLA_SUCCESS; cnt--) {
2450 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2453 rval = QLA_FUNCTION_TIMEOUT;
2455 if (rval == QLA_SUCCESS)
2458 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2459 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2460 rval == QLA_SUCCESS; cnt--) {
2462 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2465 rval = QLA_FUNCTION_TIMEOUT;
2467 if (rval != QLA_SUCCESS)
2471 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2472 ql_log(ql_log_info, vha, 0x504c,
2473 "Additional code -- 0x55AA.\n");
2476 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2477 RD_REG_DWORD(&reg->iobase_window);
2481 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2483 * @dev_id: SCSI driver HA context
2485 * Called by system whenever the host adapter generates an interrupt.
2487 * Returns handled flag.
2490 qla24xx_intr_handler(int irq, void *dev_id)
2492 scsi_qla_host_t *vha;
2493 struct qla_hw_data *ha;
2494 struct device_reg_24xx __iomem *reg;
2500 struct rsp_que *rsp;
2501 unsigned long flags;
2503 rsp = (struct rsp_que *) dev_id;
2505 ql_log(ql_log_info, NULL, 0x5059,
2506 "%s: NULL response queue pointer.\n", __func__);
2511 reg = &ha->iobase->isp24;
2514 if (unlikely(pci_channel_offline(ha->pdev)))
2517 spin_lock_irqsave(&ha->hardware_lock, flags);
2518 vha = pci_get_drvdata(ha->pdev);
2519 for (iter = 50; iter--; ) {
2520 stat = RD_REG_DWORD(&reg->host_status);
2521 if (stat & HSRX_RISC_PAUSED) {
2522 if (unlikely(pci_channel_offline(ha->pdev)))
2525 hccr = RD_REG_DWORD(&reg->hccr);
2527 ql_log(ql_log_warn, vha, 0x504b,
2528 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2531 qla2xxx_check_risc_status(vha);
2533 ha->isp_ops->fw_dump(vha, 1);
2534 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2536 } else if ((stat & HSRX_RISC_INT) == 0)
2539 switch (stat & 0xff) {
2540 case INTR_ROM_MB_SUCCESS:
2541 case INTR_ROM_MB_FAILED:
2542 case INTR_MB_SUCCESS:
2543 case INTR_MB_FAILED:
2544 qla24xx_mbx_completion(vha, MSW(stat));
2545 status |= MBX_INTERRUPT;
2548 case INTR_ASYNC_EVENT:
2550 mb[1] = RD_REG_WORD(&reg->mailbox1);
2551 mb[2] = RD_REG_WORD(&reg->mailbox2);
2552 mb[3] = RD_REG_WORD(&reg->mailbox3);
2553 qla2x00_async_event(vha, rsp, mb);
2555 case INTR_RSP_QUE_UPDATE:
2556 case INTR_RSP_QUE_UPDATE_83XX:
2557 qla24xx_process_response_queue(vha, rsp);
2559 case INTR_ATIO_QUE_UPDATE:
2560 qlt_24xx_process_atio_queue(vha);
2562 case INTR_ATIO_RSP_QUE_UPDATE:
2563 qlt_24xx_process_atio_queue(vha);
2564 qla24xx_process_response_queue(vha, rsp);
2567 ql_dbg(ql_dbg_async, vha, 0x504f,
2568 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2571 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2572 RD_REG_DWORD_RELAXED(&reg->hccr);
2573 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2576 spin_unlock_irqrestore(&ha->hardware_lock, flags);
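/*
 * If a mailbox command is waiting on mbx_intr_comp and this pass
 * serviced its completion interrupt, wake it up now.
 */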
2578 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2579 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2580 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2581 complete(&ha->mbx_intr_comp);
2588 qla24xx_msix_rsp_q(int irq, void *dev_id)
2590 struct qla_hw_data *ha;
2591 struct rsp_que *rsp;
2592 struct device_reg_24xx __iomem *reg;
2593 struct scsi_qla_host *vha;
2594 unsigned long flags;
2596 rsp = (struct rsp_que *) dev_id;
2598 ql_log(ql_log_info, NULL, 0x505a,
2599 "%s: NULL response queue pointer.\n", __func__);
2603 reg = &ha->iobase->isp24;
2605 spin_lock_irqsave(&ha->hardware_lock, flags);
2607 vha = pci_get_drvdata(ha->pdev);
2608 qla24xx_process_response_queue(vha, rsp);
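/* Clear the RISC interrupt unless MSI-X handshaking has been disabled. */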
2609 if (!ha->flags.disable_msix_handshake) {
2610 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2611 RD_REG_DWORD_RELAXED(&reg->hccr);
2613 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2619 qla25xx_msix_rsp_q(int irq, void *dev_id)
2621 struct qla_hw_data *ha;
2622 struct rsp_que *rsp;
2623 struct device_reg_24xx __iomem *reg;
2624 unsigned long flags;
2626 rsp = (struct rsp_que *) dev_id;
2628 ql_log(ql_log_info, NULL, 0x505b,
2629 "%s: NULL response queue pointer.\n", __func__);
2634 /* Clear the interrupt, if enabled, for this response queue */
2635 if (!ha->flags.disable_msix_handshake) {
2636 reg = &ha->iobase->isp24;
2637 spin_lock_irqsave(&ha->hardware_lock, flags);
2638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2639 RD_REG_DWORD_RELAXED(&reg->hccr);
2640 spin_unlock_irqrestore(&ha->hardware_lock, flags);
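/* Defer processing of this response queue to the driver workqueue,
 * running the work item on the CPU that matches the queue id. */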
2642 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2648 qla24xx_msix_default(int irq, void *dev_id)
2650 scsi_qla_host_t *vha;
2651 struct qla_hw_data *ha;
2652 struct rsp_que *rsp;
2653 struct device_reg_24xx __iomem *reg;
2658 unsigned long flags;
2660 rsp = (struct rsp_que *) dev_id;
2662 ql_log(ql_log_info, NULL, 0x505c,
2663 "%s: NULL response queue pointer.\n", __func__);
2667 reg = &ha->iobase->isp24;
2670 spin_lock_irqsave(&ha->hardware_lock, flags);
2671 vha = pci_get_drvdata(ha->pdev);
2673 stat = RD_REG_DWORD(&reg->host_status);
2674 if (stat & HSRX_RISC_PAUSED) {
2675 if (unlikely(pci_channel_offline(ha->pdev)))
2678 hccr = RD_REG_DWORD(&reg->hccr);
2680 ql_log(ql_log_info, vha, 0x5050,
2681 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2684 qla2xxx_check_risc_status(vha);
2686 ha->isp_ops->fw_dump(vha, 1);
2687 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2689 } else if ((stat & HSRX_RISC_INT) == 0)
2692 switch (stat & 0xff) {
2693 case INTR_ROM_MB_SUCCESS:
2694 case INTR_ROM_MB_FAILED:
2695 case INTR_MB_SUCCESS:
2696 case INTR_MB_FAILED:
2697 qla24xx_mbx_completion(vha, MSW(stat));
2698 status |= MBX_INTERRUPT;
2701 case INTR_ASYNC_EVENT:
2703 mb[1] = RD_REG_WORD(&reg->mailbox1);
2704 mb[2] = RD_REG_WORD(&reg->mailbox2);
2705 mb[3] = RD_REG_WORD(&reg->mailbox3);
2706 qla2x00_async_event(vha, rsp, mb);
2708 case INTR_RSP_QUE_UPDATE:
2709 case INTR_RSP_QUE_UPDATE_83XX:
2710 qla24xx_process_response_queue(vha, rsp);
2712 case INTR_ATIO_QUE_UPDATE:
2713 qlt_24xx_process_atio_queue(vha);
2715 case INTR_ATIO_RSP_QUE_UPDATE:
2716 qlt_24xx_process_atio_queue(vha);
2717 qla24xx_process_response_queue(vha, rsp);
2720 ql_dbg(ql_dbg_async, vha, 0x5051,
2721 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2724 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2726 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2728 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2729 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2730 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2731 complete(&ha->mbx_intr_comp);
2736 /* Interrupt handling helpers. */
2738 struct qla_init_msix_entry {
2740 irq_handler_t handler;
2743 static struct qla_init_msix_entry msix_entries[3] = {
2744 { "qla2xxx (default)", qla24xx_msix_default },
2745 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2746 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2749 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2750 { "qla2xxx (default)", qla82xx_msix_default },
2751 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2755 qla24xx_disable_msix(struct qla_hw_data *ha)
2758 struct qla_msix_entry *qentry;
2759 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2761 for (i = 0; i < ha->msix_count; i++) {
2762 qentry = &ha->msix_entries[i];
2763 if (qentry->have_irq)
2764 free_irq(qentry->vector, qentry->rsp);
2766 pci_disable_msix(ha->pdev);
2767 kfree(ha->msix_entries);
2768 ha->msix_entries = NULL;
2769 ha->flags.msix_enabled = 0;
2770 ql_dbg(ql_dbg_init, vha, 0x0042,
2771 "Disabled the MSI.\n");
2775 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2777 #define MIN_MSIX_COUNT 2
2779 struct msix_entry *entries;
2780 struct qla_msix_entry *qentry;
2781 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2783 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2786 ql_log(ql_log_warn, vha, 0x00bc,
2787 "Failed to allocate memory for msix_entry.\n");
2791 for (i = 0; i < ha->msix_count; i++)
2792 entries[i].entry = i;
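/*
 * With the legacy pci_enable_msix() interface, 0 means all requested
 * vectors were allocated, a positive return is the number of vectors
 * actually available (retry with that many), and a negative return is
 * an error.
 */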
2794 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2796 if (ret < MIN_MSIX_COUNT)
2799 ql_log(ql_log_warn, vha, 0x00c6,
2800 "MSI-X: Failed to enable support "
2801 "-- %d/%d\n Retry with %d vectors.\n",
2802 ha->msix_count, ret, ret);
2803 ha->msix_count = ret;
2804 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2807 ql_log(ql_log_fatal, vha, 0x00c7,
2808 "MSI-X: Failed to enable support, "
2809 "giving up -- %d/%d.\n",
2810 ha->msix_count, ret);
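/* The default vector does not serve a response queue, so at most
 * msix_count - 1 response queues can be supported. */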
2813 ha->max_rsp_queues = ha->msix_count - 1;
2815 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2816 ha->msix_count, GFP_KERNEL);
2817 if (!ha->msix_entries) {
2818 ql_log(ql_log_fatal, vha, 0x00c8,
2819 "Failed to allocate memory for ha->msix_entries.\n");
2823 ha->flags.msix_enabled = 1;
2825 for (i = 0; i < ha->msix_count; i++) {
2826 qentry = &ha->msix_entries[i];
2827 qentry->vector = entries[i].vector;
2828 qentry->entry = entries[i].entry;
2829 qentry->have_irq = 0;
2833 /* Enable MSI-X vectors for the base queue */
2834 for (i = 0; i < 2; i++) {
2835 qentry = &ha->msix_entries[i];
2836 if (IS_QLA82XX(ha)) {
2837 ret = request_irq(qentry->vector,
2838 qla82xx_msix_entries[i].handler,
2839 0, qla82xx_msix_entries[i].name, rsp);
2841 ret = request_irq(qentry->vector,
2842 msix_entries[i].handler,
2843 0, msix_entries[i].name, rsp);
2846 ql_log(ql_log_fatal, vha, 0x00cb,
2847 "MSI-X: unable to register handler -- %x/%d.\n",
2848 qentry->vector, ret);
2849 qla24xx_disable_msix(ha);
2853 qentry->have_irq = 1;
2858 /* Enable MSI-X vector for response queue update for queue 0 */
2859 if (IS_QLA83XX(ha)) {
2860 if (ha->msixbase && ha->mqiobase &&
2861 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2865 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2867 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2868 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2869 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2870 ql_dbg(ql_dbg_init, vha, 0x0055,
2871 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2872 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2880 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2883 device_reg_t __iomem *reg = ha->iobase;
2884 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2886 /* If possible, enable MSI-X. */
2887 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2888 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
2891 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2892 (ha->pdev->subsystem_device == 0x7040 ||
2893 ha->pdev->subsystem_device == 0x7041 ||
2894 ha->pdev->subsystem_device == 0x1705)) {
2895 ql_log(ql_log_warn, vha, 0x0034,
2896 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2897 ha->pdev->subsystem_vendor,
2898 ha->pdev->subsystem_device);
2902 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2903 ql_log(ql_log_warn, vha, 0x0035,
2904 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2905 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2909 ret = qla24xx_enable_msix(ha, rsp);
2911 ql_dbg(ql_dbg_init, vha, 0x0036,
2912 "MSI-X: Enabled (0x%X, 0x%X).\n",
2913 ha->chip_revision, ha->fw_attributes);
2914 goto clear_risc_ints;
2916 ql_log(ql_log_info, vha, 0x0037,
2917 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2920 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2921 !IS_QLA8001(ha) && !IS_QLA82XX(ha))
2924 ret = pci_enable_msi(ha->pdev);
2926 ql_dbg(ql_dbg_init, vha, 0x0038,
2928 ha->flags.msi_enabled = 1;
2930 ql_log(ql_log_warn, vha, 0x0039,
2931 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2933 /* Skip INTx on ISP82xx. */
2934 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2935 return QLA_FUNCTION_FAILED;
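/*
 * Fall back to a single interrupt: register the legacy handler, using
 * a shared line only when MSI could not be enabled.
 */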
2939 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2940 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2941 QLA2XXX_DRIVER_NAME, rsp);
2943 ql_log(ql_log_warn, vha, 0x003a,
2944 "Failed to reserve interrupt %d already in use.\n",
2951 spin_lock_irq(&ha->hardware_lock);
2952 if (!IS_FWI2_CAPABLE(ha))
2953 WRT_REG_WORD(&reg->isp.semaphore, 0);
2954 spin_unlock_irq(&ha->hardware_lock);
2961 qla2x00_free_irqs(scsi_qla_host_t *vha)
2963 struct qla_hw_data *ha = vha->hw;
2964 struct rsp_que *rsp;
2967 * We need to check that ha->rsp_q_map is valid in case we are called
2968 * from a probe failure context.
2970 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2972 rsp = ha->rsp_q_map[0];
2974 if (ha->flags.msix_enabled)
2975 qla24xx_disable_msix(ha);
2976 else if (ha->flags.msi_enabled) {
2977 free_irq(ha->pdev->irq, rsp);
2978 pci_disable_msi(ha->pdev);
2980 free_irq(ha->pdev->irq, rsp);
2984 int qla25xx_request_irq(struct rsp_que *rsp)
2986 struct qla_hw_data *ha = rsp->hw;
2987 struct qla_init_msix_entry *intr = &msix_entries[2];
2988 struct qla_msix_entry *msix = rsp->msix;
2989 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
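/* Additional response queues are serviced by the dedicated multiqueue
 * handler (msix_entries[2]) on their own MSI-X vector. */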
2992 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2994 ql_log(ql_log_fatal, vha, 0x00e6,
2995 "MSI-X: Unable to register handler -- %x/%d.\n",