/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
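/*
 * Worked example (illustrative, not from the original source): the first
 * Command Type 2 IOCB carries three DSDs and each Continuation Type 0
 * IOCB carries seven more, so dsds = 17 needs 1 + (14 / 7) = 3 entries
 * (14 % 7 == 0), while dsds = 18 rounds up to 4 for the leftover DSD.
 */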
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
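/*
 * Worked example (illustrative): the Command Type 3 IOCB carries two
 * DSDs and each Continuation Type 1 IOCB carries five more, so
 * dsds = 12 needs 1 + (10 / 5) = 3 entries, and dsds = 13 rounds up
 * to 4.
 */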
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
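/*
 * Example mapping (illustrative): a WRITE with SCSI_PROT_WRITE_INSERT
 * means the OS supplies no protection data, so the firmware generates
 * and inserts DIF on the wire (PO_MODE_DIF_INSERT); a READ_PASS on a
 * host using an IP-checksum DIX guard (SHOST_DIX_GUARD_IP) carries
 * protection data end-to-end but lets the firmware convert the guard
 * field (PO_MODE_DIF_TCP_CKSUM).
 */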
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
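/*
 * Layout note (illustrative): each 64-bit DSD is three little-endian
 * 32-bit words -- address low, address high, length. A segment at DMA
 * address 0x1_2345_6000 of 0x1000 bytes is emitted as the words
 * 0x23456000, 0x00000001, 0x00001000.
 */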
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
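		/*
		 * Illustrative: the request ring is circular, so with
		 * length 1024, ring_index 1000 and a hardware out-pointer
		 * (cnt) of 10, the free count is 1024 - (1000 - 10) = 34
		 * entries.
		 */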
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct mrk_entry_fx00 *mrkfx = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_QLAFX00(ha)) {
			mrkfx = (struct mrk_entry_fx00 *) mrk;
			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
			mrkfx->handle_hi = 0;
			mrkfx->tgt_id = cpu_to_le16(loop_id);
			mrkfx->lun[1] = LSB(lun);
			mrkfx->lun[2] = MSB(lun);
			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
		} else if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
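/*
 * Worked example (illustrative, assuming QLA_DSDS_PER_IOCB is 37 as
 * defined elsewhere in the driver): dsds = 74 needs exactly 74/37 = 2
 * DSD lists, while dsds = 75 leaves a remainder and rounds up to 3.
 */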
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for bookkeeping, bzero on initial invocation */
	uint32_t	bytes_consumed;
	uint32_t	num_bytes;
	uint32_t	tot_partial;

	/* for secondary use */
	uint32_t	num_sg;
	srb_t		*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
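/*
 * Illustrative walk (not from the original source): with 512-byte
 * blocks and SG entries of 300 and 724 bytes, the first call returns
 * dma_len = 300 with *partial = 1 (the block straddles the SG
 * boundary); the second call returns the 212 bytes that complete the
 * block with *partial = 0, leaving bytes_consumed = 212 within the
 * second entry.
 */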
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t	*cur_dsd, *fcp_dl;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	total_bytes = 0;
	uint32_t	data_bytes;
	uint32_t	dif_bytes;
	uint8_t		bundling = 1;
	uint16_t	blk_size;
	uint8_t		*clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t		additional_fcpcdb_len;
	uint16_t	fcp_cmnd_len;
	struct fcp_cmnd	*fcp_cmnd;
	dma_addr_t	crc_ctx_dma;
	char		tag[2];

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}
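	/*
	 * Illustrative: the 12 covers the 8-byte LUN plus the four
	 * control/task-attribute bytes of the FCP_CMND IU and the
	 * trailing 4 is the fcp_dl field, so a 32-byte CDB yields
	 * additional_fcpcdb_len = 16 and fcp_cmnd_len = 12 + 32 + 4 = 48.
	 */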
	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = TSK_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;
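	/*
	 * Illustrative: each protection interval carries an 8-byte DIF
	 * tuple, so a 4096-byte transfer on a 512-byte-sector device adds
	 * (4096 / 512) * 8 = 64 bytes of DIF.
	 */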
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);
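	/*
	 * Note: fcp_dl occupies the four bytes that follow the (possibly
	 * extended) CDB in the FCP_CMND IU and is big-endian on the wire,
	 * hence htonl() rather than cpu_to_le32().
	 */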
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		default:
			cmd_pkt->task = TSK_SIMPLE;
			break;
		}
	} else {
		cmd_pkt->task = TSK_SIMPLE;
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
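/*
 * Illustrative: with cpu_affinity_enabled and max_rsp_queues = 4, I/O
 * issued from CPU 2 completes on rsp_q_map[3]; out-of-range or negative
 * affinity values fall back to the default queue rsp_q_map[0].
 */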
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

queuing_error:
	return pkt;
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint16_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
2212 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2213 * @sp: command to send to the ISP
2215 * Returns non-zero if a failure occurred, else zero.
2218 qla82xx_start_scsi(srb_t *sp)
2221 unsigned long flags;
2222 struct scsi_cmnd *cmd;
2229 struct device_reg_82xx __iomem *reg;
2232 uint8_t additional_cdb_len;
2233 struct ct6_dsd *ctx;
2234 struct scsi_qla_host *vha = sp->fcport->vha;
2235 struct qla_hw_data *ha = vha->hw;
2236 struct req_que *req = NULL;
2237 struct rsp_que *rsp = NULL;
2240 /* Setup device pointers. */
2242 reg = &ha->iobase->isp82;
2243 cmd = GET_CMD_SP(sp);
2245 rsp = ha->rsp_q_map[0];
2247 /* So we know we haven't pci_map'ed anything yet */
2250 dbval = 0x04 | (ha->portnum << 5);
2252 /* Send marker if required */
2253 if (vha->marker_needed != 0) {
2254 if (qla2x00_marker(vha, req,
2255 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2256 ql_log(ql_log_warn, vha, 0x300c,
2257 "qla2x00_marker failed for cmd=%p.\n", cmd);
2258 return QLA_FUNCTION_FAILED;
2260 vha->marker_needed = 0;
2263 /* Acquire ring specific lock */
2264 spin_lock_irqsave(&ha->hardware_lock, flags);
2266 /* Check for room in outstanding command list. */
2267 handle = req->current_outstanding_cmd;
2268 for (index = 1; index < req->num_outstanding_cmds; index++) {
2270 if (handle == req->num_outstanding_cmds)
2272 if (!req->outstanding_cmds[handle])
2275 if (index == req->num_outstanding_cmds)
2278 /* Map the sg table so we have an accurate count of sg entries needed */
2279 if (scsi_sg_count(cmd)) {
2280 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2281 scsi_sg_count(cmd), cmd->sc_data_direction);
2282 if (unlikely(!nseg))
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}
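		/*
		 * Enough DSD lists are now cached in the global pool; a
		 * single request-queue entry suffices because the data
		 * segments live in external DSD lists rather than in
		 * continuation IOCBs.
		 */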
sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}
		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;
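		/*
		 * fcp_cmnd_len below = 12-byte FCP_CMND header (8-byte LUN
		 * plus task-attribute/flag fields) + CDB length + 4-byte
		 * FCP_DL trailer.
		 */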
		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4 bytes.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;
		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
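		/*
		 * The low bits of additional_cdb_len double as the FCP_CMND
		 * data-direction flags: bit 0 = write data, bit 1 = read
		 * data.
		 */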
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;
		/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute = TSK_ORDERED;
				break;
			}
		}
		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
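		/*
		 * FCP_DL (the expected transfer length) sits right after the
		 * CDB in the FCP_CMND IU and is big-endian on the wire,
		 * hence htonl() rather than cpu_to_le32().
		 */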
		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));
		/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}
		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	/* Set chip new ring index: write, read back and verify. */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}
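	/*
	 * The read-back-and-retry loop above re-posts the doorbell until
	 * the value read back matches what was written, guarding against
	 * doorbell writes that fail to stick on the ISP82xx.
	 */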
	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;
queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}
	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		qlafx00_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag: the firmware takes care of
	 * assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
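	/* The firmware timeout is padded 2 seconds beyond the default async
	 * timeout, presumably so the firmware does not give up before the
	 * upper layers do.
	 */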
	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;
	/* Only one dsd is available for bidirectional IOCB, remaining dsds
	 * are bundled in the continuation iocb.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* The read-request DSDs always go to the continuation IOCBs and
	 * follow the write DSDs. If there is room on the current IOCB
	 * then they are added there, else a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This must equal the number of IOCBs required for this cmd. */
	cmd_pkt->entry_count = entry_count;
}
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle, index;
	uint16_t req_cnt, cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;
	rsp = ha->rsp_q_map[0];
	req = vha->req;
	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}
	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha). */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;
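	/*
	 * The command is addressed to the port's own (self-login) handle
	 * and N_Port ID above: with BD_WRAP_BACK set the firmware loops
	 * the data back, so source and destination are the same port.
	 */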
	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}