/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
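	/*
	 * Example: for dsds == 10, the Command Type 2 IOCB carries the
	 * first 3 DSDs and the remaining 7 fit in exactly one
	 * Continuation Type 0 IOCB, so iocbs == 2.
	 */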
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
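	/*
	 * Example: for dsds == 12, the Command Type 3 IOCB carries the
	 * first 2 DSDs and the remaining 10 fill two Continuation Type 1
	 * IOCBs (5 DSDs each), so iocbs == 3.
	 */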
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the continuation IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

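		/* Each 32-bit DSD is an (address, length) word pair. */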
		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

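		/* 64-bit DSDs are (address low, address high, length) triples. */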
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
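	/*
	 * Handle 0 is never issued: the scan starts just past the last
	 * used handle and wraps back to 1 until a free slot is found.
	 */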
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
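	/*
	 * If the cached free-entry count looks too small, recompute it
	 * from the hardware out pointer; the extra two entries keep the
	 * ring from ever being driven completely full.
	 */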
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue to ring
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct mrk_entry_fx00 *mrkfx = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
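	/* MK_SYNC_ALL targets the whole ISP, so no port/LUN fields are set. */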
	if (type != MK_SYNC_ALL) {
		if (IS_QLAFX00(ha)) {
			mrkfx = (struct mrk_entry_fx00 *) mrk;
			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
			mrkfx->handle_hi = 0;
			mrkfx->tgt_id = cpu_to_le16(loop_id);
			mrkfx->lun[1] = LSB(lun);
			mrkfx->lun[2] = MSB(lun);
			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
		} else if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

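	/*
	 * Command Type 6 uses indirect DSD lists: the IOCB (and the tail
	 * slot of each list) points at a pre-allocated list of up to
	 * QLA_DSDS_PER_IOCB descriptors taken from the global pool.
	 */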
	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
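	/* Example: with QLA_DSDS_PER_IOCB at its usual value of 37,
	 * 40 DSDs need two lists. */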
	return dsd_lists;
}


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for bookkeeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

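/*
 * Carve the data scatterlist into chunks that never cross a protection
 * interval (blk_sz) boundary. *partial is set while a block is still
 * accumulating across scatterlist elements and cleared once a chunk
 * completes a block, so the caller knows when to emit a DIF tuple.
 */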
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int;
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments carrying protection information
 * @fw_prot_opts: Protection options passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	int			sgc;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = TSK_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
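	/*
	 * FCP_DL sits immediately after the (possibly extended) CDB in
	 * the FCP_CMND payload and is big-endian on the wire.
	 */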
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1503         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1504
1505         /* Zero out remaining portion of packet. */
1506         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1507         clr_ptr = (uint32_t *)cmd_pkt + 2;
1508         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1509         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1510
1511         /* Set NPORT-ID and LUN number*/
1512         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1513         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1514         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1515         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1516         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1517
1518         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1519         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1520
1521         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1522         if (scsi_populate_tag_msg(cmd, tag)) {
1523                 switch (tag[0]) {
1524                 case HEAD_OF_QUEUE_TAG:
1525                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1526                         break;
1527                 case ORDERED_QUEUE_TAG:
1528                         cmd_pkt->task = TSK_ORDERED;
1529                         break;
1530                 default:
1531                         cmd_pkt->task = TSK_SIMPLE;
1532                         break;
1533                 }
1534         } else {
1535                 cmd_pkt->task = TSK_SIMPLE;
1536         }
1537
1538         /* Load SCSI command packet. */
1539         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1540         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1541
1542         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1543
1544         /* Build IOCB segments */
1545         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1546
1547         /* Set total data segment count. */
1548         cmd_pkt->entry_count = (uint8_t)req_cnt;
1549         /* Specify response queue number where completion should happen */
1550         cmd_pkt->entry_status = (uint8_t) rsp->id;
1551         wmb();
1552         /* Adjust ring index. */
1553         req->ring_index++;
1554         if (req->ring_index == req->length) {
1555                 req->ring_index = 0;
1556                 req->ring_ptr = req->ring;
1557         } else
1558                 req->ring_ptr++;
1559
1560         sp->flags |= SRB_DMA_VALID;
1561
1562         /* Set chip new ring index. */
1563         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1564         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1565
1566         /* Manage unprocessed RIO/ZIO commands in response queue. */
1567         if (vha->flags.process_response_queue &&
1568                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1569                 qla24xx_process_response_queue(vha, rsp);
1570
1571         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1572         return QLA_SUCCESS;
1573
1574 queuing_error:
1575         if (tot_dsds)
1576                 scsi_dma_unmap(cmd);
1577
1578         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1579
1580         return QLA_FUNCTION_FAILED;
1581 }
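
/*
 * The free-space check in qla24xx_start_scsi() derives req->cnt from the
 * gap between the software in-pointer (ring_index) and the hardware
 * out-pointer read from req_q_out.  A minimal stand-alone sketch of that
 * ring arithmetic; the function and parameter names are hypothetical and
 * for illustration only.
 */
static inline uint16_t
example_ring_free_entries(uint16_t ring_len, uint16_t in_idx, uint16_t out_idx)
{
        if (in_idx < out_idx)
                /* Producer is behind the consumer; free span is direct. */
                return out_idx - in_idx;
        /* Otherwise the free span wraps around the end of the ring. */
        return ring_len - (in_idx - out_idx);
}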
1582
1583 /**
1584  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1585  * @sp: command to send to the ISP
1586  *
1587  * Returns non-zero if a failure occurred, else zero.
1588  */
1589 int
1590 qla24xx_dif_start_scsi(srb_t *sp)
1591 {
1592         int                     nseg;
1593         unsigned long           flags;
1594         uint32_t                *clr_ptr;
1595         uint32_t                index;
1596         uint32_t                handle;
1597         uint16_t                cnt;
1598         uint16_t                req_cnt = 0;
1599         uint16_t                tot_dsds;
1600         uint16_t                tot_prot_dsds;
1601         uint16_t                fw_prot_opts = 0;
1602         struct req_que          *req = NULL;
1603         struct rsp_que          *rsp = NULL;
1604         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1605         struct scsi_qla_host    *vha = sp->fcport->vha;
1606         struct qla_hw_data      *ha = vha->hw;
1607         struct cmd_type_crc_2   *cmd_pkt;
1608         uint32_t                status = 0;
1609
1610 #define QDSS_GOT_Q_SPACE        BIT_0
1611
1612         /* Only process protection commands or >16 byte CDBs in this routine */
1613         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1614                 if (cmd->cmd_len <= 16)
1615                         return qla24xx_start_scsi(sp);
1616         }
1617
1618         /* Setup device pointers. */
1619
1620         qla25xx_set_que(sp, &rsp);
1621         req = vha->req;
1622
1623         /* So we know we haven't pci_map'ed anything yet */
1624         tot_dsds = 0;
1625
1626         /* Send marker if required */
1627         if (vha->marker_needed != 0) {
1628                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1629                     QLA_SUCCESS)
1630                         return QLA_FUNCTION_FAILED;
1631                 vha->marker_needed = 0;
1632         }
1633
1634         /* Acquire ring specific lock */
1635         spin_lock_irqsave(&ha->hardware_lock, flags);
1636
1637         /* Check for room in outstanding command list. */
1638         handle = req->current_outstanding_cmd;
1639         for (index = 1; index < req->num_outstanding_cmds; index++) {
1640                 handle++;
1641                 if (handle == req->num_outstanding_cmds)
1642                         handle = 1;
1643                 if (!req->outstanding_cmds[handle])
1644                         break;
1645         }
1646
1647         if (index == req->num_outstanding_cmds)
1648                 goto queuing_error;
1649
1650         /* Compute number of required data segments */
1651         /* Map the sg table so we have an accurate count of sg entries needed */
1652         if (scsi_sg_count(cmd)) {
1653                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1654                     scsi_sg_count(cmd), cmd->sc_data_direction);
1655                 if (unlikely(!nseg))
1656                         goto queuing_error;
1657                 else
1658                         sp->flags |= SRB_DMA_VALID;
1659
1660                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1661                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1662                         struct qla2_sgx sgx;
1663                         uint32_t        partial;
1664
1665                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1666                         sgx.tot_bytes = scsi_bufflen(cmd);
1667                         sgx.cur_sg = scsi_sglist(cmd);
1668                         sgx.sp = sp;
1669
1670                         nseg = 0;
1671                         while (qla24xx_get_one_block_sg(
1672                             cmd->device->sector_size, &sgx, &partial))
1673                                 nseg++;
1674                 }
1675         } else
1676                 nseg = 0;
1677
1678         /* number of required data segments */
1679         tot_dsds = nseg;
1680
1681         /* Compute number of required protection segments */
1682         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1683                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1684                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1685                 if (unlikely(!nseg))
1686                         goto queuing_error;
1687                 else
1688                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1689
1690                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1691                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1692                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1693                 }
1694         } else {
1695                 nseg = 0;
1696         }
1697
1698         req_cnt = 1;
1699         /* Total Data and protection sg segment(s) */
1700         tot_prot_dsds = nseg;
1701         tot_dsds += nseg;
1702         if (req->cnt < (req_cnt + 2)) {
1703                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1704
1705                 if (req->ring_index < cnt)
1706                         req->cnt = cnt - req->ring_index;
1707                 else
1708                         req->cnt = req->length -
1709                                 (req->ring_index - cnt);
1710                 if (req->cnt < (req_cnt + 2))
1711                         goto queuing_error;
1712         }
1713
1714         status |= QDSS_GOT_Q_SPACE;
1715
1716         /* Build header part of command packet (excluding the OPCODE). */
1717         req->current_outstanding_cmd = handle;
1718         req->outstanding_cmds[handle] = sp;
1719         sp->handle = handle;
1720         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1721         req->cnt -= req_cnt;
1722
1723         /* Fill-in common area */
1724         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1725         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1726
1727         clr_ptr = (uint32_t *)cmd_pkt + 2;
1728         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1729
1730         /* Set NPORT-ID and LUN number*/
1731         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1732         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1733         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1734         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1735
1736         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1737         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1738
1739         /* Total Data and protection segment(s) */
1740         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1741
1742         /* Build IOCB segments and adjust for data protection segments */
1743         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1744             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1745                 QLA_SUCCESS)
1746                 goto queuing_error;
1747
1748         cmd_pkt->entry_count = (uint8_t)req_cnt;
1749         /* Specify response queue number where completion should happen */
1750         cmd_pkt->entry_status = (uint8_t) rsp->id;
1751         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1752         wmb();
1753
1754         /* Adjust ring index. */
1755         req->ring_index++;
1756         if (req->ring_index == req->length) {
1757                 req->ring_index = 0;
1758                 req->ring_ptr = req->ring;
1759         } else
1760                 req->ring_ptr++;
1761
1762         /* Set chip new ring index. */
1763         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1764         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1765
1766         /* Manage unprocessed RIO/ZIO commands in response queue. */
1767         if (vha->flags.process_response_queue &&
1768             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1769                 qla24xx_process_response_queue(vha, rsp);
1770
1771         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1772
1773         return QLA_SUCCESS;
1774
1775 queuing_error:
1776         if (status & QDSS_GOT_Q_SPACE) {
1777                 req->outstanding_cmds[handle] = NULL;
1778                 req->cnt += req_cnt;
1779         }
1780         /* Cleanup will be performed by the caller (queuecommand) */
1781
1782         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1783         return QLA_FUNCTION_FAILED;
1784 }
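
/*
 * For SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP the HBA inserts or
 * strips the protection data itself, so qla24xx_dif_start_scsi() sizes
 * the protection segment count at one entry per logical block of the
 * transfer.  Illustrative sketch only; the helper name is hypothetical.
 */
static inline uint32_t
example_prot_seg_count(uint32_t transfer_bytes, uint32_t sector_size)
{
        /* One protection segment per logical block, as computed above. */
        return transfer_bytes / sector_size;
}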
1785
1786
1787 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1788 {
1789         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1790         struct qla_hw_data *ha = sp->fcport->vha->hw;
1791         int affinity = cmd->request->cpu;
1792
1793         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1794                 affinity < ha->max_rsp_queues - 1)
1795                 *rsp = ha->rsp_q_map[affinity + 1];
1796         else
1797                 *rsp = ha->rsp_q_map[0];
1798 }
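
/*
 * Response-queue selection above: queue 0 is the default queue and
 * queues 1..max_rsp_queues-1 are the CPU-affine queues, so a request
 * issued on CPU N completes on queue N + 1 when affinity is enabled.
 * Stand-alone sketch with hypothetical names, for illustration only.
 */
static inline int
example_pick_rsp_queue(int affinity, int max_rsp_queues, int affinity_enabled)
{
        if (affinity_enabled && affinity >= 0 && affinity < max_rsp_queues - 1)
                return affinity + 1;    /* per-CPU response queue */
        return 0;                       /* default response queue */
}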
1799
1800 /* Generic Control-SRB manipulation functions. */
1801 void *
1802 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1803 {
1804         struct qla_hw_data *ha = vha->hw;
1805         struct req_que *req = ha->req_q_map[0];
1806         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1807         uint32_t index, handle;
1808         request_t *pkt;
1809         uint16_t cnt, req_cnt;
1810
1811         pkt = NULL;
1812         req_cnt = 1;
1813         handle = 0;
1814
1815         if (!sp)
1816                 goto skip_cmd_array;
1817
1818         /* Check for room in outstanding command list. */
1819         handle = req->current_outstanding_cmd;
1820         for (index = 1; index < req->num_outstanding_cmds; index++) {
1821                 handle++;
1822                 if (handle == req->num_outstanding_cmds)
1823                         handle = 1;
1824                 if (!req->outstanding_cmds[handle])
1825                         break;
1826         }
1827         if (index == req->num_outstanding_cmds) {
1828                 ql_log(ql_log_warn, vha, 0x700b,
1829                     "No room on outstanding cmd array.\n");
1830                 goto queuing_error;
1831         }
1832
1833         /* Prep command array. */
1834         req->current_outstanding_cmd = handle;
1835         req->outstanding_cmds[handle] = sp;
1836         sp->handle = handle;
1837
1838         /* Adjust entry-counts as needed. */
1839         if (sp->type != SRB_SCSI_CMD)
1840                 req_cnt = sp->iocbs;
1841
1842 skip_cmd_array:
1843         /* Check for room on request queue. */
1844         if (req->cnt < req_cnt) {
1845                 if (ha->mqenable || IS_QLA83XX(ha))
1846                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1847                 else if (IS_QLA82XX(ha))
1848                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1849                 else if (IS_FWI2_CAPABLE(ha))
1850                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1851                 else if (IS_QLAFX00(ha))
1852                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1853                 else
1854                         cnt = qla2x00_debounce_register(
1855                             ISP_REQ_Q_OUT(ha, &reg->isp));
1856
1857                 if  (req->ring_index < cnt)
1858                         req->cnt = cnt - req->ring_index;
1859                 else
1860                         req->cnt = req->length -
1861                             (req->ring_index - cnt);
1862         }
1863         if (req->cnt < req_cnt)
1864                 goto queuing_error;
1865
1866         /* Prep packet */
1867         req->cnt -= req_cnt;
1868         pkt = req->ring_ptr;
1869         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1870         if (IS_QLAFX00(ha)) {
1871                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1872                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1873         } else {
1874                 pkt->entry_count = req_cnt;
1875                 pkt->handle = handle;
1876         }
1877
1878 queuing_error:
1879         return pkt;
1880 }
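
/*
 * The scan above searches the outstanding-command array circularly,
 * starting one past the most recently issued handle; handle 0 is never
 * used, so a full wrap lands back on handle 1.  Minimal sketch of the
 * search (hypothetical names; returns 0 when every handle is busy):
 */
static inline uint32_t
example_find_free_handle(void **cmds, uint32_t num_cmds, uint32_t last_handle)
{
        uint32_t index, handle = last_handle;

        for (index = 1; index < num_cmds; index++) {
                handle++;
                if (handle == num_cmds)
                        handle = 1;     /* wrap past the end, skip handle 0 */
                if (!cmds[handle])
                        return handle;  /* free slot found */
        }
        return 0;       /* array full */
}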
1881
1882 static void
1883 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1884 {
1885         struct srb_iocb *lio = &sp->u.iocb_cmd;
1886
1887         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1888         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1889         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1890                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1891         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1892                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1893         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1894         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1895         logio->port_id[1] = sp->fcport->d_id.b.area;
1896         logio->port_id[2] = sp->fcport->d_id.b.domain;
1897         logio->vp_index = sp->fcport->vha->vp_idx;
1898 }
1899
1900 static void
1901 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1902 {
1903         struct qla_hw_data *ha = sp->fcport->vha->hw;
1904         struct srb_iocb *lio = &sp->u.iocb_cmd;
1905         uint16_t opts;
1906
1907         mbx->entry_type = MBX_IOCB_TYPE;
1908         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1909         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1910         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1911         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1912         if (HAS_EXTENDED_IDS(ha)) {
1913                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1914                 mbx->mb10 = cpu_to_le16(opts);
1915         } else {
1916                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1917         }
1918         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1919         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1920             sp->fcport->d_id.b.al_pa);
1921         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1922 }
1923
1924 static void
1925 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1926 {
1927         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1928         logio->control_flags =
1929             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1930         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1931         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1932         logio->port_id[1] = sp->fcport->d_id.b.area;
1933         logio->port_id[2] = sp->fcport->d_id.b.domain;
1934         logio->vp_index = sp->fcport->vha->vp_idx;
1935 }
1936
1937 static void
1938 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1939 {
1940         struct qla_hw_data *ha = sp->fcport->vha->hw;
1941
1942         mbx->entry_type = MBX_IOCB_TYPE;
1943         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1944         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1945         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1946             cpu_to_le16(sp->fcport->loop_id):
1947             cpu_to_le16(sp->fcport->loop_id << 8);
1948         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1949         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1950             sp->fcport->d_id.b.al_pa);
1951         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1952         /* Implicit: mbx->mbx10 = 0. */
1953 }
1954
1955 static void
1956 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1957 {
1958         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1959         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1960         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1961         logio->vp_index = sp->fcport->vha->vp_idx;
1962 }
1963
1964 static void
1965 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1966 {
1967         struct qla_hw_data *ha = sp->fcport->vha->hw;
1968
1969         mbx->entry_type = MBX_IOCB_TYPE;
1970         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1971         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1972         if (HAS_EXTENDED_IDS(ha)) {
1973                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1974                 mbx->mb10 = cpu_to_le16(BIT_0);
1975         } else {
1976                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1977         }
1978         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1979         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1980         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1981         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1982         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1983 }
1984
1985 static void
1986 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1987 {
1988         uint32_t flags;
1989         unsigned int lun;
1990         struct fc_port *fcport = sp->fcport;
1991         scsi_qla_host_t *vha = fcport->vha;
1992         struct qla_hw_data *ha = vha->hw;
1993         struct srb_iocb *iocb = &sp->u.iocb_cmd;
1994         struct req_que *req = vha->req;
1995
1996         flags = iocb->u.tmf.flags;
1997         lun = iocb->u.tmf.lun;
1998
1999         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2000         tsk->entry_count = 1;
2001         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2002         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2003         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2004         tsk->control_flags = cpu_to_le32(flags);
2005         tsk->port_id[0] = fcport->d_id.b.al_pa;
2006         tsk->port_id[1] = fcport->d_id.b.area;
2007         tsk->port_id[2] = fcport->d_id.b.domain;
2008         tsk->vp_index = fcport->vha->vp_idx;
2009
2010         if (flags == TCF_LUN_RESET) {
2011                 int_to_scsilun(lun, &tsk->lun);
2012                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2013                         sizeof(tsk->lun));
2014         }
2015 }
2016
2017 static void
2018 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2019 {
2020         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2021
2022         els_iocb->entry_type = ELS_IOCB_TYPE;
2023         els_iocb->entry_count = 1;
2024         els_iocb->sys_define = 0;
2025         els_iocb->entry_status = 0;
2026         els_iocb->handle = sp->handle;
2027         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2028         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2029         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2030         els_iocb->sof_type = EST_SOFI3;
2031         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2032
2033         els_iocb->opcode =
2034             sp->type == SRB_ELS_CMD_RPT ?
2035             bsg_job->request->rqst_data.r_els.els_code :
2036             bsg_job->request->rqst_data.h_els.command_code;
2037         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2038         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2039         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2040         els_iocb->control_flags = 0;
2041         els_iocb->rx_byte_count =
2042             cpu_to_le32(bsg_job->reply_payload.payload_len);
2043         els_iocb->tx_byte_count =
2044             cpu_to_le32(bsg_job->request_payload.payload_len);
2045
2046         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2047             (bsg_job->request_payload.sg_list)));
2048         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2049             (bsg_job->request_payload.sg_list)));
2050         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2051             (bsg_job->request_payload.sg_list));
2052
2053         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2054             (bsg_job->reply_payload.sg_list)));
2055         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2056             (bsg_job->reply_payload.sg_list)));
2057         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2058             (bsg_job->reply_payload.sg_list));
2059 }
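
/*
 * The LSD()/MSD() pairs above store a 64-bit DMA address into an IOCB as
 * two little-endian 32-bit words, low dword first.  Stand-alone sketch
 * of that split, assuming a 64-bit address; the helper name is
 * hypothetical.
 */
static inline void
example_split_dma_addr(uint64_t dma, uint32_t *lsd, uint32_t *msd)
{
        *lsd = (uint32_t)(dma & 0xffffffffU);   /* low 32 bits (LSD) */
        *msd = (uint32_t)(dma >> 32);           /* high 32 bits (MSD) */
}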
2060
2061 static void
2062 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2063 {
2064         uint16_t        avail_dsds;
2065         uint32_t        *cur_dsd;
2066         struct scatterlist *sg;
2067         int index;
2068         uint16_t tot_dsds;
2069         scsi_qla_host_t *vha = sp->fcport->vha;
2070         struct qla_hw_data *ha = vha->hw;
2071         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2072         int loop_iteration = 0;
2073         int cont_iocb_prsnt = 0;
2074         int entry_count = 1;
2075
2076         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2077         ct_iocb->entry_type = CT_IOCB_TYPE;
2078         ct_iocb->entry_status = 0;
2079         ct_iocb->handle1 = sp->handle;
2080         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2081         ct_iocb->status = __constant_cpu_to_le16(0);
2082         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2083         ct_iocb->timeout = 0;
2084         ct_iocb->cmd_dsd_count =
2085             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2086         ct_iocb->total_dsd_count =
2087             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2088         ct_iocb->req_bytecount =
2089             cpu_to_le32(bsg_job->request_payload.payload_len);
2090         ct_iocb->rsp_bytecount =
2091             cpu_to_le32(bsg_job->reply_payload.payload_len);
2092
2093         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2094             (bsg_job->request_payload.sg_list)));
2095         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2096             (bsg_job->request_payload.sg_list)));
2097         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2098
2099         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2100             (bsg_job->reply_payload.sg_list)));
2101         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2102             (bsg_job->reply_payload.sg_list)));
2103         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2104
2105         avail_dsds = 1;
2106         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2107         index = 0;
2108         tot_dsds = bsg_job->reply_payload.sg_cnt;
2109
2110         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2111                 dma_addr_t       sle_dma;
2112                 cont_a64_entry_t *cont_pkt;
2113
2114                 /* Allocate additional continuation packets? */
2115                 if (avail_dsds == 0) {
2116                         /*
2117                          * Five DSDs are available in the Cont.
2118                          * Type 1 IOCB.
2119                          */
2120                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2121                             vha->hw->req_q_map[0]);
2122                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2123                         avail_dsds = 5;
2124                         cont_iocb_prsnt = 1;
2125                         entry_count++;
2126                 }
2127
2128                 sle_dma = sg_dma_address(sg);
2129                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2130                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2131                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2132                 loop_iteration++;
2133                 avail_dsds--;
2134         }
2135         ct_iocb->entry_count = entry_count;
2136 }
2137
2138 static void
2139 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2140 {
2141         uint16_t        avail_dsds;
2142         uint32_t        *cur_dsd;
2143         struct scatterlist *sg;
2144         int index;
2145         uint16_t tot_dsds;
2146         scsi_qla_host_t *vha = sp->fcport->vha;
2147         struct qla_hw_data *ha = vha->hw;
2148         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2149         int loop_iteration = 0;
2150         int cont_iocb_prsnt = 0;
2151         int entry_count = 1;
2152
2153         ct_iocb->entry_type = CT_IOCB_TYPE;
2154         ct_iocb->entry_status = 0;
2155         ct_iocb->sys_define = 0;
2156         ct_iocb->handle = sp->handle;
2157
2158         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2159         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2160         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2161
2162         ct_iocb->cmd_dsd_count =
2163             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2164         ct_iocb->timeout = 0;
2165         ct_iocb->rsp_dsd_count =
2166             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2167         ct_iocb->rsp_byte_count =
2168             cpu_to_le32(bsg_job->reply_payload.payload_len);
2169         ct_iocb->cmd_byte_count =
2170             cpu_to_le32(bsg_job->request_payload.payload_len);
2171         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2172             (bsg_job->request_payload.sg_list)));
2173         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2174            (bsg_job->request_payload.sg_list)));
2175         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2176             (bsg_job->request_payload.sg_list));
2177
2178         avail_dsds = 1;
2179         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2180         index = 0;
2181         tot_dsds = bsg_job->reply_payload.sg_cnt;
2182
2183         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2184                 dma_addr_t       sle_dma;
2185                 cont_a64_entry_t *cont_pkt;
2186
2187                 /* Allocate additional continuation packets? */
2188                 if (avail_dsds == 0) {
2189                         /*
2190                          * Five DSDs are available in the Cont.
2191                          * Type 1 IOCB.
2192                          */
2193                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2194                             ha->req_q_map[0]);
2195                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2196                         avail_dsds = 5;
2197                         cont_iocb_prsnt = 1;
2198                         entry_count++;
2199                 }
2200
2201                 sle_dma = sg_dma_address(sg);
2202                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2203                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2204                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2205                 loop_iteration++;
2206                 avail_dsds--;
2207         }
2208         ct_iocb->entry_count = entry_count;
2209 }
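
/*
 * Both CT IOCB builders chain reply descriptors the same way: the
 * command entry itself holds one DSD and every Continuation Type 1 entry
 * appended after it holds five, analogous to qla2x00_calc_iocbs_64()
 * earlier in this file (where the command entry holds two).  Sketch of
 * the resulting entry count (hypothetical helper, illustration only):
 */
static inline int
example_ct_entry_count(uint16_t rsp_dsds)
{
        int entries = 1;        /* the CT command entry itself */

        if (rsp_dsds > 1)
                entries += (rsp_dsds - 1 + 4) / 5;      /* ceil((n - 1) / 5) */
        return entries;
}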
2210
2211 /**
2212  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2213  * @sp: command to send to the ISP
2214  *
2215  * Returns non-zero if a failure occurred, else zero.
2216  */
2217 int
2218 qla82xx_start_scsi(srb_t *sp)
2219 {
2220         int             ret, nseg;
2221         unsigned long   flags;
2222         struct scsi_cmnd *cmd;
2223         uint32_t        *clr_ptr;
2224         uint32_t        index;
2225         uint32_t        handle;
2226         uint16_t        cnt;
2227         uint16_t        req_cnt;
2228         uint16_t        tot_dsds;
2229         struct device_reg_82xx __iomem *reg;
2230         uint32_t dbval;
2231         uint32_t *fcp_dl;
2232         uint8_t additional_cdb_len;
2233         struct ct6_dsd *ctx;
2234         struct scsi_qla_host *vha = sp->fcport->vha;
2235         struct qla_hw_data *ha = vha->hw;
2236         struct req_que *req = NULL;
2237         struct rsp_que *rsp = NULL;
2238         char tag[2];
2239
2240         /* Setup device pointers. */
2241         ret = 0;
2242         reg = &ha->iobase->isp82;
2243         cmd = GET_CMD_SP(sp);
2244         req = vha->req;
2245         rsp = ha->rsp_q_map[0];
2246
2247         /* So we know we haven't pci_map'ed anything yet */
2248         tot_dsds = 0;
2249
2250         dbval = 0x04 | (ha->portnum << 5);
2251
2252         /* Send marker if required */
2253         if (vha->marker_needed != 0) {
2254                 if (qla2x00_marker(vha, req,
2255                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2256                         ql_log(ql_log_warn, vha, 0x300c,
2257                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2258                         return QLA_FUNCTION_FAILED;
2259                 }
2260                 vha->marker_needed = 0;
2261         }
2262
2263         /* Acquire ring specific lock */
2264         spin_lock_irqsave(&ha->hardware_lock, flags);
2265
2266         /* Check for room in outstanding command list. */
2267         handle = req->current_outstanding_cmd;
2268         for (index = 1; index < req->num_outstanding_cmds; index++) {
2269                 handle++;
2270                 if (handle == req->num_outstanding_cmds)
2271                         handle = 1;
2272                 if (!req->outstanding_cmds[handle])
2273                         break;
2274         }
2275         if (index == req->num_outstanding_cmds)
2276                 goto queuing_error;
2277
2278         /* Map the sg table so we have an accurate count of sg entries needed */
2279         if (scsi_sg_count(cmd)) {
2280                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2281                     scsi_sg_count(cmd), cmd->sc_data_direction);
2282                 if (unlikely(!nseg))
2283                         goto queuing_error;
2284         } else
2285                 nseg = 0;
2286
2287         tot_dsds = nseg;
2288
2289         if (tot_dsds > ql2xshiftctondsd) {
2290                 struct cmd_type_6 *cmd_pkt;
2291                 uint16_t more_dsd_lists = 0;
2292                 struct dsd_dma *dsd_ptr;
2293                 uint16_t i;
2294
2295                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2296                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2297                         ql_dbg(ql_dbg_io, vha, 0x300d,
2298                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2299                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2300                             cmd);
2301                         goto queuing_error;
2302                 }
2303
2304                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2305                         goto sufficient_dsds;
2306                 else
2307                         more_dsd_lists -= ha->gbl_dsd_avail;
2308
2309                 for (i = 0; i < more_dsd_lists; i++) {
2310                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2311                         if (!dsd_ptr) {
2312                                 ql_log(ql_log_fatal, vha, 0x300e,
2313                                     "Failed to allocate memory for dsd_dma "
2314                                     "for cmd=%p.\n", cmd);
2315                                 goto queuing_error;
2316                         }
2317
2318                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2319                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2320                         if (!dsd_ptr->dsd_addr) {
2321                                 kfree(dsd_ptr);
2322                                 ql_log(ql_log_fatal, vha, 0x300f,
2323                                     "Failed to allocate memory for dsd_addr "
2324                                     "for cmd=%p.\n", cmd);
2325                                 goto queuing_error;
2326                         }
2327                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2328                         ha->gbl_dsd_avail++;
2329                 }
2330
2331 sufficient_dsds:
2332                 req_cnt = 1;
2333
2334                 if (req->cnt < (req_cnt + 2)) {
2335                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2336                                 &reg->req_q_out[0]);
2337                         if (req->ring_index < cnt)
2338                                 req->cnt = cnt - req->ring_index;
2339                         else
2340                                 req->cnt = req->length -
2341                                         (req->ring_index - cnt);
2342                         if (req->cnt < (req_cnt + 2))
2343                                 goto queuing_error;
2344                 }
2345
2346                 ctx = sp->u.scmd.ctx =
2347                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2348                 if (!ctx) {
2349                         ql_log(ql_log_fatal, vha, 0x3010,
2350                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2351                         goto queuing_error;
2352                 }
2353
2354                 memset(ctx, 0, sizeof(struct ct6_dsd));
2355                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2356                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2357                 if (!ctx->fcp_cmnd) {
2358                         ql_log(ql_log_fatal, vha, 0x3011,
2359                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2360                         goto queuing_error;
2361                 }
2362
2363                 /* Initialize the DSD list and dma handle */
2364                 INIT_LIST_HEAD(&ctx->dsd_list);
2365                 ctx->dsd_use_cnt = 0;
2366
2367                 if (cmd->cmd_len > 16) {
2368                         additional_cdb_len = cmd->cmd_len - 16;
2369                         if ((cmd->cmd_len % 4) != 0) {
2370                                 /* SCSI command bigger than 16 bytes must be
2371                                  * multiple of 4
2372                                  */
2373                                 ql_log(ql_log_warn, vha, 0x3012,
2374                                     "scsi cmd len %d not multiple of 4 "
2375                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2376                                 goto queuing_error_fcp_cmnd;
2377                         }
2378                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2379                 } else {
2380                         additional_cdb_len = 0;
2381                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2382                 }
2383
2384                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2385                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2386
2387                 /* Zero out remaining portion of packet. */
2388                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2389                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2390                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2391                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2392
2393                 /* Set NPORT-ID and LUN number*/
2394                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2395                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2396                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2397                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2398                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2399
2400                 /* Build IOCB segments */
2401                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2402                         goto queuing_error_fcp_cmnd;
2403
2404                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2405                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2406
2407                 /* build FCP_CMND IU */
2408                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2409                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2410                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2411
2412                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2413                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2414                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2415                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2416
2417                 /*
2418                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2419                  */
2420                 if (scsi_populate_tag_msg(cmd, tag)) {
2421                         switch (tag[0]) {
2422                         case HEAD_OF_QUEUE_TAG:
2423                                 ctx->fcp_cmnd->task_attribute =
2424                                     TSK_HEAD_OF_QUEUE;
2425                                 break;
2426                         case ORDERED_QUEUE_TAG:
2427                                 ctx->fcp_cmnd->task_attribute =
2428                                     TSK_ORDERED;
2429                                 break;
2430                         }
2431                 }
2432
2433                 /* Populate the FCP_PRIO. */
2434                 if (ha->flags.fcp_prio_enabled)
2435                         ctx->fcp_cmnd->task_attribute |=
2436                             sp->fcport->fcp_prio << 3;
2437
2438                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2439
2440                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2441                     additional_cdb_len);
2442                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2443
2444                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2445                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2446                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2447                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2448                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2449
2450                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2451                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2452                 /* Set total data segment count. */
2453                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2454                 /* Specify response queue number where
2455                  * completion should happen
2456                  */
2457                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2458         } else {
2459                 struct cmd_type_7 *cmd_pkt;
2460                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2461                 if (req->cnt < (req_cnt + 2)) {
2462                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2463                             &reg->req_q_out[0]);
2464                         if (req->ring_index < cnt)
2465                                 req->cnt = cnt - req->ring_index;
2466                         else
2467                                 req->cnt = req->length -
2468                                         (req->ring_index - cnt);
2469                 }
2470                 if (req->cnt < (req_cnt + 2))
2471                         goto queuing_error;
2472
2473                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2474                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2475
2476                 /* Zero out remaining portion of packet. */
2477                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2478                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2479                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2480                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2481
2482                 /* Set NPORT-ID and LUN number*/
2483                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2484                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2485                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2486                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2487                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2488
2489                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2490                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2491                     sizeof(cmd_pkt->lun));
2492
2493                 /*
2494                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2495                  */
2496                 if (scsi_populate_tag_msg(cmd, tag)) {
2497                         switch (tag[0]) {
2498                         case HEAD_OF_QUEUE_TAG:
2499                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2500                                 break;
2501                         case ORDERED_QUEUE_TAG:
2502                                 cmd_pkt->task = TSK_ORDERED;
2503                                 break;
2504                         }
2505                 }
2506
2507                 /* Populate the FCP_PRIO. */
2508                 if (ha->flags.fcp_prio_enabled)
2509                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2510
2511                 /* Load SCSI command packet. */
2512                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2513                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2514
2515                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2516
2517                 /* Build IOCB segments */
2518                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2519
2520                 /* Set total data segment count. */
2521                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2522                 /* Specify response queue number where
2523                  * completion should happen.
2524                  */
2525                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2526
2527         }
2528         /* Build command packet. */
2529         req->current_outstanding_cmd = handle;
2530         req->outstanding_cmds[handle] = sp;
2531         sp->handle = handle;
2532         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2533         req->cnt -= req_cnt;
2534         wmb();
2535
2536         /* Adjust ring index. */
2537         req->ring_index++;
2538         if (req->ring_index == req->length) {
2539                 req->ring_index = 0;
2540                 req->ring_ptr = req->ring;
2541         } else
2542                 req->ring_ptr++;
2543
2544         sp->flags |= SRB_DMA_VALID;
2545
2546         /* Set chip new ring index. */
2547         /* write, read and verify logic */
2548         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2549         if (ql2xdbwr)
2550                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2551         else {
2552                 WRT_REG_DWORD(
2553                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2554                         dbval);
2555                 wmb();
2556                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2557                         WRT_REG_DWORD(
2558                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2559                                 dbval);
2560                         wmb();
2561                 }
2562         }
2563
2564         /* Manage unprocessed RIO/ZIO commands in response queue. */
2565         if (vha->flags.process_response_queue &&
2566             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2567                 qla24xx_process_response_queue(vha, rsp);
2568
2569         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2570         return QLA_SUCCESS;
2571
2572 queuing_error_fcp_cmnd:
2573         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2574 queuing_error:
2575         if (tot_dsds)
2576                 scsi_dma_unmap(cmd);
2577
2578         if (sp->u.scmd.ctx) {
2579                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2580                 sp->u.scmd.ctx = NULL;
2581         }
2582         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2583
2584         return QLA_FUNCTION_FAILED;
2585 }
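
/*
 * The ISP82xx doorbell written above is a single 32-bit value: command
 * 0x04 in the low bits, the port number at bit 5 (set when dbval is
 * initialized), the request-queue id at bit 8 and the new ring index at
 * bit 16; the !ql2xdbwr path then re-writes until a read-back matches.
 * Sketch of the packing only, with a hypothetical helper name:
 */
static inline uint32_t
example_pack_82xx_doorbell(uint8_t portnum, uint8_t req_id, uint16_t ring_index)
{
        return 0x04 | ((uint32_t)portnum << 5) |
            ((uint32_t)req_id << 8) | ((uint32_t)ring_index << 16);
}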
2586
2587 int
2588 qla2x00_start_sp(srb_t *sp)
2589 {
2590         int rval;
2591         struct qla_hw_data *ha = sp->fcport->vha->hw;
2592         void *pkt;
2593         unsigned long flags;
2594
2595         rval = QLA_FUNCTION_FAILED;
2596         spin_lock_irqsave(&ha->hardware_lock, flags);
2597         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2598         if (!pkt) {
2599                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2600                     "qla2x00_alloc_iocbs failed.\n");
2601                 goto done;
2602         }
2603
2604         rval = QLA_SUCCESS;
2605         switch (sp->type) {
2606         case SRB_LOGIN_CMD:
2607                 IS_FWI2_CAPABLE(ha) ?
2608                     qla24xx_login_iocb(sp, pkt) :
2609                     qla2x00_login_iocb(sp, pkt);
2610                 break;
2611         case SRB_LOGOUT_CMD:
2612                 IS_FWI2_CAPABLE(ha) ?
2613                     qla24xx_logout_iocb(sp, pkt) :
2614                     qla2x00_logout_iocb(sp, pkt);
2615                 break;
2616         case SRB_ELS_CMD_RPT:
2617         case SRB_ELS_CMD_HST:
2618                 qla24xx_els_iocb(sp, pkt);
2619                 break;
2620         case SRB_CT_CMD:
2621                 IS_FWI2_CAPABLE(ha) ?
2622                     qla24xx_ct_iocb(sp, pkt) :
2623                     qla2x00_ct_iocb(sp, pkt);
2624                 break;
2625         case SRB_ADISC_CMD:
2626                 IS_FWI2_CAPABLE(ha) ?
2627                     qla24xx_adisc_iocb(sp, pkt) :
2628                     qla2x00_adisc_iocb(sp, pkt);
2629                 break;
2630         case SRB_TM_CMD:
2631                 IS_QLAFX00(ha) ?
2632                     qlafx00_tm_iocb(sp, pkt) :
2633                     qla24xx_tm_iocb(sp, pkt);
2634                 break;
2635         case SRB_FXIOCB_DCMD:
2636         case SRB_FXIOCB_BCMD:
2637                 qlafx00_fxdisc_iocb(sp, pkt);
2638                 break;
2639         case SRB_ABT_CMD:
2640                 qlafx00_abort_iocb(sp, pkt);
2641                 break;
2642         default:
2643                 break;
2644         }
2645
2646         wmb();
2647         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2648 done:
2649         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2650         return rval;
2651 }
2652
2653 static void
2654 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2655                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2656 {
2657         uint16_t avail_dsds;
2658         uint32_t *cur_dsd;
2659         uint32_t req_data_len = 0;
2660         uint32_t rsp_data_len = 0;
2661         struct scatterlist *sg;
2662         int index;
2663         int entry_count = 1;
2664         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2665
2666         /* Update entry type to indicate bidir command */
2667         *((uint32_t *)(&cmd_pkt->entry_type)) =
2668                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2669
2670         /* Set the transfer direction; in this case set both flags.
2671          * Also set the BD_WRAP_BACK flag; the firmware will take care of
2672          * assigning DID=SID for outgoing pkts.
2673          */
2674         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2675         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2676         cmd_pkt->control_flags =
2677                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2678                                                         BD_WRAP_BACK);
2679
2680         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2681         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2682         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2683         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2684
2685         vha->bidi_stats.transfer_bytes += req_data_len;
2686         vha->bidi_stats.io_count++;
2687
2688         /* Only one dsd is available for bidirectional IOCB, remaining dsds
2689          * are bundled in continuation iocb
2690          */
2691         avail_dsds = 1;
2692         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2693
2694         index = 0;
2695
2696         for_each_sg(bsg_job->request_payload.sg_list, sg,
2697                                 bsg_job->request_payload.sg_cnt, index) {
2698                 dma_addr_t sle_dma;
2699                 cont_a64_entry_t *cont_pkt;
2700
2701                 /* Allocate additional continuation packets */
2702                 if (avail_dsds == 0) {
2703                         /* Continuation Type 1 IOCB can accommodate
2704                          * 5 DSDs
2705                          */
2706                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2707                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2708                         avail_dsds = 5;
2709                         entry_count++;
2710                 }
2711                 sle_dma = sg_dma_address(sg);
2712                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2713                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2714                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2715                 avail_dsds--;
2716         }
2717         /* For a read request the DSDs always go to the continuation IOCB
2718          * and follow the write DSDs. If there is room on the current IOCB
2719          * then they are added to that IOCB, else a new continuation IOCB is
2720          * allocated.
2721          */
2722         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2723                                 bsg_job->reply_payload.sg_cnt, index) {
2724                 dma_addr_t sle_dma;
2725                 cont_a64_entry_t *cont_pkt;
2726
2727                 /* Allocate additional continuation packets */
2728                 if (avail_dsds == 0) {
2729                         /* Continuation Type 1 IOCB can accommodate
2730                          * 5 DSDs
2731                          */
2732                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2733                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2734                         avail_dsds = 5;
2735                         entry_count++;
2736                 }
2737                 sle_dma = sg_dma_address(sg);
2738                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2739                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2740                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2741                 avail_dsds--;
2742         }
2743         /* This value should be the same as the number of IOCBs required for this cmd */
2744         cmd_pkt->entry_count = entry_count;
2745 }
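
/*
 * The bidirectional IOCB lays its descriptors out the same way: one DSD
 * slot in the command entry, then five per Continuation Type 1 entry,
 * with the read DSDs simply following the write DSDs in the chain.
 * Sketch of the expected entry count for wr + rd descriptors
 * (hypothetical helper, illustration only):
 */
static inline int
example_bidir_entry_count(uint16_t wr_dsds, uint16_t rd_dsds)
{
        int total = wr_dsds + rd_dsds;
        int entries = 1;        /* the bidir command entry itself */

        if (total > 1)
                entries += (total - 1 + 4) / 5; /* ceil((total - 1) / 5) */
        return entries;
}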
2746
2747 int
2748 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2749 {
2750
2751         struct qla_hw_data *ha = vha->hw;
2752         unsigned long flags;
2753         uint32_t handle;
2754         uint32_t index;
2755         uint16_t req_cnt;
2756         uint16_t cnt;
2757         uint32_t *clr_ptr;
2758         struct cmd_bidir *cmd_pkt = NULL;
2759         struct rsp_que *rsp;
2760         struct req_que *req;
2761         int rval = EXT_STATUS_OK;
2762
2763         rval = QLA_SUCCESS;
2764
2765         rsp = ha->rsp_q_map[0];
2766         req = vha->req;
2767
2768         /* Send marker if required */
2769         if (vha->marker_needed != 0) {
2770                 if (qla2x00_marker(vha, req,
2771                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2772                         return EXT_STATUS_MAILBOX;
2773                 vha->marker_needed = 0;
2774         }
2775
2776         /* Acquire ring specific lock */
2777         spin_lock_irqsave(&ha->hardware_lock, flags);
2778
2779         /* Check for room in outstanding command list. */
2780         handle = req->current_outstanding_cmd;
2781         for (index = 1; index < req->num_outstanding_cmds; index++) {
2782                 handle++;
2783                 if (handle == req->num_outstanding_cmds)
2784                         handle = 1;
2785                 if (!req->outstanding_cmds[handle])
2786                         break;
2787         }
2788
2789         if (index == req->num_outstanding_cmds) {
2790                 rval = EXT_STATUS_BUSY;
2791                 goto queuing_error;
2792         }
2793
2794         /* Calculate number of IOCB required */
2795         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2796
2797         /* Check for room on request queue. */
2798         if (req->cnt < req_cnt + 2) {
2799                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2800
2801                 if  (req->ring_index < cnt)
2802                         req->cnt = cnt - req->ring_index;
2803                 else
2804                         req->cnt = req->length -
2805                                 (req->ring_index - cnt);
2806         }
2807         if (req->cnt < req_cnt + 2) {
2808                 rval = EXT_STATUS_BUSY;
2809                 goto queuing_error;
2810         }
2811
2812         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2813         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2814
2815         /* Zero out remaining portion of packet. */
2816         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2817         clr_ptr = (uint32_t *)cmd_pkt + 2;
2818         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2819
2820         /* Set NPORT-ID  (of vha)*/
2821         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2822         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2823         cmd_pkt->port_id[1] = vha->d_id.b.area;
2824         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2825
2826         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2827         cmd_pkt->entry_status = (uint8_t) rsp->id;
2828         /* Build command packet. */
2829         req->current_outstanding_cmd = handle;
2830         req->outstanding_cmds[handle] = sp;
2831         sp->handle = handle;
2832         req->cnt -= req_cnt;
2833
2834         /* Send the command to the firmware */
2835         wmb();
2836         qla2x00_start_iocbs(vha, req);
2837 queuing_error:
2838         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2839         return rval;
2840 }