/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
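
/*
 * Worked example (illustrative, not from the original source): a 64-bit
 * command with dsds = 12 uses the 2 DSDs in the Command Type 3 IOCB plus
 * ceil((12 - 2) / 5) = 2 Continuation Type 1 IOCBs, so
 * qla2x00_calc_iocbs_64(12) returns 3.  The 32-bit variant is the same
 * arithmetic with 3 DSDs in the command IOCB and 7 per continuation.
 */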

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
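
/*
 * Both qla2x00_prep_cont_type{0,1}_iocb() advance the request queue the
 * same way: bump ring_index, wrap to the start of the ring once it
 * reaches req->length, and return the IOCB slot at ring_ptr.  For
 * example (illustrative), with req->length == 128, ring_index 127 wraps
 * back to 0 and ring_ptr is reset to req->ring.
 */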

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
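
/*
 * Summary of the translation above: the strip operations
 * (SCSI_PROT_READ_STRIP, SCSI_PROT_WRITE_STRIP) map to PO_MODE_DIF_REMOVE,
 * the insert operations map to PO_MODE_DIF_INSERT, and the pass-through
 * operations map to PO_MODE_DIF_PASS -- or to PO_MODE_DIF_TCP_CKSUM when
 * the host uses an IP-checksum guard (SHOST_DIX_GUARD_IP), in which case
 * the HBA converts between the IP checksum and the T10 CRC on the wire.
 */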

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
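
/*
 * Each 64-bit data segment descriptor written above is a three-dword
 * tuple: address low 32 bits, address high 32 bits, then the segment
 * length.  With illustrative values, a single 4 KB segment at DMA
 * address 0x1_2345_6000 is emitted as 0x23456000, 0x00000001,
 * 0x00001000, each converted to little-endian via cpu_to_le32().
 */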

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
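
/*
 * The submission sequence implemented above, in outline: send a marker
 * if one is pending, pick a free handle from req->outstanding_cmds[],
 * DMA-map the scatter/gather list, verify the ring has req_cnt + 2 free
 * entries, build the command IOCB (plus any continuation IOCBs), and
 * finally publish the new ring_index to the ISP request-queue in-pointer
 * register.  On any failure the SG list is unmapped and
 * QLA_FUNCTION_FAILED is returned.
 */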

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue whose ring index is advanced and published
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
552
Nicholas Bellinger2d70c102012-05-15 14:34:28 -0400553/*
554 * qla2x00_issue_marker
555 *
556 * Issue marker
557 * Caller CAN have hardware lock held as specified by ha_locked parameter.
558 * Might release it, then reaquire.
559 */
560int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
561{
562 if (ha_locked) {
563 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
564 MK_SYNC_ALL) != QLA_SUCCESS)
565 return QLA_FUNCTION_FAILED;
566 } else {
567 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
568 MK_SYNC_ALL) != QLA_SUCCESS)
569 return QLA_FUNCTION_FAILED;
570 }
571 vha->marker_needed = 0;
572
573 return QLA_SUCCESS;
574}
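
/*
 * A marker IOCB resynchronizes the firmware with the driver after events
 * such as loop or target resets: while vha->marker_needed is set, a
 * MK_SYNC_ALL marker must reach the firmware before any new command is
 * queued (see the check at the top of qla2x00_start_scsi() above).
 */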

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
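
/*
 * Illustrative example: assuming QLA_DSDS_PER_IOCB is 37 (its value in
 * qla_def.h at the time of writing), a command with 40 data segments
 * needs qla24xx_calc_dsd_lists(40) = 2 DSD lists -- one full list of 37
 * entries plus a second for the remaining 3.
 */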


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
		    0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
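
/*
 * Recap of the T10-DIF tag seeding above: Types 0, 1, and 2 seed the
 * 32-bit reference tag from the low dword of scsi_get_lba(); Types 1
 * and 2 also zero the application tag; Type 3 carries a guard tag only,
 * so every ref-tag mask byte stays 0x00 and the reference tag is not
 * checked.  A mask byte of 0xff enables validation/replacement of all
 * bits in the corresponding ref-tag byte, and is set only when
 * qla2x00_hba_err_chk_enabled() reports HBA error checking is on.
 */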

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
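
/*
 * qla24xx_get_one_block_sg() walks the data scatterlist one protection
 * interval (blk_sz bytes, normally the sector size) at a time, carrying
 * tot_partial across scatterlist elements that end mid-interval.  For
 * instance, consecutive SG entries of 300 and 500 bytes against a
 * 512-byte interval yield a 300-byte partial chunk, then a 212-byte
 * chunk that completes the block (*partial == 0), then a 288-byte
 * remainder that starts the next interval.
 */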

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
	    "%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
1183
1184/**
1185 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1186 * Type 6 IOCB types.
1187 *
1188 * @sp: SRB command to process
1189 * @cmd_pkt: Command type 3 IOCB
1190 * @tot_dsds: Total number of segments to transfer
1191 */
Michael Hernandezd7459522016-12-12 14:40:07 -08001192inline int
Arun Easibad75002010-05-04 15:01:30 -07001193qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1194 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1195{
1196 uint32_t *cur_dsd, *fcp_dl;
1197 scsi_qla_host_t *vha;
1198 struct scsi_cmnd *cmd;
Arun Easi8cb20492011-08-16 11:29:22 -07001199 uint32_t total_bytes = 0;
Arun Easibad75002010-05-04 15:01:30 -07001200 uint32_t data_bytes;
1201 uint32_t dif_bytes;
1202 uint8_t bundling = 1;
1203 uint16_t blk_size;
1204 uint8_t *clr_ptr;
1205 struct crc_context *crc_ctx_pkt = NULL;
1206 struct qla_hw_data *ha;
1207 uint8_t additional_fcpcdb_len;
1208 uint16_t fcp_cmnd_len;
1209 struct fcp_cmnd *fcp_cmnd;
1210 dma_addr_t crc_ctx_dma;
1211
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001212 cmd = GET_CMD_SP(sp);
Arun Easibad75002010-05-04 15:01:30 -07001213
Arun Easibad75002010-05-04 15:01:30 -07001214 /* Update entry type to indicate Command Type CRC_2 IOCB */
Bart Van Asschead950362015-07-09 07:24:08 -07001215 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
Arun Easibad75002010-05-04 15:01:30 -07001216
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001217 vha = sp->vha;
Arun Easibad75002010-05-04 15:01:30 -07001218 ha = vha->hw;
1219
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001220 /* No data transfer */
1221 data_bytes = scsi_bufflen(cmd);
1222 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
Bart Van Asschead950362015-07-09 07:24:08 -07001223 cmd_pkt->byte_count = cpu_to_le32(0);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001224 return QLA_SUCCESS;
1225 }
Arun Easibad75002010-05-04 15:01:30 -07001226
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001227 cmd_pkt->vp_index = sp->vha->vp_idx;
Arun Easibad75002010-05-04 15:01:30 -07001228
1229 /* Set transfer direction */
1230 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1231 cmd_pkt->control_flags =
Bart Van Asschead950362015-07-09 07:24:08 -07001232 cpu_to_le16(CF_WRITE_DATA);
Arun Easibad75002010-05-04 15:01:30 -07001233 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1234 cmd_pkt->control_flags =
Bart Van Asschead950362015-07-09 07:24:08 -07001235 cpu_to_le16(CF_READ_DATA);
Arun Easibad75002010-05-04 15:01:30 -07001236 }
1237
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001238 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1239 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1240 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1241 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
Arun Easibad75002010-05-04 15:01:30 -07001242 bundling = 0;
1243
1244 /* Allocate CRC context from global pool */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001245 crc_ctx_pkt = sp->u.scmd.ctx =
1246 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
Arun Easibad75002010-05-04 15:01:30 -07001247
1248 if (!crc_ctx_pkt)
1249 goto crc_queuing_error;
1250
1251 /* Zero out CTX area. */
1252 clr_ptr = (uint8_t *)crc_ctx_pkt;
1253 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1254
1255 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1256
1257 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1258
1259 /* Set handle */
1260 crc_ctx_pkt->handle = cmd_pkt->handle;
1261
1262 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1263
Arun Easie02587d2011-08-16 11:29:23 -07001264 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
Arun Easibad75002010-05-04 15:01:30 -07001265 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1266
1267 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1268 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1269 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1270
1271 /* Determine SCSI command length -- align to 4 byte boundary */
1272 if (cmd->cmd_len > 16) {
Arun Easibad75002010-05-04 15:01:30 -07001273 additional_fcpcdb_len = cmd->cmd_len - 16;
1274 if ((cmd->cmd_len % 4) != 0) {
1275 /* SCSI cmd > 16 bytes must be multiple of 4 */
1276 goto crc_queuing_error;
1277 }
1278 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1279 } else {
1280 additional_fcpcdb_len = 0;
1281 fcp_cmnd_len = 12 + 16 + 4;
1282 }
1283
1284 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1285
1286 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1287 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1288 fcp_cmnd->additional_cdb_len |= 1;
1289 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1290 fcp_cmnd->additional_cdb_len |= 2;
1291
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001292 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
Arun Easibad75002010-05-04 15:01:30 -07001293 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1294 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1295 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1296 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1297 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1298 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
Uwe Kleine-König65155b32010-06-11 12:17:01 +02001299 fcp_cmnd->task_management = 0;
Christoph Hellwig50668632014-10-30 14:30:06 +01001300 fcp_cmnd->task_attribute = TSK_SIMPLE;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001301
Arun Easibad75002010-05-04 15:01:30 -07001302 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1303
Arun Easibad75002010-05-04 15:01:30 -07001304 /* Compute dif len and adjust data len to incude protection */
Arun Easibad75002010-05-04 15:01:30 -07001305 dif_bytes = 0;
1306 blk_size = cmd->device->sector_size;
Arun Easi8cb20492011-08-16 11:29:22 -07001307 dif_bytes = (data_bytes / blk_size) * 8;
1308
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001309 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
Arun Easi8cb20492011-08-16 11:29:22 -07001310 case SCSI_PROT_READ_INSERT:
1311 case SCSI_PROT_WRITE_STRIP:
1312 total_bytes = data_bytes;
1313 data_bytes += dif_bytes;
1314 break;
1315
1316 case SCSI_PROT_READ_STRIP:
1317 case SCSI_PROT_WRITE_INSERT:
1318 case SCSI_PROT_READ_PASS:
1319 case SCSI_PROT_WRITE_PASS:
1320 total_bytes = data_bytes + dif_bytes;
1321 break;
1322 default:
1323 BUG();
Arun Easibad75002010-05-04 15:01:30 -07001324 }
1325
Arun Easie02587d2011-08-16 11:29:23 -07001326 if (!qla2x00_hba_err_chk_enabled(sp))
Arun Easibad75002010-05-04 15:01:30 -07001327 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
Arun Easi9e522cd2012-08-22 14:21:31 -04001328 /* HBA error checking enabled */
1329 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1330 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1331 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1332 SCSI_PROT_DIF_TYPE2))
1333 fw_prot_opts |= BIT_10;
1334 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1335 SCSI_PROT_DIF_TYPE3)
1336 fw_prot_opts |= BIT_11;
1337 }
Arun Easibad75002010-05-04 15:01:30 -07001338
1339 if (!bundling) {
1340 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1341 } else {
1342 /*
1343 * Configure Bundling if we need to fetch interlaving
1344 * protection PCI accesses
1345 */
1346 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1347 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1348 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1349 tot_prot_dsds);
1350 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1351 }
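	/*
	 * When bundling, data and protection DSDs live in separate lists,
	 * so dseg_count above deliberately excludes the protection
	 * segments; those are walked into dif_address further down.
	 */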
1352
1353 /* Finish the common fields of CRC pkt */
1354 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1355 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1356 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
Bart Van Asschead950362015-07-09 07:24:08 -07001357 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
Arun Easibad75002010-05-04 15:01:30 -07001358 /* Fibre channel byte count */
1359 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1360 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1361 additional_fcpcdb_len);
1362 *fcp_dl = htonl(total_bytes);
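	/*
	 * FCP_DL trails the CDB and is big-endian on the wire, hence
	 * htonl() here instead of the cpu_to_le32() used for IOCB fields.
	 */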
1363
Arun Easi0c470872010-07-23 15:28:38 +05001364 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
Bart Van Asschead950362015-07-09 07:24:08 -07001365 cmd_pkt->byte_count = cpu_to_le32(0);
Arun Easi0c470872010-07-23 15:28:38 +05001366 return QLA_SUCCESS;
1367 }
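	/*
	 * No-data commands return here with a zero byte count and the
	 * DSD-enable control flags still clear, so no segments are walked.
	 */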
Arun Easibad75002010-05-04 15:01:30 -07001368	/* Walk the data segments */
1369
Bart Van Asschead950362015-07-09 07:24:08 -07001370 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
Arun Easi8cb20492011-08-16 11:29:22 -07001371
1372 if (!bundling && tot_prot_dsds) {
1373 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
Quinn Tranf83adb62014-04-11 16:54:43 -04001374 cur_dsd, tot_dsds, NULL))
Arun Easi8cb20492011-08-16 11:29:22 -07001375 goto crc_queuing_error;
1376 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
Quinn Tranf83adb62014-04-11 16:54:43 -04001377 (tot_dsds - tot_prot_dsds), NULL))
Arun Easibad75002010-05-04 15:01:30 -07001378 goto crc_queuing_error;
1379
1380 if (bundling && tot_prot_dsds) {
 1381		/* Walk the DIF segments */
Bart Van Asschead950362015-07-09 07:24:08 -07001382 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
Arun Easibad75002010-05-04 15:01:30 -07001383 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1384 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
Quinn Tranf83adb62014-04-11 16:54:43 -04001385 tot_prot_dsds, NULL))
Arun Easibad75002010-05-04 15:01:30 -07001386 goto crc_queuing_error;
1387 }
1388 return QLA_SUCCESS;
1389
1390crc_queuing_error:
Arun Easibad75002010-05-04 15:01:30 -07001391 /* Cleanup will be performed by the caller */
1392
1393 return QLA_FUNCTION_FAILED;
1394}
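
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the DIF byte accounting performed above for pass-through protection
 * ops, assuming the usual 8-byte protection tuple per logical block.
 */
static inline uint32_t qlax_example_dif_pass_bytes(uint32_t data_bytes,
    uint32_t blk_size)
{
	uint32_t dif_bytes = (data_bytes / blk_size) * 8;

	/* e.g. 4096 data bytes in 512-byte blocks -> 4096 + 64 = 4160 */
	return data_bytes + dif_bytes;
}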
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001395
1396/**
1397 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1398 * @sp: command to send to the ISP
1399 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001400 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001401 */
1402int
1403qla24xx_start_scsi(srb_t *sp)
1404{
Bart Van Assche52c82822015-07-09 07:23:26 -07001405 int nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001406 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001407 uint32_t *clr_ptr;
1408 uint32_t index;
1409 uint32_t handle;
1410 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001411 uint16_t cnt;
1412 uint16_t req_cnt;
1413 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001414 struct req_que *req = NULL;
1415 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001416 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001417 struct scsi_qla_host *vha = sp->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001418 struct qla_hw_data *ha = vha->hw;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001419
1420 /* Setup device pointers. */
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001421 req = vha->req;
Michael Hernandezd7459522016-12-12 14:40:07 -08001422 rsp = req->rsp;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001423
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001424 /* So we know we haven't pci_map'ed anything yet */
1425 tot_dsds = 0;
1426
1427 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001428 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001429 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1430 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001431 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001432 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001433 }
1434
1435 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001436 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001437
1438 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001439 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001440 for (index = 1; index < req->num_outstanding_cmds; index++) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001441 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001442 if (handle == req->num_outstanding_cmds)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001443 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001444 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001445 break;
1446 }
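	/*
	 * Handle 0 is reserved as the "free slot" marker, so the circular
	 * search starts just past the last handle issued and wraps to 1.
	 */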
Chad Dupuis8d93f552013-01-30 03:34:37 -05001447 if (index == req->num_outstanding_cmds)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001448 goto queuing_error;
1449
1450 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001451 if (scsi_sg_count(cmd)) {
1452 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1453 scsi_sg_count(cmd), cmd->sc_data_direction);
1454 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001455 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001456 } else
1457 nseg = 0;
1458
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001459 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001460 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001461 if (req->cnt < (req_cnt + 2)) {
Joe Carnuccio7c6300e2014-04-11 16:54:37 -04001462 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1463 RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001464 if (req->ring_index < cnt)
1465 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001466 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001467 req->cnt = req->length -
1468 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001469 if (req->cnt < (req_cnt + 2))
1470 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001471 }
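	/*
	 * Two spare entries of headroom are kept, presumably so the ring
	 * can never fill completely and look identical to an empty one.
	 */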
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001472
1473 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001474 req->current_outstanding_cmd = handle;
1475 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001476 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001477 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001478 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001479
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001480 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001481 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001482
1483 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001484 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001485 clr_ptr = (uint32_t *)cmd_pkt + 2;
1486 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1487 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1488
1489 /* Set NPORT-ID and LUN number*/
1490 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1491 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1492 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1493 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001494 cmd_pkt->vp_index = sp->vha->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001495
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001496 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001497 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001498
Christoph Hellwig50668632014-10-30 14:30:06 +01001499 cmd_pkt->task = TSK_SIMPLE;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001500
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001501 /* Load SCSI command packet. */
1502 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1503 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1504
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001505 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001506
1507 /* Build IOCB segments */
Michael Hernandezd7459522016-12-12 14:40:07 -08001508 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001509
1510 /* Set total data segment count. */
1511 cmd_pkt->entry_count = (uint8_t)req_cnt;
1512 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001513 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001514 req->ring_index++;
1515 if (req->ring_index == req->length) {
1516 req->ring_index = 0;
1517 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001518 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001519 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001520
1521 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001522
1523 /* Set chip new ring index. */
Andrew Vasquez08029992009-03-24 09:07:55 -07001524 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1525 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001526
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001527 /* Manage unprocessed RIO/ZIO commands in response queue. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001528 if (vha->flags.process_response_queue &&
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001529 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001530 qla24xx_process_response_queue(vha, rsp);
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001531
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001532 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001533 return QLA_SUCCESS;
1534
1535queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001536 if (tot_dsds)
1537 scsi_dma_unmap(cmd);
1538
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001540
1541 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542}
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001543
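/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the free-slot arithmetic used by the ring-space checks in the
 * start_scsi routines, for a circular ring of @length entries with a
 * software in-pointer @in and a hardware out-pointer @out.
 */
static inline uint16_t qlax_example_ring_space(uint16_t in, uint16_t out,
    uint16_t length)
{
	/* Free slots are the gap up to the consumer, wrapping at the end. */
	return (in < out) ? out - in : length - (in - out);
}
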
Arun Easibad75002010-05-04 15:01:30 -07001544/**
1545 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1546 * @sp: command to send to the ISP
1547 *
1548 * Returns non-zero if a failure occurred, else zero.
1549 */
1550int
1551qla24xx_dif_start_scsi(srb_t *sp)
1552{
1553 int nseg;
1554 unsigned long flags;
1555 uint32_t *clr_ptr;
1556 uint32_t index;
1557 uint32_t handle;
1558 uint16_t cnt;
1559 uint16_t req_cnt = 0;
1560 uint16_t tot_dsds;
1561 uint16_t tot_prot_dsds;
1562 uint16_t fw_prot_opts = 0;
1563 struct req_que *req = NULL;
1564 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001565 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08001566 struct scsi_qla_host *vha = sp->vha;
Arun Easibad75002010-05-04 15:01:30 -07001567 struct qla_hw_data *ha = vha->hw;
1568 struct cmd_type_crc_2 *cmd_pkt;
1569 uint32_t status = 0;
1570
1571#define QDSS_GOT_Q_SPACE BIT_0
1572
Arun Easi0c470872010-07-23 15:28:38 +05001573 /* Only process protection or >16 cdb in this routine */
1574 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1575 if (cmd->cmd_len <= 16)
1576 return qla24xx_start_scsi(sp);
1577 }
Arun Easibad75002010-05-04 15:01:30 -07001578
1579 /* Setup device pointers. */
Arun Easibad75002010-05-04 15:01:30 -07001580 req = vha->req;
Michael Hernandezd7459522016-12-12 14:40:07 -08001581 rsp = req->rsp;
Arun Easibad75002010-05-04 15:01:30 -07001582
1583 /* So we know we haven't pci_map'ed anything yet */
1584 tot_dsds = 0;
1585
1586 /* Send marker if required */
1587 if (vha->marker_needed != 0) {
1588 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1589 QLA_SUCCESS)
1590 return QLA_FUNCTION_FAILED;
1591 vha->marker_needed = 0;
1592 }
1593
1594 /* Acquire ring specific lock */
1595 spin_lock_irqsave(&ha->hardware_lock, flags);
1596
1597 /* Check for room in outstanding command list. */
1598 handle = req->current_outstanding_cmd;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001599 for (index = 1; index < req->num_outstanding_cmds; index++) {
Arun Easibad75002010-05-04 15:01:30 -07001600 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05001601 if (handle == req->num_outstanding_cmds)
Arun Easibad75002010-05-04 15:01:30 -07001602 handle = 1;
1603 if (!req->outstanding_cmds[handle])
1604 break;
1605 }
1606
Chad Dupuis8d93f552013-01-30 03:34:37 -05001607 if (index == req->num_outstanding_cmds)
Arun Easibad75002010-05-04 15:01:30 -07001608 goto queuing_error;
1609
1610 /* Compute number of required data segments */
1611 /* Map the sg table so we have an accurate count of sg entries needed */
1612 if (scsi_sg_count(cmd)) {
1613 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1614 scsi_sg_count(cmd), cmd->sc_data_direction);
1615 if (unlikely(!nseg))
1616 goto queuing_error;
1617 else
1618 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001619
1620 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1621 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1622 struct qla2_sgx sgx;
1623 uint32_t partial;
1624
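			/*
			 * For HBA insert/strip ops the buffer must be carved
			 * into one DSD per logical block so the firmware can
			 * splice protection data in between; nseg is
			 * recounted with the per-block walk below.
			 */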
1625 memset(&sgx, 0, sizeof(struct qla2_sgx));
1626 sgx.tot_bytes = scsi_bufflen(cmd);
1627 sgx.cur_sg = scsi_sglist(cmd);
1628 sgx.sp = sp;
1629
1630 nseg = 0;
1631 while (qla24xx_get_one_block_sg(
1632 cmd->device->sector_size, &sgx, &partial))
1633 nseg++;
1634 }
Arun Easibad75002010-05-04 15:01:30 -07001635 } else
1636 nseg = 0;
1637
1638 /* number of required data segments */
1639 tot_dsds = nseg;
1640
1641 /* Compute number of required protection segments */
1642 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1643 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1644 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1645 if (unlikely(!nseg))
1646 goto queuing_error;
1647 else
1648 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001649
1650 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1651 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1652 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1653 }
Arun Easibad75002010-05-04 15:01:30 -07001654 } else {
1655 nseg = 0;
1656 }
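	/*
	 * tot_prot_dsds below ends up as: one protection DSD per logical
	 * block for insert/strip ops, the mapped PI scatterlist count for
	 * pass-through ops, or zero when the command carries no protection.
	 */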
1657
1658 req_cnt = 1;
1659 /* Total Data and protection sg segment(s) */
1660 tot_prot_dsds = nseg;
1661 tot_dsds += nseg;
1662 if (req->cnt < (req_cnt + 2)) {
Joe Carnuccio7c6300e2014-04-11 16:54:37 -04001663 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1664 RD_REG_DWORD_RELAXED(req->req_q_out);
Arun Easibad75002010-05-04 15:01:30 -07001665 if (req->ring_index < cnt)
1666 req->cnt = cnt - req->ring_index;
1667 else
1668 req->cnt = req->length -
1669 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001670 if (req->cnt < (req_cnt + 2))
1671 goto queuing_error;
Arun Easibad75002010-05-04 15:01:30 -07001672 }
1673
Arun Easibad75002010-05-04 15:01:30 -07001674 status |= QDSS_GOT_Q_SPACE;
1675
1676 /* Build header part of command packet (excluding the OPCODE). */
1677 req->current_outstanding_cmd = handle;
1678 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001679 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001680 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Arun Easibad75002010-05-04 15:01:30 -07001681 req->cnt -= req_cnt;
1682
1683 /* Fill-in common area */
1684 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1685 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1686
1687 clr_ptr = (uint32_t *)cmd_pkt + 2;
1688 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1689
1690 /* Set NPORT-ID and LUN number*/
1691 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1692 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1693 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1694 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1695
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001696 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Arun Easibad75002010-05-04 15:01:30 -07001697 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1698
1699 /* Total Data and protection segment(s) */
1700 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1701
1702 /* Build IOCB segments and adjust for data protection segments */
1703 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1704 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1705 QLA_SUCCESS)
1706 goto queuing_error;
1707
1708 cmd_pkt->entry_count = (uint8_t)req_cnt;
1709 /* Specify response queue number where completion should happen */
1710 cmd_pkt->entry_status = (uint8_t) rsp->id;
Bart Van Asschead950362015-07-09 07:24:08 -07001711 cmd_pkt->timeout = cpu_to_le16(0);
Arun Easibad75002010-05-04 15:01:30 -07001712 wmb();
1713
1714 /* Adjust ring index. */
1715 req->ring_index++;
1716 if (req->ring_index == req->length) {
1717 req->ring_index = 0;
1718 req->ring_ptr = req->ring;
1719 } else
1720 req->ring_ptr++;
1721
1722 /* Set chip new ring index. */
1723 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1724 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1725
1726 /* Manage unprocessed RIO/ZIO commands in response queue. */
1727 if (vha->flags.process_response_queue &&
1728 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1729 qla24xx_process_response_queue(vha, rsp);
1730
1731 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1732
1733 return QLA_SUCCESS;
1734
1735queuing_error:
1736 if (status & QDSS_GOT_Q_SPACE) {
1737 req->outstanding_cmds[handle] = NULL;
1738 req->cnt += req_cnt;
1739 }
1740 /* Cleanup will be performed by the caller (queuecommand) */
1741
1742 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001743 return QLA_FUNCTION_FAILED;
1744}
1745
Michael Hernandezd7459522016-12-12 14:40:07 -08001746/**
1747 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1748 * @sp: command to send to the ISP
1749 *
1750 * Returns non-zero if a failure occurred, else zero.
1751 */
1752static int
1753qla2xxx_start_scsi_mq(srb_t *sp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001754{
Michael Hernandezd7459522016-12-12 14:40:07 -08001755 int nseg;
1756 unsigned long flags;
1757 uint32_t *clr_ptr;
1758 uint32_t index;
1759 uint32_t handle;
1760 struct cmd_type_7 *cmd_pkt;
1761 uint16_t cnt;
1762 uint16_t req_cnt;
1763 uint16_t tot_dsds;
1764 struct req_que *req = NULL;
1765 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001766 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Michael Hernandezd7459522016-12-12 14:40:07 -08001767 struct scsi_qla_host *vha = sp->fcport->vha;
1768 struct qla_hw_data *ha = vha->hw;
1769 struct qla_qpair *qpair = sp->qpair;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001770
Michael Hernandezd7459522016-12-12 14:40:07 -08001771 /* Setup qpair pointers */
1772 rsp = qpair->rsp;
1773 req = qpair->req;
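	/*
	 * Each qpair owns its own request/response rings and qp_lock, so
	 * multiqueue submissions stay off the global hardware_lock used by
	 * the legacy qla24xx_start_scsi() path.
	 */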
1774
1775 /* So we know we haven't pci_map'ed anything yet */
1776 tot_dsds = 0;
1777
1778 /* Send marker if required */
1779 if (vha->marker_needed != 0) {
1780 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1781 QLA_SUCCESS)
1782 return QLA_FUNCTION_FAILED;
1783 vha->marker_needed = 0;
1784 }
1785
1786 /* Acquire qpair specific lock */
1787 spin_lock_irqsave(&qpair->qp_lock, flags);
1788
1789 /* Check for room in outstanding command list. */
1790 handle = req->current_outstanding_cmd;
1791 for (index = 1; index < req->num_outstanding_cmds; index++) {
1792 handle++;
1793 if (handle == req->num_outstanding_cmds)
1794 handle = 1;
1795 if (!req->outstanding_cmds[handle])
1796 break;
1797 }
1798 if (index == req->num_outstanding_cmds)
1799 goto queuing_error;
1800
1801 /* Map the sg table so we have an accurate count of sg entries needed */
1802 if (scsi_sg_count(cmd)) {
1803 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1804 scsi_sg_count(cmd), cmd->sc_data_direction);
1805 if (unlikely(!nseg))
1806 goto queuing_error;
1807 } else
1808 nseg = 0;
1809
1810 tot_dsds = nseg;
1811 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1812 if (req->cnt < (req_cnt + 2)) {
1813 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1814 RD_REG_DWORD_RELAXED(req->req_q_out);
1815 if (req->ring_index < cnt)
1816 req->cnt = cnt - req->ring_index;
1817 else
1818 req->cnt = req->length -
1819 (req->ring_index - cnt);
1820 if (req->cnt < (req_cnt + 2))
1821 goto queuing_error;
1822 }
1823
1824 /* Build command packet. */
1825 req->current_outstanding_cmd = handle;
1826 req->outstanding_cmds[handle] = sp;
1827 sp->handle = handle;
1828 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1829 req->cnt -= req_cnt;
1830
1831 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1832 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1833
1834 /* Zero out remaining portion of packet. */
1835 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1836 clr_ptr = (uint32_t *)cmd_pkt + 2;
1837 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1838 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1839
1840 /* Set NPORT-ID and LUN number*/
1841 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1842 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1843 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1844 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1845 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1846
1847 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1848 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1849
1850 cmd_pkt->task = TSK_SIMPLE;
1851
1852 /* Load SCSI command packet. */
1853 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1854 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1855
1856 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1857
1858 /* Build IOCB segments */
1859 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1860
1861 /* Set total data segment count. */
1862 cmd_pkt->entry_count = (uint8_t)req_cnt;
1863 wmb();
1864 /* Adjust ring index. */
1865 req->ring_index++;
1866 if (req->ring_index == req->length) {
1867 req->ring_index = 0;
1868 req->ring_ptr = req->ring;
1869 } else
1870 req->ring_ptr++;
1871
1872 sp->flags |= SRB_DMA_VALID;
1873
1874 /* Set chip new ring index. */
1875 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1876
1877 /* Manage unprocessed RIO/ZIO commands in response queue. */
1878 if (vha->flags.process_response_queue &&
1879 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1880 qla24xx_process_response_queue(vha, rsp);
1881
1882 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1883 return QLA_SUCCESS;
1884
1885queuing_error:
1886 if (tot_dsds)
1887 scsi_dma_unmap(cmd);
1888
1889 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1890
1891 return QLA_FUNCTION_FAILED;
1892}
1893
1894
1895/**
1896 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1897 * @sp: command to send to the ISP
1898 *
1899 * Returns non-zero if a failure occurred, else zero.
1900 */
1901int
1902qla2xxx_dif_start_scsi_mq(srb_t *sp)
1903{
1904 int nseg;
1905 unsigned long flags;
1906 uint32_t *clr_ptr;
1907 uint32_t index;
1908 uint32_t handle;
1909 uint16_t cnt;
1910 uint16_t req_cnt = 0;
1911 uint16_t tot_dsds;
1912 uint16_t tot_prot_dsds;
1913 uint16_t fw_prot_opts = 0;
1914 struct req_que *req = NULL;
1915 struct rsp_que *rsp = NULL;
1916 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1917 struct scsi_qla_host *vha = sp->fcport->vha;
1918 struct qla_hw_data *ha = vha->hw;
1919 struct cmd_type_crc_2 *cmd_pkt;
1920 uint32_t status = 0;
1921 struct qla_qpair *qpair = sp->qpair;
1922
1923#define QDSS_GOT_Q_SPACE BIT_0
1924
1925 /* Check for host side state */
1926 if (!qpair->online) {
1927 cmd->result = DID_NO_CONNECT << 16;
1928 return QLA_INTERFACE_ERROR;
1929 }
1930
1931 if (!qpair->difdix_supported &&
1932 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1933 cmd->result = DID_NO_CONNECT << 16;
1934 return QLA_INTERFACE_ERROR;
1935 }
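	/*
	 * Fail protected commands fast with DID_NO_CONNECT when the qpair
	 * cannot handle DIF/DIX, rather than queuing them to firmware.
	 */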
1936
1937 /* Only process protection or >16 cdb in this routine */
1938 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1939 if (cmd->cmd_len <= 16)
1940 return qla2xxx_start_scsi_mq(sp);
1941 }
1942
1943 /* Setup qpair pointers */
1944 rsp = qpair->rsp;
1945 req = qpair->req;
1946
1947 /* So we know we haven't pci_map'ed anything yet */
1948 tot_dsds = 0;
1949
1950 /* Send marker if required */
1951 if (vha->marker_needed != 0) {
1952 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1953 QLA_SUCCESS)
1954 return QLA_FUNCTION_FAILED;
1955 vha->marker_needed = 0;
1956 }
1957
1958 /* Acquire ring specific lock */
1959 spin_lock_irqsave(&qpair->qp_lock, flags);
1960
1961 /* Check for room in outstanding command list. */
1962 handle = req->current_outstanding_cmd;
1963 for (index = 1; index < req->num_outstanding_cmds; index++) {
1964 handle++;
1965 if (handle == req->num_outstanding_cmds)
1966 handle = 1;
1967 if (!req->outstanding_cmds[handle])
1968 break;
1969 }
1970
1971 if (index == req->num_outstanding_cmds)
1972 goto queuing_error;
1973
1974 /* Compute number of required data segments */
1975 /* Map the sg table so we have an accurate count of sg entries needed */
1976 if (scsi_sg_count(cmd)) {
1977 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1978 scsi_sg_count(cmd), cmd->sc_data_direction);
1979 if (unlikely(!nseg))
1980 goto queuing_error;
1981 else
1982 sp->flags |= SRB_DMA_VALID;
1983
1984 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1985 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1986 struct qla2_sgx sgx;
1987 uint32_t partial;
1988
1989 memset(&sgx, 0, sizeof(struct qla2_sgx));
1990 sgx.tot_bytes = scsi_bufflen(cmd);
1991 sgx.cur_sg = scsi_sglist(cmd);
1992 sgx.sp = sp;
1993
1994 nseg = 0;
1995 while (qla24xx_get_one_block_sg(
1996 cmd->device->sector_size, &sgx, &partial))
1997 nseg++;
1998 }
1999 } else
2000 nseg = 0;
2001
2002 /* number of required data segments */
2003 tot_dsds = nseg;
2004
2005 /* Compute number of required protection segments */
2006 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2007 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2008 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2009 if (unlikely(!nseg))
2010 goto queuing_error;
2011 else
2012 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2013
2014 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2015 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2016 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2017 }
2018 } else {
2019 nseg = 0;
2020 }
2021
2022 req_cnt = 1;
2023 /* Total Data and protection sg segment(s) */
2024 tot_prot_dsds = nseg;
2025 tot_dsds += nseg;
2026 if (req->cnt < (req_cnt + 2)) {
2027 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2028 RD_REG_DWORD_RELAXED(req->req_q_out);
2029 if (req->ring_index < cnt)
2030 req->cnt = cnt - req->ring_index;
2031 else
2032 req->cnt = req->length -
2033 (req->ring_index - cnt);
2034 if (req->cnt < (req_cnt + 2))
2035 goto queuing_error;
2036 }
2037
2038 status |= QDSS_GOT_Q_SPACE;
2039
2040 /* Build header part of command packet (excluding the OPCODE). */
2041 req->current_outstanding_cmd = handle;
2042 req->outstanding_cmds[handle] = sp;
2043 sp->handle = handle;
2044 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2045 req->cnt -= req_cnt;
2046
2047 /* Fill-in common area */
2048 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2049 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2050
2051 clr_ptr = (uint32_t *)cmd_pkt + 2;
2052 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2053
2054 /* Set NPORT-ID and LUN number*/
2055 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2056 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2057 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2058 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2059
2060 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2061 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2062
2063 /* Total Data and protection segment(s) */
2064 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2065
2066 /* Build IOCB segments and adjust for data protection segments */
2067 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2068 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2069 QLA_SUCCESS)
2070 goto queuing_error;
2071
2072 cmd_pkt->entry_count = (uint8_t)req_cnt;
2073 cmd_pkt->timeout = cpu_to_le16(0);
2074 wmb();
2075
2076 /* Adjust ring index. */
2077 req->ring_index++;
2078 if (req->ring_index == req->length) {
2079 req->ring_index = 0;
2080 req->ring_ptr = req->ring;
2081 } else
2082 req->ring_ptr++;
2083
2084 /* Set chip new ring index. */
2085 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2086
2087 /* Manage unprocessed RIO/ZIO commands in response queue. */
2088 if (vha->flags.process_response_queue &&
2089 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2090 qla24xx_process_response_queue(vha, rsp);
2091
2092 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2093
2094 return QLA_SUCCESS;
2095
2096queuing_error:
2097 if (status & QDSS_GOT_Q_SPACE) {
2098 req->outstanding_cmds[handle] = NULL;
2099 req->cnt += req_cnt;
2100 }
2101 /* Cleanup will be performed by the caller (queuecommand) */
2102
2103 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2104 return QLA_FUNCTION_FAILED;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07002105}
Andrew Vasquezac280b62009-08-20 11:06:05 -07002106
2107/* Generic Control-SRB manipulation functions. */
Arun Easib6a029e2014-09-25 06:14:52 -04002108
2109/* hardware_lock assumed to be held. */
2110void *
2111qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
2112{
2113 if (qla2x00_reset_active(vha))
2114 return NULL;
2115
2116 return qla2x00_alloc_iocbs(vha, sp);
2117}
2118
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002119void *
2120qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002121{
Andrew Vasquezac280b62009-08-20 11:06:05 -07002122 struct qla_hw_data *ha = vha->hw;
2123 struct req_que *req = ha->req_q_map[0];
Bart Van Assche118e2ef2015-07-09 07:24:27 -07002124 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002125 uint32_t index, handle;
2126 request_t *pkt;
2127 uint16_t cnt, req_cnt;
2128
2129 pkt = NULL;
2130 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002131 handle = 0;
2132
2133 if (!sp)
2134 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002135
2136 /* Check for room in outstanding command list. */
2137 handle = req->current_outstanding_cmd;
Chad Dupuis4b4f30c2014-03-07 02:43:52 -05002138 for (index = 1; index < req->num_outstanding_cmds; index++) {
Andrew Vasquezac280b62009-08-20 11:06:05 -07002139 handle++;
Chad Dupuis8d93f552013-01-30 03:34:37 -05002140 if (handle == req->num_outstanding_cmds)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002141 handle = 1;
2142 if (!req->outstanding_cmds[handle])
2143 break;
2144 }
Chad Dupuis8d93f552013-01-30 03:34:37 -05002145 if (index == req->num_outstanding_cmds) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002146 ql_log(ql_log_warn, vha, 0x700b,
Masanari Iidad6a03582012-08-22 14:20:58 -04002147 "No room on outstanding cmd array.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07002148 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002149 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002150
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002151 /* Prep command array. */
2152 req->current_outstanding_cmd = handle;
2153 req->outstanding_cmds[handle] = sp;
2154 sp->handle = handle;
2155
Andrew Vasquez57807902011-11-18 09:03:20 -08002156 /* Adjust entry-counts as needed. */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002157 if (sp->type != SRB_SCSI_CMD)
2158 req_cnt = sp->iocbs;
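	/*
	 * Non-SCSI SRBs (logins, ELS/CT passthrough, ...) precompute the
	 * number of IOCB entries they need in sp->iocbs; everything else
	 * defaults to the single entry reserved above.
	 */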
Andrew Vasquez57807902011-11-18 09:03:20 -08002159
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002160skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07002161 /* Check for room on request queue. */
Himanshu Madhani94007032014-09-25 06:14:46 -04002162 if (req->cnt < req_cnt + 2) {
Chad Dupuisf73cb692014-02-26 04:15:06 -05002163 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
Andrew Vasquezac280b62009-08-20 11:06:05 -07002164 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Atul Deshmukh7ec0eff2013-08-27 01:37:28 -04002165 else if (IS_P3P_TYPE(ha))
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002166 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002167 else if (IS_FWI2_CAPABLE(ha))
2168 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002169 else if (IS_QLAFX00(ha))
2170 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002171 else
2172 cnt = qla2x00_debounce_register(
2173 ISP_REQ_Q_OUT(ha, &reg->isp));
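		/*
		 * Each ISP generation exposes its request-queue out pointer
		 * through a different register, hence the chip-type ladder
		 * above.
		 */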
2174
2175 if (req->ring_index < cnt)
2176 req->cnt = cnt - req->ring_index;
2177 else
2178 req->cnt = req->length -
2179 (req->ring_index - cnt);
2180 }
Himanshu Madhani94007032014-09-25 06:14:46 -04002181 if (req->cnt < req_cnt + 2)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002182 goto queuing_error;
2183
2184 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07002185 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002186 pkt = req->ring_ptr;
2187 memset(pkt, 0, REQUEST_ENTRY_SIZE);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002188 if (IS_QLAFX00(ha)) {
Saurav Kashyap1f8deef2013-06-25 11:27:21 -04002189 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2190 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
Giridhar Malavali8ae6d9c2013-03-28 08:21:23 -04002191 } else {
2192 pkt->entry_count = req_cnt;
2193 pkt->handle = handle;
2194 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002195
 2196queuing_error:
Himanshu Madhanice1025c2015-12-17 14:56:58 -05002197	/* The success path falls through this label; count only failures. */
	if (!pkt)
		vha->tgt_counters.num_alloc_iocb_failed++;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002198	return pkt;
2199}
2200
2201static void
Andrew Vasquezac280b62009-08-20 11:06:05 -07002202qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2203{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002204 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002205
2206 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2207 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002208 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002209 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002210 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07002211 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2212 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2213 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2214 logio->port_id[1] = sp->fcport->d_id.b.area;
2215 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002216 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002217}
2218
2219static void
2220qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2221{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002222 struct qla_hw_data *ha = sp->vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002223 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002224 uint16_t opts;
2225
Giridhar Malavalib9637522010-05-28 15:08:15 -07002226 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002227 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2228 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002229 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2230 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002231 if (HAS_EXTENDED_IDS(ha)) {
2232 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2233 mbx->mb10 = cpu_to_le16(opts);
2234 } else {
2235 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2236 }
2237 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2238 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2239 sp->fcport->d_id.b.al_pa);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002240 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002241}
2242
2243static void
2244qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2245{
2246 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2247 logio->control_flags =
2248 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
Quinn Tran726b8542017-01-19 22:28:00 -08002249 if (!sp->fcport->se_sess ||
Quinn Tran5d964832017-01-19 22:27:59 -08002250 !sp->fcport->keep_nport_handle)
Alexei Potashnika6ca8872015-07-14 16:00:44 -04002251 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002252 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2253 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2254 logio->port_id[1] = sp->fcport->d_id.b.area;
2255 logio->port_id[2] = sp->fcport->d_id.b.domain;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002256 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002257}
2258
2259static void
2260qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2261{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002262 struct qla_hw_data *ha = sp->vha->hw;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002263
Giridhar Malavalib9637522010-05-28 15:08:15 -07002264 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002265 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2266 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2267 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2268 cpu_to_le16(sp->fcport->loop_id):
2269 cpu_to_le16(sp->fcport->loop_id << 8);
2270 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2271 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2272 sp->fcport->d_id.b.al_pa);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002273 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002274 /* Implicit: mbx->mbx10 = 0. */
2275}
2276
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002277static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002278qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2279{
2280 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2281 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2282 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002283 logio->vp_index = sp->vha->vp_idx;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002284}
2285
2286static void
2287qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2288{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002289 struct qla_hw_data *ha = sp->vha->hw;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002290
2291 mbx->entry_type = MBX_IOCB_TYPE;
2292 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2293 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2294 if (HAS_EXTENDED_IDS(ha)) {
2295 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2296 mbx->mb10 = cpu_to_le16(BIT_0);
2297 } else {
2298 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2299 }
2300 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2301 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2302 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2303 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002304 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002305}
2306
2307static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002308qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2309{
2310 uint32_t flags;
Hannes Reinecke9cb78c12014-06-25 15:27:36 +02002311 uint64_t lun;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002312 struct fc_port *fcport = sp->fcport;
2313 scsi_qla_host_t *vha = fcport->vha;
2314 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002315 struct srb_iocb *iocb = &sp->u.iocb_cmd;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002316 struct req_que *req = vha->req;
2317
2318 flags = iocb->u.tmf.flags;
2319 lun = iocb->u.tmf.lun;
2320
2321 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2322 tsk->entry_count = 1;
2323 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2324 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2325 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2326 tsk->control_flags = cpu_to_le32(flags);
2327 tsk->port_id[0] = fcport->d_id.b.al_pa;
2328 tsk->port_id[1] = fcport->d_id.b.area;
2329 tsk->port_id[2] = fcport->d_id.b.domain;
Joe Carnuccioc6d39e22012-05-15 14:34:20 -04002330 tsk->vp_index = fcport->vha->vp_idx;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002331
2332 if (flags == TCF_LUN_RESET) {
2333 int_to_scsilun(lun, &tsk->lun);
2334 host_to_fcp_swap((uint8_t *)&tsk->lun,
2335 sizeof(tsk->lun));
2336 }
2337}
2338
2339static void
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002340qla2x00_els_dcmd_sp_free(void *data)
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002341{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002342 srb_t *sp = data;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002343 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2344
2345 kfree(sp->fcport);
2346
2347 if (elsio->u.els_logo.els_logo_pyld)
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002348 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002349 elsio->u.els_logo.els_logo_pyld,
2350 elsio->u.els_logo.els_logo_pyld_dma);
2351
2352 del_timer(&elsio->timer);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002353 qla2x00_rel_sp(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002354}
2355
2356static void
2357qla2x00_els_dcmd_iocb_timeout(void *data)
2358{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002359 srb_t *sp = data;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002360 fc_port_t *fcport = sp->fcport;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002361 struct scsi_qla_host *vha = sp->vha;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002362 struct qla_hw_data *ha = vha->hw;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002363 struct srb_iocb *lio = &sp->u.iocb_cmd;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002364 unsigned long flags = 0;
2365
2366 ql_dbg(ql_dbg_io, vha, 0x3069,
2367 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2368 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2369 fcport->d_id.b.al_pa);
2370
2371 /* Abort the exchange */
2372 spin_lock_irqsave(&ha->hardware_lock, flags);
2373 if (ha->isp_ops->abort_command(sp)) {
2374 ql_dbg(ql_dbg_io, vha, 0x3070,
2375 "mbx abort_command failed.\n");
2376 } else {
2377 ql_dbg(ql_dbg_io, vha, 0x3071,
2378 "mbx abort_command success.\n");
2379 }
2380 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2381
2382 complete(&lio->u.els_logo.comp);
2383}
2384
2385static void
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002386qla2x00_els_dcmd_sp_done(void *ptr, int res)
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002387{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002388 srb_t *sp = ptr;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002389 fc_port_t *fcport = sp->fcport;
2390 struct srb_iocb *lio = &sp->u.iocb_cmd;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002391 struct scsi_qla_host *vha = sp->vha;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002392
2393 ql_dbg(ql_dbg_io, vha, 0x3072,
2394 "%s hdl=%x, portid=%02x%02x%02x done\n",
2395 sp->name, sp->handle, fcport->d_id.b.domain,
2396 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2397
2398 complete(&lio->u.els_logo.comp);
2399}
2400
2401int
2402qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2403 port_id_t remote_did)
2404{
2405 srb_t *sp;
2406 fc_port_t *fcport = NULL;
2407 struct srb_iocb *elsio = NULL;
2408 struct qla_hw_data *ha = vha->hw;
2409 struct els_logo_payload logo_pyld;
2410 int rval = QLA_SUCCESS;
2411
2412 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2413 if (!fcport) {
2414 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2415 return -ENOMEM;
2416 }
2417
2418 /* Alloc SRB structure */
2419 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2420 if (!sp) {
2421 kfree(fcport);
2422 ql_log(ql_log_info, vha, 0x70e6,
2423 "SRB allocation failed\n");
2424 return -ENOMEM;
2425 }
2426
2427 elsio = &sp->u.iocb_cmd;
2428 fcport->loop_id = 0xFFFF;
2429 fcport->d_id.b.domain = remote_did.b.domain;
2430 fcport->d_id.b.area = remote_did.b.area;
2431 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2432
2433 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2434 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2435
2436 sp->type = SRB_ELS_DCMD;
2437 sp->name = "ELS_DCMD";
2438 sp->fcport = fcport;
2439 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2440 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2441 sp->done = qla2x00_els_dcmd_sp_done;
2442 sp->free = qla2x00_els_dcmd_sp_free;
2443
2444 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2445 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2446 GFP_KERNEL);
2447
2448 if (!elsio->u.els_logo.els_logo_pyld) {
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002449 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002450 return QLA_FUNCTION_FAILED;
2451 }
2452
2453 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2454
2455 elsio->u.els_logo.els_cmd = els_opcode;
2456 logo_pyld.opcode = els_opcode;
2457 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2458 logo_pyld.s_id[1] = vha->d_id.b.area;
2459 logo_pyld.s_id[2] = vha->d_id.b.domain;
2460 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2461 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2462
2463 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2464 sizeof(struct els_logo_payload));
2465
2466 rval = qla2x00_start_sp(sp);
2467 if (rval != QLA_SUCCESS) {
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002468 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002469 return QLA_FUNCTION_FAILED;
2470 }
2471
2472 ql_dbg(ql_dbg_io, vha, 0x3074,
2473 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2474 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2475 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2476
2477 wait_for_completion(&elsio->u.els_logo.comp);
2478
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002479 sp->free(sp);
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002480 return rval;
2481}
2482
2483static void
2484qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2485{
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002486 scsi_qla_host_t *vha = sp->vha;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002487 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2488
2489 els_iocb->entry_type = ELS_IOCB_TYPE;
2490 els_iocb->entry_count = 1;
2491 els_iocb->sys_define = 0;
2492 els_iocb->entry_status = 0;
2493 els_iocb->handle = sp->handle;
2494 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2495 els_iocb->tx_dsd_count = 1;
2496 els_iocb->vp_index = vha->vp_idx;
2497 els_iocb->sof_type = EST_SOFI3;
2498 els_iocb->rx_dsd_count = 0;
2499 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2500
2501 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2502 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2503 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2504 els_iocb->control_flags = 0;
2505
2506 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2507 els_iocb->tx_address[0] =
2508 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2509 els_iocb->tx_address[1] =
2510 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2511 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2512
2513 els_iocb->rx_byte_count = 0;
2514 els_iocb->rx_address[0] = 0;
2515 els_iocb->rx_address[1] = 0;
2516 els_iocb->rx_len = 0;
2517
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002518 sp->vha->qla_stats.control_requests++;
Himanshu Madhani6eb54712015-12-17 14:57:00 -05002519}
2520
2521static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002522qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2523{
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002524 struct bsg_job *bsg_job = sp->u.bsg_job;
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002525 struct fc_bsg_request *bsg_request = bsg_job->request;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002526
2527 els_iocb->entry_type = ELS_IOCB_TYPE;
2528 els_iocb->entry_count = 1;
2529 els_iocb->sys_define = 0;
2530 els_iocb->entry_status = 0;
2531 els_iocb->handle = sp->handle;
2532 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Bart Van Asschead950362015-07-09 07:24:08 -07002533 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002534 els_iocb->vp_index = sp->vha->vp_idx;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002535 els_iocb->sof_type = EST_SOFI3;
Bart Van Asschead950362015-07-09 07:24:08 -07002536 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002537
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002538 els_iocb->opcode =
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002539 sp->type == SRB_ELS_CMD_RPT ?
Johannes Thumshirn01e0e152016-11-17 10:31:12 +01002540 bsg_request->rqst_data.r_els.els_code :
2541 bsg_request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002542 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2543 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2544 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2545 els_iocb->control_flags = 0;
2546 els_iocb->rx_byte_count =
2547 cpu_to_le32(bsg_job->reply_payload.payload_len);
2548 els_iocb->tx_byte_count =
2549 cpu_to_le32(bsg_job->request_payload.payload_len);
2550
2551 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2552 (bsg_job->request_payload.sg_list)));
2553 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2554 (bsg_job->request_payload.sg_list)));
2555 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2556 (bsg_job->request_payload.sg_list));
2557
2558 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2559 (bsg_job->reply_payload.sg_list)));
2560 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2561 (bsg_job->reply_payload.sg_list)));
2562 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2563 (bsg_job->reply_payload.sg_list));
Joe Carnucciofabbb8d2013-08-27 01:37:40 -04002564
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002565 sp->vha->qla_stats.control_requests++;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002566}
2567
2568static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002569qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2570{
2571 uint16_t avail_dsds;
2572 uint32_t *cur_dsd;
2573 struct scatterlist *sg;
2574 int index;
2575 uint16_t tot_dsds;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002576 scsi_qla_host_t *vha = sp->vha;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002577 struct qla_hw_data *ha = vha->hw;
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002578 struct bsg_job *bsg_job = sp->u.bsg_job;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002579	int loop_iteration = 0;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002580 int entry_count = 1;
2581
2582 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2583 ct_iocb->entry_type = CT_IOCB_TYPE;
2584 ct_iocb->entry_status = 0;
2585 ct_iocb->handle1 = sp->handle;
2586 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
Bart Van Asschead950362015-07-09 07:24:08 -07002587 ct_iocb->status = cpu_to_le16(0);
2588 ct_iocb->control_flags = cpu_to_le16(0);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002589 ct_iocb->timeout = 0;
2590 ct_iocb->cmd_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002591 cpu_to_le16(bsg_job->request_payload.sg_cnt);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002592 ct_iocb->total_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002593 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002594 ct_iocb->req_bytecount =
2595 cpu_to_le32(bsg_job->request_payload.payload_len);
2596 ct_iocb->rsp_bytecount =
2597 cpu_to_le32(bsg_job->reply_payload.payload_len);
2598
2599 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2600 (bsg_job->request_payload.sg_list)));
2601 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2602 (bsg_job->request_payload.sg_list)));
2603 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2604
2605 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2606 (bsg_job->reply_payload.sg_list)));
2607 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2608 (bsg_job->reply_payload.sg_list)));
2609 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2610
2611 avail_dsds = 1;
2612 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2613 index = 0;
2614 tot_dsds = bsg_job->reply_payload.sg_cnt;
2615
2616 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2617 dma_addr_t sle_dma;
2618 cont_a64_entry_t *cont_pkt;
2619
2620 /* Allocate additional continuation packets? */
2621 if (avail_dsds == 0) {
2622 /*
2623 * Five DSDs are available in the Cont.
2624 * Type 1 IOCB.
2625 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002626 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2627 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002628 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2629 avail_dsds = 5;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002630 entry_count++;
2631 }
2632
2633 sle_dma = sg_dma_address(sg);
2634 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2635 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2636 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 2637		loop_iteration++;
2638 avail_dsds--;
2639 }
2640 ct_iocb->entry_count = entry_count;
Joe Carnucciofabbb8d2013-08-27 01:37:40 -04002641
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002642 sp->vha->qla_stats.control_requests++;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002643}
2644
2645static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002646qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2647{
2648 uint16_t avail_dsds;
2649 uint32_t *cur_dsd;
2650 struct scatterlist *sg;
2651 int index;
2652 uint16_t tot_dsds;
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002653 scsi_qla_host_t *vha = sp->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002654 struct qla_hw_data *ha = vha->hw;
Johannes Thumshirn75cc8cf2016-11-17 10:31:19 +01002655 struct bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002656	int loop_iteration = 0;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002657 int entry_count = 1;
2658
2659 ct_iocb->entry_type = CT_IOCB_TYPE;
2660 ct_iocb->entry_status = 0;
2661 ct_iocb->sys_define = 0;
2662 ct_iocb->handle = sp->handle;
2663
2664 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
Joe Carnuccio25ff6af2017-01-19 22:28:04 -08002665 ct_iocb->vp_index = sp->vha->vp_idx;
Bart Van Asschead950362015-07-09 07:24:08 -07002666 ct_iocb->comp_status = cpu_to_le16(0);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002667
2668 ct_iocb->cmd_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002669 cpu_to_le16(bsg_job->request_payload.sg_cnt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002670 ct_iocb->timeout = 0;
2671 ct_iocb->rsp_dsd_count =
Bart Van Asschead950362015-07-09 07:24:08 -07002672 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002673 ct_iocb->rsp_byte_count =
2674 cpu_to_le32(bsg_job->reply_payload.payload_len);
2675 ct_iocb->cmd_byte_count =
2676 cpu_to_le32(bsg_job->request_payload.payload_len);
2677 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2678 (bsg_job->request_payload.sg_list)));
2679 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2680 (bsg_job->request_payload.sg_list)));
2681 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2682 (bsg_job->request_payload.sg_list));
2683
2684 avail_dsds = 1;
2685 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2686 index = 0;
2687 tot_dsds = bsg_job->reply_payload.sg_cnt;
2688
2689 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2690 dma_addr_t sle_dma;
2691 cont_a64_entry_t *cont_pkt;
2692
2693 /* Allocate additional continuation packets? */
2694 if (avail_dsds == 0) {
2695 /*
2696 * Five DSDs are available in the Cont.
2697 * Type 1 IOCB.
2698 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002699 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2700 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002701 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2702 avail_dsds = 5;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002703 entry_count++;
2704 }
2705
2706 sle_dma = sg_dma_address(sg);
2707 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2708 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2709 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2710 loop_iterartion++;
2711 avail_dsds--;
2712 }
2713 ct_iocb->entry_count = entry_count;
2714}
2715
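/*
 * Editor's note -- a hedged sketch of the entry-count arithmetic both CT
 * IOCB builders above rely on: the base IOCB carries the first response
 * DSD and each Continuation Type 1 IOCB carries up to five more.  The
 * helper name is hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
static uint8_t example_ct_entry_count(uint16_t rsp_sg_cnt)
{
	uint8_t entries = 1;		/* base CT IOCB holds one DSD */

	if (rsp_sg_cnt > 1)		/* five DSDs per continuation */
		entries += (rsp_sg_cnt - 1 + 4) / 5;
	return entries;
}
#endif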
/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* A SCSI command longer than 16 bytes must
				 * be a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
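		/*
		 * Editor's note on the arithmetic above: the FCP_CMND IU is
		 * an 8-byte LUN plus 4 bytes of CRN/task-management fields
		 * (12 bytes), then the CDB, then the 4-byte FCP_DL that is
		 * stored through *fcp_dl below.
		 */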

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;

	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

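/*
 * Editor's note -- a standalone sketch (not driver code) of the request-
 * ring free-slot computation that qla82xx_start_scsi() performs twice
 * above: "cnt" is the firmware's consumer index and "ring_index" the
 * driver's producer index on a ring of "length" entries.  The caller
 * additionally keeps req_cnt + 2 slots in reserve before queuing.
 */
#if 0	/* illustrative sketch only, not compiled */
static uint16_t example_ring_space(uint16_t ring_index, uint16_t cnt,
    uint16_t length)
{
	if (ring_index < cnt)
		return cnt - ring_index;
	return length - (ring_index - cnt);
}
#endif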
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}

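/*
 * Editor's note -- sketch of the 32-bit completion handle composed by
 * MAKE_HANDLE() above, assuming the usual encoding of the request-queue
 * id in the upper 16 bits and the outstanding-command index in the
 * lower 16 bits.
 */
#if 0	/* illustrative sketch only, not compiled */
static uint32_t example_make_handle(uint16_t que_id, uint16_t index)
{
	return ((uint32_t)que_id << 16) | index;
}
#endif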
static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}

static void qla2x00_send_notify_ack_iocb(srb_t *sp,
	struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

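/*
 * Editor's note -- a hedged usage sketch (not driver code): a caller
 * prepares an srb with its type and per-type payload, then hands it to
 * qla2x00_start_sp(), which allocates ring space under hardware_lock and
 * dispatches to the matching IOCB builder above.  Completion and timeout
 * callbacks are omitted here for brevity.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_issue_logout(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);

	if (!sp)
		return QLA_FUNCTION_FAILED;
	sp->type = SRB_LOGOUT_CMD;
	sp->name = "example_logout";
	return qla2x00_start_sp(sp);
}
#endif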
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate a bidirectional command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
	 * assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; the
	 * remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* The read-request DSDs always go to a continuation IOCB,
	 * following the write DSDs.  If there is room on the current
	 * IOCB they are added to it; otherwise a new continuation
	 * IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should equal the number of IOCBs required for this cmd */
	cmd_pkt->entry_count = entry_count;
}

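/*
 * Editor's note -- hedged sketch of the entry_count the bidirectional
 * builder above produces: one DSD lives in the base IOCB, and the write
 * and read scatter lists then share Continuation Type 1 IOCBs, five DSDs
 * apiece.  The helper name is hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_bidir_entry_count(int wr_sg_cnt, int rd_sg_cnt)
{
	int total = wr_sg_cnt + rd_sg_cnt;
	int entries = 1;			/* base IOCB holds one DSD */

	if (total > 1)				/* five DSDs per cont. IOCB */
		entries += (total - 1 + 4) / 5;
	return entries;
}
#endif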
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{

	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
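/*
 * Editor's note -- a standalone sketch (not driver code) of the circular
 * search for a free outstanding-command slot used by both start routines
 * above.  Slot 0 is never used, so a full scan visits num_cmds - 1
 * entries starting just past the last handle issued; 0 means no slot.
 */
#if 0	/* illustrative sketch only, not compiled */
static uint32_t example_find_free_handle(void **outstanding,
    uint32_t num_cmds, uint32_t last_handle)
{
	uint32_t index, handle = last_handle;

	for (index = 1; index < num_cmds; index++) {
		handle++;
		if (handle == num_cmds)
			handle = 1;	/* wrap around, skipping slot 0 */
		if (!outstanding[handle])
			return handle;
	}
	return 0;			/* every slot is busy */
}
#endif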