/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

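/* Send the command bytes assembled in esp->command_block to the chip,
 * either by stuffing them into the FIFO by PIO (ESP_FLAG_USE_FIFO) or
 * by handing the block's DMA address to the host DMA implementation.
 */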
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

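/* Flush the chip FIFO; on ESP236 poll until the FIFO flags actually
 * read back as empty.
 */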
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

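/* Drain the FASHME FIFO into esp->fifo[].  The HME FIFO is two bytes
 * wide, so entries are pulled in pairs, with a possible odd trailing
 * byte signalled by ESP_STAT2_F1BYTE in sreg2.
 */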
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

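/* Map the command's scatterlist for DMA and prime the per-command
 * residue bookkeeping that tracks progress through the transfer.
 */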
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

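/* Clamp a DMA transfer so it fits the chip's transfer counter and,
 * on the older variants, does not cross a 24-bit address boundary.
 */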
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care". */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

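/* Claim the per-LUN issue slot for this command.  Tagged commands take
 * an entry in tagged_cmds[], untagged commands take the single
 * non_tagged_cmd slot, and lp->hold plugs the queue while the two
 * modes would otherwise mix.  Returns -EBUSY if the command must wait.
 */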
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command. Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

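/* Scan the queued_cmds list and return the first command that can be
 * issued right now, filling in its tag bytes and reserving its LUN slot.
 */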
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

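/* Command entries are recycled through esp_cmd_pool; only allocate a
 * fresh one when the pool is empty.
 */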
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

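/* Final completion path for a command: unmap DMA, release the LUN tag,
 * report the result to the midlayer and try to start the next queued
 * command.
 */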
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

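/* A selection attempt has raised an interrupt.  Sort out whether we
 * lost out to a reselection, saw the target disconnect, or selected it
 * successfully, and unwind or advance the command state accordingly.
 */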
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting. */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

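/* Work out how many bytes actually moved during the last data phase
 * from the transfer counter and FIFO state.  Returns a negative value
 * if the ESP100 synchronous-transfer bug was detected.
 */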
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'.  The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'.  When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

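/* The target rejected our last message.  Back off wide negotiation
 * first, then synchronous negotiation; anything else is unexpected and
 * forces a reset.
 */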
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

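/* Handle an incoming SDTR: validate the offered period/offset against
 * the chip limits and program the resulting agreement, counter-propose,
 * or reject it.
 */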
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

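/* Handle an incoming WDTR (only FASHME does wide): switch the target
 * between 8-bit and 16-bit transfers, then continue with sync
 * negotiation if it is still pending.
 */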
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

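/* Main phase/event state machine for the chip, driven from interrupt
 * context via esp->event.
 */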
static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo. */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;
1956
David S. Millercd9ad582007-04-26 21:19:23 -07001957 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1958 goto again;
1959 case ESP_EVENT_MSGIN:
1960 if (esp->ireg & ESP_INTR_BSERV) {
1961 if (esp->rev == FASHME) {
1962 if (!(esp_read8(ESP_STATUS2) &
1963 ESP_STAT2_FEMPTY))
1964 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1965 } else {
1966 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1967 if (esp->rev == ESP100)
1968 scsi_esp_cmd(esp, ESP_CMD_NULL);
1969 }
1970 scsi_esp_cmd(esp, ESP_CMD_TI);
1971 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1972 return 1;
1973 }
1974 if (esp->ireg & ESP_INTR_FDONE) {
1975 u8 val;
1976
1977 if (esp->rev == FASHME)
1978 val = esp->fifo[0];
1979 else
1980 val = esp_read8(ESP_FDATA);
1981 esp->msg_in[esp->msg_in_len++] = val;
1982
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001983 esp_log_msgin("Got msgin byte %x\n", val);
David S. Millercd9ad582007-04-26 21:19:23 -07001984
1985 if (!esp_msgin_process(esp))
1986 esp->msg_in_len = 0;
1987
1988 if (esp->rev == FASHME)
1989 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1990
1991 scsi_esp_cmd(esp, ESP_CMD_MOK);
1992
Finn Thainc69edff52017-08-04 01:43:20 -04001993 /* Check whether a bus reset is to be done next */
1994 if (esp->event == ESP_EVENT_RESET)
1995 return 0;
1996
David S. Millercd9ad582007-04-26 21:19:23 -07001997 if (esp->event != ESP_EVENT_FREE_BUS)
1998 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1999 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002000 shost_printk(KERN_INFO, esp->host,
2001			     "MSGIN neither BSERV nor FDONE, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07002002 esp_schedule_reset(esp);
2003 return 0;
2004 }
2005 break;
2006 case ESP_EVENT_CMD_START:
2007 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2008 esp->cmd_bytes_left);
Hannes Reinecke31708662014-11-24 15:37:24 +01002009 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
David S. Millercd9ad582007-04-26 21:19:23 -07002010 esp_event(esp, ESP_EVENT_CMD_DONE);
2011 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2012 break;
2013 case ESP_EVENT_CMD_DONE:
2014 esp->ops->dma_invalidate(esp);
2015 if (esp->ireg & ESP_INTR_BSERV) {
2016 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2017 goto again;
2018 }
2019 esp_schedule_reset(esp);
2020 return 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002021
2022 case ESP_EVENT_RESET:
2023 scsi_esp_cmd(esp, ESP_CMD_RS);
2024 break;
2025
2026 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002027 shost_printk(KERN_INFO, esp->host,
2028 "Unexpected event %x, resetting\n", esp->event);
David S. Millercd9ad582007-04-26 21:19:23 -07002029 esp_schedule_reset(esp);
2030 return 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002031 }
2032 return 1;
2033}
2034
2035static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2036{
2037 struct scsi_cmnd *cmd = ent->cmd;
2038
2039 esp_unmap_dma(esp, cmd);
2040 esp_free_lun_tag(ent, cmd->device->hostdata);
2041 cmd->result = DID_RESET << 16;
2042
2043 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
2044 esp->ops->unmap_single(esp, ent->sense_dma,
2045 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2046 ent->sense_ptr = NULL;
2047 }
2048
2049 cmd->scsi_done(cmd);
2050 list_del(&ent->list);
2051 esp_put_ent(esp, ent);
2052}
2053
2054static void esp_clear_hold(struct scsi_device *dev, void *data)
2055{
2056 struct esp_lun_data *lp = dev->hostdata;
2057
2058 BUG_ON(lp->num_tagged);
2059 lp->hold = 0;
2060}
2061
2062static void esp_reset_cleanup(struct esp *esp)
2063{
2064 struct esp_cmd_entry *ent, *tmp;
2065 int i;
2066
2067 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2068 struct scsi_cmnd *cmd = ent->cmd;
2069
2070 list_del(&ent->list);
2071 cmd->result = DID_RESET << 16;
2072 cmd->scsi_done(cmd);
2073 esp_put_ent(esp, ent);
2074 }
2075
2076 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2077 if (ent == esp->active_cmd)
2078 esp->active_cmd = NULL;
2079 esp_reset_cleanup_one(esp, ent);
2080 }
2081
2082 BUG_ON(esp->active_cmd != NULL);
2083
2084 /* Force renegotiation of sync/wide transfers. */
2085 for (i = 0; i < ESP_MAX_TARGET; i++) {
2086 struct esp_target_data *tp = &esp->target[i];
2087
2088 tp->esp_period = 0;
2089 tp->esp_offset = 0;
2090 tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2091 ESP_CONFIG3_FSCSI |
2092 ESP_CONFIG3_FAST);
2093 tp->flags &= ~ESP_TGT_WIDE;
2094 tp->flags |= ESP_TGT_CHECK_NEGO;
2095
2096 if (tp->starget)
Maciej W. Rozycki522939d2007-12-10 15:49:31 -08002097 __starget_for_each_device(tp->starget, NULL,
2098 esp_clear_hold);
David S. Millercd9ad582007-04-26 21:19:23 -07002099 }
Thomas Bogendoerfer204abf22007-06-13 12:58:53 -07002100 esp->flags &= ~ESP_FLAG_RESETTING;
David S. Millercd9ad582007-04-26 21:19:23 -07002101}
2102
2103/* Runs under host->lock */
2104static void __esp_interrupt(struct esp *esp)
2105{
2106 int finish_reset, intr_done;
2107 u8 phase;
2108
Hannes Reinecke9535fff2014-11-24 15:37:23 +01002109 /*
2110	 * Once INTRPT is read, STATUS and SSTEP are cleared.
2111 */
David S. Millercd9ad582007-04-26 21:19:23 -07002112 esp->sreg = esp_read8(ESP_STATUS);
Hannes Reinecke9535fff2014-11-24 15:37:23 +01002113 esp->seqreg = esp_read8(ESP_SSTEP);
2114 esp->ireg = esp_read8(ESP_INTRPT);
David S. Millercd9ad582007-04-26 21:19:23 -07002115
2116 if (esp->flags & ESP_FLAG_RESETTING) {
2117 finish_reset = 1;
2118 } else {
2119 if (esp_check_gross_error(esp))
2120 return;
2121
2122 finish_reset = esp_check_spur_intr(esp);
2123 if (finish_reset < 0)
2124 return;
2125 }
2126
David S. Millercd9ad582007-04-26 21:19:23 -07002127 if (esp->ireg & ESP_INTR_SR)
2128 finish_reset = 1;
2129
2130 if (finish_reset) {
2131 esp_reset_cleanup(esp);
2132 if (esp->eh_reset) {
2133 complete(esp->eh_reset);
2134 esp->eh_reset = NULL;
2135 }
2136 return;
2137 }
2138
2139 phase = (esp->sreg & ESP_STAT_PMASK);
2140 if (esp->rev == FASHME) {
2141 if (((phase != ESP_DIP && phase != ESP_DOP) &&
2142 esp->select_state == ESP_SELECT_NONE &&
2143 esp->event != ESP_EVENT_STATUS &&
2144 esp->event != ESP_EVENT_DATA_DONE) ||
2145 (esp->ireg & ESP_INTR_RSEL)) {
2146 esp->sreg2 = esp_read8(ESP_STATUS2);
2147 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2148 (esp->sreg2 & ESP_STAT2_F1BYTE))
2149 hme_read_fifo(esp);
2150 }
2151 }
2152
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002153 esp_log_intr("intr sreg[%02x] seqreg[%02x] "
David S. Millercd9ad582007-04-26 21:19:23 -07002154 "sreg2[%02x] ireg[%02x]\n",
2155 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2156
2157 intr_done = 0;
2158
2159 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002160 shost_printk(KERN_INFO, esp->host,
2161 "unexpected IREG %02x\n", esp->ireg);
David S. Millercd9ad582007-04-26 21:19:23 -07002162 if (esp->ireg & ESP_INTR_IC)
2163 esp_dump_cmd_log(esp);
2164
2165 esp_schedule_reset(esp);
2166 } else {
Finn Thain201c37d2017-08-04 01:43:19 -04002167 if (esp->ireg & ESP_INTR_RSEL) {
David S. Millercd9ad582007-04-26 21:19:23 -07002168 if (esp->active_cmd)
2169 (void) esp_finish_select(esp);
2170 intr_done = esp_reconnect(esp);
Finn Thain201c37d2017-08-04 01:43:19 -04002171 } else {
2172 /* Some combination of FDONE, BSERV, DC. */
2173 if (esp->select_state != ESP_SELECT_NONE)
2174 intr_done = esp_finish_select(esp);
David S. Millercd9ad582007-04-26 21:19:23 -07002175 }
2176 }
2177 while (!intr_done)
2178 intr_done = esp_process_event(esp);
2179}
2180
2181irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2182{
2183 struct esp *esp = dev_id;
2184 unsigned long flags;
2185 irqreturn_t ret;
2186
2187 spin_lock_irqsave(esp->host->host_lock, flags);
2188 ret = IRQ_NONE;
2189 if (esp->ops->irq_pending(esp)) {
2190 ret = IRQ_HANDLED;
2191 for (;;) {
2192 int i;
2193
2194 __esp_interrupt(esp);
2195 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2196 break;
2197 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2198
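			/* The event loop predicted another interrupt
			 * (QUICKIRQ); poll briefly for it here to avoid
			 * a full interrupt round trip.
			 */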
2199 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2200 if (esp->ops->irq_pending(esp))
2201 break;
2202 }
2203 if (i == ESP_QUICKIRQ_LIMIT)
2204 break;
2205 }
2206 }
2207 spin_unlock_irqrestore(esp->host->host_lock, flags);
2208
2209 return ret;
2210}
2211EXPORT_SYMBOL(scsi_esp_intr);
2212
Adrian Bunk76246802007-10-11 17:35:20 +02002213static void esp_get_revision(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002214{
2215 u8 val;
2216
2217 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002218 if (esp->config2 == 0) {
2219 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
David S. Millercd9ad582007-04-26 21:19:23 -07002220 esp_write8(esp->config2, ESP_CFG2);
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002221
2222 val = esp_read8(ESP_CFG2);
2223 val &= ~ESP_CONFIG2_MAGIC;
2224
2225 esp->config2 = 0;
2226 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2227 /*
2228 * If what we write to cfg2 does not come back,
2229 * cfg2 is not implemented.
2230 * Therefore this must be a plain esp100.
2231 */
2232 esp->rev = ESP100;
2233 return;
2234 }
2235 }
2236
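	/* cfg2 responds, so probe cfg3 the same way: write a test
	 * value and check whether it reads back.
	 */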
2237 esp_set_all_config3(esp, 5);
2238 esp->prev_cfg3 = 5;
2239 esp_write8(esp->config2, ESP_CFG2);
2240 esp_write8(0, ESP_CFG3);
2241 esp_write8(esp->prev_cfg3, ESP_CFG3);
2242
2243 val = esp_read8(ESP_CFG3);
2244 if (val != 5) {
2245 /* The cfg2 register is implemented, however
2246		 * cfg3 is not, so this must be an esp100a.
2247 */
2248 esp->rev = ESP100A;
2249 } else {
2250 esp_set_all_config3(esp, 0);
2251 esp->prev_cfg3 = 0;
David S. Millercd9ad582007-04-26 21:19:23 -07002252 esp_write8(esp->prev_cfg3, ESP_CFG3);
2253
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002254		/* All of cfg{1,2,3} are implemented, so this must be one of
2255		 * the FAS variants; figure out which one.
2256 */
2257 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2258 esp->rev = FAST;
2259 esp->sync_defp = SYNC_DEFP_FAST;
David S. Millercd9ad582007-04-26 21:19:23 -07002260 } else {
Paolo Bonzini8a9aeb42014-11-24 15:37:28 +01002261 esp->rev = ESP236;
David S. Millercd9ad582007-04-26 21:19:23 -07002262 }
2263 }
2264}
2265
Adrian Bunk76246802007-10-11 17:35:20 +02002266static void esp_init_swstate(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002267{
2268 int i;
2269
2270 INIT_LIST_HEAD(&esp->queued_cmds);
2271 INIT_LIST_HEAD(&esp->active_cmds);
2272 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2273
2274	/* Start with a clear state; domain validation (via ->slave_configure,
2275 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2276 * commands.
2277 */
2278 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2279 esp->target[i].flags = 0;
2280 esp->target[i].nego_goal_period = 0;
2281 esp->target[i].nego_goal_offset = 0;
2282 esp->target[i].nego_goal_width = 0;
2283 esp->target[i].nego_goal_tags = 0;
2284 }
2285}
2286
2287/* This places the ESP into a known state at boot time. */
Martin Habetsd679f802007-05-07 14:05:03 -07002288static void esp_bootup_reset(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002289{
2290 u8 val;
2291
2292 /* Reset the DMA */
2293 esp->ops->reset_dma(esp);
2294
2295 /* Reset the ESP */
2296 esp_reset_esp(esp);
2297
2298 /* Reset the SCSI bus, but tell ESP not to generate an irq */
2299 val = esp_read8(ESP_CFG1);
2300 val |= ESP_CONFIG1_SRRDISAB;
2301 esp_write8(val, ESP_CFG1);
2302
2303 scsi_esp_cmd(esp, ESP_CMD_RS);
2304 udelay(400);
2305
2306 esp_write8(esp->config1, ESP_CFG1);
2307
2308 /* Eat any bitrot in the chip and we are done... */
2309 esp_read8(ESP_INTRPT);
2310}
2311
Adrian Bunk76246802007-10-11 17:35:20 +02002312static void esp_set_clock_params(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002313{
Finn Thain6fe07aa2008-04-25 10:06:05 -05002314 int fhz;
David S. Millercd9ad582007-04-26 21:19:23 -07002315 u8 ccf;
2316
2317 /* This is getting messy but it has to be done correctly or else
2318 * you get weird behavior all over the place. We are trying to
2319 * basically figure out three pieces of information.
2320 *
2321 * a) Clock Conversion Factor
2322 *
2323 * This is a representation of the input crystal clock frequency
2324 * going into the ESP on this machine. Any operation whose timing
2325 * is longer than 400ns depends on this value being correct. For
2326 * example, you'll get blips for arbitration/selection during high
2327 * load or with multiple targets if this is not set correctly.
2328 *
2329 * b) Selection Time-Out
2330 *
2331 * The ESP isn't very bright and will arbitrate for the bus and try
2332 * to select a target forever if you let it. This value tells the
2333 * ESP when it has taken too long to negotiate and that it should
2334 * interrupt the CPU so we can see what happened. The value is
2335 * computed as follows (from NCR/Symbios chip docs).
2336 *
2337 * (Time Out Period) * (Input Clock)
2338 * STO = ----------------------------------
2339 * (8192) * (Clock Conversion Factor)
2340 *
2341 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2342 *
2343	 * c) Empirical constants for synchronous offset and transfer period
2344 * register values
2345 *
2346 * This entails the smallest and largest sync period we could ever
2347 * handle on this ESP.
2348 */
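	/* Worked example (illustrative only, assuming a 40 MHz crystal):
	 *
	 *   ccf   = ((40000000 / 1000000) + 4) / 5 = 8
	 *   cfact = 0            (a ccf of 8 is encoded as 0 below)
	 *   STO   = (0.25 * 40000000) / (8192 * 8) ~= 153
	 *
	 * i.e. a 250ms selection time-out corresponds to a register
	 * value of roughly 153 on such a board.
	 */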
Finn Thain6fe07aa2008-04-25 10:06:05 -05002349 fhz = esp->cfreq;
David S. Millercd9ad582007-04-26 21:19:23 -07002350
Finn Thain6fe07aa2008-04-25 10:06:05 -05002351 ccf = ((fhz / 1000000) + 4) / 5;
David S. Millercd9ad582007-04-26 21:19:23 -07002352 if (ccf == 1)
2353 ccf = 2;
2354
2355	/* If we can't find anything reasonable, just assume 20MHz.
2356 * This is the clock frequency of the older sun4c's where I've
2357 * been unable to find the clock-frequency PROM property. All
2358 * other machines provide useful values it seems.
2359 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002360 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2361 fhz = 20000000;
David S. Millercd9ad582007-04-26 21:19:23 -07002362 ccf = 4;
2363 }
2364
2365 esp->cfact = (ccf == 8 ? 0 : ccf);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002366 esp->cfreq = fhz;
2367 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
David S. Millercd9ad582007-04-26 21:19:23 -07002368 esp->ctick = ESP_TICK(ccf, esp->ccycle);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002369 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
David S. Millercd9ad582007-04-26 21:19:23 -07002370 esp->sync_defp = SYNC_DEFP_SLOW;
2371}
2372
2373static const char *esp_chip_names[] = {
2374 "ESP100",
2375 "ESP100A",
2376 "ESP236",
2377 "FAS236",
2378 "FAS100A",
2379 "FAST",
2380 "FASHME",
Hannes Reineckeeeea2f92014-11-24 15:37:27 +01002381 "AM53C974",
David S. Millercd9ad582007-04-26 21:19:23 -07002382};
2383
2384static struct scsi_transport_template *esp_transport_template;
2385
Adrian Bunk76246802007-10-11 17:35:20 +02002386int scsi_esp_register(struct esp *esp, struct device *dev)
David S. Millercd9ad582007-04-26 21:19:23 -07002387{
2388 static int instance;
2389 int err;
2390
Hannes Reinecke3707a182014-11-24 15:37:20 +01002391 if (!esp->num_tags)
2392 esp->num_tags = ESP_DEFAULT_TAGS;
David S. Millercd9ad582007-04-26 21:19:23 -07002393 esp->host->transportt = esp_transport_template;
2394 esp->host->max_lun = ESP_MAX_LUN;
2395 esp->host->cmd_per_lun = 2;
David Millerff4abd62007-08-24 22:25:58 -07002396 esp->host->unique_id = instance;
David S. Millercd9ad582007-04-26 21:19:23 -07002397
2398 esp_set_clock_params(esp);
2399
2400 esp_get_revision(esp);
2401
2402 esp_init_swstate(esp);
2403
2404 esp_bootup_reset(esp);
2405
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002406 dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2407 esp->host->unique_id, esp->regs, esp->dma_regs,
2408 esp->host->irq);
2409 dev_printk(KERN_INFO, dev,
2410 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2411 esp->host->unique_id, esp_chip_names[esp->rev],
2412 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
David S. Millercd9ad582007-04-26 21:19:23 -07002413
2414 /* Let the SCSI bus reset settle. */
2415 ssleep(esp_bus_reset_settle);
2416
2417 err = scsi_add_host(esp->host, dev);
2418 if (err)
2419 return err;
2420
David Millerff4abd62007-08-24 22:25:58 -07002421 instance++;
David S. Millercd9ad582007-04-26 21:19:23 -07002422
2423 scsi_scan_host(esp->host);
2424
2425 return 0;
2426}
2427EXPORT_SYMBOL(scsi_esp_register);
2428
Adrian Bunk76246802007-10-11 17:35:20 +02002429void scsi_esp_unregister(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002430{
2431 scsi_remove_host(esp->host);
2432}
2433EXPORT_SYMBOL(scsi_esp_unregister);
2434
James Bottomleyec5e69f2008-06-23 14:52:09 -05002435static int esp_target_alloc(struct scsi_target *starget)
2436{
2437 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2438 struct esp_target_data *tp = &esp->target[starget->id];
2439
2440 tp->starget = starget;
2441
2442 return 0;
2443}
2444
2445static void esp_target_destroy(struct scsi_target *starget)
2446{
2447 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2448 struct esp_target_data *tp = &esp->target[starget->id];
2449
2450 tp->starget = NULL;
2451}
2452
David S. Millercd9ad582007-04-26 21:19:23 -07002453static int esp_slave_alloc(struct scsi_device *dev)
2454{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002455 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002456 struct esp_target_data *tp = &esp->target[dev->id];
2457 struct esp_lun_data *lp;
2458
2459 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2460 if (!lp)
2461 return -ENOMEM;
2462 dev->hostdata = lp;
2463
David S. Millercd9ad582007-04-26 21:19:23 -07002464 spi_min_period(tp->starget) = esp->min_period;
2465 spi_max_offset(tp->starget) = 15;
2466
2467 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2468 spi_max_width(tp->starget) = 1;
2469 else
2470 spi_max_width(tp->starget) = 0;
2471
2472 return 0;
2473}
2474
2475static int esp_slave_configure(struct scsi_device *dev)
2476{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002477 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002478 struct esp_target_data *tp = &esp->target[dev->id];
David S. Millercd9ad582007-04-26 21:19:23 -07002479
Hannes Reinecke3707a182014-11-24 15:37:20 +01002480 if (dev->tagged_supported)
2481 scsi_change_queue_depth(dev, esp->num_tags);
David S. Millercd9ad582007-04-26 21:19:23 -07002482
David S. Millercd9ad582007-04-26 21:19:23 -07002483 tp->flags |= ESP_TGT_DISCONNECT;
2484
2485 if (!spi_initial_dv(dev->sdev_target))
2486 spi_dv_device(dev);
2487
2488 return 0;
2489}
2490
2491static void esp_slave_destroy(struct scsi_device *dev)
2492{
2493 struct esp_lun_data *lp = dev->hostdata;
2494
2495 kfree(lp);
2496 dev->hostdata = NULL;
2497}
2498
2499static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2500{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002501 struct esp *esp = shost_priv(cmd->device->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002502 struct esp_cmd_entry *ent, *tmp;
2503 struct completion eh_done;
2504 unsigned long flags;
2505
2506 /* XXX This helps a lot with debugging but might be a bit
2507 * XXX much for the final driver.
2508 */
2509 spin_lock_irqsave(esp->host->host_lock, flags);
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002510 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2511 cmd, cmd->cmnd[0]);
David S. Millercd9ad582007-04-26 21:19:23 -07002512 ent = esp->active_cmd;
2513 if (ent)
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002514 shost_printk(KERN_ERR, esp->host,
2515 "Current command [%p:%02x]\n",
2516 ent->cmd, ent->cmd->cmnd[0]);
David S. Millercd9ad582007-04-26 21:19:23 -07002517 list_for_each_entry(ent, &esp->queued_cmds, list) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002518 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2519 ent->cmd, ent->cmd->cmnd[0]);
David S. Millercd9ad582007-04-26 21:19:23 -07002520 }
2521 list_for_each_entry(ent, &esp->active_cmds, list) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002522 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2523 ent->cmd, ent->cmd->cmnd[0]);
David S. Millercd9ad582007-04-26 21:19:23 -07002524 }
2525 esp_dump_cmd_log(esp);
2526 spin_unlock_irqrestore(esp->host->host_lock, flags);
2527
2528 spin_lock_irqsave(esp->host->host_lock, flags);
2529
2530 ent = NULL;
2531 list_for_each_entry(tmp, &esp->queued_cmds, list) {
2532 if (tmp->cmd == cmd) {
2533 ent = tmp;
2534 break;
2535 }
2536 }
2537
2538 if (ent) {
2539		/* Easiest case: we didn't even issue the command
2540		 * yet, so it is trivial to abort.
2541 */
2542 list_del(&ent->list);
2543
2544 cmd->result = DID_ABORT << 16;
2545 cmd->scsi_done(cmd);
2546
2547 esp_put_ent(esp, ent);
2548
2549 goto out_success;
2550 }
2551
2552 init_completion(&eh_done);
2553
2554 ent = esp->active_cmd;
2555 if (ent && ent->cmd == cmd) {
2556 /* Command is the currently active command on
2557 * the bus. If we already have an output message
2558 * pending, no dice.
2559 */
2560 if (esp->msg_out_len)
2561 goto out_failure;
2562
2563 /* Send out an abort, encouraging the target to
2564 * go to MSGOUT phase by asserting ATN.
2565 */
2566 esp->msg_out[0] = ABORT_TASK_SET;
2567 esp->msg_out_len = 1;
2568 ent->eh_done = &eh_done;
2569
2570 scsi_esp_cmd(esp, ESP_CMD_SATN);
2571 } else {
2572 /* The command is disconnected. This is not easy to
2573 * abort. For now we fail and let the scsi error
2574 * handling layer go try a scsi bus reset or host
2575 * reset.
2576 *
2577 * What we could do is put together a scsi command
2578 * solely for the purpose of sending an abort message
2579 * to the target. Coming up with all the code to
2580 * cook up scsi commands, special case them everywhere,
2581 * etc. is for questionable gain and it would be better
2582 * if the generic scsi error handling layer could do at
2583 * least some of that for us.
2584 *
2585		 * Anyway, this is an area for potential future improvement
2586 * in this driver.
2587 */
2588 goto out_failure;
2589 }
2590
2591 spin_unlock_irqrestore(esp->host->host_lock, flags);
2592
2593 if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2594 spin_lock_irqsave(esp->host->host_lock, flags);
2595 ent->eh_done = NULL;
2596 spin_unlock_irqrestore(esp->host->host_lock, flags);
2597
2598 return FAILED;
2599 }
2600
2601 return SUCCESS;
2602
2603out_success:
2604 spin_unlock_irqrestore(esp->host->host_lock, flags);
2605 return SUCCESS;
2606
2607out_failure:
2608 /* XXX This might be a good location to set ESP_TGT_BROKEN
2609 * XXX since we know which target/lun in particular is
2610 * XXX causing trouble.
2611 */
2612 spin_unlock_irqrestore(esp->host->host_lock, flags);
2613 return FAILED;
2614}
2615
2616static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2617{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002618 struct esp *esp = shost_priv(cmd->device->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002619 struct completion eh_reset;
2620 unsigned long flags;
2621
2622 init_completion(&eh_reset);
2623
2624 spin_lock_irqsave(esp->host->host_lock, flags);
2625
2626 esp->eh_reset = &eh_reset;
2627
2628 /* XXX This is too simple... We should add lots of
2629 * XXX checks here so that if we find that the chip is
2630 * XXX very wedged we return failure immediately so
2631 * XXX that we can perform a full chip reset.
2632 */
2633 esp->flags |= ESP_FLAG_RESETTING;
2634 scsi_esp_cmd(esp, ESP_CMD_RS);
2635
2636 spin_unlock_irqrestore(esp->host->host_lock, flags);
2637
2638 ssleep(esp_bus_reset_settle);
2639
2640 if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2641 spin_lock_irqsave(esp->host->host_lock, flags);
2642 esp->eh_reset = NULL;
2643 spin_unlock_irqrestore(esp->host->host_lock, flags);
2644
2645 return FAILED;
2646 }
2647
2648 return SUCCESS;
2649}
2650
2651/* All bets are off, reset the entire device. */
2652static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2653{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002654 struct esp *esp = shost_priv(cmd->device->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002655 unsigned long flags;
2656
2657 spin_lock_irqsave(esp->host->host_lock, flags);
2658 esp_bootup_reset(esp);
2659 esp_reset_cleanup(esp);
2660 spin_unlock_irqrestore(esp->host->host_lock, flags);
2661
2662 ssleep(esp_bus_reset_settle);
2663
2664 return SUCCESS;
2665}
2666
2667static const char *esp_info(struct Scsi_Host *host)
2668{
2669 return "esp";
2670}
2671
2672struct scsi_host_template scsi_esp_template = {
2673 .module = THIS_MODULE,
2674 .name = "esp",
2675 .info = esp_info,
2676 .queuecommand = esp_queuecommand,
James Bottomleyec5e69f2008-06-23 14:52:09 -05002677 .target_alloc = esp_target_alloc,
2678 .target_destroy = esp_target_destroy,
David S. Millercd9ad582007-04-26 21:19:23 -07002679 .slave_alloc = esp_slave_alloc,
2680 .slave_configure = esp_slave_configure,
2681 .slave_destroy = esp_slave_destroy,
2682 .eh_abort_handler = esp_eh_abort_handler,
2683 .eh_bus_reset_handler = esp_eh_bus_reset_handler,
2684 .eh_host_reset_handler = esp_eh_host_reset_handler,
2685 .can_queue = 7,
2686 .this_id = 7,
2687 .sg_tablesize = SG_ALL,
2688 .use_clustering = ENABLE_CLUSTERING,
2689 .max_sectors = 0xffff,
2690 .skip_settle_delay = 1,
2691};
2692EXPORT_SYMBOL(scsi_esp_template);
2693
2694static void esp_get_signalling(struct Scsi_Host *host)
2695{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002696 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002697 enum spi_signal_type type;
2698
2699 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2700 type = SPI_SIGNAL_HVD;
2701 else
2702 type = SPI_SIGNAL_SE;
2703
2704 spi_signalling(host) = type;
2705}
2706
2707static void esp_set_offset(struct scsi_target *target, int offset)
2708{
2709 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002710 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002711 struct esp_target_data *tp = &esp->target[target->id];
2712
Finn Thain02507a82009-12-05 12:30:42 +11002713 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2714 tp->nego_goal_offset = 0;
2715 else
2716 tp->nego_goal_offset = offset;
David S. Millercd9ad582007-04-26 21:19:23 -07002717 tp->flags |= ESP_TGT_CHECK_NEGO;
2718}
2719
2720static void esp_set_period(struct scsi_target *target, int period)
2721{
2722 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002723 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002724 struct esp_target_data *tp = &esp->target[target->id];
2725
2726 tp->nego_goal_period = period;
2727 tp->flags |= ESP_TGT_CHECK_NEGO;
2728}
2729
2730static void esp_set_width(struct scsi_target *target, int width)
2731{
2732 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002733 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002734 struct esp_target_data *tp = &esp->target[target->id];
2735
2736 tp->nego_goal_width = (width ? 1 : 0);
2737 tp->flags |= ESP_TGT_CHECK_NEGO;
2738}
2739
2740static struct spi_function_template esp_transport_ops = {
2741 .set_offset = esp_set_offset,
2742 .show_offset = 1,
2743 .set_period = esp_set_period,
2744 .show_period = 1,
2745 .set_width = esp_set_width,
2746 .show_width = 1,
2747 .get_signalling = esp_get_signalling,
2748};
2749
2750static int __init esp_init(void)
2751{
2752 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2753 sizeof(struct esp_cmd_priv));
2754
2755 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2756 if (!esp_transport_template)
2757 return -ENODEV;
2758
2759 return 0;
2760}
2761
2762static void __exit esp_exit(void)
2763{
2764 spi_release_transport(esp_transport_template);
2765}
2766
2767MODULE_DESCRIPTION("ESP SCSI driver core");
2768MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2769MODULE_LICENSE("GPL");
2770MODULE_VERSION(DRV_VERSION);
2771
2772module_param(esp_bus_reset_settle, int, 0);
2773MODULE_PARM_DESC(esp_bus_reset_settle,
2774 "ESP scsi bus reset delay in seconds");
2775
2776module_param(esp_debug, int, 0);
2777MODULE_PARM_DESC(esp_debug,
2778"ESP bitmapped debugging message enable value:\n"
2779" 0x00000001 Log interrupt events\n"
2780" 0x00000002 Log scsi commands\n"
2781" 0x00000004 Log resets\n"
2782" 0x00000008 Log message in events\n"
2783" 0x00000010 Log message out events\n"
2784" 0x00000020 Log command completion\n"
2785" 0x00000040 Log disconnects\n"
2786" 0x00000080 Log data start\n"
2787" 0x00000100 Log data done\n"
2788" 0x00000200 Log reconnects\n"
2789" 0x00000400 Log auto-sense data\n"
2790);
2791
2792module_init(esp_init);
2793module_exit(esp_exit);