/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
#define DRV_MODULE_NAME         "esp"
#define PFX                     DRV_MODULE_NAME ": "
#define DRV_VERSION             "2.000"
#define DRV_MODULE_RELDATE      "April 19, 2007"
/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;
static u32 esp_debug;
#define ESP_DEBUG_INTR          0x00000001
#define ESP_DEBUG_SCSICMD       0x00000002
#define ESP_DEBUG_RESET         0x00000004
#define ESP_DEBUG_MSGIN         0x00000008
#define ESP_DEBUG_MSGOUT        0x00000010
#define ESP_DEBUG_CMDDONE       0x00000020
#define ESP_DEBUG_DISCONNECT    0x00000040
#define ESP_DEBUG_DATASTART     0x00000080
#define ESP_DEBUG_DATADONE      0x00000100
#define ESP_DEBUG_RECONNECT     0x00000200
#define ESP_DEBUG_AUTOSENSE     0x00000400
#define ESP_DEBUG_EVENT         0x00000800
#define ESP_DEBUG_COMMAND       0x00001000
#define esp_log_intr(f, a...) \
do {    if (esp_debug & ESP_DEBUG_INTR) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {    if (esp_debug & ESP_DEBUG_RESET) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {    if (esp_debug & ESP_DEBUG_MSGIN) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {    if (esp_debug & ESP_DEBUG_MSGOUT) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {    if (esp_debug & ESP_DEBUG_CMDDONE) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DISCONNECT) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DATASTART) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DATADONE) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {    if (esp_debug & ESP_DEBUG_RECONNECT) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {    if (esp_debug & ESP_DEBUG_AUTOSENSE) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {    if (esp_debug & ESP_DEBUG_EVENT) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {    if (esp_debug & ESP_DEBUG_COMMAND) \
                shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)
#define esp_read8(REG)          esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)     esp->ops->esp_write8(esp, VAL, REG)
static void esp_log_fill_regs(struct esp *esp,
                              struct esp_event_ent *p)
{
        p->sreg = esp->sreg;
        p->seqreg = esp->seqreg;
        p->sreg2 = esp->sreg2;
        p->ireg = esp->ireg;
        p->select_state = esp->select_state;
        p->event = esp->event;
}
void scsi_esp_cmd(struct esp *esp, u8 val)
{
        struct esp_event_ent *p;
        int idx = esp->esp_event_cur;

        p = &esp->esp_event_log[idx];
        p->type = ESP_EVENT_TYPE_CMD;
        p->val = val;
        esp_log_fill_regs(esp, p);

        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

        esp_log_command("cmd[%02x]\n", val);
        esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
        if (esp->flags & ESP_FLAG_USE_FIFO) {
                int i;

                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                for (i = 0; i < len; i++)
                        esp_write8(esp->command_block[i], ESP_FDATA);
                scsi_esp_cmd(esp, cmd);
        } else {
                if (esp->rev == FASHME)
                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                cmd |= ESP_CMD_DMA;
                esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                                       len, max_len, 0, cmd);
        }
}
static void esp_event(struct esp *esp, u8 val)
{
        struct esp_event_ent *p;
        int idx = esp->esp_event_cur;

        p = &esp->esp_event_log[idx];
        p->type = ESP_EVENT_TYPE_EVENT;
        p->val = val;
        esp_log_fill_regs(esp, p);

        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

        esp->event = val;
}
static void esp_dump_cmd_log(struct esp *esp)
{
        int idx = esp->esp_event_cur;
        int stop = idx;

        shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
        do {
                struct esp_event_ent *p = &esp->esp_event_log[idx];

                shost_printk(KERN_INFO, esp->host,
                             "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
                             "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
                             idx,
                             p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
                             p->val, p->sreg, p->seqreg,
                             p->sreg2, p->ireg, p->select_state, p->event);

                idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
        } while (idx != stop);
}
static void esp_flush_fifo(struct esp *esp)
{
        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
        if (esp->rev == ESP236) {
                int lim = 1000;

                while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
                        if (--lim == 0) {
                                shost_printk(KERN_ALERT, esp->host,
                                             "ESP_FF_FBYTES will not clear!\n");
                                break;
                        }
                        udelay(1);
                }
        }
}
static void hme_read_fifo(struct esp *esp)
{
        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
        int idx = 0;

        while (fcnt--) {
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
        }
        if (esp->sreg2 & ESP_STAT2_F1BYTE) {
                esp_write8(0, ESP_FDATA);
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
        }
        esp->fifo_cnt = idx;
}
static void esp_set_all_config3(struct esp *esp, u8 val)
{
        int i;

        for (i = 0; i < ESP_MAX_TARGET; i++)
                esp->target[i].esp_config3 = val;
}
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
        u8 family_code, version;

        /* Now reset the ESP chip */
        scsi_esp_cmd(esp, ESP_CMD_RC);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
        if (esp->rev == FAST)
                esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
        /* This is the only point at which it is reliable to read
         * the ID code for the fast ESP chip variants.
         */
        esp->max_period = ((35 * esp->ccycle) / 1000);
        if (esp->rev == FAST) {
                version = esp_read8(ESP_UID);
                family_code = (version & 0xf8) >> 3;
                if (family_code == 0x02)
                        esp->rev = FAS236;
                else if (family_code == 0x0a)
                        esp->rev = FASHME; /* Version is usually '5'. */
                else
                        esp->rev = FAS100A;
                esp->min_period = ((4 * esp->ccycle) / 1000);
        } else {
                esp->min_period = ((5 * esp->ccycle) / 1000);
        }
        esp->max_period = (esp->max_period + 3)>>2;
        esp->min_period = (esp->min_period + 3)>>2;

        esp_write8(esp->config1, ESP_CFG1);
        switch (esp->rev) {
        case ESP100:
                /* nothing to do */
                break;

        case ESP100A:
                esp_write8(esp->config2, ESP_CFG2);
                break;

        case ESP236:
                /* Slow 236 */
                esp_write8(esp->config2, ESP_CFG2);
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                break;

        case FASHME:
                esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
                /* fallthrough... */

        case FAS236:
                /* Fast 236 or HME */
                esp_write8(esp->config2, ESP_CFG2);
                if (esp->rev == FASHME) {
                        u8 cfg3 = esp->target[0].esp_config3;

                        cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
                        if (esp->scsi_id >= 8)
                                cfg3 |= ESP_CONFIG3_IDBIT3;
                        esp_set_all_config3(esp, cfg3);
                } else {
                        u32 cfg3 = esp->target[0].esp_config3;

                        cfg3 |= ESP_CONFIG3_FCLK;
                        esp_set_all_config3(esp, cfg3);
                }
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                if (esp->rev == FASHME) {
                        esp->radelay = 80;
                } else {
                        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
                                esp->radelay = 0;
                        else
                                esp->radelay = 96;
                }
                break;

        case FAS100A:
                /* Fast 100a */
                esp_write8(esp->config2, ESP_CFG2);
                esp_set_all_config3(esp,
                                    (esp->target[0].esp_config3 |
                                     ESP_CONFIG3_FCLOCK));
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                esp->radelay = 32;
                break;

        default:
                break;
        }
        /* Reload the configuration registers */
        esp_write8(esp->cfact, ESP_CFACT);

        esp->prev_stp = 0;
        esp_write8(esp->prev_stp, ESP_STP);

        esp->prev_soff = 0;
        esp_write8(esp->prev_soff, ESP_SOFF);

        esp_write8(esp->neg_defp, ESP_TIMEO);

        /* Eat any bitrot in the chip */
        esp_read8(ESP_INTRPT);
        udelay(100);
}
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
        struct scatterlist *sg = scsi_sglist(cmd);
        int dir = cmd->sc_data_direction;
        int total, i;

        if (dir == DMA_NONE)
                return;

        spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
        spriv->cur_residue = sg_dma_len(sg);
        spriv->cur_sg = sg;

        total = 0;
        for (i = 0; i < spriv->u.num_sg; i++)
                total += sg_dma_len(&sg[i]);
        spriv->tot_residue = total;
}
static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
                                   struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                return ent->sense_dma +
                        (ent->sense_ptr - cmd->sense_buffer);
        }

        return sg_dma_address(p->cur_sg) +
                (sg_dma_len(p->cur_sg) -
                 p->cur_residue);
}
static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
                                    struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                return SCSI_SENSE_BUFFERSIZE -
                        (ent->sense_ptr - cmd->sense_buffer);
        }
        return p->cur_residue;
}
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
                            struct scsi_cmnd *cmd, unsigned int len)
{
        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->sense_ptr += len;
                return;
        }

        p->cur_residue -= len;
        p->tot_residue -= len;
        if (p->cur_residue < 0 || p->tot_residue < 0) {
                shost_printk(KERN_ERR, esp->host,
                             "Data transfer overflow.\n");
                shost_printk(KERN_ERR, esp->host,
                             "cur_residue[%d] tot_residue[%d] len[%u]\n",
                             p->cur_residue, p->tot_residue, len);
                p->cur_residue = 0;
                p->tot_residue = 0;
        }
        if (!p->cur_residue && p->tot_residue) {
                p->cur_sg++;
                p->cur_residue = sg_dma_len(p->cur_sg);
        }
}
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
        int dir = cmd->sc_data_direction;

        if (dir == DMA_NONE)
                return;

        esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->saved_sense_ptr = ent->sense_ptr;
                return;
        }
        ent->saved_cur_residue = spriv->cur_residue;
        ent->saved_cur_sg = spriv->cur_sg;
        ent->saved_tot_residue = spriv->tot_residue;
}
static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->sense_ptr = ent->saved_sense_ptr;
                return;
        }
        spriv->cur_residue = ent->saved_cur_residue;
        spriv->cur_sg = ent->saved_cur_sg;
        spriv->tot_residue = ent->saved_tot_residue;
}
static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
        if (cmd->cmd_len == 6 ||
            cmd->cmd_len == 10 ||
            cmd->cmd_len == 12) {
                esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
        } else {
                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
        }
}
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
        if (esp->rev > ESP100A) {
                u8 val = esp->target[tgt].esp_config3;

                if (val != esp->prev_cfg3) {
                        esp->prev_cfg3 = val;
                        esp_write8(val, ESP_CFG3);
                }
        }
}
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
        u8 off = esp->target[tgt].esp_offset;
        u8 per = esp->target[tgt].esp_period;

        if (off != esp->prev_soff) {
                esp->prev_soff = off;
                esp_write8(off, ESP_SOFF);
        }
        if (per != esp->prev_stp) {
                esp->prev_stp = per;
                esp_write8(per, ESP_STP);
        }
}
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
        if (esp->rev == FASHME) {
                /* Arbitrary segment boundaries, 24-bit counts. */
                if (dma_len > (1U << 24))
                        dma_len = (1U << 24);
        } else {
                u32 base, end;

                /* The other ESP chip variants are limited to 16 bits of
                 * transfer count.  Actually on FAS100A and FAS236 we could
                 * get 24 bits of transfer count by enabling ESP_CONFIG2_FENAB
                 * in the ESP_CFG2 register but that causes other unwanted
                 * changes so we don't use it currently.
                 */
                if (dma_len > (1U << 16))
                        dma_len = (1U << 16);

                /* All of the DMA variants hooked up to these chips
                 * cannot handle crossing a 24-bit address boundary.
                 */
                base = dma_addr & ((1U << 24) - 1U);
                end = base + dma_len;
                if (end > (1U << 24))
                        end = (1U << 24);
                dma_len = end - base;
        }
        return dma_len;
}
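/* For example (illustrative values, not from real hardware): a request
 * with dma_addr = 0x00ffff00 and dma_len = 0x400 gives base = 0x00ffff00
 * and end = 0x01000300, which crosses the 16MB (1 << 24) boundary.  The
 * end is pulled back to 0x01000000 and dma_len becomes 0x100, so the
 * transfer stops exactly at the 24-bit boundary and the remainder is
 * picked up by the next transfer.
 */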
static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
        struct scsi_target *target = tp->starget;

        return spi_width(target) != tp->nego_goal_width;
}
static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
        struct scsi_target *target = tp->starget;

        /* When offset is zero, period is "don't care". */
        if (!spi_offset(target) && !tp->nego_goal_offset)
                return 0;

        if (spi_offset(target) == tp->nego_goal_offset &&
            spi_period(target) == tp->nego_goal_period)
                return 0;

        return 1;
}
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
{
        if (!ent->orig_tag[0]) {
                /* Non-tagged, slot already taken? */
                if (lp->non_tagged_cmd)
                        return -EBUSY;

                if (lp->hold) {
                        /* We are being held by active tagged
                         * commands.
                         */
                        if (lp->num_tagged)
                                return -EBUSY;

                        /* Tagged commands completed, we can unplug
                         * the queue and run this untagged command.
                         */
                        lp->hold = 0;
                } else if (lp->num_tagged) {
                        /* Plug the queue until num_tagged decreases
                         * to zero in esp_free_lun_tag.
                         */
                        lp->hold = 1;
                        return -EBUSY;
                }

                lp->non_tagged_cmd = ent;
                return 0;
        }

        /* Tagged command, see if blocked by a
         * non-tagged one.
         */
        if (lp->non_tagged_cmd || lp->hold)
                return -EBUSY;

        BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

        lp->tagged_cmds[ent->orig_tag[1]] = ent;
        lp->num_tagged++;

        return 0;
}
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
{
        if (ent->orig_tag[0]) {
                BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
                lp->tagged_cmds[ent->orig_tag[1]] = NULL;
                lp->num_tagged--;
        } else {
                BUG_ON(lp->non_tagged_cmd != ent);
                lp->non_tagged_cmd = NULL;
        }
}
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;
        struct scsi_device *dev = cmd->device;
        int tgt, lun;
        u8 *p, val;

        tgt = dev->id;
        lun = dev->lun;

        if (!ent->sense_ptr) {
                esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
                                  tgt, lun);

                ent->sense_ptr = cmd->sense_buffer;
                ent->sense_dma = esp->ops->map_single(esp,
                                                      ent->sense_ptr,
                                                      SCSI_SENSE_BUFFERSIZE,
                                                      DMA_FROM_DEVICE);
        }
        ent->saved_sense_ptr = ent->sense_ptr;

        esp->active_cmd = ent;

        p = esp->command_block;
        esp->msg_out_len = 0;

        *p++ = IDENTIFY(0, lun);
        *p++ = REQUEST_SENSE;
        *p++ = ((dev->scsi_level <= SCSI_2) ?
                (lun << 5) : 0);
        *p++ = 0;
        *p++ = 0;
        *p++ = SCSI_SENSE_BUFFERSIZE;
        *p++ = 0;

        esp->select_state = ESP_SELECT_BASIC;

        val = tgt;
        if (esp->rev == FASHME)
                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
        esp_write8(val, ESP_BUSID);

        esp_write_tgt_sync(esp, tgt);
        esp_write_tgt_config3(esp, tgt);

        val = (p - esp->command_block);

        esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
        struct esp_cmd_entry *ent;

        list_for_each_entry(ent, &esp->queued_cmds, list) {
                struct scsi_cmnd *cmd = ent->cmd;
                struct scsi_device *dev = cmd->device;
                struct esp_lun_data *lp = dev->hostdata;

                if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                        ent->tag[0] = 0;
                        ent->tag[1] = 0;
                        return ent;
                }

                if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
                        ent->tag[0] = 0;
                        ent->tag[1] = 0;
                }
                ent->orig_tag[0] = ent->tag[0];
                ent->orig_tag[1] = ent->tag[1];

                if (esp_alloc_lun_tag(ent, lp) < 0)
                        continue;

                return ent;
        }

        return NULL;
}
static void esp_maybe_execute_command(struct esp *esp)
{
        struct esp_target_data *tp;
        struct esp_lun_data *lp;
        struct scsi_device *dev;
        struct scsi_cmnd *cmd;
        struct esp_cmd_entry *ent;
        int tgt, lun, i;
        u32 val, start_cmd;
        u8 *p;

        if (esp->active_cmd ||
            (esp->flags & ESP_FLAG_RESETTING))
                return;

        ent = find_and_prep_issuable_command(esp);
        if (!ent)
                return;

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp_autosense(esp, ent);
                return;
        }

        cmd = ent->cmd;
        dev = cmd->device;
        tgt = dev->id;
        lun = dev->lun;
        tp = &esp->target[tgt];
        lp = dev->hostdata;

        list_move(&ent->list, &esp->active_cmds);

        esp->active_cmd = ent;

        esp_map_dma(esp, cmd);
        esp_save_pointers(esp, ent);

        esp_check_command_len(esp, cmd);

        p = esp->command_block;

        esp->msg_out_len = 0;
        if (tp->flags & ESP_TGT_CHECK_NEGO) {
                /* Need to negotiate.  If the target is broken
                 * go for synchronous transfers and non-wide.
                 */
                if (tp->flags & ESP_TGT_BROKEN) {
                        tp->flags &= ~ESP_TGT_DISCONNECT;
                        tp->nego_goal_period = 0;
                        tp->nego_goal_offset = 0;
                        tp->nego_goal_width = 0;
                        tp->nego_goal_tags = 0;
                }

                /* If the settings are not changing, skip this. */
                if (spi_width(tp->starget) == tp->nego_goal_width &&
                    spi_period(tp->starget) == tp->nego_goal_period &&
                    spi_offset(tp->starget) == tp->nego_goal_offset) {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                        goto build_identify;
                }

                if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
                        esp->msg_out_len =
                                spi_populate_width_msg(&esp->msg_out[0],
                                                       (tp->nego_goal_width ?
                                                        1 : 0));
                        tp->flags |= ESP_TGT_NEGO_WIDE;
                } else if (esp_need_to_nego_sync(tp)) {
                        esp->msg_out_len =
                                spi_populate_sync_msg(&esp->msg_out[0],
                                                      tp->nego_goal_period,
                                                      tp->nego_goal_offset);
                        tp->flags |= ESP_TGT_NEGO_SYNC;
                } else {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                }

                /* Process it like a slow command. */
                if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
                        esp->flags |= ESP_FLAG_DOING_SLOWCMD;
        }

build_identify:
        /* If we don't have a lun-data struct yet, we're probing
         * so do not disconnect.  Also, do not disconnect unless
         * we have a tag on this command.
         */
        if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
                *p++ = IDENTIFY(1, lun);
        else
                *p++ = IDENTIFY(0, lun);

        if (ent->tag[0] && esp->rev == ESP100) {
                /* ESP100 lacks select w/atn3 command, use select
                 * and stop instead.
                 */
                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
        }

        if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
                start_cmd = ESP_CMD_SELA;
                if (ent->tag[0]) {
                        *p++ = ent->tag[0];
                        *p++ = ent->tag[1];

                        start_cmd = ESP_CMD_SA3;
                }

                for (i = 0; i < cmd->cmd_len; i++)
                        *p++ = cmd->cmnd[i];

                esp->select_state = ESP_SELECT_BASIC;
        } else {
                esp->cmd_bytes_left = cmd->cmd_len;
                esp->cmd_bytes_ptr = &cmd->cmnd[0];

                if (ent->tag[0]) {
                        for (i = esp->msg_out_len - 1;
                             i >= 0; i--)
                                esp->msg_out[i + 2] = esp->msg_out[i];
                        esp->msg_out[0] = ent->tag[0];
                        esp->msg_out[1] = ent->tag[1];
                        esp->msg_out_len += 2;
                }

                start_cmd = ESP_CMD_SELAS;
                esp->select_state = ESP_SELECT_MSGOUT;
        }
        val = tgt;
        if (esp->rev == FASHME)
                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
        esp_write8(val, ESP_BUSID);

        esp_write_tgt_sync(esp, tgt);
        esp_write_tgt_config3(esp, tgt);

        val = (p - esp->command_block);

        if (esp_debug & ESP_DEBUG_SCSICMD) {
                printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
                for (i = 0; i < cmd->cmd_len; i++)
                        printk("%02x ", cmd->cmnd[i]);
                printk("]\n");
        }

        esp_send_dma_cmd(esp, val, 16, start_cmd);
}
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
        struct list_head *head = &esp->esp_cmd_pool;
        struct esp_cmd_entry *ret;

        if (list_empty(head)) {
                ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
        } else {
                ret = list_entry(head->next, struct esp_cmd_entry, list);
                list_del(&ret->list);
                memset(ret, 0, sizeof(*ret));
        }
        return ret;
}
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
        list_add(&ent->list, &esp->esp_cmd_pool);
}
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
                            struct scsi_cmnd *cmd, unsigned int result)
{
        struct scsi_device *dev = cmd->device;
        int tgt = dev->id;
        int lun = dev->lun;

        esp->active_cmd = NULL;
        esp_unmap_dma(esp, cmd);
        esp_free_lun_tag(ent, dev->hostdata);
        cmd->result = result;

        if (ent->eh_done) {
                complete(ent->eh_done);
                ent->eh_done = NULL;
        }

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp->ops->unmap_single(esp, ent->sense_dma,
                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
                ent->sense_ptr = NULL;

                /* Restore the message/status bytes to what we actually
                 * saw originally.  Also, report that we are providing
                 * the sense data.
                 */
                cmd->result = ((DRIVER_SENSE << 24) |
                               (DID_OK << 16) |
                               (COMMAND_COMPLETE << 8) |
                               (SAM_STAT_CHECK_CONDITION << 0));

                ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
                if (esp_debug & ESP_DEBUG_AUTOSENSE) {
                        int i;

                        printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
                               esp->host->unique_id, tgt, lun);
                        for (i = 0; i < 18; i++)
                                printk("%02x ", cmd->sense_buffer[i]);
                        printk("]\n");
                }
        }

        cmd->scsi_done(cmd);

        list_del(&ent->list);
        esp_put_ent(esp, ent);

        esp_maybe_execute_command(esp);
}
static unsigned int compose_result(unsigned int status, unsigned int message,
                                   unsigned int driver_code)
{
        return (status | (message << 8) | (driver_code << 16));
}
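/* The layout matches what the SCSI midlayer expects: bits 0-7 carry the
 * SCSI status byte, bits 8-15 the message byte, and bits 16-23 the
 * host/driver code.  As an illustrative example, a BUSY status that
 * completed with COMMAND_COMPLETE under DID_OK composes to
 * compose_result(SAM_STAT_BUSY, COMMAND_COMPLETE, DID_OK) == 0x0008.
 */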
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_device *dev = ent->cmd->device;
        struct esp_lun_data *lp = dev->hostdata;

        scsi_track_queue_full(dev, lp->num_tagged - 1);
}
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
        struct scsi_device *dev = cmd->device;
        struct esp *esp = shost_priv(dev->host);
        struct esp_cmd_priv *spriv;
        struct esp_cmd_entry *ent;

        ent = esp_get_ent(esp);
        if (!ent)
                return SCSI_MLQUEUE_HOST_BUSY;

        ent->cmd = cmd;

        cmd->scsi_done = done;

        spriv = ESP_CMD_PRIV(cmd);
        spriv->u.dma_addr = ~(dma_addr_t)0x0;

        list_add_tail(&ent->list, &esp->queued_cmds);

        esp_maybe_execute_command(esp);

        return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
static int esp_check_gross_error(struct esp *esp)
{
        if (esp->sreg & ESP_STAT_SPAM) {
                /* Gross Error, could be one of:
                 * - top of fifo overwritten
                 * - top of command register overwritten
                 * - DMA programmed with wrong direction
                 * - improper phase change
                 */
                shost_printk(KERN_ERR, esp->host,
                             "Gross error sreg[%02x]\n", esp->sreg);
                /* XXX Reset the chip. XXX */
                return 1;
        }
        return 0;
}
static int esp_check_spur_intr(struct esp *esp)
{
        switch (esp->rev) {
        case ESP100:
        case ESP100A:
                /* The interrupt pending bit of the status register cannot
                 * be trusted on these revisions.
                 */
                esp->sreg &= ~ESP_STAT_INTR;
                break;

        default:
                if (!(esp->sreg & ESP_STAT_INTR)) {
                        if (esp->ireg & ESP_INTR_SR)
                                return 1;

                        /* If the DMA is indicating interrupt pending and the
                         * ESP is not, the only possibility is a DMA error.
                         */
                        if (!esp->ops->dma_error(esp)) {
                                shost_printk(KERN_ERR, esp->host,
                                             "Spurious irq, sreg=%02x.\n",
                                             esp->sreg);
                                return -1;
                        }

                        shost_printk(KERN_ERR, esp->host, "DMA error\n");

                        /* XXX Reset the chip. XXX */
                        return -1;
                }
                break;
        }

        return 0;
}
static void esp_schedule_reset(struct esp *esp)
{
        esp_log_reset("esp_schedule_reset() from %pf\n",
                      __builtin_return_address(0));
        esp->flags |= ESP_FLAG_RESETTING;
        esp_event(esp, ESP_EVENT_RESET);
}
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
                                                    struct esp_lun_data *lp)
{
        struct esp_cmd_entry *ent;
        int i;

        if (!lp->num_tagged) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect w/num_tagged==0\n");
                return NULL;
        }

        esp_log_reconnect("reconnect tag, ");

        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
                if (esp->ops->irq_pending(esp))
                        break;
        }
        if (i == ESP_QUICKIRQ_LIMIT) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect IRQ1 timeout\n");
                return NULL;
        }

        esp->sreg = esp_read8(ESP_STATUS);
        esp->ireg = esp_read8(ESP_INTRPT);

        esp_log_reconnect("IRQ(%d:%x:%x), ",
                          i, esp->ireg, esp->sreg);

        if (esp->ireg & ESP_INTR_DC) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect, got disconnect.\n");
                return NULL;
        }

        if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
                return NULL;
        }

        /* DMA in the tag bytes... */
        esp->command_block[0] = 0xff;
        esp->command_block[1] = 0xff;
        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                               2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

        /* ACK the message. */
        scsi_esp_cmd(esp, ESP_CMD_MOK);

        for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
                if (esp->ops->irq_pending(esp)) {
                        esp->sreg = esp_read8(ESP_STATUS);
                        esp->ireg = esp_read8(ESP_INTRPT);
                        if (esp->ireg & ESP_INTR_FDONE)
                                break;
                }
                udelay(1);
        }
        if (i == ESP_RESELECT_TAG_LIMIT) {
                shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
                return NULL;
        }
        esp->ops->dma_drain(esp);
        esp->ops->dma_invalidate(esp);

        esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
                          i, esp->ireg, esp->sreg,
                          esp->command_block[0],
                          esp->command_block[1]);

        if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
            esp->command_block[0] > ORDERED_QUEUE_TAG) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect, bad tag type %02x.\n",
                             esp->command_block[0]);
                return NULL;
        }

        ent = lp->tagged_cmds[esp->command_block[1]];
        if (!ent) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect, no entry for tag %02x.\n",
                             esp->command_block[1]);
                return NULL;
        }

        return ent;
}
static int esp_reconnect(struct esp *esp)
{
        struct esp_cmd_entry *ent;
        struct esp_target_data *tp;
        struct esp_lun_data *lp;
        struct scsi_device *dev;
        int target, lun;

        BUG_ON(esp->active_cmd);
        if (esp->rev == FASHME) {
                /* FASHME puts the target and lun numbers directly
                 * into the fifo.
                 */
                target = esp->fifo[0];
                lun = esp->fifo[1] & 0x7;
        } else {
                u8 bits = esp_read8(ESP_FDATA);

                /* Older chips put the lun directly into the fifo, but
                 * the target is given as a sample of the arbitration
                 * lines on the bus at reselection time.  So we should
                 * see the ID of the ESP and the one reconnecting target
                 * set in the bitmap.
                 */
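                /* As an illustrative example: with the ESP itself at
                 * SCSI ID 7 (scsi_id_mask == 0x80), a reselection by
                 * target 1 samples as bits == 0x82.  Masking off our
                 * own ID leaves 0x02, a single set bit, and
                 * ffs(0x02) - 1 == 1 recovers the reconnecting target.
                 */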
                if (!(bits & esp->scsi_id_mask))
                        goto do_reset;
                bits &= ~esp->scsi_id_mask;
                if (!bits || (bits & (bits - 1)))
                        goto do_reset;

                target = ffs(bits) - 1;
                lun = (esp_read8(ESP_FDATA) & 0x7);

                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                if (esp->rev == ESP100) {
                        u8 ireg = esp_read8(ESP_INTRPT);
                        /* This chip has a bug during reselection that can
                         * cause a spurious illegal-command interrupt, which
                         * we simply ACK here.  Another possibility is a bus
                         * reset so we must check for that.
                         */
                        if (ireg & ESP_INTR_SR)
                                goto do_reset;
                }
                scsi_esp_cmd(esp, ESP_CMD_NULL);
        }

        esp_write_tgt_sync(esp, target);
        esp_write_tgt_config3(esp, target);

        scsi_esp_cmd(esp, ESP_CMD_MOK);

        if (esp->rev == FASHME)
                esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
                           ESP_BUSID);

        tp = &esp->target[target];
        dev = __scsi_device_lookup_by_target(tp->starget, lun);
        if (!dev) {
                shost_printk(KERN_ERR, esp->host,
                             "Reconnect, no lp tgt[%u] lun[%u]\n",
                             target, lun);
                goto do_reset;
        }
        lp = dev->hostdata;

        ent = lp->non_tagged_cmd;
        if (!ent) {
                ent = esp_reconnect_with_tag(esp, lp);
                if (!ent)
                        goto do_reset;
        }

        esp->active_cmd = ent;

        if (ent->flags & ESP_CMD_FLAG_ABORT) {
                esp->msg_out[0] = ABORT_TASK_SET;
                esp->msg_out_len = 1;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
        }

        esp_event(esp, ESP_EVENT_CHECK_PHASE);
        esp_restore_pointers(esp, ent);
        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
        return 1;

do_reset:
        esp_schedule_reset(esp);
        return 0;
}
static int esp_finish_select(struct esp *esp)
{
        struct esp_cmd_entry *ent;
        struct scsi_cmnd *cmd;
        u8 orig_select_state;

        orig_select_state = esp->select_state;

        /* No longer selecting. */
        esp->select_state = ESP_SELECT_NONE;

        esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
        ent = esp->active_cmd;
        cmd = ent->cmd;

        if (esp->ops->dma_error(esp)) {
                /* If we see a DMA error during or as a result of selection,
                 * all bets are off.
                 */
                esp_schedule_reset(esp);
                esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
                return 0;
        }

        esp->ops->dma_invalidate(esp);

        if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
                struct esp_target_data *tp = &esp->target[cmd->device->id];

                /* Carefully back out of the selection attempt.  Release
                 * resources (such as DMA mapping & TAG) and reset state (such
                 * as message out and command delivery variables).
                 */
                if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
                        esp_unmap_dma(esp, cmd);
                        esp_free_lun_tag(ent, cmd->device->hostdata);
                        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
                        esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
                        esp->cmd_bytes_ptr = NULL;
                        esp->cmd_bytes_left = 0;
                } else {
                        esp->ops->unmap_single(esp, ent->sense_dma,
                                               SCSI_SENSE_BUFFERSIZE,
                                               DMA_FROM_DEVICE);
                        ent->sense_ptr = NULL;
                }

                /* Now that the state is unwound properly, put back onto
                 * the issue queue.  This command is no longer active.
                 */
                list_move(&ent->list, &esp->queued_cmds);
                esp->active_cmd = NULL;

                /* Return value ignored by caller, it directly invokes
                 * esp_reconnect().
                 */
                return 0;
        }

        if (esp->ireg == ESP_INTR_DC) {
                struct scsi_device *dev = cmd->device;

                /* Disconnect.  Make sure we re-negotiate sync and
                 * wide parameters if this target starts responding
                 * again in the future.
                 */
                esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

                scsi_esp_cmd(esp, ESP_CMD_ESEL);
                esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
                return 1;
        }

        if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
                /* Selection successful.  On pre-FAST chips we have
                 * to do a NOP and possibly clean out the FIFO.
                 */
                if (esp->rev <= ESP236) {
                        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

                        scsi_esp_cmd(esp, ESP_CMD_NULL);

                        if (!fcnt &&
                            (!esp->prev_soff ||
                             ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
                                esp_flush_fifo(esp);
                }

                /* If we are doing a slow command, negotiation, etc.
                 * we'll do the right thing as we transition to the
                 * next phase.
                 */
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                return 0;
        }

        shost_printk(KERN_INFO, esp->host,
                     "Unexpected selection completion ireg[%x]\n", esp->ireg);
        esp_schedule_reset(esp);
        return 0;
}
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
                               struct scsi_cmnd *cmd)
{
        int fifo_cnt, ecount, bytes_sent, flush_fifo;

        fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
        if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
                fifo_cnt <<= 1;

        ecount = 0;
        if (!(esp->sreg & ESP_STAT_TCNT)) {
                ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
                          (((unsigned int)esp_read8(ESP_TCMED)) << 8));
                if (esp->rev == FASHME)
                        ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
        }

        bytes_sent = esp->data_dma_len;
        bytes_sent -= ecount;
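        /* Worked example (illustrative numbers): if 0x1000 bytes were
         * programmed (data_dma_len) and the transfer counter reads back
         * ecount == 0x0100, then 0x0f00 bytes actually moved before the
         * phase change; the FIFO corrections below then account for
         * bytes the chip latched but never put on the bus.
         */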
        /*
         * The am53c974 has a DMA 'peculiarity'.  The doc states:
         * In some odd byte conditions, one residual byte will
         * be left in the SCSI FIFO, and the FIFO Flags will
         * never count to '0'.  When this happens, the residual
         * byte should be retrieved via PIO following completion
         * of the BLAST operation.
         */
        if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
                size_t count = 1;
                size_t offset = bytes_sent;
                u8 bval = esp_read8(ESP_FDATA);

                if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
                        ent->sense_ptr[bytes_sent] = bval;
                else {
                        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
                        u8 *ptr;

                        ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
                                                  &offset, &count);
                        if (likely(ptr)) {
                                *(ptr + offset) = bval;
                                scsi_kunmap_atomic_sg(ptr);
                        }
                }
                bytes_sent += fifo_cnt;
                ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
        }
        if (!(ent->flags & ESP_CMD_FLAG_WRITE))
                bytes_sent -= fifo_cnt;
        flush_fifo = 0;
        if (!esp->prev_soff) {
                /* Synchronous data transfer, always flush fifo. */
                flush_fifo = 1;
        } else {
                if (esp->rev == ESP100) {
                        u32 fflags, phase;

                        /* ESP100 has a chip bug where in the synchronous data
                         * phase it can mistake a final long REQ pulse from the
                         * target as an extra data byte.  Fun.
                         *
                         * To detect this case we resample the status register
                         * and fifo flags.  If we're still in a data phase and
                         * we see spurious chunks in the fifo, we return error
                         * to the caller which should reset and set things up
                         * such that we only try future transfers to this
                         * target in synchronous mode.
                         */
                        esp->sreg = esp_read8(ESP_STATUS);
                        phase = esp->sreg & ESP_STAT_PMASK;
                        fflags = esp_read8(ESP_FFLAGS);

                        if ((phase == ESP_DOP &&
                             (fflags & ESP_FF_ONOTZERO)) ||
                            (phase == ESP_DIP &&
                             (fflags & ESP_FF_FBYTES)))
                                return -1;
                }
                if (!(ent->flags & ESP_CMD_FLAG_WRITE))
                        flush_fifo = 1;
        }

        if (flush_fifo)
                esp_flush_fifo(esp);

        return bytes_sent;
}
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
                        u8 scsi_period, u8 scsi_offset,
                        u8 esp_stp, u8 esp_soff)
{
        spi_period(tp->starget) = scsi_period;
        spi_offset(tp->starget) = scsi_offset;
        spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

        if (esp_soff) {
                esp_soff |= esp->radelay;
                if (esp->rev >= FAS236) {
                        u8 bit = ESP_CONFIG3_FSCSI;
                        if (esp->rev >= FAS100A)
                                bit = ESP_CONFIG3_FAST;

                        if (scsi_period < 50) {
                                if (esp->rev == FASHME)
                                        esp_soff &= ~esp->radelay;
                                tp->esp_config3 |= bit;
                        } else {
                                tp->esp_config3 &= ~bit;
                        }
                        esp->prev_cfg3 = tp->esp_config3;
                        esp_write8(esp->prev_cfg3, ESP_CFG3);
                }
        }

        tp->esp_period = esp->prev_stp = esp_stp;
        tp->esp_offset = esp->prev_soff = esp_soff;

        esp_write8(esp_soff, ESP_SOFF);
        esp_write8(esp_stp, ESP_STP);

        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

        spi_display_xfer_agreement(tp->starget);
}
static void esp_msgin_reject(struct esp *esp)
{
        struct esp_cmd_entry *ent = esp->active_cmd;
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_target_data *tp;
        int tgt;

        tgt = cmd->device->id;
        tp = &esp->target[tgt];

        if (tp->flags & ESP_TGT_NEGO_WIDE) {
                tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

                if (!esp_need_to_nego_sync(tp)) {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                        scsi_esp_cmd(esp, ESP_CMD_RATN);
                } else {
                        esp->msg_out_len =
                                spi_populate_sync_msg(&esp->msg_out[0],
                                                      tp->nego_goal_period,
                                                      tp->nego_goal_offset);
                        tp->flags |= ESP_TGT_NEGO_SYNC;
                        scsi_esp_cmd(esp, ESP_CMD_SATN);
                }
                return;
        }

        if (tp->flags & ESP_TGT_NEGO_SYNC) {
                tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
                tp->esp_period = 0;
                tp->esp_offset = 0;
                esp_setsync(esp, tp, 0, 0, 0, 0);
                scsi_esp_cmd(esp, ESP_CMD_RATN);
                return;
        }

        esp->msg_out[0] = ABORT_TASK_SET;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
        u8 period = esp->msg_in[3];
        u8 offset = esp->msg_in[4];
        u8 stp;

        if (!(tp->flags & ESP_TGT_NEGO_SYNC))
                goto do_reject;

        if (offset > 15)
                goto do_reject;

        if (offset) {
                int one_clock;

                if (period > esp->max_period) {
                        period = offset = 0;
                        goto do_sdtr;
                }
                if (period < esp->min_period)
                        goto do_reject;

                one_clock = esp->ccycle / 1000;
                stp = DIV_ROUND_UP(period << 2, one_clock);
                if (stp && esp->rev >= FAS236) {
                        if (stp >= 50)
                                stp--;
                }
        } else {
                stp = 0;
        }

        esp_setsync(esp, tp, period, offset, stp, offset);
        return;

do_reject:
        esp->msg_out[0] = MESSAGE_REJECT;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
        return;

do_sdtr:
        tp->nego_goal_period = period;
        tp->nego_goal_offset = offset;
        esp->msg_out_len =
                spi_populate_sync_msg(&esp->msg_out[0],
                                      tp->nego_goal_period,
                                      tp->nego_goal_offset);
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
        int size = 8 << esp->msg_in[3];
        u8 cfg3;

        if (esp->rev != FASHME)
                goto do_reject;

        if (size != 8 && size != 16)
                goto do_reject;

        if (!(tp->flags & ESP_TGT_NEGO_WIDE))
                goto do_reject;

        cfg3 = tp->esp_config3;
        if (size == 16) {
                tp->flags |= ESP_TGT_WIDE;
                cfg3 |= ESP_CONFIG3_EWIDE;
        } else {
                tp->flags &= ~ESP_TGT_WIDE;
                cfg3 &= ~ESP_CONFIG3_EWIDE;
        }
        tp->esp_config3 = cfg3;
        esp->prev_cfg3 = cfg3;
        esp_write8(cfg3, ESP_CFG3);

        tp->flags &= ~ESP_TGT_NEGO_WIDE;

        spi_period(tp->starget) = 0;
        spi_offset(tp->starget) = 0;
        if (!esp_need_to_nego_sync(tp)) {
                tp->flags &= ~ESP_TGT_CHECK_NEGO;
                scsi_esp_cmd(esp, ESP_CMD_RATN);
        } else {
                esp->msg_out_len =
                        spi_populate_sync_msg(&esp->msg_out[0],
                                              tp->nego_goal_period,
                                              tp->nego_goal_offset);
                tp->flags |= ESP_TGT_NEGO_SYNC;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
        }
        return;

do_reject:
        esp->msg_out[0] = MESSAGE_REJECT;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
static void esp_msgin_extended(struct esp *esp)
{
        struct esp_cmd_entry *ent = esp->active_cmd;
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_target_data *tp;
        int tgt = cmd->device->id;

        tp = &esp->target[tgt];
        if (esp->msg_in[2] == EXTENDED_SDTR) {
                esp_msgin_sdtr(esp, tp);
                return;
        }
        if (esp->msg_in[2] == EXTENDED_WDTR) {
                esp_msgin_wdtr(esp, tp);
                return;
        }

        shost_printk(KERN_INFO, esp->host,
                     "Unexpected extended msg type %x\n", esp->msg_in[2]);

        esp->msg_out[0] = ABORT_TASK_SET;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
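/* For example, an extended SDTR message arrives as the five bytes
 * { EXTENDED_MESSAGE, 0x03, EXTENDED_SDTR, period, offset }, so with
 * msg_in[1] == 3 this function keeps returning non-zero until all
 * msg_in[1] + 2 == 5 bytes have been collected.
 */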
static int esp_msgin_process(struct esp *esp)
{
        u8 msg0 = esp->msg_in[0];
        int len = esp->msg_in_len;

        if (msg0 & 0x80) {
                /* Identify */
                shost_printk(KERN_INFO, esp->host,
                             "Unexpected msgin identify\n");
                return 0;
        }

        switch (msg0) {
        case EXTENDED_MESSAGE:
                if (len == 1)
                        return 1;
                if (len < esp->msg_in[1] + 2)
                        return 1;
                esp_msgin_extended(esp);
                return 0;

        case IGNORE_WIDE_RESIDUE: {
                struct esp_cmd_entry *ent;
                struct esp_cmd_priv *spriv;
                if (len == 1)
                        return 1;
                if (esp->msg_in[1] != 1)
                        goto do_reject;

                ent = esp->active_cmd;
                spriv = ESP_CMD_PRIV(ent->cmd);

                if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
                        spriv->cur_sg--;
                        spriv->cur_residue = 1;
                } else
                        spriv->cur_residue++;
                spriv->tot_residue++;
                return 0;
        }
        case RESTORE_POINTERS:
                esp_restore_pointers(esp, esp->active_cmd);
                return 0;
        case SAVE_POINTERS:
                esp_save_pointers(esp, esp->active_cmd);
                return 0;

        case COMMAND_COMPLETE:
        case DISCONNECT: {
                struct esp_cmd_entry *ent = esp->active_cmd;

                ent->message = msg0;
                esp_event(esp, ESP_EVENT_FREE_BUS);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                return 0;
        }
        case MESSAGE_REJECT:
                esp_msgin_reject(esp);
                return 0;

        default:
do_reject:
                esp->msg_out[0] = MESSAGE_REJECT;
                esp->msg_out_len = 1;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
                return 0;
        }
}
static int esp_process_event(struct esp *esp)
{
        int write, i;

again:
        write = 0;
        esp_log_event("process event %d phase %x\n",
                      esp->event, esp->sreg & ESP_STAT_PMASK);
        switch (esp->event) {
        case ESP_EVENT_CHECK_PHASE:
                switch (esp->sreg & ESP_STAT_PMASK) {
                case ESP_DOP:
                        esp_event(esp, ESP_EVENT_DATA_OUT);
                        break;
                case ESP_DIP:
                        esp_event(esp, ESP_EVENT_DATA_IN);
                        break;
                case ESP_STATP:
                        esp_flush_fifo(esp);
                        scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
                        esp_event(esp, ESP_EVENT_STATUS);
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                        return 1;

                case ESP_MOP:
                        esp_event(esp, ESP_EVENT_MSGOUT);
                        break;

                case ESP_MIP:
                        esp_event(esp, ESP_EVENT_MSGIN);
                        break;

                case ESP_CMDP:
                        esp_event(esp, ESP_EVENT_CMD_START);
                        break;

                default:
                        shost_printk(KERN_INFO, esp->host,
                                     "Unexpected phase, sreg=%02x\n",
                                     esp->sreg);
                        esp_schedule_reset(esp);
                        return 0;
                }
                goto again;
        case ESP_EVENT_DATA_IN:
                write = 1;
                /* fallthru */

        case ESP_EVENT_DATA_OUT: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;
                dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
                unsigned int dma_len = esp_cur_dma_len(ent, cmd);

                if (esp->rev == ESP100)
                        scsi_esp_cmd(esp, ESP_CMD_NULL);

                if (write)
                        ent->flags |= ESP_CMD_FLAG_WRITE;
                else
                        ent->flags &= ~ESP_CMD_FLAG_WRITE;

                if (esp->ops->dma_length_limit)
                        dma_len = esp->ops->dma_length_limit(esp, dma_addr,
                                                             dma_len);
                else
                        dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

                esp->data_dma_len = dma_len;

                if (!dma_len) {
                        shost_printk(KERN_ERR, esp->host,
                                     "DMA length is zero!\n");
                        shost_printk(KERN_ERR, esp->host,
                                     "cur adr[%08llx] len[%08x]\n",
                                     (unsigned long long)esp_cur_dma_addr(ent, cmd),
                                     esp_cur_dma_len(ent, cmd));
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
                                  (unsigned long long)dma_addr, dma_len, write);

                esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
                                       write, ESP_CMD_DMA | ESP_CMD_TI);
                esp_event(esp, ESP_EVENT_DATA_DONE);
                break;
        }
        case ESP_EVENT_DATA_DONE: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;
                int bytes_sent;

                if (esp->ops->dma_error(esp)) {
                        shost_printk(KERN_INFO, esp->host,
                                     "data done, DMA error, resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }

                if (ent->flags & ESP_CMD_FLAG_WRITE) {
                        /* XXX parity errors, etc. XXX */

                        esp->ops->dma_drain(esp);
                }
                esp->ops->dma_invalidate(esp);

                if (esp->ireg != ESP_INTR_BSERV) {
                        /* We should always see exactly a bus-service
                         * interrupt at the end of a successful transfer.
                         */
                        shost_printk(KERN_INFO, esp->host,
                                     "data done, not BSERV, resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }

                bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

                esp_log_datadone("data done flgs[%x] sent[%d]\n",
                                 ent->flags, bytes_sent);

                if (bytes_sent < 0) {
                        /* XXX force sync mode for this target XXX */
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_advance_dma(esp, ent, cmd, bytes_sent);
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                goto again;
        }
        case ESP_EVENT_STATUS: {
                struct esp_cmd_entry *ent = esp->active_cmd;

                if (esp->ireg & ESP_INTR_FDONE) {
                        ent->status = esp_read8(ESP_FDATA);
                        ent->message = esp_read8(ESP_FDATA);
                        scsi_esp_cmd(esp, ESP_CMD_MOK);
                } else if (esp->ireg == ESP_INTR_BSERV) {
                        ent->status = esp_read8(ESP_FDATA);
                        ent->message = 0xff;
                        esp_event(esp, ESP_EVENT_MSGIN);
                        return 0;
                }

                if (ent->message != COMMAND_COMPLETE) {
                        shost_printk(KERN_INFO, esp->host,
                                     "Unexpected message %x in status\n",
                                     ent->message);
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_event(esp, ESP_EVENT_FREE_BUS);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        }
        case ESP_EVENT_FREE_BUS: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;

                if (ent->message == COMMAND_COMPLETE ||
                    ent->message == DISCONNECT)
                        scsi_esp_cmd(esp, ESP_CMD_ESEL);

                if (ent->message == COMMAND_COMPLETE) {
                        esp_log_cmddone("Command done status[%x] message[%x]\n",
                                        ent->status, ent->message);
                        if (ent->status == SAM_STAT_TASK_SET_FULL)
                                esp_event_queue_full(esp, ent);

                        if (ent->status == SAM_STAT_CHECK_CONDITION &&
                            !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
                                ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
                                esp_autosense(esp, ent);
                        } else {
                                esp_cmd_is_done(esp, ent, cmd,
                                                compose_result(ent->status,
                                                               ent->message,
                                                               DID_OK));
                        }
                } else if (ent->message == DISCONNECT) {
                        esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
                                           cmd->device->id,
                                           ent->tag[0], ent->tag[1]);

                        esp->active_cmd = NULL;
                        esp_maybe_execute_command(esp);
                } else {
                        shost_printk(KERN_INFO, esp->host,
                                     "Unexpected message %x in freebus\n",
                                     ent->message);
                        esp_schedule_reset(esp);
                        return 0;
                }
                if (esp->active_cmd)
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        }
        case ESP_EVENT_MSGOUT: {
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                if (esp_debug & ESP_DEBUG_MSGOUT) {
                        int i;
                        printk("ESP: Sending message [ ");
                        for (i = 0; i < esp->msg_out_len; i++)
                                printk("%02x ", esp->msg_out[i]);
                        printk("]\n");
                }

                if (esp->rev == FASHME) {
                        int i;

                        /* Always use the fifo. */
                        for (i = 0; i < esp->msg_out_len; i++) {
                                esp_write8(esp->msg_out[i], ESP_FDATA);
                                esp_write8(0, ESP_FDATA);
                        }
                        scsi_esp_cmd(esp, ESP_CMD_TI);
                } else {
                        if (esp->msg_out_len == 1) {
                                esp_write8(esp->msg_out[0], ESP_FDATA);
                                scsi_esp_cmd(esp, ESP_CMD_TI);
                        } else if (esp->flags & ESP_FLAG_USE_FIFO) {
                                for (i = 0; i < esp->msg_out_len; i++)
                                        esp_write8(esp->msg_out[i], ESP_FDATA);
                                scsi_esp_cmd(esp, ESP_CMD_TI);
                        } else {
                                /* Use DMA. */
                                memcpy(esp->command_block,
                                       esp->msg_out,
                                       esp->msg_out_len);

                                esp->ops->send_dma_cmd(esp,
                                                       esp->command_block_dma,
                                                       esp->msg_out_len,
                                                       esp->msg_out_len,
                                                       0,
                                                       ESP_CMD_DMA|ESP_CMD_TI);
                        }
                }
                esp_event(esp, ESP_EVENT_MSGOUT_DONE);
                break;
        }
        case ESP_EVENT_MSGOUT_DONE:
                if (esp->rev == FASHME) {
                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                } else {
                        if (esp->msg_out_len > 1)
                                esp->ops->dma_invalidate(esp);
                }

                if (!(esp->ireg & ESP_INTR_DC)) {
                        if (esp->rev != FASHME)
                                scsi_esp_cmd(esp, ESP_CMD_NULL);
                }
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                goto again;
        case ESP_EVENT_MSGIN:
                if (esp->ireg & ESP_INTR_BSERV) {
                        if (esp->rev == FASHME) {
                                if (!(esp_read8(ESP_STATUS2) &
                                      ESP_STAT2_FEMPTY))
                                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                        } else {
                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                                if (esp->rev == ESP100)
                                        scsi_esp_cmd(esp, ESP_CMD_NULL);
                        }
                        scsi_esp_cmd(esp, ESP_CMD_TI);
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                        return 1;
                }
                if (esp->ireg & ESP_INTR_FDONE) {
                        u8 val;

                        if (esp->rev == FASHME)
                                val = esp->fifo[0];
                        else
                                val = esp_read8(ESP_FDATA);
                        esp->msg_in[esp->msg_in_len++] = val;

                        esp_log_msgin("Got msgin byte %x\n", val);

                        if (!esp_msgin_process(esp))
                                esp->msg_in_len = 0;

                        if (esp->rev == FASHME)
                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                        scsi_esp_cmd(esp, ESP_CMD_MOK);

                        if (esp->event != ESP_EVENT_FREE_BUS)
                                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                } else {
                        shost_printk(KERN_INFO, esp->host,
                                     "MSGIN neither BSERV nor FDONE, resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }
                break;
        case ESP_EVENT_CMD_START:
                memcpy(esp->command_block, esp->cmd_bytes_ptr,
                       esp->cmd_bytes_left);
                esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
                esp_event(esp, ESP_EVENT_CMD_DONE);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        case ESP_EVENT_CMD_DONE:
                esp->ops->dma_invalidate(esp);
                if (esp->ireg & ESP_INTR_BSERV) {
                        esp_event(esp, ESP_EVENT_CHECK_PHASE);
                        goto again;
                }
                esp_schedule_reset(esp);
                return 0;

        case ESP_EVENT_RESET:
                scsi_esp_cmd(esp, ESP_CMD_RS);
                break;

        default:
                shost_printk(KERN_INFO, esp->host,
                             "Unexpected event %x, resetting\n", esp->event);
                esp_schedule_reset(esp);
                return 0;
        }
        return 0;
}
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;

        esp_unmap_dma(esp, cmd);
        esp_free_lun_tag(ent, cmd->device->hostdata);
        cmd->result = DID_RESET << 16;

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp->ops->unmap_single(esp, ent->sense_dma,
                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
                ent->sense_ptr = NULL;
        }

        cmd->scsi_done(cmd);
        list_del(&ent->list);
        esp_put_ent(esp, ent);
}
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
        struct esp_lun_data *lp = dev->hostdata;

        BUG_ON(lp->num_tagged);
        lp->hold = 0;
}
static void esp_reset_cleanup(struct esp *esp)
{
        struct esp_cmd_entry *ent, *tmp;
        int i;

        list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
                struct scsi_cmnd *cmd = ent->cmd;

                list_del(&ent->list);
                cmd->result = DID_RESET << 16;
                cmd->scsi_done(cmd);
                esp_put_ent(esp, ent);
        }

        list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
                if (ent == esp->active_cmd)
                        esp->active_cmd = NULL;
                esp_reset_cleanup_one(esp, ent);
        }

        BUG_ON(esp->active_cmd != NULL);

        /* Force renegotiation of sync/wide transfers. */
        for (i = 0; i < ESP_MAX_TARGET; i++) {
                struct esp_target_data *tp = &esp->target[i];

                tp->esp_period = 0;
                tp->esp_offset = 0;
                tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
                                     ESP_CONFIG3_FSCSI |
                                     ESP_CONFIG3_FAST);
                tp->flags &= ~ESP_TGT_WIDE;
                tp->flags |= ESP_TGT_CHECK_NEGO;

                if (tp->starget)
                        __starget_for_each_device(tp->starget, NULL,
                                                  esp_clear_hold);
        }
        esp->flags &= ~ESP_FLAG_RESETTING;
}
/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
        int finish_reset, intr_done;
        u8 phase;

        /*
         * Once INTRPT is read STATUS and SSTEP are cleared.
         */
        esp->sreg = esp_read8(ESP_STATUS);
        esp->seqreg = esp_read8(ESP_SSTEP);
        esp->ireg = esp_read8(ESP_INTRPT);

        if (esp->flags & ESP_FLAG_RESETTING) {
                finish_reset = 1;
        } else {
                if (esp_check_gross_error(esp))
                        return;

                finish_reset = esp_check_spur_intr(esp);
                if (finish_reset < 0)
                        return;
        }

        if (esp->ireg & ESP_INTR_SR)
                finish_reset = 1;

        if (finish_reset) {
                esp_reset_cleanup(esp);
                if (esp->eh_reset) {
                        complete(esp->eh_reset);
                        esp->eh_reset = NULL;
                }
                return;
        }

        phase = (esp->sreg & ESP_STAT_PMASK);
        if (esp->rev == FASHME) {
                if (((phase != ESP_DIP && phase != ESP_DOP) &&
                     esp->select_state == ESP_SELECT_NONE &&
                     esp->event != ESP_EVENT_STATUS &&
                     esp->event != ESP_EVENT_DATA_DONE) ||
                    (esp->ireg & ESP_INTR_RSEL)) {
                        esp->sreg2 = esp_read8(ESP_STATUS2);
                        if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
                            (esp->sreg2 & ESP_STAT2_F1BYTE))
                                hme_read_fifo(esp);
                }
        }

        esp_log_intr("intr sreg[%02x] seqreg[%02x] "
                     "sreg2[%02x] ireg[%02x]\n",
                     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

        intr_done = 0;

        if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
                shost_printk(KERN_INFO, esp->host,
                             "unexpected IREG %02x\n", esp->ireg);
                if (esp->ireg & ESP_INTR_IC)
                        esp_dump_cmd_log(esp);

                esp_schedule_reset(esp);
        } else {
                if (!(esp->ireg & ESP_INTR_RSEL)) {
                        /* Some combination of FDONE, BSERV, DC. */
                        if (esp->select_state != ESP_SELECT_NONE)
                                intr_done = esp_finish_select(esp);
                } else if (esp->ireg & ESP_INTR_RSEL) {
                        if (esp->active_cmd)
                                (void) esp_finish_select(esp);

                        intr_done = esp_reconnect(esp);
                }
        }
        while (!intr_done)
                intr_done = esp_process_event(esp);
}
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
        struct esp *esp = dev_id;
        unsigned long flags;
        irqreturn_t ret;

        spin_lock_irqsave(esp->host->host_lock, flags);
        ret = IRQ_NONE;
        if (esp->ops->irq_pending(esp)) {
                ret = IRQ_HANDLED;
                for (;;) {
                        int i;

                        __esp_interrupt(esp);
                        if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
                                break;
                        esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

                        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
                                if (esp->ops->irq_pending(esp))
                                        break;
                        }
                        if (i == ESP_QUICKIRQ_LIMIT)
                                break;
                }
        }
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
static void esp_get_revision(struct esp *esp)
{
        u8 val;

        esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
        esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
        esp_write8(esp->config2, ESP_CFG2);

        val = esp_read8(ESP_CFG2);
        val &= ~ESP_CONFIG2_MAGIC;
        if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
                /* If what we write to cfg2 does not come back, cfg2 is not
                 * implemented, therefore this must be a plain esp100.
                 */
                esp->rev = ESP100;
        } else {
                esp->config2 = 0;
                esp_set_all_config3(esp, 5);
                esp->prev_cfg3 = 5;
                esp_write8(esp->config2, ESP_CFG2);
                esp_write8(0, ESP_CFG3);
                esp_write8(esp->prev_cfg3, ESP_CFG3);

                val = esp_read8(ESP_CFG3);
                if (val != 5) {
                        /* The cfg2 register is implemented, however
                         * cfg3 is not, must be esp100a.
                         */
                        esp->rev = ESP100A;
                } else {
                        esp_set_all_config3(esp, 0);
                        esp->prev_cfg3 = 0;
                        esp_write8(esp->prev_cfg3, ESP_CFG3);

                        /* All of cfg{1,2,3} implemented, must be one of
                         * the fas variants, figure out which one.
                         */
                        if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
                                esp->rev = FAST;
                                esp->sync_defp = SYNC_DEFP_FAST;
                        } else {
                                esp->rev = ESP236;
                        }
                        esp->config2 = 0;
                        esp_write8(esp->config2, ESP_CFG2);
                }
        }
}
static void esp_init_swstate(struct esp *esp)
{
        int i;

        INIT_LIST_HEAD(&esp->queued_cmds);
        INIT_LIST_HEAD(&esp->active_cmds);
        INIT_LIST_HEAD(&esp->esp_cmd_pool);

        /* Start with a clear state, domain validation (via ->slave_configure,
         * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
         * queueing.
         */
        for (i = 0 ; i < ESP_MAX_TARGET; i++) {
                esp->target[i].flags = 0;
                esp->target[i].nego_goal_period = 0;
                esp->target[i].nego_goal_offset = 0;
                esp->target[i].nego_goal_width = 0;
                esp->target[i].nego_goal_tags = 0;
        }
}
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
        u8 val;

        /* Reset the DMA */
        esp->ops->reset_dma(esp);

        /* Reset the ESP */
        esp_reset_esp(esp);

        /* Reset the SCSI bus, but tell ESP not to generate an irq */
        val = esp_read8(ESP_CFG1);
        val |= ESP_CONFIG1_SRRDISAB;
        esp_write8(val, ESP_CFG1);

        scsi_esp_cmd(esp, ESP_CMD_RS);
        udelay(400);

        esp_write8(esp->config1, ESP_CFG1);

        /* Eat any bitrot in the chip and we are done... */
        esp_read8(ESP_INTRPT);
}
static void esp_set_clock_params(struct esp *esp)
{
        int fhz;
        u8 ccf;

        /* This is getting messy but it has to be done correctly or else
         * you get weird behavior all over the place.  We are trying to
         * basically figure out three pieces of information.
         *
         * a) Clock Conversion Factor
         *
         *    This is a representation of the input crystal clock frequency
         *    going into the ESP on this machine.  Any operation whose timing
         *    is longer than 400ns depends on this value being correct.  For
         *    example, you'll get blips for arbitration/selection during high
         *    load or with multiple targets if this is not set correctly.
         *
         * b) Selection Time-Out
         *
         *    The ESP isn't very bright and will arbitrate for the bus and try
         *    to select a target forever if you let it.  This value tells the
         *    ESP when it has taken too long to negotiate and that it should
         *    interrupt the CPU so we can see what happened.  The value is
         *    computed as follows (from NCR/Symbios chip docs).
         *
         *          (Time Out Period) * (Input Clock)
         *    STO = ----------------------------------
         *          (8192) * (Clock Conversion Factor)
         *
         *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
         *
         * c) Empirical constants for synchronous offset and transfer period
         *    register values
         *
         *    This entails the smallest and largest sync period we could ever
         *    handle on this ESP.
         */
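        /* A worked example of the formulas above (illustrative, not tied
         * to any particular board): with a 40MHz input clock,
         *
         *     ccf = ((40000000 / 1000000) + 4) / 5 = 8
         *
         * and the selection time-out register value for the 250ms period
         * works out to
         *
         *     STO = (0.25 * 40000000) / (8192 * 8) ~= 153
         */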
        fhz = esp->cfreq;

        ccf = ((fhz / 1000000) + 4) / 5;
        if (ccf == 1)
                ccf = 2;

        /* If we can't find anything reasonable, just assume 20MHz.
         * This is the clock frequency of the older sun4c's where I've
         * been unable to find the clock-frequency PROM property.  All
         * other machines provide useful values it seems.
         */
        if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
                fhz = 20000000;
                ccf = 4;
        }

        esp->cfact = (ccf == 8 ? 0 : ccf);
        esp->cfreq = fhz;
        esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
        esp->ctick = ESP_TICK(ccf, esp->ccycle);
        esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
        esp->sync_defp = SYNC_DEFP_SLOW;
}
static const char *esp_chip_names[] = {
        "ESP100",
        "ESP100A",
        "ESP236",
        "FAS236",
        "FAS100A",
        "FAST",
        "FASHME",
};

static struct scsi_transport_template *esp_transport_template;
int scsi_esp_register(struct esp *esp, struct device *dev)
{
        static int instance;
        int err;

        if (!esp->num_tags)
                esp->num_tags = ESP_DEFAULT_TAGS;
        else if (esp->num_tags >= ESP_MAX_TAG)
                esp->num_tags = ESP_MAX_TAG - 1;
        esp->host->transportt = esp_transport_template;
        esp->host->max_lun = ESP_MAX_LUN;
        esp->host->cmd_per_lun = 2;
        esp->host->unique_id = instance;

        esp_set_clock_params(esp);

        esp_get_revision(esp);

        esp_init_swstate(esp);

        esp_bootup_reset(esp);

        dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
                   esp->host->unique_id, esp->regs, esp->dma_regs,
                   esp->host->irq);
        dev_printk(KERN_INFO, dev,
                   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
                   esp->host->unique_id, esp_chip_names[esp->rev],
                   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

        /* Let the SCSI bus reset settle. */
        ssleep(esp_bus_reset_settle);

        err = scsi_add_host(esp->host, dev);
        if (err)
                return err;

        instance++;

        scsi_scan_host(esp->host);

        return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
void scsi_esp_unregister(struct esp *esp)
{
        scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
static int esp_target_alloc(struct scsi_target *starget)
{
        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
        struct esp_target_data *tp = &esp->target[starget->id];

        tp->starget = starget;

        return 0;
}
static void esp_target_destroy(struct scsi_target *starget)
{
        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
        struct esp_target_data *tp = &esp->target[starget->id];

        tp->starget = NULL;
}
static int esp_slave_alloc(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];
        struct esp_lun_data *lp;

        lp = kzalloc(sizeof(*lp), GFP_KERNEL);
        if (!lp)
                return -ENOMEM;
        dev->hostdata = lp;

        spi_min_period(tp->starget) = esp->min_period;
        spi_max_offset(tp->starget) = 15;

        if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
                spi_max_width(tp->starget) = 1;
        else
                spi_max_width(tp->starget) = 0;

        return 0;
}
static int esp_slave_configure(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];

        if (dev->tagged_supported)
                scsi_change_queue_depth(dev, esp->num_tags);

        tp->flags |= ESP_TGT_DISCONNECT;

        if (!spi_initial_dv(dev->sdev_target))
                spi_dv_device(dev);

        return 0;
}
static void esp_slave_destroy(struct scsi_device *dev)
{
        struct esp_lun_data *lp = dev->hostdata;

        kfree(lp);
        dev->hostdata = NULL;
}
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct esp_cmd_entry *ent, *tmp;
        struct completion eh_done;
        unsigned long flags;

        /* XXX This helps a lot with debugging but might be a bit
         * XXX much for the final driver.
         */
        spin_lock_irqsave(esp->host->host_lock, flags);
        shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
                     cmd, cmd->cmnd[0]);
        ent = esp->active_cmd;
        if (ent)
                shost_printk(KERN_ERR, esp->host,
                             "Current command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        list_for_each_entry(ent, &esp->queued_cmds, list) {
                shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        }
        list_for_each_entry(ent, &esp->active_cmds, list) {
                shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
                             ent->cmd, ent->cmd->cmnd[0]);
        }
        esp_dump_cmd_log(esp);
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        spin_lock_irqsave(esp->host->host_lock, flags);

        ent = NULL;
        list_for_each_entry(tmp, &esp->queued_cmds, list) {
                if (tmp->cmd == cmd) {
                        ent = tmp;
                        break;
                }
        }

        if (ent) {
                /* Easiest case, we didn't even issue the command
                 * yet so it is trivial to abort.
                 */
                list_del(&ent->list);

                cmd->result = DID_ABORT << 16;
                cmd->scsi_done(cmd);

                esp_put_ent(esp, ent);

                goto out_success;
        }

        init_completion(&eh_done);

        ent = esp->active_cmd;
        if (ent && ent->cmd == cmd) {
                /* Command is the currently active command on
                 * the bus.  If we already have an output message
                 * pending, no dice.
                 */
                if (esp->msg_out_len)
                        goto out_failure;

                /* Send out an abort, encouraging the target to
                 * go to MSGOUT phase by asserting ATN.
                 */
                esp->msg_out[0] = ABORT_TASK_SET;
                esp->msg_out_len = 1;
                ent->eh_done = &eh_done;

                scsi_esp_cmd(esp, ESP_CMD_SATN);
        } else {
                /* The command is disconnected.  This is not easy to
                 * abort.  For now we fail and let the scsi error
                 * handling layer go try a scsi bus reset or host
                 * reset.
                 *
                 * What we could do is put together a scsi command
                 * solely for the purpose of sending an abort message
                 * to the target.  Coming up with all the code to
                 * cook up scsi commands, special case them everywhere,
                 * etc. is for questionable gain and it would be better
                 * if the generic scsi error handling layer could do at
                 * least some of that for us.
                 *
                 * Anyways this is an area for potential future improvement
                 * in this driver.
                 */
                goto out_failure;
        }

        spin_unlock_irqrestore(esp->host->host_lock, flags);

        if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
                spin_lock_irqsave(esp->host->host_lock, flags);
                ent->eh_done = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;

out_success:
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return SUCCESS;

out_failure:
        /* XXX This might be a good location to set ESP_TGT_BROKEN
         * XXX since we know which target/lun in particular is
         * XXX causing trouble.
         */
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return FAILED;
}
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct completion eh_reset;
        unsigned long flags;

        init_completion(&eh_reset);

        spin_lock_irqsave(esp->host->host_lock, flags);

        esp->eh_reset = &eh_reset;

        /* XXX This is too simple... We should add lots of
         * XXX checks here so that if we find that the chip is
         * XXX very wedged we return failure immediately so
         * XXX that we can perform a full chip reset.
         */
        esp->flags |= ESP_FLAG_RESETTING;
        scsi_esp_cmd(esp, ESP_CMD_RS);

        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
                spin_lock_irqsave(esp->host->host_lock, flags);
                esp->eh_reset = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;
}
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        unsigned long flags;

        spin_lock_irqsave(esp->host->host_lock, flags);
        esp_bootup_reset(esp);
        esp_reset_cleanup(esp);
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        return SUCCESS;
}
static const char *esp_info(struct Scsi_Host *host)
{
        return "esp";
}
struct scsi_host_template scsi_esp_template = {
        .module                 = THIS_MODULE,
        .name                   = "esp",
        .info                   = esp_info,
        .queuecommand           = esp_queuecommand,
        .target_alloc           = esp_target_alloc,
        .target_destroy         = esp_target_destroy,
        .slave_alloc            = esp_slave_alloc,
        .slave_configure        = esp_slave_configure,
        .slave_destroy          = esp_slave_destroy,
        .eh_abort_handler       = esp_eh_abort_handler,
        .eh_bus_reset_handler   = esp_eh_bus_reset_handler,
        .eh_host_reset_handler  = esp_eh_host_reset_handler,
        .can_queue              = 7,
        .this_id                = 7,
        .sg_tablesize           = SG_ALL,
        .use_clustering         = ENABLE_CLUSTERING,
        .max_sectors            = 0xffff,
        .skip_settle_delay      = 1,
};
EXPORT_SYMBOL(scsi_esp_template);
static void esp_get_signalling(struct Scsi_Host *host)
{
        struct esp *esp = shost_priv(host);
        enum spi_signal_type type;

        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
                type = SPI_SIGNAL_HVD;
        else
                type = SPI_SIGNAL_SE;

        spi_signalling(host) = type;
}
static void esp_set_offset(struct scsi_target *target, int offset)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        if (esp->flags & ESP_FLAG_DISABLE_SYNC)
                tp->nego_goal_offset = 0;
        else
                tp->nego_goal_offset = offset;
        tp->flags |= ESP_TGT_CHECK_NEGO;
}
static void esp_set_period(struct scsi_target *target, int period)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        tp->nego_goal_period = period;
        tp->flags |= ESP_TGT_CHECK_NEGO;
}
static void esp_set_width(struct scsi_target *target, int width)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        tp->nego_goal_width = (width ? 1 : 0);
        tp->flags |= ESP_TGT_CHECK_NEGO;
}
static struct spi_function_template esp_transport_ops = {
        .set_offset             = esp_set_offset,
        .show_offset            = 1,
        .set_period             = esp_set_period,
        .show_period            = 1,
        .set_width              = esp_set_width,
        .show_width             = 1,
        .get_signalling         = esp_get_signalling,
};
static int __init esp_init(void)
{
        BUILD_BUG_ON(sizeof(struct scsi_pointer) <
                     sizeof(struct esp_cmd_priv));

        esp_transport_template = spi_attach_transport(&esp_transport_ops);
        if (!esp_transport_template)
                return -ENODEV;

        return 0;
}
static void __exit esp_exit(void)
{
        spi_release_transport(esp_transport_template);
}
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
                 "ESP scsi bus reset delay in seconds");
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
                 "ESP bitmapped debugging message enable value:\n"
                 "      0x00000001      Log interrupt events\n"
                 "      0x00000002      Log scsi commands\n"
                 "      0x00000004      Log resets\n"
                 "      0x00000008      Log message in events\n"
                 "      0x00000010      Log message out events\n"
                 "      0x00000020      Log command completion\n"
                 "      0x00000040      Log disconnects\n"
                 "      0x00000080      Log data start\n"
                 "      0x00000100      Log data done\n"
                 "      0x00000200      Log reconnects\n"
                 "      0x00000400      Log auto-sense data\n"
);
module_init(esp_init);
module_exit(esp_exit);