2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
32 #include <linux/highmem.h>
33 #include <linux/spinlock.h>
34 #include <linux/blkdev.h>
35 #include <linux/delay.h>
36 #include <linux/timer.h>
37 #include <linux/interrupt.h>
38 #include <linux/completion.h>
39 #include <linux/suspend.h>
40 #include <linux/workqueue.h>
41 #include <scsi/scsi.h>
43 #include "scsi_priv.h"
44 #include <scsi/scsi_host.h>
45 #include <linux/libata.h>
47 #include <asm/semaphore.h>
48 #include <asm/byteorder.h>
/* Forward declarations for helpers defined later in this file.
 * NOTE(review): several prototypes appear truncated in this copy
 * (ata_busy_sleep and ata_choose_xfer_mode are missing parameter
 * lines) -- verify against the complete file. */
52 static unsigned int ata_busy_sleep (struct ata_port *ap,
53 unsigned long tmout_pat,
55 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
56 static void ata_set_mode(struct ata_port *ap);
57 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
58 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
59 static int fgb(u32 bitmap);
60 static int ata_choose_xfer_mode(struct ata_port *ap,
62 unsigned int *xfer_shift_out);
63 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
64 static void __ata_qc_complete(struct ata_queued_cmd *qc);
/* counter for handing out unique ids; starts at 1 (use not visible in this chunk) */
66 static unsigned int ata_unique_id = 1;
/* workqueue shared by libata; created/used outside this chunk */
67 static struct workqueue_struct *ata_wq;
69 MODULE_AUTHOR("Jeff Garzik");
70 MODULE_DESCRIPTION("Library module for ATA devices");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_VERSION);
75 * ata_tf_load - send taskfile registers to host controller
76 * @ap: Port to which output is sent
77 * @tf: ATA taskfile register set
79 * Outputs ATA taskfile to standard ATA host controller.
82 * Inherited from caller.
85 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
87 struct ata_ioports *ioaddr = &ap->ioaddr;
88 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
90 if (tf->ctl != ap->last_ctl) {
91 outb(tf->ctl, ioaddr->ctl_addr);
92 ap->last_ctl = tf->ctl;
96 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
97 outb(tf->hob_feature, ioaddr->feature_addr);
98 outb(tf->hob_nsect, ioaddr->nsect_addr);
99 outb(tf->hob_lbal, ioaddr->lbal_addr);
100 outb(tf->hob_lbam, ioaddr->lbam_addr);
101 outb(tf->hob_lbah, ioaddr->lbah_addr);
102 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
111 outb(tf->feature, ioaddr->feature_addr);
112 outb(tf->nsect, ioaddr->nsect_addr);
113 outb(tf->lbal, ioaddr->lbal_addr);
114 outb(tf->lbam, ioaddr->lbam_addr);
115 outb(tf->lbah, ioaddr->lbah_addr);
116 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
124 if (tf->flags & ATA_TFLAG_DEVICE) {
125 outb(tf->device, ioaddr->device_addr);
126 VPRINTK("device 0x%X\n", tf->device);
133 * ata_tf_load_mmio - send taskfile registers to host controller
134 * @ap: Port to which output is sent
135 * @tf: ATA taskfile register set
137 * Outputs ATA taskfile to standard ATA host controller using MMIO.
140 * Inherited from caller.
143 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
145 struct ata_ioports *ioaddr = &ap->ioaddr;
146 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
148 if (tf->ctl != ap->last_ctl) {
149 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
150 ap->last_ctl = tf->ctl;
154 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
155 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
156 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
157 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
158 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
159 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
160 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
169 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
170 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
171 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
172 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
173 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
174 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
182 if (tf->flags & ATA_TFLAG_DEVICE) {
183 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
184 VPRINTK("device 0x%X\n", tf->device);
192 * ata_tf_load - send taskfile registers to host controller
193 * @ap: Port to which output is sent
194 * @tf: ATA taskfile register set
196 * Outputs ATA taskfile to standard ATA host controller using MMIO
197 * or PIO as indicated by the ATA_FLAG_MMIO flag.
198 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
199 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
200 * hob_lbal, hob_lbam, and hob_lbah.
202 * This function waits for idle (!BUSY and !DRQ) after writing
203 * registers. If the control register has a new value, this
204 * function also waits for idle after writing control and before
205 * writing the remaining registers.
207 * May be used as the tf_load() entry in ata_port_operations.
210 * Inherited from caller.
212 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
214 if (ap->flags & ATA_FLAG_MMIO)
215 ata_tf_load_mmio(ap, tf);
217 ata_tf_load_pio(ap, tf);
221 * ata_exec_command_pio - issue ATA command to host controller
222 * @ap: port to which command is being issued
223 * @tf: ATA taskfile register set
225 * Issues PIO write to ATA command register, with proper
226 * synchronization with interrupt handler / other threads.
229 * spin_lock_irqsave(host_set lock)
232 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
234 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
236 outb(tf->command, ap->ioaddr.command_addr);
242 * ata_exec_command_mmio - issue ATA command to host controller
243 * @ap: port to which command is being issued
244 * @tf: ATA taskfile register set
246 * Issues MMIO write to ATA command register, with proper
247 * synchronization with interrupt handler / other threads.
250 * spin_lock_irqsave(host_set lock)
253 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
255 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
257 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
263 * ata_exec_command - issue ATA command to host controller
264 * @ap: port to which command is being issued
265 * @tf: ATA taskfile register set
267 * Issues PIO/MMIO write to ATA command register, with proper
268 * synchronization with interrupt handler / other threads.
271 * spin_lock_irqsave(host_set lock)
273 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
275 if (ap->flags & ATA_FLAG_MMIO)
276 ata_exec_command_mmio(ap, tf);
278 ata_exec_command_pio(ap, tf);
282 * ata_exec - issue ATA command to host controller
283 * @ap: port to which command is being issued
284 * @tf: ATA taskfile register set
286 * Issues PIO/MMIO write to ATA command register, with proper
287 * synchronization with interrupt handler / other threads.
290 * Obtains host_set lock.
293 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
297 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
298 spin_lock_irqsave(&ap->host_set->lock, flags);
299 ap->ops->exec_command(ap, tf);
300 spin_unlock_irqrestore(&ap->host_set->lock, flags);
304 * ata_tf_to_host - issue ATA taskfile to host controller
305 * @ap: port to which command is being issued
306 * @tf: ATA taskfile register set
308 * Issues ATA taskfile register set to ATA host controller,
309 * with proper synchronization with interrupt handler and
313 * Obtains host_set lock.
316 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
318 ap->ops->tf_load(ap, tf);
324 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
325 * @ap: port to which command is being issued
326 * @tf: ATA taskfile register set
328 * Issues ATA taskfile register set to ATA host controller,
329 * with proper synchronization with interrupt handler and
333 * spin_lock_irqsave(host_set lock)
336 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
338 ap->ops->tf_load(ap, tf);
339 ap->ops->exec_command(ap, tf);
343 * ata_tf_read_pio - input device's ATA taskfile shadow registers
344 * @ap: Port from which input is read
345 * @tf: ATA taskfile register set for storing input
347 * Reads ATA taskfile registers for currently-selected device
351 * Inherited from caller.
354 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
356 struct ata_ioports *ioaddr = &ap->ioaddr;
358 tf->nsect = inb(ioaddr->nsect_addr);
359 tf->lbal = inb(ioaddr->lbal_addr);
360 tf->lbam = inb(ioaddr->lbam_addr);
361 tf->lbah = inb(ioaddr->lbah_addr);
362 tf->device = inb(ioaddr->device_addr);
364 if (tf->flags & ATA_TFLAG_LBA48) {
365 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
366 tf->hob_feature = inb(ioaddr->error_addr);
367 tf->hob_nsect = inb(ioaddr->nsect_addr);
368 tf->hob_lbal = inb(ioaddr->lbal_addr);
369 tf->hob_lbam = inb(ioaddr->lbam_addr);
370 tf->hob_lbah = inb(ioaddr->lbah_addr);
375 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
376 * @ap: Port from which input is read
377 * @tf: ATA taskfile register set for storing input
379 * Reads ATA taskfile registers for currently-selected device
383 * Inherited from caller.
386 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
388 struct ata_ioports *ioaddr = &ap->ioaddr;
390 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
391 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
392 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
393 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
394 tf->device = readb((void __iomem *)ioaddr->device_addr);
396 if (tf->flags & ATA_TFLAG_LBA48) {
397 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
398 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
399 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
400 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
401 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
402 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
408 * ata_tf_read - input device's ATA taskfile shadow registers
409 * @ap: Port from which input is read
410 * @tf: ATA taskfile register set for storing input
412 * Reads ATA taskfile registers for currently-selected device
415 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
416 * is set, also reads the hob registers.
418 * May be used as the tf_read() entry in ata_port_operations.
421 * Inherited from caller.
423 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
425 if (ap->flags & ATA_FLAG_MMIO)
426 ata_tf_read_mmio(ap, tf);
428 ata_tf_read_pio(ap, tf);
432 * ata_check_status_pio - Read device status reg & clear interrupt
433 * @ap: port where the device is
435 * Reads ATA taskfile status register for currently-selected device
436 * and return its value. This also clears pending interrupts
440 * Inherited from caller.
442 static u8 ata_check_status_pio(struct ata_port *ap)
444 return inb(ap->ioaddr.status_addr);
448 * ata_check_status_mmio - Read device status reg & clear interrupt
449 * @ap: port where the device is
451 * Reads ATA taskfile status register for currently-selected device
452 * via MMIO and return its value. This also clears pending interrupts
456 * Inherited from caller.
458 static u8 ata_check_status_mmio(struct ata_port *ap)
460 return readb((void __iomem *) ap->ioaddr.status_addr);
465 * ata_check_status - Read device status reg & clear interrupt
466 * @ap: port where the device is
468 * Reads ATA taskfile status register for currently-selected device
469 * and return its value. This also clears pending interrupts
472 * May be used as the check_status() entry in ata_port_operations.
475 * Inherited from caller.
477 u8 ata_check_status(struct ata_port *ap)
479 if (ap->flags & ATA_FLAG_MMIO)
480 return ata_check_status_mmio(ap);
481 return ata_check_status_pio(ap);
486 * ata_altstatus - Read device alternate status reg
487 * @ap: port where the device is
489 * Reads ATA taskfile alternate status register for
490 * currently-selected device and return its value.
492 * Note: may NOT be used as the check_altstatus() entry in
493 * ata_port_operations.
496 * Inherited from caller.
498 u8 ata_altstatus(struct ata_port *ap)
500 if (ap->ops->check_altstatus)
501 return ap->ops->check_altstatus(ap);
503 if (ap->flags & ATA_FLAG_MMIO)
504 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
505 return inb(ap->ioaddr.altstatus_addr);
510 * ata_chk_err - Read device error reg
511 * @ap: port where the device is
513 * Reads ATA taskfile error register for
514 * currently-selected device and return its value.
516 * Note: may NOT be used as the check_err() entry in
517 * ata_port_operations.
520 * Inherited from caller.
522 u8 ata_chk_err(struct ata_port *ap)
524 if (ap->ops->check_err)
525 return ap->ops->check_err(ap);
527 if (ap->flags & ATA_FLAG_MMIO) {
528 return readb((void __iomem *) ap->ioaddr.error_addr);
530 return inb(ap->ioaddr.error_addr);
534 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
535 * @tf: Taskfile to convert
536 * @fis: Buffer into which data will output
537 * @pmp: Port multiplier port
539 * Converts a standard ATA taskfile to a Serial ATA
540 * FIS structure (Register - Host to Device).
543 * Inherited from caller.
546 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
548 fis[0] = 0x27; /* Register - Host to Device FIS */
549 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
550 bit 7 indicates Command FIS */
551 fis[2] = tf->command;
552 fis[3] = tf->feature;
559 fis[8] = tf->hob_lbal;
560 fis[9] = tf->hob_lbam;
561 fis[10] = tf->hob_lbah;
562 fis[11] = tf->hob_feature;
565 fis[13] = tf->hob_nsect;
576 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
577 * @fis: Buffer from which data will be input
578 * @tf: Taskfile to output
580 * Converts a standard ATA taskfile to a Serial ATA
581 * FIS structure (Register - Host to Device).
584 * Inherited from caller.
587 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
589 tf->command = fis[2]; /* status */
590 tf->feature = fis[3]; /* error */
597 tf->hob_lbal = fis[8];
598 tf->hob_lbam = fis[9];
599 tf->hob_lbah = fis[10];
602 tf->hob_nsect = fis[13];
606 * ata_prot_to_cmd - determine which read/write opcodes to use
607 * @protocol: ATA_PROT_xxx taskfile protocol
608 * @lba48: true is lba48 is present
610 * Given necessary input, determine which read/write commands
611 * to use to transfer data.
616 static int ata_prot_to_cmd(int protocol, int lba48)
618 int rcmd = 0, wcmd = 0;
623 rcmd = ATA_CMD_PIO_READ_EXT;
624 wcmd = ATA_CMD_PIO_WRITE_EXT;
626 rcmd = ATA_CMD_PIO_READ;
627 wcmd = ATA_CMD_PIO_WRITE;
633 rcmd = ATA_CMD_READ_EXT;
634 wcmd = ATA_CMD_WRITE_EXT;
637 wcmd = ATA_CMD_WRITE;
645 return rcmd | (wcmd << 8);
649 * ata_dev_set_protocol - set taskfile protocol and r/w commands
650 * @dev: device to examine and configure
652 * Examine the device configuration, after we have
653 * read the identify-device page and configured the
654 * data transfer mode. Set internal state related to
655 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
656 * and calculate the proper read/write commands to use.
661 static void ata_dev_set_protocol(struct ata_device *dev)
663 int pio = (dev->flags & ATA_DFLAG_PIO);
664 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
668 proto = dev->xfer_protocol = ATA_PROT_PIO;
670 proto = dev->xfer_protocol = ATA_PROT_DMA;
672 cmd = ata_prot_to_cmd(proto, lba48);
676 dev->read_cmd = cmd & 0xff;
677 dev->write_cmd = (cmd >> 8) & 0xff;
680 static const char * xfer_mode_str[] = {
700 * ata_udma_string - convert UDMA bit offset to string
701 * @mask: mask of bits supported; only highest bit counts.
703 * Determine string which represents the highest speed
704 * (highest bit in @udma_mask).
710 * Constant C string representing highest speed listed in
711 * @udma_mask, or the constant C string "<n/a>".
/* Return the name of the highest-speed mode set in @mask, scanning
 * UDMA bits first, then MWDMA, then PIO.
 * NOTE(review): this copy is missing the loop bodies, the "<n/a>"
 * fallback return and the label before the final return -- compare
 * with the full file before relying on it. */
714 static const char *ata_mode_string(unsigned int mask)
/* UDMA modes occupy the low bits; highest bit wins */
718 for (i = 7; i >= 0; i--)
721 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
724 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
731 return xfer_mode_str[i];
735 * ata_pio_devchk - PATA device presence detection
736 * @ap: ATA channel to examine
737 * @device: Device to examine (starting at zero)
739 * This technique was originally described in
740 * Hale Landis's ATADRVR (www.ata-atapi.com), and
741 * later found its way into the ATA/ATAPI spec.
743 * Write a pattern to the ATA shadow registers,
744 * and if a device is present, it will respond by
745 * correctly storing and echoing back the
746 * ATA shadow register contents.
752 static unsigned int ata_pio_devchk(struct ata_port *ap,
755 struct ata_ioports *ioaddr = &ap->ioaddr;
758 ap->ops->dev_select(ap, device);
760 outb(0x55, ioaddr->nsect_addr);
761 outb(0xaa, ioaddr->lbal_addr);
763 outb(0xaa, ioaddr->nsect_addr);
764 outb(0x55, ioaddr->lbal_addr);
766 outb(0x55, ioaddr->nsect_addr);
767 outb(0xaa, ioaddr->lbal_addr);
769 nsect = inb(ioaddr->nsect_addr);
770 lbal = inb(ioaddr->lbal_addr);
772 if ((nsect == 0x55) && (lbal == 0xaa))
773 return 1; /* we found a device */
775 return 0; /* nothing found */
779 * ata_mmio_devchk - PATA device presence detection
780 * @ap: ATA channel to examine
781 * @device: Device to examine (starting at zero)
783 * This technique was originally described in
784 * Hale Landis's ATADRVR (www.ata-atapi.com), and
785 * later found its way into the ATA/ATAPI spec.
787 * Write a pattern to the ATA shadow registers,
788 * and if a device is present, it will respond by
789 * correctly storing and echoing back the
790 * ATA shadow register contents.
796 static unsigned int ata_mmio_devchk(struct ata_port *ap,
799 struct ata_ioports *ioaddr = &ap->ioaddr;
802 ap->ops->dev_select(ap, device);
804 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
805 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
807 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
808 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
810 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
811 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
813 nsect = readb((void __iomem *) ioaddr->nsect_addr);
814 lbal = readb((void __iomem *) ioaddr->lbal_addr);
816 if ((nsect == 0x55) && (lbal == 0xaa))
817 return 1; /* we found a device */
819 return 0; /* nothing found */
823 * ata_devchk - PATA device presence detection
824 * @ap: ATA channel to examine
825 * @device: Device to examine (starting at zero)
827 * Dispatch ATA device presence detection, depending
828 * on whether we are using PIO or MMIO to talk to the
829 * ATA shadow registers.
835 static unsigned int ata_devchk(struct ata_port *ap,
838 if (ap->flags & ATA_FLAG_MMIO)
839 return ata_mmio_devchk(ap, device);
840 return ata_pio_devchk(ap, device);
844 * ata_dev_classify - determine device type based on ATA-spec signature
845 * @tf: ATA taskfile register set for device to be identified
847 * Determine from taskfile register contents whether a device is
848 * ATA or ATAPI, as per "Signature and persistence" section
849 * of ATA/PI spec (volume 1, sect 5.14).
855 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
856 * the event of failure.
859 unsigned int ata_dev_classify(struct ata_taskfile *tf)
861 /* Apple's open source Darwin code hints that some devices only
862 * put a proper signature into the LBA mid/high registers,
863 * So, we only check those. It's sufficient for uniqueness.
866 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
867 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
868 DPRINTK("found ATA device by sig\n");
872 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
873 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
874 DPRINTK("found ATAPI device by sig\n");
875 return ATA_DEV_ATAPI;
878 DPRINTK("unknown device\n");
879 return ATA_DEV_UNKNOWN;
883 * ata_dev_try_classify - Parse returned ATA device signature
884 * @ap: ATA channel to examine
885 * @device: Device to examine (starting at zero)
887 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
888 * an ATA/ATAPI-defined set of values is placed in the ATA
889 * shadow registers, indicating the results of device detection
892 * Select the ATA device, and read the values from the ATA shadow
893 * registers. Then parse according to the Error register value,
894 * and the spec-defined values examined by ata_dev_classify().
/* After a reset event, read the device's signature taskfile and set
 * dev->class accordingly (defaults to ATA_DEV_NONE).
 * NOTE(review): this copy is missing several lines -- declarations of
 * 'class' and 'err', the diagnostics branches, and the returns --
 * restore from the full file before building. */
900 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
902 struct ata_device *dev = &ap->device[device];
903 struct ata_taskfile tf;
907 ap->ops->dev_select(ap, device);
909 memset(&tf, 0, sizeof(tf));
911 err = ata_chk_err(ap);
912 ap->ops->tf_read(ap, &tf);
/* assume nothing present until the signature says otherwise */
914 dev->class = ATA_DEV_NONE;
916 /* see if device passed diags */
/* 0x81: device 0 OK, device 1 failed diagnostics */
919 else if ((device == 0) && (err == 0x81))
924 /* determine if device if ATA or ATAPI */
925 class = ata_dev_classify(&tf);
926 if (class == ATA_DEV_UNKNOWN)
/* status of 0 after a valid ATA signature means nothing is there */
928 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
937 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
938 * @id: IDENTIFY DEVICE results we will examine
939 * @s: string into which data is output
940 * @ofs: offset into identify device page
941 * @len: length of string to return. must be an even number.
943 * The strings in the IDENTIFY DEVICE page are broken up into
944 * 16-bit chunks. Run through the string, and output each
945 * 8-bit chunk linearly, regardless of platform.
951 void ata_dev_id_string(u16 *id, unsigned char *s,
952 unsigned int ofs, unsigned int len)
972 * ata_noop_dev_select - Select device 0/1 on ATA bus
973 * @ap: ATA channel to manipulate
974 * @device: ATA device (numbered from zero) to select
976 * This function performs no actual function.
978 * May be used as the dev_select() entry in ata_port_operations.
983 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
989 * ata_std_dev_select - Select device 0/1 on ATA bus
990 * @ap: ATA channel to manipulate
991 * @device: ATA device (numbered from zero) to select
993 * Use the method defined in the ATA specification to
994 * make either device 0, or device 1, active on the
995 * ATA channel. Works with both PIO and MMIO.
997 * May be used as the dev_select() entry in ata_port_operations.
1003 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1008 tmp = ATA_DEVICE_OBS;
1010 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1012 if (ap->flags & ATA_FLAG_MMIO) {
1013 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
1015 outb(tmp, ap->ioaddr.device_addr);
1017 ata_pause(ap); /* needed; also flushes, for mmio */
1021 * ata_dev_select - Select device 0/1 on ATA bus
1022 * @ap: ATA channel to manipulate
1023 * @device: ATA device (numbered from zero) to select
1024 * @wait: non-zero to wait for Status register BSY bit to clear
1025 * @can_sleep: non-zero if context allows sleeping
1027 * Use the method defined in the ATA specification to
1028 * make either device 0, or device 1, active on the
1031 * This is a high-level version of ata_std_dev_select(),
1032 * which additionally provides the services of inserting
1033 * the proper pauses and status polling, where needed.
/* High-level device select: calls the port's dev_select() hook and
 * inserts the pauses/status polling the low-level hook omits.
 * NOTE(review): this copy is missing the wait/sleep lines between
 * and after the calls -- restore from the full file. */
1039 void ata_dev_select(struct ata_port *ap, unsigned int device,
1040 unsigned int wait, unsigned int can_sleep)
1042 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
1043 ap->id, device, wait);
1048 ap->ops->dev_select(ap, device);
/* ATAPI devices may need extra settle time after selection */
1051 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1058 * ata_dump_id - IDENTIFY DEVICE info debugging output
1059 * @dev: Device whose IDENTIFY DEVICE page we will dump
1061 * Dump selected 16-bit words from a detected device's
1062 * IDENTIFY PAGE page.
/* Debug helper: dump selected 16-bit words (capabilities and mode
 * fields) of @dev's IDENTIFY DEVICE page via DPRINTK.
 * NOTE(review): the printk format continuations and argument lists
 * are missing from this copy -- restore from the full file. */
1068 static inline void ata_dump_id(struct ata_device *dev)
1070 DPRINTK("49==0x%04x "
1080 DPRINTK("80==0x%04x "
1090 DPRINTK("88==0x%04x "
1097 * ata_dev_identify - obtain IDENTIFY x DEVICE page
1098 * @ap: port on which device we wish to probe resides
1099 * @device: device bus address, starting at zero
1101 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
1102 * command, and read back the 512-byte device information page.
1103 * The device information page is fed to us via the standard
1104 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
1105 * using standard PIO-IN paths)
1107 * After reading the device information page, we use several
1108 * bits of information from it to initialize data structures
1109 * that will be used during the lifetime of the ata_device.
1110 * Other data from the info page is used to disqualify certain
1111 * older ATA devices we do not wish to support.
1114 * Inherited from caller. Some functions called by this function
1115 * obtain the host_set lock.
/* Issue IDENTIFY [PACKET] DEVICE to @device, read back the 512-byte
 * info page into dev->id, and initialize the ata_device from it
 * (LBA/LBA48/CHS geometry, sector count, CDB length for ATAPI).
 * Devices that fail the checks are marked unsupported.
 * NOTE(review): this copy has many lines elided (locals, else
 * branches, gotos, closing braces); treat it as a skeleton and
 * restore from the full file before building.
 * LOCKING: inherited from caller; some callees take host_set lock. */
1118 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
1120 struct ata_device *dev = &ap->device[device];
1121 unsigned int major_version;
1123 unsigned long xfer_modes;
1125 unsigned int using_edd;
1126 DECLARE_COMPLETION(wait);
1127 struct ata_queued_cmd *qc;
1128 unsigned long flags;
1131 if (!ata_dev_present(dev)) {
1132 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
/* SRST/SATA reset implies EDD was not used for reset */
1137 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1142 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1144 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1145 dev->class == ATA_DEV_NONE);
1147 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* build a PIO-in qc that reads the identify page into dev->id */
1149 qc = ata_qc_new_init(ap, dev);
1152 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1153 qc->dma_dir = DMA_FROM_DEVICE;
1154 qc->tf.protocol = ATA_PROT_PIO;
1158 if (dev->class == ATA_DEV_ATA) {
1159 qc->tf.command = ATA_CMD_ID_ATA;
1160 DPRINTK("do ATA identify\n");
1162 qc->tf.command = ATA_CMD_ID_ATAPI;
1163 DPRINTK("do ATAPI identify\n");
1166 qc->waiting = &wait;
1167 qc->complete_fn = ata_qc_complete_noop;
1169 spin_lock_irqsave(&ap->host_set->lock, flags);
1170 rc = ata_qc_issue(qc);
1171 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1176 wait_for_completion(&wait);
1178 status = ata_chk_status(ap);
1179 if (status & ATA_ERR) {
1181 * arg! EDD works for all test cases, but seems to return
1182 * the ATA signature for some ATAPI devices. Until the
1183 * reason for this is found and fixed, we fix up the mess
1184 * here. If IDENTIFY DEVICE returns command aborted
1185 * (as ATAPI devices do), then we issue an
1186 * IDENTIFY PACKET DEVICE.
1188 * ATA software reset (SRST, the default) does not appear
1189 * to have this problem.
1191 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1192 u8 err = ata_chk_err(ap);
1193 if (err & ATA_ABORTED) {
1194 dev->class = ATA_DEV_ATAPI;
/* identify page arrives little-endian; fix up on BE hosts */
1205 swap_buf_le16(dev->id, ATA_ID_WORDS);
1207 /* print device capabilities */
1208 printk(KERN_DEBUG "ata%u: dev %u cfg "
1209 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1210 ap->id, device, dev->id[49],
1211 dev->id[82], dev->id[83], dev->id[84],
1212 dev->id[85], dev->id[86], dev->id[87],
1216 * common ATA, ATAPI feature tests
1219 /* we require DMA support (bits 8 of word 49) */
1220 if (!ata_id_has_dma(dev->id)) {
1221 printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
1225 /* quick-n-dirty find max transfer mode; for printk only */
1226 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1228 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1230 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1231 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1236 /* ATA-specific feature tests */
1237 if (dev->class == ATA_DEV_ATA) {
1238 if (!ata_id_is_ata(dev->id)) /* sanity check */
1241 /* get major version */
1242 tmp = dev->id[ATA_ID_MAJOR_VER];
1243 for (major_version = 14; major_version >= 1; major_version--)
1244 if (tmp & (1 << major_version))
1248 * The exact sequence expected by certain pre-ATA4 drives is:
1251 * INITIALIZE DEVICE PARAMETERS
1253 * Some drives were very specific about that exact sequence.
1255 if (major_version < 4 || (!ata_id_has_lba(dev->id)))
1256 ata_dev_init_params(ap, dev);
1258 if (ata_id_has_lba(dev->id)) {
1259 dev->flags |= ATA_DFLAG_LBA;
1261 if (ata_id_has_lba48(dev->id)) {
1262 dev->flags |= ATA_DFLAG_LBA48;
1263 dev->n_sectors = ata_id_u64(dev->id, 100);
1265 dev->n_sectors = ata_id_u32(dev->id, 60);
1268 /* print device info to dmesg */
1269 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
1272 ata_mode_string(xfer_modes),
1273 (unsigned long long)dev->n_sectors,
1274 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
1278 /* Default translation */
1279 dev->cylinders = dev->id[1];
1280 dev->heads = dev->id[3];
1281 dev->sectors = dev->id[6];
1282 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;
1284 if (ata_id_current_chs_valid(dev->id)) {
1285 /* Current CHS translation is valid. */
1286 dev->cylinders = dev->id[54];
1287 dev->heads = dev->id[55];
1288 dev->sectors = dev->id[56];
1290 dev->n_sectors = ata_id_u32(dev->id, 57);
1293 /* print device info to dmesg */
1294 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
1297 ata_mode_string(xfer_modes),
1298 (unsigned long long)dev->n_sectors,
1299 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
1303 ap->host->max_cmd_len = 16;
1306 /* ATAPI-specific feature tests */
1308 if (ata_id_is_ata(dev->id)) /* sanity check */
1311 rc = atapi_cdb_len(dev->id);
1312 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1313 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1316 ap->cdb_len = (unsigned int) rc;
1317 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1319 /* print device info to dmesg */
1320 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1322 ata_mode_string(xfer_modes));
1325 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
/* error path: mark device unsupported rather than leaving it half-configured */
1329 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1332 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1333 DPRINTK("EXIT, err\n");
1337 static inline u8 ata_dev_knobble(struct ata_port *ap)
1339 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1343 * ata_dev_config - Run device specific handlers and check for
1344 * SATA->PATA bridges
1351 void ata_dev_config(struct ata_port *ap, unsigned int i)
1353 /* limit bridge transfers to udma5, 200 sectors */
1354 if (ata_dev_knobble(ap)) {
1355 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1356 ap->id, ap->device->devno);
1357 ap->udma_mask &= ATA_UDMA5;
1358 ap->host->max_sectors = ATA_MAX_SECTORS;
1359 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
1360 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
1363 if (ap->ops->dev_config)
1364 ap->ops->dev_config(ap, &ap->device[i]);
1368 * ata_bus_probe - Reset and probe ATA bus
1371 * Master ATA bus probing function. Initiates a hardware-dependent
1372 * bus reset, then attempts to identify any devices found on
1376 * PCI/etc. bus probe sem.
1379 * Zero on success, non-zero on error.
/* Master bus probe: reset the PHY, identify and configure each of
 * the ATA_MAX_DEVICES devices; disables the port if nothing usable
 * is found. Returns zero on success, non-zero on error.
 * NOTE(review): this copy is missing the mode-setting call, the
 * success return and the closing braces -- restore from the full
 * file. LOCKING: PCI/etc. bus probe sem. */
1382 static int ata_bus_probe(struct ata_port *ap)
1384 unsigned int i, found = 0;
1386 ap->ops->phy_reset(ap);
1387 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1390 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1391 ata_dev_identify(ap, i);
1392 if (ata_dev_present(&ap->device[i])) {
1394 ata_dev_config(ap,i);
/* nothing attached, or driver disabled the port mid-probe */
1398 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1399 goto err_out_disable;
1402 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1403 goto err_out_disable;
1408 ap->ops->port_disable(ap);
1414  * ata_port_probe - Mark port as enabled
1415  * @ap: Port for which we indicate enablement
1417  * Modify @ap data structure such that the system
1418  * thinks that the entire port is enabled.
1420  * LOCKING: host_set lock, or some other form of
/* Simply clears the disabled flag; the inverse of ata_port_disable(). */
1424 void ata_port_probe(struct ata_port *ap)
1426 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1430  * __sata_phy_reset - Wake/reset a low-level SATA PHY
1431  * @ap: SATA port associated with target SATA PHY.
1433  * This function issues commands to standard SATA Sxxx
1434  * PHY registers, to wake up the phy (and device), and
1435  * clear any reset condition.
1438  * PCI/etc. bus probe sem.
1441 void __sata_phy_reset(struct ata_port *ap)
1444 unsigned long timeout = jiffies + (HZ * 5);
1446 if (ap->flags & ATA_FLAG_SATA_RESET) {
1447 /* issue phy wake/reset */
/* SControl 0x301: DET=1 (issue reset), then 0x300: DET=0 (release) */
1448 scr_write_flush(ap, SCR_CONTROL, 0x301);
1449 udelay(400); /* FIXME: a guess */
1451 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1453 /* wait for phy to become ready, if necessary */
/* poll SStatus DET field (low nibble) until it leaves state 1 */
1456 sstatus = scr_read(ap, SCR_STATUS);
1457 if ((sstatus & 0xf) != 1)
1459 } while (time_before(jiffies, timeout));
1461 /* TODO: phy layer with polling, timeouts, etc. */
1462 if (sata_dev_present(ap))
/* no device behind the PHY: report and disable the port */
1465 sstatus = scr_read(ap, SCR_STATUS);
1466 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1468 ata_port_disable(ap);
1471 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* wait for the device to come out of BSY; disable port on timeout */
1474 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1475 ata_port_disable(ap);
1479 ap->cbl = ATA_CBL_SATA;
1483  * sata_phy_reset - Reset SATA bus.
1484  * @ap: SATA port associated with target SATA PHY.
1486  * This function resets the SATA bus, and then probes
1487  * the bus for devices.
1490  * PCI/etc. bus probe sem.
/*
 * Thin wrapper: PHY reset first, then (in lines elided from this
 * excerpt) device probing unless the reset disabled the port.
 */
1493 void sata_phy_reset(struct ata_port *ap)
1495 __sata_phy_reset(ap);
1496 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1502  * ata_port_disable - Disable port.
1503  * @ap: Port to be disabled.
1505  * Modify @ap data structure such that the system
1506  * thinks that the entire port is disabled, and should
1507  * never attempt to probe or communicate with devices
1510  * LOCKING: host_set lock, or some other form of
/* Marks both master and slave absent and sets the port-disabled flag. */
1514 void ata_port_disable(struct ata_port *ap)
1516 ap->device[0].class = ATA_DEV_NONE;
1517 ap->device[1].class = ATA_DEV_NONE;
1518 ap->flags |= ATA_FLAG_PORT_DISABLED;
/*
 * Table mapping each transfer-mode class (mask bit-shift) to the base
 * SET FEATURES - XFER mode code of that class.  NOTE(review): the
 * struct declaration opening this initializer is elided from this
 * excerpt.  Ordered fastest class first (UDMA, MWDMA, PIO).
 */
1524 } xfer_mode_classes[] = {
1525 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1526 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1527 { ATA_SHIFT_PIO, XFER_PIO_0 },
/*
 * base_from_shift - look up the XFER_* base code for a mode-class
 * shift; return value for an unknown shift is in lines elided here.
 */
1530 static inline u8 base_from_shift(unsigned int shift)
1534 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1535 if (xfer_mode_classes[i].shift == shift)
1536 return xfer_mode_classes[i].base;
/*
 * ata_dev_set_mode - push the previously chosen xfer mode to @dev via
 * SET FEATURES - XFER MODE, flag PIO-only devices, and log the result.
 * No-op for absent devices or a disabled port.
 */
1541 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1546 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1549 if (dev->xfer_shift == ATA_SHIFT_PIO)
1550 dev->flags |= ATA_DFLAG_PIO;
/* issue the SET FEATURES command to the device itself */
1552 ata_dev_set_xfermode(ap, dev);
/* index into xfer_mode_str[]: class shift + offset within the class */
1554 base = base_from_shift(dev->xfer_shift);
1555 ofs = dev->xfer_mode - base;
1556 idx = ofs + dev->xfer_shift;
1557 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1559 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1560 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1562 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1563 ap->id, dev->devno, xfer_mode_str[idx]);
/*
 * ata_host_set_pio - choose the best common PIO mode for all present
 * devices on @ap and program the controller's PIO timings for each
 * via ops->set_piomode.  Returns non-zero when no PIO mode is common
 * (error path elided from this excerpt).
 */
1566 static int ata_host_set_pio(struct ata_port *ap)
1572 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
/* NOTE(review): fgb(mask) selection of 'x' is elided from excerpt */
1575 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1579 base = base_from_shift(ATA_SHIFT_PIO);
1580 xfer_mode = base + x;
1582 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1583 (int)base, (int)xfer_mode, mask, x);
1585 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1586 struct ata_device *dev = &ap->device[i];
1587 if (ata_dev_present(dev)) {
1588 dev->pio_mode = xfer_mode;
1589 dev->xfer_mode = xfer_mode;
1590 dev->xfer_shift = ATA_SHIFT_PIO;
1591 if (ap->ops->set_piomode)
1592 ap->ops->set_piomode(ap, dev);
/*
 * ata_host_set_dma - record @xfer_mode/@xfer_shift as the DMA mode for
 * every present device on @ap and program controller DMA timings via
 * the optional ops->set_dmamode hook.
 */
1599 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1600 unsigned int xfer_shift)
1604 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1605 struct ata_device *dev = &ap->device[i];
1606 if (ata_dev_present(dev)) {
1607 dev->dma_mode = xfer_mode;
1608 dev->xfer_mode = xfer_mode;
1609 dev->xfer_shift = xfer_shift;
1610 if (ap->ops->set_dmamode)
1611 ap->ops->set_dmamode(ap, dev);
1617  * ata_set_mode - Program timings and issue SET FEATURES - XFER
1618  * @ap: port on which timings will be programmed
1620  * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1623  * PCI/etc. bus probe sem.
1626 static void ata_set_mode(struct ata_port *ap)
1628 unsigned int i, xfer_shift;
1632 /* step 1: always set host PIO timings */
1633 rc = ata_host_set_pio(ap);
/* NOTE(review): error check on rc elided from excerpt */
1637 /* step 2: choose the best data xfer mode */
1638 xfer_mode = xfer_shift = 0;
1639 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1643 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1644 if (xfer_shift != ATA_SHIFT_PIO)
1645 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1647 /* step 4: update devices' xfer mode */
1648 ata_dev_set_mode(ap, &ap->device[0]);
1649 ata_dev_set_mode(ap, &ap->device[1]);
1651 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* optional driver hook run after modes are committed */
1654 if (ap->ops->post_set_mode)
1655 ap->ops->post_set_mode(ap);
/* refresh each device's protocol choice for the new modes */
1657 for (i = 0; i < 2; i++) {
1658 struct ata_device *dev = &ap->device[i];
1659 ata_dev_set_protocol(dev);
/* error path: take the whole port offline */
1665 ata_port_disable(ap);
1669  * ata_busy_sleep - sleep until BSY clears, or timeout
1670  * @ap: port containing status register to be polled
1671  * @tmout_pat: impatience timeout
1672  * @tmout: overall timeout
1674  * Sleep until ATA Status register bit BSY clears,
1675  * or a timeout occurs.
/*
 * Two-phase poll: after @tmout_pat expires with BSY still set, warn
 * the user ("slow to respond") and keep polling up to @tmout total.
 * Returns non-zero on final timeout (return statements elided from
 * this excerpt).
 */
1681 static unsigned int ata_busy_sleep (struct ata_port *ap,
1682 unsigned long tmout_pat,
1683 unsigned long tmout)
1685 unsigned long timer_start, timeout;
1688 status = ata_busy_wait(ap, ATA_BUSY, 300);
1689 timer_start = jiffies;
1690 timeout = timer_start + tmout_pat;
1691 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1693 status = ata_busy_wait(ap, ATA_BUSY, 3);
1696 if (status & ATA_BUSY)
1697 printk(KERN_WARNING "ata%u is slow to respond, "
1698 "please be patient\n", ap->id);
/* phase 2: total timeout measured from the same start instant */
1700 timeout = timer_start + tmout;
1701 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1703 status = ata_chk_status(ap);
1706 if (status & ATA_BUSY) {
1707 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1708 ap->id, tmout / HZ);
/*
 * ata_bus_post_reset - wait for devices found in ata_devchk() to come
 * ready after a bus reset.  @devmask bit 0 = device 0, bit 1 = device 1.
 * For device 1, first poll nsect/lbal for the 0x01/0x01 signature that
 * indicates the taskfile registers are accessible, then wait for BSY.
 */
1715 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1717 struct ata_ioports *ioaddr = &ap->ioaddr;
1718 unsigned int dev0 = devmask & (1 << 0);
1719 unsigned int dev1 = devmask & (1 << 1);
1720 unsigned long timeout;
1722 /* if device 0 was found in ata_devchk, wait for its
1726 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1728 /* if device 1 was found in ata_devchk, wait for
1729 * register access, then wait for BSY to clear
1731 timeout = jiffies + ATA_TMOUT_BOOT;
1735 ap->ops->dev_select(ap, 1);
1736 if (ap->flags & ATA_FLAG_MMIO) {
1737 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1738 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1740 nsect = inb(ioaddr->nsect_addr);
1741 lbal = inb(ioaddr->lbal_addr);
/* 0x01 in both registers is the post-reset signature */
1743 if ((nsect == 1) && (lbal == 1))
1745 if (time_after(jiffies, timeout)) {
1749 msleep(50); /* give drive a breather */
1752 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1754 /* is all this really necessary? */
1755 ap->ops->dev_select(ap, 0);
1757 ap->ops->dev_select(ap, 1);
1759 ap->ops->dev_select(ap, 0);
1763  * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
1764  * @ap: Port to reset and probe
1766  * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
1767  * probe the bus. Not often used these days.
1770  * PCI/etc. bus probe sem.
/* Returns the (non-zero-on-timeout) result of the post-EDD BSY wait. */
1774 static unsigned int ata_bus_edd(struct ata_port *ap)
1776 struct ata_taskfile tf;
1778 /* set up execute-device-diag (bus reset) taskfile */
1779 /* also, take interrupts to a known state (disabled) */
1780 DPRINTK("execute-device-diag\n");
1781 ata_tf_init(ap, &tf, 0);
1783 tf.command = ATA_CMD_EDD;
1784 tf.protocol = ATA_PROT_NODATA;
1787 ata_tf_to_host(ap, &tf);
1789 /* spec says at least 2ms. but who knows with those
1790 * crazy ATAPI devices...
1794 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/*
 * ata_bus_softreset - reset the bus by pulsing SRST in the device
 * control register (ctl, ctl|SRST, ctl), for both MMIO and port-I/O
 * controllers, then wait out the post-reset settle time and hand off
 * to ata_bus_post_reset() for per-device readiness.
 */
1797 static unsigned int ata_bus_softreset(struct ata_port *ap,
1798 unsigned int devmask)
1800 struct ata_ioports *ioaddr = &ap->ioaddr;
1802 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1804 /* software reset. causes dev0 to be selected */
1805 if (ap->flags & ATA_FLAG_MMIO) {
1806 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1807 udelay(20); /* FIXME: flush */
1808 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1809 udelay(20); /* FIXME: flush */
1810 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
/* legacy port-I/O path: same ctl -> ctl|SRST -> ctl pulse */
1812 outb(ap->ctl, ioaddr->ctl_addr);
1814 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1816 outb(ap->ctl, ioaddr->ctl_addr);
1819 /* spec mandates ">= 2ms" before checking status.
1820 * We wait 150ms, because that was the magic delay used for
1821 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1822 * between when the ATA command register is written, and then
1823 * status is checked. Because waiting for "a while" before
1824 * checking status is fine, post SRST, we perform this magic
1825 * delay here as well.
1829 ata_bus_post_reset(ap, devmask);
1835  * ata_bus_reset - reset host port and associated ATA channel
1836  * @ap: port to reset
1838  * This is typically the first time we actually start issuing
1839  * commands to the ATA channel. We wait for BSY to clear, then
1840  * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1841  * result. Determine what devices, if any, are on the channel
1842  * by looking at the device 0/1 error register. Look at the signature
1843  * stored in each device's taskfile registers, to determine if
1844  * the device is ATA or ATAPI.
1847  * PCI/etc. bus probe sem.
1848  * Obtains host_set lock.
1851  * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1854 void ata_bus_reset(struct ata_port *ap)
1856 struct ata_ioports *ioaddr = &ap->ioaddr;
1857 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1859 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1861 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1863 /* determine if device 0/1 are present */
/* SATA ports assume device 0 present; PATA probes with ata_devchk */
1864 if (ap->flags & ATA_FLAG_SATA_RESET)
1867 dev0 = ata_devchk(ap, 0);
1869 dev1 = ata_devchk(ap, 1);
1873 devmask |= (1 << 0);
1875 devmask |= (1 << 1);
1877 /* select device 0 again */
1878 ap->ops->dev_select(ap, 0);
1880 /* issue bus reset */
/* SRST preferred; EDD fallback only for non-SATA-reset ports */
1881 if (ap->flags & ATA_FLAG_SRST)
1882 rc = ata_bus_softreset(ap, devmask);
1883 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1884 /* set up device control */
1885 if (ap->flags & ATA_FLAG_MMIO)
1886 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1888 outb(ap->ctl, ioaddr->ctl_addr);
1889 rc = ata_bus_edd(ap);
1896 * determine by signature whether we have ATA or ATAPI devices
/* 0x81 in the error register means device 1 failed diagnostics */
1898 err = ata_dev_try_classify(ap, 0);
1899 if ((slave_possible) && (err != 0x81))
1900 ata_dev_try_classify(ap, 1);
1902 /* re-enable interrupts */
1903 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
1906 /* is double-select really necessary? */
1907 if (ap->device[1].class != ATA_DEV_NONE)
1908 ap->ops->dev_select(ap, 1);
1909 if (ap->device[0].class != ATA_DEV_NONE)
1910 ap->ops->dev_select(ap, 0);
1912 /* if no devices were detected, disable this port */
1913 if ((ap->device[0].class == ATA_DEV_NONE) &&
1914 (ap->device[1].class == ATA_DEV_NONE))
1917 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1918 /* set up device control for ATA_FLAG_SATA_RESET */
1919 if (ap->flags & ATA_FLAG_MMIO)
1920 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1922 outb(ap->ctl, ioaddr->ctl_addr);
/* error path label (elided): no devices / reset failed */
1929 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1930 ap->ops->port_disable(ap);
/* ata_pr_blacklisted - log that DMA is being disabled for a device. */
1935 static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
1937 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
1938 ap->id, dev->devno);
/*
 * Model strings of devices with known-broken DMA, matched by prefix in
 * ata_dma_blacklisted().  NOTE(review): excerpt — additional entries
 * at the elided line numbers are not shown here.
 */
1941 static const char * ata_dma_blacklist [] = {
1960 "Toshiba CD-ROM XM-6202B",
1962 "E-IDE CD-ROM CR-840",
1965 "SAMSUNG CD-ROM SC-148C",
1966 "SAMSUNG CD-ROM SC",
1968 "SAMSUNG CD-ROM SN-124",
1969 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
/*
 * ata_dma_blacklisted - return non-zero when @dev's IDENTIFY model
 * string (trailing blanks stripped, per the ATAPI blank-fill rule)
 * matches an entry in ata_dma_blacklist[].
 */
1973 static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
1975 unsigned char model_num[40];
1980 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
1983 len = strnlen(s, sizeof(model_num));
1985 /* ATAPI specifies that empty space is blank-filled; remove blanks */
1986 while ((len > 0) && (s[len - 1] == ' ')) {
/* prefix comparison against each blacklist entry, length-bounded */
1991 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
1992 if (!strncmp(ata_dma_blacklist[i], s, len))
/*
 * ata_get_mode_mask - compute the transfer-mode bitmask common to the
 * host and every present device on @ap, for one mode class selected by
 * @shift (ATA_SHIFT_UDMA / ATA_SHIFT_MWDMA / ATA_SHIFT_PIO).  Starts
 * from the host's capability mask and ANDs in each present device's
 * IDENTIFY-reported modes; blacklisted devices zero out DMA masks
 * (mask clearing elided from this excerpt).
 */
1998 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
2000 struct ata_device *master, *slave;
2003 master = &ap->device[0];
2004 slave = &ap->device[1];
2006 assert (ata_dev_present(master) || ata_dev_present(slave));
2008 if (shift == ATA_SHIFT_UDMA) {
2009 mask = ap->udma_mask;
2010 if (ata_dev_present(master)) {
2011 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2012 if (ata_dma_blacklisted(ap, master)) {
2014 ata_pr_blacklisted(ap, master);
2017 if (ata_dev_present(slave)) {
2018 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2019 if (ata_dma_blacklisted(ap, slave)) {
2021 ata_pr_blacklisted(ap, slave);
2025 else if (shift == ATA_SHIFT_MWDMA) {
2026 mask = ap->mwdma_mask;
2027 if (ata_dev_present(master)) {
2028 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2029 if (ata_dma_blacklisted(ap, master)) {
2031 ata_pr_blacklisted(ap, master);
2034 if (ata_dev_present(slave)) {
2035 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2036 if (ata_dma_blacklisted(ap, slave)) {
2038 ata_pr_blacklisted(ap, slave);
2042 else if (shift == ATA_SHIFT_PIO) {
2043 mask = ap->pio_mask;
2044 if (ata_dev_present(master)) {
2045 /* spec doesn't return explicit support for
2046 * PIO0-2, so we fake it
2048 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2053 if (ata_dev_present(slave)) {
2054 /* spec doesn't return explicit support for
2055 * PIO0-2, so we fake it
2057 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unknown shift: should not happen; placate the compiler */
2064 mask = 0xffffffff; /* shut up compiler warning */
2071 /* find greatest bit */
/*
 * fgb - index of the highest set bit in @bitmap; the "not found"
 * return value is elided from this excerpt.
 */
2072 static int fgb(u32 bitmap)
2077 for (i = 0; i < 32; i++)
2078 if (bitmap & (1 << i))
2085  * ata_choose_xfer_mode - attempt to find best transfer mode
2086  * @ap: Port for which an xfer mode will be selected
2087  * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2088  * @xfer_shift_out: (output) bit shift that selects this mode
2090  * Based on host and device capabilities, determine the
2091  * maximum transfer mode that is amenable to all.
2094  * PCI/etc. bus probe sem.
2097  * Zero on success, negative on error.
/* Walks mode classes fastest-first; picks the top bit of the first
 * class with a non-empty common mask. */
2100 static int ata_choose_xfer_mode(struct ata_port *ap,
2102 unsigned int *xfer_shift_out)
2104 unsigned int mask, shift;
2107 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2108 shift = xfer_mode_classes[i].shift;
2109 mask = ata_get_mode_mask(ap, shift);
2113 *xfer_mode_out = xfer_mode_classes[i].base + x;
2114 *xfer_shift_out = shift;
2123  * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2124  * @ap: Port associated with device @dev
2125  * @dev: Device to which command will be sent
2127  * Issue SET FEATURES - XFER MODE command to device @dev
2131  * PCI/etc. bus probe sem.
/* Synchronous: issues a qc under host_set lock, then sleeps on a
 * completion until the command finishes.  Disables the port if issue
 * fails. */
2134 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2136 DECLARE_COMPLETION(wait);
2137 struct ata_queued_cmd *qc;
2139 unsigned long flags;
2141 /* set up set-features taskfile */
2142 DPRINTK("set features - xfer mode\n");
2144 qc = ata_qc_new_init(ap, dev);
2147 qc->tf.command = ATA_CMD_SET_FEATURES;
2148 qc->tf.feature = SETFEATURES_XFER;
2149 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2150 qc->tf.protocol = ATA_PROT_NODATA;
/* the desired mode code travels in the sector-count register */
2151 qc->tf.nsect = dev->xfer_mode;
2153 qc->waiting = &wait;
2154 qc->complete_fn = ata_qc_complete_noop;
2156 spin_lock_irqsave(&ap->host_set->lock, flags);
2157 rc = ata_qc_issue(qc);
2158 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2161 ata_port_disable(ap);
2163 wait_for_completion(&wait);
2169  * ata_dev_init_params - Issue INIT DEV PARAMS command
2170  * @ap: Port associated with device @dev
2171  * @dev: Device to which command will be sent
/* Programs the legacy CHS translation (sectors/track, heads) from the
 * IDENTIFY data; skipped entirely when the reported geometry is out of
 * the valid CHS range.  Synchronous, like ata_dev_set_xfermode(). */
2176 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2178 DECLARE_COMPLETION(wait);
2179 struct ata_queued_cmd *qc;
2181 unsigned long flags;
/* IDENTIFY words: 6 = sectors per track, 3 = number of heads */
2182 u16 sectors = dev->id[6];
2183 u16 heads = dev->id[3];
2185 /* Number of sectors per track 1-255. Number of heads 1-16 */
2186 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2189 /* set up init dev params taskfile */
2190 DPRINTK("init dev params \n");
2192 qc = ata_qc_new_init(ap, dev);
2195 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2196 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2197 qc->tf.protocol = ATA_PROT_NODATA;
2198 qc->tf.nsect = sectors;
2199 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2201 qc->waiting = &wait;
2202 qc->complete_fn = ata_qc_complete_noop;
2204 spin_lock_irqsave(&ap->host_set->lock, flags);
2205 rc = ata_qc_issue(qc);
2206 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2209 ata_port_disable(ap);
2211 wait_for_completion(&wait);
2217  * ata_sg_clean - Unmap DMA memory associated with command
2218  * @qc: Command containing DMA memory to be released
2220  * Unmap all mapped DMA memory associated with this command.
2223  * spin_lock_irqsave(host_set lock)
2226 static void ata_sg_clean(struct ata_queued_cmd *qc)
2228 struct ata_port *ap = qc->ap;
2229 struct scatterlist *sg = qc->sg;
2230 int dir = qc->dma_dir;
2232 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2235 if (qc->flags & ATA_QCFLAG_SINGLE)
2236 assert(qc->n_elem == 1);
2238 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
/* SG-mapped commands unmap the table; single-buffer commands unmap
 * the one entry that ata_sg_setup_one() mapped */
2240 if (qc->flags & ATA_QCFLAG_SG)
2241 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2243 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2244 sg_dma_len(&sg[0]), dir);
/* mark the mapping released so a double-clean would trip the assert */
2246 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2251  * ata_fill_sg - Fill PCI IDE PRD table
2252  * @qc: Metadata associated with taskfile to be transferred
2254  * Fill PCI IDE PRD (scatter-gather) table with segments
2255  * associated with the current disk command.
2258  * spin_lock_irqsave(host_set lock)
2261 static void ata_fill_sg(struct ata_queued_cmd *qc)
2263 struct scatterlist *sg = qc->sg;
2264 struct ata_port *ap = qc->ap;
2265 unsigned int idx, nelem;
2268 assert(qc->n_elem > 0);
2271 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2275 /* determine if physical DMA addr spans 64K boundary.
2276 * Note h/w doesn't support 64-bit, so we unconditionally
2277 * truncate dma_addr_t to u32.
2279 addr = (u32) sg_dma_address(sg);
2280 sg_len = sg_dma_len(sg);
2283 offset = addr & 0xffff;
/* a PRD entry must not cross a 64KiB boundary: split if it would */
2285 if ((offset + sg_len) > 0x10000)
2286 len = 0x10000 - offset;
/* PRD format: little-endian address + length (low 16 bits) */
2288 ap->prd[idx].addr = cpu_to_le32(addr);
2289 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2290 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
/* mark the last PRD entry so the controller stops there */
2299 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2302  * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2303  * @qc: Metadata associated with taskfile to check
2305  * Allow low-level driver to filter ATA PACKET commands, returning
2306  * a status indicating whether or not it is OK to use DMA for the
2307  * supplied PACKET command.
2310  * spin_lock_irqsave(host_set lock)
2312  * RETURNS: 0 when ATAPI DMA can be used
2315 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2317 struct ata_port *ap = qc->ap;
2318 int rc = 0; /* Assume ATAPI DMA is OK by default */
/* defer to the driver's filter hook when one is provided */
2320 if (ap->ops->check_atapi_dma)
2321 rc = ap->ops->check_atapi_dma(qc);
2326  * ata_qc_prep - Prepare taskfile for submission
2327  * @qc: Metadata associated with taskfile to be prepared
2329  * Prepare ATA taskfile for submission.
2332  * spin_lock_irqsave(host_set lock)
/* Builds the PRD table (call elided from excerpt) only for commands
 * that have a DMA mapping; non-DMA commands need no preparation. */
2334 void ata_qc_prep(struct ata_queued_cmd *qc)
2336 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2343  * ata_sg_init_one - Associate command with memory buffer
2344  * @qc: Command to be associated
2345  * @buf: Memory buffer
2346  * @buflen: Length of memory buffer, in bytes.
2348  * Initialize the data-related elements of queued_cmd @qc
2349  * to point to a single memory buffer, @buf of byte length @buflen.
2352  * spin_lock_irqsave(host_set lock)
/* NOTE(review): duplicated kernel-doc header below kept verbatim. */
2358  * ata_sg_init_one - Prepare a one-entry scatter-gather list.
2359  * @qc: Queued command
2360  * @buf: transfer buffer
2361  * @buflen: length of buf
2363  * Builds a single-entry scatter-gather list to initiate a
2364  * transfer utilizing the specified buffer.
2368 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2370 struct scatterlist *sg;
2372 qc->flags |= ATA_QCFLAG_SINGLE;
/* embed the one-entry sg table inside the qc itself */
2374 memset(&qc->sgent, 0, sizeof(qc->sgent));
2375 qc->sg = &qc->sgent;
/* describe @buf by page + intra-page offset, as sg entries require */
2380 sg->page = virt_to_page(buf);
2381 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2382 sg->length = buflen;
2386  * ata_sg_init - Associate command with scatter-gather table.
2387  * @qc: Command to be associated
2388  * @sg: Scatter-gather table.
2389  * @n_elem: Number of elements in s/g table.
2391  * Initialize the data-related elements of queued_cmd @qc
2392  * to point to a scatter-gather table @sg, containing @n_elem
2396  * spin_lock_irqsave(host_set lock)
/* NOTE(review): duplicated kernel-doc header below kept verbatim. */
2401  * ata_sg_init - Assign a scatter gather list to a queued command
2402  * @qc: Queued command
2403  * @sg: Scatter-gather list
2404  * @n_elem: length of sg list
2406  * Attaches a scatter-gather list to a queued command.
2411 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2412 unsigned int n_elem)
2414 qc->flags |= ATA_QCFLAG_SG;
2416 qc->n_elem = n_elem;
2420  * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2421  * @qc: Command with memory buffer to be mapped.
2423  * DMA-map the memory buffer associated with queued_cmd @qc.
2426  * spin_lock_irqsave(host_set lock)
2429  * Zero on success, negative on error.
2432 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2434 struct ata_port *ap = qc->ap;
2435 int dir = qc->dma_dir;
2436 struct scatterlist *sg = qc->sg;
2437 dma_addr_t dma_address;
/* single-buffer path: map qc->buf_virt directly */
2439 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2441 if (dma_mapping_error(dma_address))
/* record bus address + length in the qc's one-entry sg table */
2444 sg_dma_address(sg) = dma_address;
2445 sg_dma_len(sg) = sg->length;
2447 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2448 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2454  * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2455  * @qc: Command with scatter-gather table to be mapped.
2457  * DMA-map the scatter-gather table associated with queued_cmd @qc.
2460  * spin_lock_irqsave(host_set lock)
2463  * Zero on success, negative on error.
2467 static int ata_sg_setup(struct ata_queued_cmd *qc)
2469 struct ata_port *ap = qc->ap;
2470 struct scatterlist *sg = qc->sg;
2473 VPRINTK("ENTER, ata%u\n", ap->id);
2474 assert(qc->flags & ATA_QCFLAG_SG);
2477 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2481 DPRINTK("%d sg elements mapped\n", n_elem);
/* dma_map_sg may coalesce entries; record the mapped count */
2483 qc->n_elem = n_elem;
2493  * None. (executing in kernel thread context)
/*
 * ata_pio_poll - one polling step of the PIO state machine.  While BSY
 * is set: time out to PIO_ST_TMOUT, or stay in the *_POLL state and
 * ask the caller to requeue after ATA_SHORT_PAUSE; once BSY clears,
 * return to the matching non-poll state (PIO_ST / PIO_ST_LAST).
 */
2499 static unsigned long ata_pio_poll(struct ata_port *ap)
2502 unsigned int poll_state = PIO_ST_UNKNOWN;
2503 unsigned int reg_state = PIO_ST_UNKNOWN;
2504 const unsigned int tmout_state = PIO_ST_TMOUT;
2506 switch (ap->pio_task_state) {
2509 poll_state = PIO_ST_POLL;
2513 case PIO_ST_LAST_POLL:
2514 poll_state = PIO_ST_LAST_POLL;
2515 reg_state = PIO_ST_LAST;
2522 status = ata_chk_status(ap);
2523 if (status & ATA_BUSY) {
2524 if (time_after(jiffies, ap->pio_task_timeout)) {
2525 ap->pio_task_state = tmout_state;
2528 ap->pio_task_state = poll_state;
2529 return ATA_SHORT_PAUSE;
2532 ap->pio_task_state = reg_state;
2537  * ata_pio_complete -
2541  * None. (executing in kernel thread context)
2544 static void ata_pio_complete (struct ata_port *ap)
2546 struct ata_queued_cmd *qc;
2550 * This is purely hueristic. This is a fast path.
2551 * Sometimes when we enter, BSY will be cleared in
2552 * a chk-status or two. If not, the drive is probably seeking
2553 * or something. Snooze for a couple msecs, then
2554 * chk-status again. If still busy, fall back to
2555 * PIO_ST_POLL state.
2557 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2558 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2560 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2561 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
/* still busy: hand off to the timed polling state */
2562 ap->pio_task_state = PIO_ST_LAST_POLL;
2563 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2568 drv_stat = ata_wait_idle(ap);
2569 if (!ata_ok(drv_stat)) {
2570 ap->pio_task_state = PIO_ST_ERR;
2574 qc = ata_qc_from_tag(ap, ap->active_tag);
/* command is done: idle the state machine and complete the qc */
2577 ap->pio_task_state = PIO_ST_IDLE;
2581 ata_qc_complete(qc, drv_stat);
2587  * @buf: Buffer to swap
2588  * @buf_words: Number of 16-bit words in buffer.
2590  * Swap halves of 16-bit words if needed to convert from
2591  * little-endian byte order to native cpu byte order, or
/* swap_buf_le16 - in-place LE<->CPU conversion; a no-op body on
 * little-endian builds (the #ifdef __BIG_ENDIAN guard opening this
 * section is elided from the excerpt). */
2596 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2601 for (i = 0; i < buf_words; i++)
2602 buf[i] = le16_to_cpu(buf[i]);
2603 #endif /* __BIG_ENDIAN */
/*
 * ata_mmio_data_xfer - transfer @buflen bytes (as 16-bit words) between
 * @buf and the MMIO data register; direction chosen by @write_data.
 * Byte-swaps so that the wire order is little-endian on any CPU.
 */
2606 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2607 unsigned int buflen, int write_data)
2610 unsigned int words = buflen >> 1;
2611 u16 *buf16 = (u16 *) buf;
2612 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2615 for (i = 0; i < words; i++)
2616 writew(le16_to_cpu(buf16[i]), mmio);
2618 for (i = 0; i < words; i++)
2619 buf16[i] = cpu_to_le16(readw(mmio));
/*
 * ata_pio_data_xfer - same transfer over legacy port I/O using string
 * instructions.  NOTE(review): 'dwords' is a misnomer — buflen >> 1 is
 * a count of 16-bit words, which matches outsw/insw.
 */
2623 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2624 unsigned int buflen, int write_data)
2626 unsigned int dwords = buflen >> 1;
2629 outsw(ap->ioaddr.data_addr, buf, dwords);
2631 insw(ap->ioaddr.data_addr, buf, dwords);
/* ata_data_xfer - dispatch to the MMIO or port-I/O variant. */
2634 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2635 unsigned int buflen, int do_write)
2637 if (ap->flags & ATA_FLAG_MMIO)
2638 ata_mmio_data_xfer(ap, buf, buflen, do_write)
2640 ata_pio_data_xfer(ap, buf, buflen, do_write);
/*
 * ata_pio_sector - transfer one ATA_SECT_SIZE sector of the current
 * command between the device and the qc's scatter-gather list,
 * tracking position with cursect/cursg/cursg_ofs.  Switches the state
 * machine to PIO_ST_LAST when this is the final sector.
 */
2643 static void ata_pio_sector(struct ata_queued_cmd *qc)
2645 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2646 struct scatterlist *sg = qc->sg;
2647 struct ata_port *ap = qc->ap;
2649 unsigned int offset;
2652 if (qc->cursect == (qc->nsect - 1))
2653 ap->pio_task_state = PIO_ST_LAST;
2655 page = sg[qc->cursg].page;
2656 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2658 /* get the current page and offset */
/* sg entries can span pages: step to the page holding 'offset' */
2659 page = nth_page(page, (offset >> PAGE_SHIFT));
2660 offset %= PAGE_SIZE;
/* highmem-safe temporary mapping (kunmap elided from excerpt) */
2662 buf = kmap(page) + offset;
/* advance to the next sg entry once this one is exhausted */
2667 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2672 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2674 /* do the actual data transfer */
2675 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2676 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/*
 * __atapi_pio_bytes - transfer up to @bytes of ATAPI PIO data through
 * the qc's scatter-gather list, clamped so a single call never crosses
 * the current sg entry or a page boundary (the device's byte count may
 * exceed both).  NOTE(review): looping to consume all of @bytes is in
 * lines elided from this excerpt.
 */
2681 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2683 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2684 struct scatterlist *sg = qc->sg;
2685 struct ata_port *ap = qc->ap;
2688 unsigned int offset, count;
/* this chunk finishes the command: move to the LAST state */
2690 if (qc->curbytes == qc->nbytes - bytes)
2691 ap->pio_task_state = PIO_ST_LAST;
2694 sg = &qc->sg[qc->cursg];
2697 offset = sg->offset + qc->cursg_ofs;
2699 /* get the current page and offset */
2700 page = nth_page(page, (offset >> PAGE_SHIFT));
2701 offset %= PAGE_SIZE;
2703 /* don't overrun current sg */
2704 count = min(sg->length - qc->cursg_ofs, bytes);
2706 /* don't cross page boundaries */
2707 count = min(count, (unsigned int)PAGE_SIZE - offset);
2709 buf = kmap(page) + offset;
2712 qc->curbytes += count;
2713 qc->cursg_ofs += count;
/* current sg entry exhausted: advance (increment elided) */
2715 if (qc->cursg_ofs == sg->length) {
2720 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2722 /* do the actual data transfer */
2723 ata_data_xfer(ap, buf, count, do_write);
/*
 * atapi_pio_bytes - handle one ATAPI PIO data-transfer interrupt: read
 * the interrupt reason and byte count from the taskfile shadow
 * registers, validate direction, and hand the transfer to
 * __atapi_pio_bytes().  Any protocol violation lands in the err_out
 * path (label elided) which flags PIO_ST_ERR.
 */
2732 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2734 struct ata_port *ap = qc->ap;
2735 struct ata_device *dev = qc->dev;
2736 unsigned int ireason, bc_lo, bc_hi, bytes;
2737 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2739 ap->ops->tf_read(ap, &qc->tf);
/* per ATAPI: nsect = interrupt reason, lbam/lbah = byte count */
2740 ireason = qc->tf.nsect;
2741 bc_lo = qc->tf.lbam;
2742 bc_hi = qc->tf.lbah;
2743 bytes = (bc_hi << 8) | bc_lo;
2745 /* shall be cleared to zero, indicating xfer of data */
2746 if (ireason & (1 << 0))
2749 /* make sure transfer direction matches expected */
2750 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2751 if (do_write != i_write)
2754 __atapi_pio_bytes(qc, bytes);
/* error path: note the failure and flag the state machine */
2759 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2760 ap->id, dev->devno);
2761 ap->pio_task_state = PIO_ST_ERR;
2769  * None. (executing in kernel thread context)
/*
 * ata_pio_block - service one data block of a PIO command: wait
 * briefly for BSY to clear (falling back to PIO_ST_POLL if the drive
 * is slow), then transfer ATAPI bytes or an ATA sector depending on
 * the taskfile type.  BSY=0 with DRQ=0 mid-transfer is an error for
 * ATA; for ATAPI it means the command is complete.
 */
2772 static void ata_pio_block(struct ata_port *ap)
2774 struct ata_queued_cmd *qc;
2778 * This is purely hueristic. This is a fast path.
2779 * Sometimes when we enter, BSY will be cleared in
2780 * a chk-status or two. If not, the drive is probably seeking
2781 * or something. Snooze for a couple msecs, then
2782 * chk-status again. If still busy, fall back to
2783 * PIO_ST_POLL state.
2785 status = ata_busy_wait(ap, ATA_BUSY, 5);
2786 if (status & ATA_BUSY) {
2788 status = ata_busy_wait(ap, ATA_BUSY, 10);
2789 if (status & ATA_BUSY) {
2790 ap->pio_task_state = PIO_ST_POLL;
2791 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2796 qc = ata_qc_from_tag(ap, ap->active_tag);
2799 if (is_atapi_taskfile(&qc->tf)) {
2800 /* no more data to transfer or unsupported ATAPI command */
2801 if ((status & ATA_DRQ) == 0) {
2802 ap->pio_task_state = PIO_ST_IDLE;
2806 ata_qc_complete(qc, status);
2810 atapi_pio_bytes(qc);
2812 /* handle BSY=0, DRQ=0 as error */
2813 if ((status & ATA_DRQ) == 0) {
2814 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_error - terminal error handler of the PIO state machine:
 * log the drive status, idle the state machine, and complete the
 * active command with ATA_ERR forced into its status.
 */
2822 static void ata_pio_error(struct ata_port *ap)
2824 struct ata_queued_cmd *qc;
2827 qc = ata_qc_from_tag(ap, ap->active_tag);
2830 drv_stat = ata_chk_status(ap);
2831 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2834 ap->pio_task_state = PIO_ST_IDLE;
2838 ata_qc_complete(qc, drv_stat | ATA_ERR);
/*
 * ata_pio_task - workqueue entry point driving the PIO state machine.
 * Dispatches on ap->pio_task_state (most case arms elided from this
 * excerpt) and requeues itself: delayed when a poll asked for a
 * pause, immediately otherwise.
 */
2841 static void ata_pio_task(void *_data)
2843 struct ata_port *ap = _data;
2844 unsigned long timeout = 0;
2846 switch (ap->pio_task_state) {
2855 ata_pio_complete(ap);
2859 case PIO_ST_LAST_POLL:
2860 timeout = ata_pio_poll(ap);
/* re-arm the work item unless the machine went idle (check elided) */
2870 queue_delayed_work(ata_wq, &ap->pio_task,
2873 queue_work(ata_wq, &ap->pio_task);
/*
 * atapi_request_sense - synchronously issue an ATAPI REQUEST SENSE
 * into @cmd's sense buffer after a failed command.  Builds a PACKET
 * taskfile by hand, issues it under the host_set lock, and sleeps on a
 * completion.  Disables the port if issue fails.
 */
2876 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2877 struct scsi_cmnd *cmd)
2879 DECLARE_COMPLETION(wait);
2880 struct ata_queued_cmd *qc;
2881 unsigned long flags;
2884 DPRINTK("ATAPI request sense\n");
2886 qc = ata_qc_new_init(ap, dev);
2889 /* FIXME: is this needed? */
2890 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
/* transfer target: the SCSI command's sense buffer, device->host */
2892 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2893 qc->dma_dir = DMA_FROM_DEVICE;
2895 memset(&qc->cdb, 0, ap->cdb_len);
2896 qc->cdb[0] = REQUEST_SENSE;
2897 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2899 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2900 qc->tf.command = ATA_CMD_PACKET;
/* lbam/lbah advertise the max byte count per DRQ (8 KiB here) */
2902 qc->tf.protocol = ATA_PROT_ATAPI;
2903 qc->tf.lbam = (8 * 1024) & 0xff;
2904 qc->tf.lbah = (8 * 1024) >> 8;
2905 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2907 qc->waiting = &wait;
2908 qc->complete_fn = ata_qc_complete_noop;
2910 spin_lock_irqsave(&ap->host_set->lock, flags);
2911 rc = ata_qc_issue(qc);
2912 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2915 ata_port_disable(ap);
2917 wait_for_completion(&wait);
2923 * ata_qc_timeout - Handle timeout of queued command
2924 * @qc: Command that timed out
2926 * Some part of the kernel (currently, only the SCSI layer)
2927 * has noticed that the active command on port @ap has not
2928 * completed after a specified length of time. Handle this
2929 * condition by disabling DMA (if necessary) and completing
2930 * transactions, with error if necessary.
2932 * This also handles the case of the "lost interrupt", where
2933 * for some reason (possibly hardware bug, possibly driver bug)
2934 * an interrupt was not delivered to the driver, even though the
2935 * transaction completed successfully.
2938 * Inherited from SCSI layer (none, can sleep)
2941 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2943 struct ata_port *ap = qc->ap;
2944 struct ata_device *dev = qc->dev;
2945 u8 host_stat = 0, drv_stat;
2949 /* FIXME: doesn't this conflict with timeout handling? */
2950 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2951 struct scsi_cmnd *cmd = qc->scsicmd;
/* only when SCSI EH is not cancelling: treat as CHECK CONDITION */
2953 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2955 /* finish completing original command */
2956 __ata_qc_complete(qc);
/* fetch sense data synchronously, then finish the SCSI command */
2958 atapi_request_sense(ap, dev, cmd);
2960 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2961 scsi_finish_command(cmd);
2967 /* hack alert! We cannot use the supplied completion
2968 * function from inside the ->eh_strategy_handler() thread.
2969 * libata is the only user of ->eh_strategy_handler() in
2970 * any kernel, so the default scsi_done() assumes it is
2971 * not being called from the SCSI EH.
2973 qc->scsidone = scsi_finish_command;
2975 switch (qc->tf.protocol) {
2978 case ATA_PROT_ATAPI_DMA:
2979 host_stat = ap->ops->bmdma_status(ap);
2981 /* before we do anything else, clear DMA-Start bit */
2982 ap->ops->bmdma_stop(ap);
2988 drv_stat = ata_chk_status(ap);
2990 /* ack bmdma irq events */
2991 ap->ops->irq_clear(ap);
2993 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2994 ap->id, qc->tf.command, drv_stat, host_stat);
2996 /* complete taskfile transaction */
2997 ata_qc_complete(qc, drv_stat);
3005 * ata_eng_timeout - Handle timeout of queued command
3006 * @ap: Port on which timed-out command is active
3008 * Some part of the kernel (currently, only the SCSI layer)
3009 * has noticed that the active command on port @ap has not
3010 * completed after a specified length of time. Handle this
3011 * condition by disabling DMA (if necessary) and completing
3012 * transactions, with error if necessary.
3014 * This also handles the case of the "lost interrupt", where
3015 * for some reason (possibly hardware bug, possibly driver bug)
3016 * an interrupt was not delivered to the driver, even though the
3017 * transaction completed successfully.
3020 * Inherited from SCSI layer (none, can sleep)
3023 void ata_eng_timeout(struct ata_port *ap)
3025 struct ata_queued_cmd *qc;
/* look up the active command; a missing qc here is a driver bug */
3029 qc = ata_qc_from_tag(ap, ap->active_tag);
3031 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3043 * ata_qc_new - Request an available ATA command, for queueing
3044 * @ap: Port associated with device @dev
3045 * @dev: Device from whom we request an available command structure
3051 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3053 struct ata_queued_cmd *qc = NULL;
/* atomically claim the first free tag in ap->qactive */
3056 for (i = 0; i < ATA_MAX_QUEUE; i++)
3057 if (!test_and_set_bit(i, &ap->qactive)) {
3058 qc = ata_qc_from_tag(ap, i);
3069 * ata_qc_new_init - Request an available ATA command, and initialize it
3070 * @ap: Port associated with device @dev
3071 * @dev: Device from whom we request an available command structure
3077 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3078 struct ata_device *dev)
3080 struct ata_queued_cmd *qc;
3082 qc = ata_qc_new(ap);
/* reset per-command transfer bookkeeping */
3089 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
3091 qc->nbytes = qc->curbytes = 0;
3093 ata_tf_init(ap, &qc->tf, dev->devno);
/* propagate device LBA/LBA48 capability into taskfile flags */
3095 if (dev->flags & ATA_DFLAG_LBA) {
3096 qc->tf.flags |= ATA_TFLAG_LBA;
3098 if (dev->flags & ATA_DFLAG_LBA48)
3099 qc->tf.flags |= ATA_TFLAG_LBA48;
/*
 * ata_qc_complete_noop - completion callback that does nothing.
 * Used by synchronous callers (e.g. atapi_request_sense) that wait on
 * qc->waiting instead of acting in the completion path.
 */
3106 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/*
 * __ata_qc_complete - low-level qc teardown.
 * Poisons the qc's tag (and the port's active_tag if this was the active
 * command), wakes any synchronous waiter, and releases the tag bit in
 * ap->qactive so it can be reused.
 */
3111 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3113 struct ata_port *ap = qc->ap;
3114 unsigned int tag, do_clear = 0;
3118 if (likely(ata_tag_valid(tag))) {
3119 if (tag == ap->active_tag)
3120 ap->active_tag = ATA_TAG_POISON;
/* poison catches use-after-complete bugs */
3121 qc->tag = ATA_TAG_POISON;
/* wake a synchronous waiter, if any */
3126 struct completion *waiting = qc->waiting;
3131 if (likely(do_clear))
3132 clear_bit(tag, &ap->qactive);
3136 * ata_qc_free - free unused ata_queued_cmd
3137 * @qc: Command to complete
3139 * Designed to free unused ata_queued_cmd object
3140 * in case something prevents using it.
3143 * spin_lock_irqsave(host_set lock)
3146 void ata_qc_free(struct ata_queued_cmd *qc)
3148 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3149 assert(qc->waiting == NULL); /* nothing should be waiting */
/* tear down without invoking any completion callback */
3151 __ata_qc_complete(qc);
3155 * ata_qc_complete - Complete an active ATA command
3156 * @qc: Command to complete
3157 * @drv_stat: ATA Status register contents
3159 * Indicate to the mid and upper layers that an ATA
3160 * command has completed, with either an ok or not-ok status.
3163 * spin_lock_irqsave(host_set lock)
3167 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3171 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3172 assert(qc->flags & ATA_QCFLAG_ACTIVE);
/* unmap DMA buffers if they were mapped at issue time */
3174 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3177 /* call completion callback */
3178 rc = qc->complete_fn(qc, drv_stat);
3179 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3181 /* if callback indicates not to complete command (non-zero),
3182 * return immediately
/* callback returned zero: finish tearing the qc down */
3187 __ata_qc_complete(qc);
/*
 * ata_should_dma_map - decide whether this qc's buffers need DMA mapping.
 * DMA protocols always map; PIO protocols map only when the port uses
 * DMA for PIO data (ATA_FLAG_PIO_DMA).
 */
3192 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3194 struct ata_port *ap = qc->ap;
3196 switch (qc->tf.protocol) {
3198 case ATA_PROT_ATAPI_DMA:
3201 case ATA_PROT_ATAPI:
3203 case ATA_PROT_PIO_MULT:
3204 if (ap->flags & ATA_FLAG_PIO_DMA)
3217 * ata_qc_issue - issue taskfile to device
3218 * @qc: command to issue to device
3220 * Prepare an ATA command to submission to device.
3221 * This includes mapping the data into a DMA-able
3222 * area, filling in the S/G table, and finally
3223 * writing the taskfile to hardware, starting the command.
3226 * spin_lock_irqsave(host_set lock)
3229 * Zero on success, negative on error.
3232 int ata_qc_issue(struct ata_queued_cmd *qc)
3234 struct ata_port *ap = qc->ap;
3236 if (ata_should_dma_map(qc)) {
/* map either a scatter/gather list or a single buffer */
3237 if (qc->flags & ATA_QCFLAG_SG) {
3238 if (ata_sg_setup(qc))
3240 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3241 if (ata_sg_setup_one(qc))
/* no mapping needed: make sure completion won't try to unmap */
3245 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3248 ap->ops->qc_prep(qc);
/* mark this qc as the port's active command before hitting hardware */
3250 qc->ap->active_tag = qc->tag;
3251 qc->flags |= ATA_QCFLAG_ACTIVE;
3253 return ap->ops->qc_issue(qc);
3261 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3262 * @qc: command to issue to device
3264 * Using various libata functions and hooks, this function
3265 * starts an ATA command. ATA commands are grouped into
3266 * classes called "protocols", and issuing each type of protocol
3267 * is slightly different.
3269 * May be used as the qc_issue() entry in ata_port_operations.
3272 * spin_lock_irqsave(host_set lock)
3275 * Zero on success, negative on error.
3278 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3280 struct ata_port *ap = qc->ap;
3282 ata_dev_select(ap, qc->dev->devno, 1, 0);
3284 switch (qc->tf.protocol) {
3285 case ATA_PROT_NODATA:
/* non-data: just write the taskfile; irq signals completion */
3286 ata_tf_to_host_nolock(ap, &qc->tf);
3290 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3291 ap->ops->bmdma_setup(qc); /* set up bmdma */
3292 ap->ops->bmdma_start(qc); /* initiate bmdma */
3295 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3296 ata_qc_set_polling(qc);
3297 ata_tf_to_host_nolock(ap, &qc->tf);
/* hand off to the polled PIO state machine */
3298 ap->pio_task_state = PIO_ST;
3299 queue_work(ata_wq, &ap->pio_task);
3302 case ATA_PROT_ATAPI:
3303 ata_qc_set_polling(qc);
3304 ata_tf_to_host_nolock(ap, &qc->tf);
/* CDB is sent later from the packet task */
3305 queue_work(ata_wq, &ap->packet_task);
3308 case ATA_PROT_ATAPI_NODATA:
3309 ata_tf_to_host_nolock(ap, &qc->tf);
3310 queue_work(ata_wq, &ap->packet_task);
3313 case ATA_PROT_ATAPI_DMA:
3314 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3315 ap->ops->bmdma_setup(qc); /* set up bmdma */
/* bmdma_start happens in the packet task, after the CDB goes out */
3316 queue_work(ata_wq, &ap->packet_task);
3328 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3329 * @qc: Info associated with this ATA transaction.
3332 * spin_lock_irqsave(host_set lock)
3335 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3337 struct ata_port *ap = qc->ap;
3338 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3340 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3342 /* load PRD table addr. */
3343 mb(); /* make sure PRD table writes are visible to controller */
3344 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3346 /* specify data direction, triple-check start bit is clear */
3347 dmactl = readb(mmio + ATA_DMA_CMD);
3348 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* presumably set only on writes (rw != 0) — elided condition */
3350 dmactl |= ATA_DMA_WR;
3351 writeb(dmactl, mmio + ATA_DMA_CMD);
3353 /* issue r/w command */
3354 ap->ops->exec_command(ap, &qc->tf);
3358 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3359 * @qc: Info associated with this ATA transaction.
3362 * spin_lock_irqsave(host_set lock)
3365 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3367 struct ata_port *ap = qc->ap;
3368 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3371 /* start host DMA transaction */
3372 dmactl = readb(mmio + ATA_DMA_CMD);
3373 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3375 /* Strictly, one may wish to issue a readb() here, to
3376 * flush the mmio write. However, control also passes
3377 * to the hardware at this point, and it will interrupt
3378 * us when we are to resume control. So, in effect,
3379 * we don't care when the mmio write flushes.
3380 * Further, a read of the DMA status register _immediately_
3381 * following the write may not be what certain flaky hardware
3382 * is expected, so I think it is best to not add a readb()
3383 * without first all the MMIO ATA cards/mobos.
3384 * Or maybe I'm just being paranoid.
3389 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3390 * @qc: Info associated with this ATA transaction.
3393 * spin_lock_irqsave(host_set lock)
3396 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3398 struct ata_port *ap = qc->ap;
3399 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3402 /* load PRD table addr. */
3403 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3405 /* specify data direction, triple-check start bit is clear */
3406 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3407 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
/* presumably set only on writes (rw != 0) — elided condition */
3409 dmactl |= ATA_DMA_WR;
3410 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3412 /* issue r/w command */
3413 ap->ops->exec_command(ap, &qc->tf);
3417 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3418 * @qc: Info associated with this ATA transaction.
3421 * spin_lock_irqsave(host_set lock)
3424 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3426 struct ata_port *ap = qc->ap;
3429 /* start host DMA transaction */
3430 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* set the START bit, preserving the rest of the command register */
3431 outb(dmactl | ATA_DMA_START,
3432 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3437 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3438 * @qc: Info associated with this ATA transaction.
3440 * Writes the ATA_DMA_START flag to the DMA command register.
3442 * May be used as the bmdma_start() entry in ata_port_operations.
3445 * spin_lock_irqsave(host_set lock)
3447 void ata_bmdma_start(struct ata_queued_cmd *qc)
/* dispatch to MMIO or port-I/O variant based on port flags */
3449 if (qc->ap->flags & ATA_FLAG_MMIO)
3450 ata_bmdma_start_mmio(qc);
3452 ata_bmdma_start_pio(qc);
3457 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3458 * @qc: Info associated with this ATA transaction.
3460 * Writes address of PRD table to device's PRD Table Address
3461 * register, sets the DMA control register, and calls
3462 * ops->exec_command() to start the transfer.
3464 * May be used as the bmdma_setup() entry in ata_port_operations.
3467 * spin_lock_irqsave(host_set lock)
3469 void ata_bmdma_setup(struct ata_queued_cmd *qc)
/* dispatch to MMIO or port-I/O variant based on port flags */
3471 if (qc->ap->flags & ATA_FLAG_MMIO)
3472 ata_bmdma_setup_mmio(qc);
3474 ata_bmdma_setup_pio(qc);
3479 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3480 * @ap: Port associated with this ATA transaction.
3482 * Clear interrupt and error flags in DMA status register.
3484 * May be used as the irq_clear() entry in ata_port_operations.
3487 * spin_lock_irqsave(host_set lock)
3490 void ata_bmdma_irq_clear(struct ata_port *ap)
3492 if (ap->flags & ATA_FLAG_MMIO) {
3493 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
/* write-back of the status value clears write-1-to-clear bits */
3494 writeb(readb(mmio), mmio);
3496 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3497 outb(inb(addr), addr);
3504 * ata_bmdma_status - Read PCI IDE BMDMA status
3505 * @ap: Port associated with this ATA transaction.
3507 * Read and return BMDMA status register.
3509 * May be used as the bmdma_status() entry in ata_port_operations.
3512 * spin_lock_irqsave(host_set lock)
3515 u8 ata_bmdma_status(struct ata_port *ap)
3518 if (ap->flags & ATA_FLAG_MMIO) {
3519 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3520 host_stat = readb(mmio + ATA_DMA_STATUS);
3522 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3528 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3529 * @ap: Port associated with this ATA transaction.
3531 * Clears the ATA_DMA_START flag in the dma control register
3533 * May be used as the bmdma_stop() entry in ata_port_operations.
3536 * spin_lock_irqsave(host_set lock)
3539 void ata_bmdma_stop(struct ata_port *ap)
3541 if (ap->flags & ATA_FLAG_MMIO) {
3542 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3544 /* clear start/stop bit */
3545 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3546 mmio + ATA_DMA_CMD);
3548 /* clear start/stop bit */
3549 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3550 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3553 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3554 ata_altstatus(ap); /* dummy read */
3558 * ata_host_intr - Handle host interrupt for given (port, task)
3559 * @ap: Port on which interrupt arrived (possibly...)
3560 * @qc: Taskfile currently active in engine
3562 * Handle host interrupt for given queued command. Currently,
3563 * only DMA interrupts are handled. All other commands are
3564 * handled via polling with interrupts disabled (nIEN bit).
3567 * spin_lock_irqsave(host_set lock)
3570 * One if interrupt was handled, zero if not (shared irq).
3573 inline unsigned int ata_host_intr (struct ata_port *ap,
3574 struct ata_queued_cmd *qc)
3576 u8 status, host_stat;
3578 switch (qc->tf.protocol) {
3581 case ATA_PROT_ATAPI_DMA:
3582 case ATA_PROT_ATAPI:
3583 /* check status of DMA engine */
3584 host_stat = ap->ops->bmdma_status(ap);
3585 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3587 /* if it's not our irq... */
3588 if (!(host_stat & ATA_DMA_INTR))
3591 /* before we do anything else, clear DMA-Start bit */
3592 ap->ops->bmdma_stop(ap);
3596 case ATA_PROT_ATAPI_NODATA:
3597 case ATA_PROT_NODATA:
3598 /* check altstatus */
/* altstatus read does not clear INTRQ, so it is safe to peek first */
3599 status = ata_altstatus(ap);
3600 if (status & ATA_BUSY)
3603 /* check main status, clearing INTRQ */
3604 status = ata_chk_status(ap);
3605 if (unlikely(status & ATA_BUSY))
3607 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3608 ap->id, qc->tf.protocol, status);
3610 /* ack bmdma irq events */
3611 ap->ops->irq_clear(ap);
3613 /* complete taskfile transaction */
3614 ata_qc_complete(qc, status);
3621 return 1; /* irq handled */
/* fall-through bookkeeping for interrupts we did not claim */
3624 ap->stats.idle_irq++;
3627 if ((ap->stats.idle_irq % 1000) == 0) {
3629 ata_irq_ack(ap, 0); /* debug trap */
3630 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3633 return 0; /* irq not handled */
3637 * ata_interrupt - Default ATA host interrupt handler
3638 * @irq: irq line (unused)
3639 * @dev_instance: pointer to our ata_host_set information structure
3642 * Default interrupt handler for PCI IDE devices. Calls
3643 * ata_host_intr() for each port that is not disabled.
3646 * Obtains host_set lock during operation.
3649 * IRQ_NONE or IRQ_HANDLED.
3653 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3655 struct ata_host_set *host_set = dev_instance;
3657 unsigned int handled = 0;
3658 unsigned long flags;
3660 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3661 spin_lock_irqsave(&host_set->lock, flags);
3663 for (i = 0; i < host_set->n_ports; i++) {
3664 struct ata_port *ap;
3666 ap = host_set->ports[i];
3667 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3668 struct ata_queued_cmd *qc;
3670 qc = ata_qc_from_tag(ap, ap->active_tag);
/* only service ports with an active, interrupt-driven (nIEN clear) qc */
3671 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3672 (qc->flags & ATA_QCFLAG_ACTIVE))
3673 handled |= ata_host_intr(ap, qc);
3677 spin_unlock_irqrestore(&host_set->lock, flags);
3679 return IRQ_RETVAL(handled);
3683 * atapi_packet_task - Write CDB bytes to hardware
3684 * @_data: Port to which ATAPI device is attached.
3686 * When device has indicated its readiness to accept
3687 * a CDB, this function is called. Send the CDB.
3688 * If DMA is to be performed, exit immediately.
3689 * Otherwise, we are in polling mode, so poll
3690 * status under operation succeeds or fails.
3693 * Kernel thread context (may sleep)
3696 static void atapi_packet_task(void *_data)
3698 struct ata_port *ap = _data;
3699 struct ata_queued_cmd *qc;
3702 qc = ata_qc_from_tag(ap, ap->active_tag);
3704 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3706 /* sleep-wait for BSY to clear */
3707 DPRINTK("busy wait\n");
3708 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3711 /* make sure DRQ is set */
3712 status = ata_chk_status(ap);
3713 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3717 DPRINTK("send cdb\n");
/* libata pads/uses a fixed-size CDB buffer; must be at least 12 bytes */
3718 assert(ap->cdb_len >= 12);
3719 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3721 /* if we are DMA'ing, irq handler takes over from here */
3722 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3723 ap->ops->bmdma_start(qc); /* initiate bmdma */
3725 /* non-data commands are also handled via irq */
3726 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3730 /* PIO commands are handled by polling */
3732 ap->pio_task_state = PIO_ST;
3733 queue_work(ata_wq, &ap->pio_task);
/* error path (elided label): abort the command */
3739 ata_qc_complete(qc, ATA_ERR);
3744 * ata_port_start - Set port up for dma.
3745 * @ap: Port to initialize
3747 * Called just after data structures for each port are
3748 * initialized. Allocates space for PRD table.
3750 * May be used as the port_start() entry in ata_port_operations.
3755 int ata_port_start (struct ata_port *ap)
3757 struct device *dev = ap->host_set->dev;
/* coherent DMA allocation; ap->prd_dma receives the bus address */
3759 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3763 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3770 * ata_port_stop - Undo ata_port_start()
3771 * @ap: Port to shut down
3773 * Frees the PRD table.
3775 * May be used as the port_stop() entry in ata_port_operations.
3780 void ata_port_stop (struct ata_port *ap)
3782 struct device *dev = ap->host_set->dev;
/* release the PRD table allocated in ata_port_start() */
3784 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
/*
 * ata_host_stop - host_set-level teardown: unmap the MMIO region, if any.
 */
3787 void ata_host_stop (struct ata_host_set *host_set)
3789 if (host_set->mmio_base)
3790 iounmap(host_set->mmio_base);
3795 * ata_host_remove - Unregister SCSI host structure with upper layers
3796 * @ap: Port to unregister
3797 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3802 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3804 struct Scsi_Host *sh = ap->host;
/* full unregister path: detach from the SCSI midlayer first */
3809 scsi_remove_host(sh);
/* then run the low-level driver's port teardown */
3811 ap->ops->port_stop(ap);
3815 * ata_host_init - Initialize an ata_port structure
3816 * @ap: Structure to initialize
3817 * @host: associated SCSI mid-layer structure
3818 * @host_set: Collection of hosts to which @ap belongs
3819 * @ent: Probe information provided by low-level driver
3820 * @port_no: Port number associated with this ata_port
3822 * Initialize a new ata_port structure, and its associated
3826 * Inherited from caller.
3830 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3831 struct ata_host_set *host_set,
3832 struct ata_probe_ent *ent, unsigned int port_no)
/* SCSI-host-side parameters */
3838 host->max_channel = 1;
3839 host->unique_id = ata_unique_id++;
3840 host->max_cmd_len = 12;
3841 scsi_set_device(host, ent->dev);
3842 scsi_assign_lock(host, &host_set->lock);
/* port starts disabled until probe succeeds */
3844 ap->flags = ATA_FLAG_PORT_DISABLED;
3845 ap->id = host->unique_id;
3847 ap->ctl = ATA_DEVCTL_OBS;
3848 ap->host_set = host_set;
3849 ap->port_no = port_no;
/* legacy-mode ports carry a fixed hardware port number */
3851 ent->legacy_mode ? ent->hard_port_no : port_no;
3852 ap->pio_mask = ent->pio_mask;
3853 ap->mwdma_mask = ent->mwdma_mask;
3854 ap->udma_mask = ent->udma_mask;
3855 ap->flags |= ent->host_flags;
3856 ap->ops = ent->port_ops;
3857 ap->cbl = ATA_CBL_NONE;
3858 ap->active_tag = ATA_TAG_POISON;
3859 ap->last_ctl = 0xFF;
/* workqueue tasks used by the ATAPI CDB and polled-PIO paths */
3861 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3862 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3864 for (i = 0; i < ATA_MAX_DEVICES; i++)
3865 ap->device[i].devno = i;
/* start the irq counters at 1 — presumably avoids modulo-0 traps */
3868 ap->stats.unhandled_irq = 1;
3869 ap->stats.idle_irq = 1;
3872 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3876 * ata_host_add - Attach low-level ATA driver to system
3877 * @ent: Information provided by low-level driver
3878 * @host_set: Collections of ports to which we add
3879 * @port_no: Port number associated with this host
3881 * Attach low-level ATA driver to system.
3884 * PCI/etc. bus probe sem.
3887 * New ata_port on success, for NULL on error.
3891 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3892 struct ata_host_set *host_set,
3893 unsigned int port_no)
3895 struct Scsi_Host *host;
3896 struct ata_port *ap;
/* the ata_port lives in the Scsi_Host's hostdata area */
3900 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3904 ap = (struct ata_port *) &host->hostdata[0];
3906 ata_host_init(ap, host, host_set, ent, port_no);
3908 rc = ap->ops->port_start(ap);
/* error path (elided label): drop the SCSI host reference */
3915 scsi_host_put(host);
3920 * ata_device_add - Register hardware device with ATA and SCSI layers
3921 * @ent: Probe information describing hardware device to be registered
3923 * This function processes the information provided in the probe
3924 * information struct @ent, allocates the necessary ATA and SCSI
3925 * host information structures, initializes them, and registers
3926 * everything with requisite kernel subsystems.
3928 * This function requests irqs, probes the ATA bus, and probes
3932 * PCI/etc. bus probe sem.
3935 * Number of ports registered. Zero on error (no ports registered).
3939 int ata_device_add(struct ata_probe_ent *ent)
3941 unsigned int count = 0, i;
3942 struct device *dev = ent->dev;
3943 struct ata_host_set *host_set;
3946 /* alloc a container for our list of ATA ports (buses) */
3947 host_set = kmalloc(sizeof(struct ata_host_set) +
3948 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3951 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3952 spin_lock_init(&host_set->lock);
3954 host_set->dev = dev;
3955 host_set->n_ports = ent->n_ports;
3956 host_set->irq = ent->irq;
3957 host_set->mmio_base = ent->mmio_base;
3958 host_set->private_data = ent->private_data;
3959 host_set->ops = ent->port_ops;
3961 /* register each port bound to this device */
3962 for (i = 0; i < ent->n_ports; i++) {
3963 struct ata_port *ap;
3964 unsigned long xfer_mode_mask;
3966 ap = ata_host_add(ent, host_set, i);
3970 host_set->ports[i] = ap;
/* combined transfer-mode bitmap for the banner printk below */
3971 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3972 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3973 (ap->pio_mask << ATA_SHIFT_PIO);
3975 /* print per-port info to dmesg */
3976 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3977 "bmdma 0x%lX irq %lu\n",
3979 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3980 ata_mode_string(xfer_mode_mask),
3981 ap->ioaddr.cmd_addr,
3982 ap->ioaddr.ctl_addr,
3983 ap->ioaddr.bmdma_addr,
/* drain any stale irq status before requesting the irq */
3987 host_set->ops->irq_clear(ap);
3996 /* obtain irq, that is shared between channels */
3997 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3998 DRV_NAME, host_set))
4001 /* perform each probe synchronously */
4002 DPRINTK("probe begin\n");
4003 for (i = 0; i < count; i++) {
4004 struct ata_port *ap;
4007 ap = host_set->ports[i];
4009 DPRINTK("ata%u: probe begin\n", ap->id);
4010 rc = ata_bus_probe(ap);
4011 DPRINTK("ata%u: probe end\n", ap->id);
4014 /* FIXME: do something useful here?
4015 * Current libata behavior will
4016 * tear down everything when
4017 * the module is removed
4018 * or the h/w is unplugged.
4022 rc = scsi_add_host(ap->host, dev);
4024 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4026 /* FIXME: do something useful here */
4027 /* FIXME: handle unconditional calls to
4028 * scsi_scan_host and ata_host_remove, below,
4034 /* probes are done, now scan each port's disk(s) */
4035 DPRINTK("probe begin\n");
4036 for (i = 0; i < count; i++) {
4037 struct ata_port *ap = host_set->ports[i];
4039 scsi_scan_host(ap->host);
/* stash host_set so ata_pci_remove_one() can find it later */
4042 dev_set_drvdata(dev, host_set);
4044 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4045 return ent->n_ports; /* success */
/* error path (elided label): unwind the ports added so far */
4048 for (i = 0; i < count; i++) {
4049 ata_host_remove(host_set->ports[i], 1);
4050 scsi_host_put(host_set->ports[i]->host);
4053 VPRINTK("EXIT, returning 0\n");
4058 * ata_scsi_release - SCSI layer callback hook for host unload
4059 * @host: libata host to be unloaded
4061 * Performs all duties necessary to shut down a libata port...
4062 * Kill port kthread, disable port, and release resources.
4065 * Inherited from SCSI layer.
4071 int ata_scsi_release(struct Scsi_Host *host)
/* ata_port is embedded in the Scsi_Host's hostdata */
4073 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4077 ap->ops->port_disable(ap);
/* do_unregister=0: just stop the port; SCSI host removal handled elsewhere */
4078 ata_host_remove(ap, 0);
4085 * ata_std_ports - initialize ioaddr with standard port offsets.
4086 * @ioaddr: IO address structure to be initialized
4088 * Utility function which initializes data_addr, error_addr,
4089 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4090 * device_addr, status_addr, and command_addr to standard offsets
4091 * relative to cmd_addr.
4093 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4096 void ata_std_ports(struct ata_ioports *ioaddr)
/* classic taskfile layout: consecutive registers from cmd_addr */
4098 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4099 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4100 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4101 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4102 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4103 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4104 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4105 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4106 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4107 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/*
 * ata_probe_ent_alloc - allocate and zero an ata_probe_ent, copying the
 * per-port capability/ops fields from @port.  Returns NULL on allocation
 * failure (after logging).  Caller owns the returned memory.
 */
4110 static struct ata_probe_ent *
4111 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4113 struct ata_probe_ent *probe_ent;
4115 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
4117 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4118 kobject_name(&(dev->kobj)));
4122 memset(probe_ent, 0, sizeof(*probe_ent));
4124 INIT_LIST_HEAD(&probe_ent->node);
4125 probe_ent->dev = dev;
/* copy the low-level driver's template fields */
4127 probe_ent->sht = port->sht;
4128 probe_ent->host_flags = port->host_flags;
4129 probe_ent->pio_mask = port->pio_mask;
4130 probe_ent->mwdma_mask = port->mwdma_mask;
4131 probe_ent->udma_mask = port->udma_mask;
4132 probe_ent->port_ops = port->port_ops;
4140 * ata_pci_init_native_mode - Initialize native-mode driver
4141 * @pdev: pci device to be initialized
4142 * @port: array[2] of pointers to port info structures.
4144 * Utility function which allocates and initializes an
4145 * ata_probe_ent structure for a standard dual-port
4146 * PIO-based IDE controller. The returned ata_probe_ent
4147 * structure can be passed to ata_device_add(). The returned
4148 * ata_probe_ent structure should then be freed with kfree().
4152 struct ata_probe_ent *
4153 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4155 struct ata_probe_ent *probe_ent =
4156 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4160 probe_ent->n_ports = 2;
4161 probe_ent->irq = pdev->irq;
4162 probe_ent->irq_flags = SA_SHIRQ;
/* primary channel: BARs 0/1; BAR 4 holds the shared BMDMA block */
4164 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
4165 probe_ent->port[0].altstatus_addr =
4166 probe_ent->port[0].ctl_addr =
4167 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4168 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
/* secondary channel: BARs 2/3; BMDMA block offset by 8 bytes */
4170 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
4171 probe_ent->port[1].altstatus_addr =
4172 probe_ent->port[1].ctl_addr =
4173 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4174 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4176 ata_std_ports(&probe_ent->port[0]);
4177 ata_std_ports(&probe_ent->port[1]);
/*
 * ata_pci_init_legacy_mode - build probe entries for a legacy-mode PCI IDE
 * controller.  Legacy channels use the fixed ISA addresses/irqs
 * (0x1f0/irq14, 0x170/irq15); each channel gets its own one-port
 * probe_ent.  The secondary entry is returned through @ppe2.
 */
4182 static struct ata_probe_ent *
4183 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4184 struct ata_probe_ent **ppe2)
4186 struct ata_probe_ent *probe_ent, *probe_ent2;
4188 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4191 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
/* primary channel: fixed legacy irq 14 */
4197 probe_ent->n_ports = 1;
4198 probe_ent->irq = 14;
4200 probe_ent->hard_port_no = 0;
4201 probe_ent->legacy_mode = 1;
/* secondary channel: fixed legacy irq 15 */
4203 probe_ent2->n_ports = 1;
4204 probe_ent2->irq = 15;
4206 probe_ent2->hard_port_no = 1;
4207 probe_ent2->legacy_mode = 1;
4209 probe_ent->port[0].cmd_addr = 0x1f0;
4210 probe_ent->port[0].altstatus_addr =
4211 probe_ent->port[0].ctl_addr = 0x3f6;
4212 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4214 probe_ent2->port[0].cmd_addr = 0x170;
4215 probe_ent2->port[0].altstatus_addr =
4216 probe_ent2->port[0].ctl_addr = 0x376;
4217 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
4219 ata_std_ports(&probe_ent->port[0]);
4220 ata_std_ports(&probe_ent2->port[0]);
4227 * ata_pci_init_one - Initialize/register PCI IDE host controller
4228 * @pdev: Controller to be initialized
4229 * @port_info: Information from low-level host driver
4230 * @n_ports: Number of ports attached to host controller
4232 * This is a helper function which can be called from a driver's
4233 * xxx_init_one() probe function if the hardware uses traditional
4234 * IDE taskfile registers.
4236 * This function calls pci_enable_device(), reserves its register
4237 * regions, sets the dma mask, enables bus master mode, and calls
4241 * Inherited from PCI layer (may sleep).
4244 * Zero on success, negative on errno-based value on error.
4248 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4249 unsigned int n_ports)
4251 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
4252 struct ata_port_info *port[2];
4254 unsigned int legacy_mode = 0;
4255 int disable_dev_on_err = 1;
4260 port[0] = port_info[0];
4262 port[1] = port_info[1];
/* detect legacy mode from the PCI programming-interface byte */
4266 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4267 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4268 /* TODO: support transitioning to native mode? */
4269 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4270 mask = (1 << 2) | (1 << 0);
/* bit 3 = "legacy mode"; bits 0/1 added per channel below */
4271 if ((tmp8 & mask) != mask)
4272 legacy_mode = (1 << 3);
4276 if ((!legacy_mode) && (n_ports > 1)) {
4277 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
4281 rc = pci_enable_device(pdev);
4285 rc = pci_request_regions(pdev, DRV_NAME);
/* regions already held by someone else: don't disable on the way out */
4287 disable_dev_on_err = 0;
/* claim the primary legacy port range, tolerating our own prior claim */
4292 if (!request_region(0x1f0, 8, "libata")) {
4293 struct resource *conflict, res;
4295 res.end = 0x1f0 + 8 - 1;
4296 conflict = ____request_resource(&ioport_resource, &res);
4297 if (!strcmp(conflict->name, "libata"))
4298 legacy_mode |= (1 << 0);
4300 disable_dev_on_err = 0;
4301 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4304 legacy_mode |= (1 << 0);
/* same dance for the secondary legacy port range */
4306 if (!request_region(0x170, 8, "libata")) {
4307 struct resource *conflict, res;
4309 res.end = 0x170 + 8 - 1;
4310 conflict = ____request_resource(&ioport_resource, &res);
4311 if (!strcmp(conflict->name, "libata"))
4312 legacy_mode |= (1 << 1);
4314 disable_dev_on_err = 0;
4315 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4318 legacy_mode |= (1 << 1);
4321 /* we have legacy mode, but all ports are unavailable */
4322 if (legacy_mode == (1 << 3)) {
4324 goto err_out_regions;
4327 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4329 goto err_out_regions;
4330 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4332 goto err_out_regions;
/* build probe entries for whichever mode we detected */
4335 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
4337 probe_ent = ata_pci_init_native_mode(pdev, port);
4340 goto err_out_regions;
4343 pci_set_master(pdev);
4345 /* FIXME: check ata_device_add return */
/* legacy: register each available channel independently */
4347 if (legacy_mode & (1 << 0))
4348 ata_device_add(probe_ent);
4349 if (legacy_mode & (1 << 1))
4350 ata_device_add(probe_ent2);
4352 ata_device_add(probe_ent);
/* error unwind (elided labels): release only what we claimed */
4360 if (legacy_mode & (1 << 0))
4361 release_region(0x1f0, 8);
4362 if (legacy_mode & (1 << 1))
4363 release_region(0x170, 8);
4364 pci_release_regions(pdev);
4366 if (disable_dev_on_err)
4367 pci_disable_device(pdev);
4372 * ata_pci_remove_one - PCI layer callback for device removal
4373 * @pdev: PCI device that was removed
4375 * PCI layer indicates to libata via this hook that
4376 hot-unplug or module unload event has occurred.
4377 * Handle this by unregistering all objects associated
4378 * with this PCI device. Free those objects. Then finally
4379 * release PCI resources and disable device.
4382 * Inherited from PCI layer (may sleep).
4385 void ata_pci_remove_one (struct pci_dev *pdev)
4387 struct device *dev = pci_dev_to_dev(pdev);
4388 struct ata_host_set *host_set = dev_get_drvdata(dev);
4389 struct ata_port *ap;
4392 for (i = 0; i < host_set->n_ports; i++) {
4393 ap = host_set->ports[i];
4395 scsi_remove_host(ap->host);
4398 free_irq(host_set->irq, host_set);
4400 for (i = 0; i < host_set->n_ports; i++) {
4401 ap = host_set->ports[i];
4403 ata_scsi_release(ap->host);
4405 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4406 struct ata_ioports *ioaddr = &ap->ioaddr;
4408 if (ioaddr->cmd_addr == 0x1f0)
4409 release_region(0x1f0, 8);
4410 else if (ioaddr->cmd_addr == 0x170)
4411 release_region(0x170, 8);
4414 scsi_host_put(ap->host);
4417 if (host_set->ops->host_stop)
4418 host_set->ops->host_stop(host_set);
4422 pci_release_regions(pdev);
4423 pci_disable_device(pdev);
4424 dev_set_drvdata(dev, NULL);
4427 /* move to PCI subsystem */
4428 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4430 unsigned long tmp = 0;
4432 switch (bits->width) {
4435 pci_read_config_byte(pdev, bits->reg, &tmp8);
4441 pci_read_config_word(pdev, bits->reg, &tmp16);
4447 pci_read_config_dword(pdev, bits->reg, &tmp32);
4458 return (tmp == bits->val) ? 1 : 0;
4460 #endif /* CONFIG_PCI */
4463 static int __init ata_init(void)
4465 ata_wq = create_workqueue("ata");
4469 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4473 static void __exit ata_exit(void)
4475 destroy_workqueue(ata_wq);
4478 module_init(ata_init);
4479 module_exit(ata_exit);
4482 * libata is essentially a library of internal helper functions for
4483 * low-level ATA host controller drivers. As such, the API/ABI is
4484 * likely to change as new drivers are added and updated.
4485 * Do not depend on ABI/API stability.
4488 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4489 EXPORT_SYMBOL_GPL(ata_std_ports);
4490 EXPORT_SYMBOL_GPL(ata_device_add);
4491 EXPORT_SYMBOL_GPL(ata_sg_init);
4492 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4493 EXPORT_SYMBOL_GPL(ata_qc_complete);
4494 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4495 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4496 EXPORT_SYMBOL_GPL(ata_tf_load);
4497 EXPORT_SYMBOL_GPL(ata_tf_read);
4498 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4499 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4500 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4501 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4502 EXPORT_SYMBOL_GPL(ata_check_status);
4503 EXPORT_SYMBOL_GPL(ata_altstatus);
4504 EXPORT_SYMBOL_GPL(ata_chk_err);
4505 EXPORT_SYMBOL_GPL(ata_exec_command);
4506 EXPORT_SYMBOL_GPL(ata_port_start);
4507 EXPORT_SYMBOL_GPL(ata_port_stop);
4508 EXPORT_SYMBOL_GPL(ata_host_stop);
4509 EXPORT_SYMBOL_GPL(ata_interrupt);
4510 EXPORT_SYMBOL_GPL(ata_qc_prep);
4511 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4512 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4513 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4514 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4515 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4516 EXPORT_SYMBOL_GPL(ata_port_probe);
4517 EXPORT_SYMBOL_GPL(sata_phy_reset);
4518 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4519 EXPORT_SYMBOL_GPL(ata_bus_reset);
4520 EXPORT_SYMBOL_GPL(ata_port_disable);
4521 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4522 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4523 EXPORT_SYMBOL_GPL(ata_scsi_error);
4524 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4525 EXPORT_SYMBOL_GPL(ata_scsi_release);
4526 EXPORT_SYMBOL_GPL(ata_host_intr);
4527 EXPORT_SYMBOL_GPL(ata_dev_classify);
4528 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4529 EXPORT_SYMBOL_GPL(ata_dev_config);
4530 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4533 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4534 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4535 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4536 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4537 #endif /* CONFIG_PCI */