2 * drivers/s390/s390io.c
3 * S/390 common I/O routines
4 * $Revision: 1.247.4.4 $
7 * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
9 * Author(s): Ingo Adlung (adlung@de.ibm.com)
10 * Cornelia Huck (cohuck@de.ibm.com)
11 * ChangeLog: 01/07/2001 Blacklist cleanup (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
12 * 01/04/2001 Holger Smolinski (smolinsk@de.ibm.com)
13 * Fixed lost interrupts and do_adapter_IO
14 * xx/xx/xxxx nnn multiple changes not reflected
15 * 03/12/2001 Ingo Adlung blacklist= - changed to cio_ignore=
16 * 03/14/2001 Ingo Adlung disable interrupts before start_IO
17 * in Path Group processing
18 * decrease retry2 on busy while
19 * disabling sync_isc; reset isc_cnt
20 * on io error during sync_isc enablement
21 * 05/09/2001 Cornelia Huck added exploitation of debug feature
22 * 05/16/2001 Cornelia Huck added /proc/deviceinfo/<devno>/
23 * 05/22/2001 Cornelia Huck added /proc/cio_ignore
24 * un-ignore blacklisted devices by piping
26 * xx/xx/xxxx some bugfixes & cleanups
27 * 08/02/2001 Cornelia Huck not already known devices can be blacklisted
28 * by piping to /proc/cio_ignore
29 * 09/xx/2001 couple more fixes
30 * 10/15/2001 Cornelia Huck xsch - internal only for now
31 * 10/29/2001 Cornelia Huck Blacklisting reworked again
32 * 10/29/2001 Cornelia Huck improved utilization of debug feature
33 * 10/29/2001 Cornelia Huck more work on cancel_IO - use the flag
34 * DOIO_CANCEL_ON_TIMEOUT in do_IO to get
36 * 11/15/2001 Cornelia Huck proper behaviour with procfs off
37 * 12/10/2001 Cornelia Huck added private_data + functions to
39 * 11-12/2001 Cornelia Huck various cleanups
40 * 01/09/2002 Cornelia Huck PGID fixes
41 * process css machine checks
42 * 01/10/2002 Cornelia Huck added /proc/chpids
43 * 04/10/2002 Cornelia Huck fixed reaction on css machine checks
44 * 04/23/2002 Cornelia Huck fixed console isc (un)setting
45 * 06/06/2002 Cornelia Huck added detection of locked devices
48 #include <linux/module.h>
49 #include <linux/config.h>
50 #include <linux/errno.h>
51 #include <linux/kernel_stat.h>
52 #include <linux/signal.h>
53 #include <linux/sched.h>
54 #include <linux/interrupt.h>
55 #include <linux/slab.h>
56 #include <linux/string.h>
57 #include <linux/smp.h>
58 #include <linux/threads.h>
59 #include <linux/smp_lock.h>
60 #include <linux/init.h>
61 #include <linux/bootmem.h>
62 #include <linux/ctype.h>
64 #include <linux/proc_fs.h>
66 #include <asm/system.h>
69 #include <asm/bitops.h>
71 #include <asm/pgtable.h>
72 #include <asm/delay.h>
73 #include <asm/processor.h>
74 #include <asm/lowcore.h>
75 #include <asm/idals.h>
76 #include <asm/uaccess.h>
77 #include <asm/cpcmd.h>
79 #include <asm/s390io.h>
80 #include <asm/s390dyn.h>
81 #include <asm/s390mach.h>
82 #include <asm/debug.h>
83 #include <asm/queue.h>
90 #define SANITY_CHECK(irq) do { \
91 if (irq > highest_subchannel || irq < 0) \
93 if (ioinfo[irq] == INVALID_STORAGE_AREA) \
95 if (ioinfo[irq]->st) \
99 #define CIO_TRACE_EVENT(imp, txt) do { \
100 if (cio_debug_initialized) \
101 debug_text_event(cio_debug_trace_id, \
106 #define CIO_MSG_EVENT(imp, args...) do { \
107 if (cio_debug_initialized) \
108 debug_sprintf_event(cio_debug_msg_id, \
113 #define CIO_CRW_EVENT(imp, args...) do { \
114 if (cio_debug_initialized) \
115 debug_sprintf_event(cio_debug_crw_id, \
120 #define CIO_HEX_EVENT(imp, args...) do { \
121 if (cio_debug_initialized) \
122 debug_event(cio_debug_trace_id, imp, ##args); \
125 #undef CONFIG_DEBUG_IO
126 #define CONFIG_DEBUG_CRW
127 #define CONFIG_DEBUG_CHSC
129 unsigned int highest_subchannel;
130 ioinfo_t *ioinfo_head = NULL;
131 ioinfo_t *ioinfo_tail = NULL;
132 ioinfo_t *ioinfo[__MAX_SUBCHANNELS] = {
133 [0 ... (__MAX_SUBCHANNELS - 1)] = INVALID_STORAGE_AREA
137 __u64 chpids[4] = {0,0,0,0};
138 __u64 chpids_logical[4] = {-1,-1,-1,-1};
139 __u64 chpids_known[4] = {0,0,0,0};
140 #endif /* CONFIG_CHSC */
142 static atomic_t sync_isc = ATOMIC_INIT (-1);
143 static int sync_isc_cnt = 0; /* synchronous irq processing lock */
145 static spinlock_t adapter_lock = SPIN_LOCK_UNLOCKED; /* adapter interrupt lock */
146 static int cons_dev = -1; /* identify console device */
147 static int init_IRQ_complete = 0;
148 static int cio_show_msg = 0;
149 static schib_t *p_init_schib = NULL;
150 static irb_t *p_init_irb = NULL;
151 static __u64 irq_IPL_TOD;
152 static adapter_int_handler_t adapter_handler = NULL;
153 static pgid_t * global_pgid;
155 /* for use of debug feature */
156 debug_info_t *cio_debug_msg_id = NULL;
157 debug_info_t *cio_debug_trace_id = NULL;
158 debug_info_t *cio_debug_crw_id = NULL;
159 int cio_debug_initialized = 0;
162 int cio_chsc_desc_avail = 0;
163 int cio_chsc_err_msg = 0;
166 static void init_IRQ_handler (int irq, void *dev_id, struct pt_regs *regs);
167 static void s390_process_subchannels (void);
168 static void s390_device_recognition_all (void);
169 static void s390_device_recognition_irq (int irq);
170 #ifdef CONFIG_PROC_FS
171 static void s390_redo_validation (void);
173 static int s390_validate_subchannel (int irq, int enable);
174 static int s390_SenseID (int irq, senseid_t * sid, __u8 lpm);
175 static int s390_SetPGID (int irq, __u8 lpm);
176 static int s390_SensePGID (int irq, __u8 lpm, pgid_t * pgid);
177 static int s390_process_IRQ (unsigned int irq);
178 static int enable_subchannel (unsigned int irq);
179 static int disable_subchannel (unsigned int irq);
180 int cancel_IO (int irq);
181 int s390_start_IO (int irq, ccw1_t * cpa, unsigned long user_intparm,
182 __u8 lpm, unsigned long flag);
184 #ifdef CONFIG_PROC_FS
185 static int chan_proc_init (void);
188 static inline void do_adapter_IO (__u32 intparm);
190 static void s390_schedule_path_verification(unsigned long irq);
191 int s390_DevicePathVerification (int irq, __u8 domask);
192 int s390_register_adapter_interrupt (adapter_int_handler_t handler);
193 int s390_unregister_adapter_interrupt (adapter_int_handler_t handler);
195 extern int do_none (unsigned int irq, int cpu, struct pt_regs *regs);
196 extern int enable_none (unsigned int irq);
197 extern int disable_none (unsigned int irq);
199 asmlinkage void do_IRQ (struct pt_regs regs);
202 static chsc_area_t *chsc_area_ssd = NULL;
203 static chsc_area_t *chsc_area_sei = NULL;
204 static spinlock_t chsc_lock_ssd = SPIN_LOCK_UNLOCKED;
205 static spinlock_t chsc_lock_sei = SPIN_LOCK_UNLOCKED;
206 static int chsc_get_sch_descriptions( void );
207 int s390_vary_chpid( __u8 chpid, int on );
210 #ifdef CONFIG_PROC_FS
211 #define MAX_CIO_PROCFS_ENTRIES 0x300
212 /* magic number; we want to have some room to spare */
214 int cio_procfs_device_create (int devno);
215 int cio_procfs_device_remove (int devno);
216 int cio_procfs_device_purge (void);
219 int cio_notoper_msg = 1;
221 #ifdef CONFIG_PROC_FS
222 int cio_proc_devinfo = 0; /* switch off the /proc/deviceinfo/ stuff by default
223 until problems are dealt with */
226 unsigned long s390_irq_count[NR_CPUS]; /* trace how many irqs have occurred per cpu... */
227 int cio_count_irqs = 1; /* toggle use here... */
229 int cio_sid_with_pgid = 0; /* if we need a PGID for SenseID, switch this on */
232 * "Blacklisting" of certain devices:
233 * Device numbers given in the commandline as cio_ignore=... won't be known to Linux
234 * These can be single devices or ranges of devices
236 * 10/23/01 reworked to get rid of lists
239 static u32 bl_dev[2048];
241 static spinlock_t blacklist_lock = SPIN_LOCK_UNLOCKED;
242 static int highest_ignored = 0;
243 static int nr_ignored = 0;
246 * Function: blacklist_range_add
247 * Blacklist the devices from-to
251 blacklist_range_add (int from, int to, int locked)
257 if ((to && (from > to))
258 || (to<0) || (to > 0xffff)
259 || (from<0) || (from > 0xffff))
263 spin_lock_irqsave (&blacklist_lock, flags);
267 for (i = from; i <= to; i++) {
268 if (!test_and_set_bit (i, &bl_dev))
272 if (to >= highest_ignored)
273 highest_ignored = to;
276 spin_unlock_irqrestore (&blacklist_lock, flags);
280 * Function: blacklist_range_remove
281 * Removes a range from the blacklist chain
285 blacklist_range_remove (int from, int to)
290 if ((to && (from > to))
291 || (to<0) || (to > 0xffff)
292 || (from<0) || (from > 0xffff))
295 spin_lock_irqsave (&blacklist_lock, flags);
297 for (i = from; i <= to; i++) {
298 if (test_and_clear_bit (i, &bl_dev))
302 if (to == highest_ignored)
303 for (highest_ignored = from; (highest_ignored > 0)
304 && (!test_bit (highest_ignored, &bl_dev));
307 spin_unlock_irqrestore (&blacklist_lock, flags);
310 /* Parsing the commandline for blacklist parameters */
313 * Variable to hold the blacklisted devices given by the parameter line
316 char *blacklist[256] = { NULL, };
319 * Get the cio_ignore=... items from the parameter line
323 blacklist_split_parm_string (char *str)
330 end = strchr (tmp, ',');
332 len = strlen (tmp) + 1;
334 len = (long) end - (long) tmp + 1;
338 blacklist[count] = alloc_bootmem (len * sizeof (char));
339 if (blacklist == NULL) {
341 "can't store cio_ignore= parameter no %d\n",
345 memset (blacklist[count], 0, len * sizeof (char));
346 memcpy (blacklist[count], tmp, len * sizeof (char));
349 } while (tmp != NULL && *tmp != '\0');
353 * The blacklist parameters as one concatenated string
356 static char blacklist_parm_string[1024] __initdata = { 0, };
359 * function: blacklist_strtoul
360 * Strip leading '0x' and interpret the values as Hex
363 blacklist_strtoul (char *str, char **stra)
366 str++; /* strip leading zero */
368 str++; /* strip leading x */
370 return simple_strtoul (str, stra, 16); /* interpret anything as hex */
374 * Function: blacklist_parse
375 * Parse the parameters given to cio_ignore=...
376 * Add the blacklisted devices to the blacklist chain
380 blacklist_parse (char **str)
390 from = blacklist_strtoul (temp, &temp);
393 to = blacklist_strtoul (temp, &temp);
395 blacklist_range_add (from, to, 0);
396 #ifdef CONFIG_DEBUG_IO
397 printk (KERN_INFO "Blacklisted range from %X to %X\n", from,
405 * Initialisation of blacklist
409 blacklist_init (void)
411 #ifdef CONFIG_DEBUG_IO
412 printk (KERN_DEBUG "Reading blacklist...\n");
414 CIO_MSG_EVENT(6, "Reading blacklist\n");
416 blacklist_split_parm_string (blacklist_parm_string);
417 blacklist_parse (blacklist);
421 * Get all the blacklist parameters from parameter line
425 blacklist_setup (char *str, int *ints)
427 int len = strlen (blacklist_parm_string);
429 strcat (blacklist_parm_string, ",");
431 strcat (blacklist_parm_string, str);
435 blacklist_call_setup (char *str)
438 #ifdef CONFIG_DEBUG_IO
439 printk (KERN_DEBUG "Reading blacklist parameters...\n");
441 CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
443 blacklist_setup (str, &dummy);
445 /* Blacklist ranges must be ready when device recognition starts */
451 __setup ("cio_ignore=", blacklist_call_setup);
453 /* Checking if devices are blacklisted */
456 * Function: is_blacklisted
457 * Returns 1 if the given device number can be found in the blacklist, otherwise 0.
461 is_blacklisted (int devno)
466 spin_lock_irqsave (&blacklist_lock, flags);
468 if (test_bit (devno, &bl_dev))
471 spin_unlock_irqrestore (&blacklist_lock, flags);
476 * Function: blacklist_free_all_ranges
477 * set all blacklisted devices free...
481 blacklist_free_all_ranges (void)
486 spin_lock_irqsave (&blacklist_lock, flags);
488 for (i = 0; i <= highest_ignored; i++)
489 clear_bit (i, &bl_dev);
493 spin_unlock_irqrestore (&blacklist_lock, flags);
496 #ifdef CONFIG_PROC_FS
498 * Function: blacklist_parse_proc_parameters
499 * parse the stuff which is piped to /proc/cio_ignore
502 blacklist_parse_proc_parameters (char *buf)
510 if (strstr (buf, "free ")) {
511 for (i = 0; i < 5; i++) {
514 if (strstr (buf, "all")) {
515 blacklist_free_all_ranges ();
516 s390_redo_validation ();
518 while (*buf != 0 && *buf != '\n') {
519 if (!isxdigit(*buf)) {
520 printk(KERN_WARNING "%s: error parsing "
521 "\"%s\"\n", __FUNCTION__, buf);
525 from = blacklist_strtoul (buf, &buf);
527 blacklist_strtoul (buf+1, &buf) : from;
529 blacklist_range_remove (from, to);
534 s390_redo_validation();
536 } else if (strstr (buf, "add ")) {
537 for (i = 0; i < 4; i++) {
540 while (*buf != 0 && *buf != '\n') {
541 if (!isxdigit(*buf)) {
542 printk(KERN_WARNING "%s: error parsing "
543 "\"%s\"\n", __FUNCTION__, buf);
547 from = blacklist_strtoul (buf, &buf);
549 blacklist_strtoul (buf+1, &buf) : from;
551 spin_lock_irqsave (&blacklist_lock, flags);
554 * Don't allow for already known devices to be
556 * The criterion is a bit dumb, devices which once were
557 * there but are already gone are also caught...
561 for (i = 0; i <= highest_subchannel; i++) {
562 if (ioinfo[i] != INVALID_STORAGE_AREA) {
564 if ((ioinfo[i]->schib.pmcw.dev >= from)
565 && (ioinfo[i]->schib.pmcw.dev <=
568 "cio_ignore: Won't blacklist "
569 "already known devices, "
570 "skipping range %x to %x\n",
579 blacklist_range_add (from, to, 1);
581 spin_unlock_irqrestore (&blacklist_lock, flags);
588 "cio_ignore: Parse error; "
589 "try using 'free all|<devno-range>,<devno-range>,...'\n");
591 "or 'add <devno-range>,<devno-range>,...'\n");
595 /* End of blacklist handling */
597 void s390_displayhex (char *str, void *ptr, s32 cnt);
600 s390_displayhex (char *str, void *ptr, s32 cnt)
602 s32 cnt1, cnt2, maxcnt2;
603 u32 *currptr = (__u32 *) ptr;
605 printk ("\n%s\n", str);
607 for (cnt1 = 0; cnt1 < cnt; cnt1 += 16) {
608 printk ("%08lX ", (unsigned long) currptr);
609 maxcnt2 = cnt - cnt1;
612 for (cnt2 = 0; cnt2 < maxcnt2; cnt2 += 4)
613 printk ("%08X ", *currptr++);
619 cio_setup (char *parm)
621 if (!strcmp (parm, "yes")) {
623 } else if (!strcmp (parm, "no")) {
626 printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'",
634 __setup ("cio_msg=", cio_setup);
637 cio_notoper_setup (char *parm)
639 if (!strcmp (parm, "yes")) {
641 } else if (!strcmp (parm, "no")) {
645 "cio_notoper_setup: "
646 "invalid cio_notoper_msg parameter '%s'", parm);
652 __setup ("cio_notoper_msg=", cio_notoper_setup);
654 #ifdef CONFIG_PROC_FS
656 cio_proc_devinfo_setup (char *parm)
658 if (!strcmp (parm, "yes")) {
659 cio_proc_devinfo = 1;
660 } else if (!strcmp (parm, "no")) {
661 cio_proc_devinfo = 0;
664 "cio_proc_devinfo_setup: invalid parameter '%s'\n",
671 __setup ("cio_proc_devinfo=", cio_proc_devinfo_setup);
675 cio_pgid_setup (char *parm)
677 if (!strcmp (parm, "yes")) {
678 cio_sid_with_pgid = 1;
679 } else if (!strcmp (parm, "no")) {
680 cio_sid_with_pgid = 0;
683 "cio_pgid_setup : invalid cio_msg parameter '%s'",
691 __setup ("cio_sid_with_pgid=", cio_pgid_setup);
694 * register for adapter interrupts
696 * With HiperSockets the zSeries architecture provides for
697 * means of adapter interrupts, pseudo I/O interrupts that are
698 * not tied to an I/O subchannel, but to an adapter. However,
699 * it doesn't disclose the info how to enable/disable them, but
700 * to recognize them only. Perhaps we should consider them
701 * being shared interrupts, and thus build a linked list
702 * of adapter handlers ... to be evaluated ...
705 s390_register_adapter_interrupt (adapter_int_handler_t handler)
710 CIO_TRACE_EVENT (4, "rgaint");
712 spin_lock (&adapter_lock);
716 else if (adapter_handler)
719 adapter_handler = handler;
721 spin_unlock (&adapter_lock);
723 sprintf (dbf_txt, "ret:%d", ret);
724 CIO_TRACE_EVENT (4, dbf_txt);
730 s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
735 CIO_TRACE_EVENT (4, "urgaint");
737 spin_lock (&adapter_lock);
741 else if (handler != adapter_handler)
744 adapter_handler = NULL;
746 spin_unlock (&adapter_lock);
748 sprintf (dbf_txt, "ret:%d", ret);
749 CIO_TRACE_EVENT (4, dbf_txt);
755 do_adapter_IO (__u32 intparm)
757 CIO_TRACE_EVENT (4, "doaio");
759 spin_lock (&adapter_lock);
762 (*adapter_handler) (intparm);
764 spin_unlock (&adapter_lock);
769 void s390_free_irq (unsigned int irq, void *dev_id);
772 * Note : internal use of irqflags SA_PROBE for NOT path grouping
776 s390_request_irq_special (int irq,
777 io_handler_func_t io_handler,
778 not_oper_handler_func_t not_oper_handler,
779 unsigned long irqflags,
780 const char *devname, void *dev_id)
787 if (irq >= __MAX_SUBCHANNELS)
790 if (!io_handler || !dev_id)
793 if (ioinfo[irq] == INVALID_STORAGE_AREA)
799 sprintf (dbf_txt, "reqsp%x", irq);
800 CIO_TRACE_EVENT (4, dbf_txt);
803 * The following block of code has to be executed atomically
805 s390irq_spin_lock_irqsave (irq, flags);
807 if (ioinfo[irq]->ui.flags.unfriendly &&
808 !(irqflags & SA_FORCE)) {
811 } else if (!ioinfo[irq]->ui.flags.ready) {
814 ioinfo[irq]->irq_desc.handler = io_handler;
815 ioinfo[irq]->irq_desc.name = devname;
816 ioinfo[irq]->irq_desc.dev_id = dev_id;
817 ioinfo[irq]->ui.flags.ready = 1;
820 retval = enable_subchannel (irq);
822 ioinfo[irq]->ui.flags.ready = 0;
826 stsch (irq, &ioinfo[irq]->schib);
827 if (ioinfo[irq]->schib.pmcw.ena)
835 * interrupt already owned, and shared interrupts
836 * aren't supported on S/390.
842 s390irq_spin_unlock_irqrestore (irq, flags);
845 if (irqflags & SA_DOPATHGROUP) {
846 ioinfo[irq]->ui.flags.pgid_supp = 1;
847 ioinfo[irq]->ui.flags.notacccap = 1;
849 if ((irqflags & SA_DOPATHGROUP) &&
850 (!ioinfo[irq]->ui.flags.pgid ||
851 irqflags & SA_PROBE)) {
855 * Do an initial SensePGID to find out if device
856 * is locked by someone else.
858 memcpy(&pgid, global_pgid, sizeof(pgid_t));
861 for (i=0; i<8 && retval==-EAGAIN; i++) {
863 mask = (0x80 >> i) & ioinfo[irq]->opm;
868 retval = s390_SensePGID(irq, mask, &pgid);
870 if (retval == -EOPNOTSUPP)
871 /* Doesn't prevent us from proceeding */
876 if (!(irqflags & SA_PROBE) &&
877 (irqflags & SA_DOPATHGROUP) &&
878 (!ioinfo[irq]->ui.flags.unfriendly))
879 s390_DevicePathVerification (irq, 0);
881 if (ioinfo[irq]->ui.flags.unfriendly &&
882 !(irqflags & SA_FORCE)) {
884 * We found out during path verification that the
885 * device is locked by someone else and we have to
886 * let the device driver know.
889 free_irq(irq, dev_id);
891 ioinfo[irq]->ui.flags.newreq = 1;
892 ioinfo[irq]->nopfunc = not_oper_handler;
896 if (cio_debug_initialized)
897 debug_int_event (cio_debug_trace_id, 4, retval);
903 s390_request_irq (unsigned int irq,
904 void (*handler) (int, void *, struct pt_regs *),
905 unsigned long irqflags, const char *devname, void *dev_id)
909 ret = s390_request_irq_special (irq,
910 (io_handler_func_t) handler,
911 NULL, irqflags, devname, dev_id);
914 ioinfo[irq]->ui.flags.newreq = 0;
921 s390_free_irq (unsigned int irq, void *dev_id)
928 if (irq >= __MAX_SUBCHANNELS || ioinfo[irq] == INVALID_STORAGE_AREA)
934 sprintf (dbf_txt, "free%x", irq);
935 CIO_TRACE_EVENT (2, dbf_txt);
937 s390irq_spin_lock_irqsave (irq, flags);
939 #ifdef CONFIG_KERNEL_DEBUG
941 printk (KERN_DEBUG "Trying to free IRQ%d\n", irq);
943 CIO_MSG_EVENT(2, "Trying to free IRQ %d\n", irq);
946 * disable the device and reset all IRQ info if
947 * the IRQ is actually owned by the handler ...
949 if (ioinfo[irq]->ui.flags.ready) {
950 if (dev_id == ioinfo[irq]->irq_desc.dev_id) {
951 /* start deregister */
952 ioinfo[irq]->ui.flags.unready = 1;
954 ret = disable_subchannel (irq);
960 * We try to terminate the I/O by halt_IO first,
962 * Because the device may be gone (machine
963 * check handling), we can't use sync I/O.
966 halt_IO (irq, 0xC8C1D3E3, 0);
967 s390irq_spin_unlock_irqrestore (irq, flags);
968 udelay (200000); /* 200 ms */
969 s390irq_spin_lock_irqsave (irq, flags);
971 ret = disable_subchannel (irq);
975 clear_IO (irq, 0x40C3D3D9, 0);
976 s390irq_spin_unlock_irqrestore (irq,
978 udelay (1000000); /* 1000 ms */
979 s390irq_spin_lock_irqsave (irq, flags);
981 /* give it a very last try ... */
982 disable_subchannel (irq);
984 if (ioinfo[irq]->ui.flags.busy) {
987 "- device %04X busy, retry "
988 "count exceeded\n", irq,
989 ioinfo[irq]->devstat.
994 "retry count exceeded\n",
1003 ioinfo[irq]->ui.flags.ready = 0;
1004 ioinfo[irq]->ui.flags.unready = 0; /* deregister ended */
1006 ioinfo[irq]->nopfunc = NULL;
1008 s390irq_spin_unlock_irqrestore (irq, flags);
1010 s390irq_spin_unlock_irqrestore (irq, flags);
1012 printk (KERN_ERR "free_irq(%04X) : error, "
1013 "dev_id does not match !\n", irq);
1015 "free_irq(%04X) : error, "
1016 "dev_id does not match !\n",
1021 s390irq_spin_unlock_irqrestore (irq, flags);
1023 printk (KERN_ERR "free_irq(%04X) : error, "
1024 "no action block ... !\n", irq);
1026 "free_irq(%04X) : error, "
1027 "no action block ... !\n", irq);
1033 * Enable IRQ by modifying the subchannel
1036 enable_subchannel (unsigned int irq)
1045 sprintf (dbf_txt, "ensch%x", irq);
1046 CIO_TRACE_EVENT (2, dbf_txt);
1049 * If a previous disable request is pending we reset it. However, this
1050 * status implies that the device may (still) be not-operational.
1052 if (ioinfo[irq]->ui.flags.d_disable) {
1053 ioinfo[irq]->ui.flags.d_disable = 0;
1056 ccode = stsch (irq, &(ioinfo[irq]->schib));
1061 ioinfo[irq]->schib.pmcw.ena = 1;
1063 if (irq == cons_dev) {
1064 ioinfo[irq]->schib.pmcw.isc = 7;
1066 ioinfo[irq]->schib.pmcw.isc = 3;
1071 ccode = msch (irq, &(ioinfo[irq]->schib));
1079 case 1: /* status pending */
1081 ioinfo[irq]->ui.flags.s_pend = 1;
1082 s390_process_IRQ (irq);
1083 ioinfo[irq]->ui.flags.s_pend = 0;
1087 * might be overwritten on re-driving
1094 udelay (100); /* allow for recovery */
1099 case 3: /* not oper */
1100 ioinfo[irq]->ui.flags.oper = 0;
1111 sprintf (dbf_txt, "ret:%d", ret);
1112 CIO_TRACE_EVENT (2, dbf_txt);
1118 * Disable IRQ by modifying the subchannel
1121 disable_subchannel (unsigned int irq)
1123 int cc; /* condition code */
1124 int ret = 0; /* function return value */
1130 sprintf (dbf_txt, "dissch%x", irq);
1131 CIO_TRACE_EVENT (2, dbf_txt);
1133 if (ioinfo[irq]->ui.flags.busy) {
1135 * the disable function must not be called while there are
1136 * requests pending for completion !
1142 * If device isn't operational we have to perform delayed
1143 * disabling when the next interrupt occurs - unless the
1144 * irq is re-requested prior to the interrupt to occur.
1146 cc = stsch (irq, &(ioinfo[irq]->schib));
1149 ioinfo[irq]->ui.flags.oper = 0;
1150 ioinfo[irq]->ui.flags.d_disable = 1;
1153 } else { /* cc == 0 */
1155 ioinfo[irq]->schib.pmcw.ena = 0;
1158 cc = msch (irq, &(ioinfo[irq]->schib));
1166 case 1: /* status pending */
1167 ioinfo[irq]->ui.flags.s_pend = 1;
1168 s390_process_IRQ (irq);
1169 ioinfo[irq]->ui.flags.s_pend = 0;
1173 * might be overwritten on re-driving
1179 case 2: /* busy; this should not happen! */
1181 "disable_subchannel(%04X) "
1182 "- unexpected busy condition for "
1183 "device %04X received !\n", irq,
1184 ioinfo[irq]->devstat.devno);
1186 "disable_subchannel(%04X) "
1187 "- unexpected busy condition "
1188 "for device %04X received !\n",
1190 ioinfo[irq]->devstat.
1196 case 3: /* not oper */
1198 * should hardly occur ?!
1200 ioinfo[irq]->ui.flags.oper = 0;
1201 ioinfo[irq]->ui.flags.d_disable = 1;
1206 * if the device has gone, we don't need
1207 * to disable it anymore !
1218 sprintf (dbf_txt, "ret:%d", ret);
1219 CIO_TRACE_EVENT (2, dbf_txt);
1225 s390_init_IRQ (void)
1227 unsigned long flags; /* PSW flags */
1228 long cr6 __attribute__ ((aligned (8)));
1231 asm volatile ("STCK %0":"=m" (irq_IPL_TOD));
1233 p_init_schib = alloc_bootmem_low (sizeof (schib_t));
1234 p_init_irb = alloc_bootmem_low (sizeof (irb_t));
1237 * As we don't know about the calling environment
1238 * we assure running disabled. Before leaving the
1239 * function we reestablish the old environment.
1241 * Note : as we don't need a system wide lock, therefore
1242 * we shouldn't use cli(), but __cli() as this
1243 * affects the current CPU only.
1245 __save_flags (flags);
1249 * disable all interrupts
1252 __ctl_load (cr6, 6, 6);
1254 s390_process_subchannels ();
1256 if (cio_count_irqs) {
1258 for (i = 0; i < NR_CPUS; i++)
1259 s390_irq_count[i] = 0;
1264 * Let's build our path group ID here.
1267 global_pgid = (pgid_t *)alloc_bootmem(sizeof(pgid_t));
1269 cpuid = *(cpuid_t*) __LC_CPUID;
1271 if (MACHINE_NEW_STIDP)
1272 global_pgid->cpu_addr = 0x8000;
1275 global_pgid->cpu_addr = hard_smp_processor_id();
1277 global_pgid->cpu_addr = 0;
1280 global_pgid->cpu_id = cpuid.ident;
1281 global_pgid->cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
1282 global_pgid->tod_high = *(__u32 *) & irq_IPL_TOD;
1286 * enable default I/O-interrupt subclass 3
1289 __ctl_load (cr6, 6, 6);
1291 s390_device_recognition_all ();
1293 init_IRQ_complete = 1;
1295 __restore_flags (flags);
1301 * dummy handler, used during init_IRQ() processing for compatibility only
1304 init_IRQ_handler (int irq, void *dev_id, struct pt_regs *regs)
1306 /* this is a dummy handler only ... */
1310 s390_start_IO (int irq, /* IRQ */
1311 ccw1_t * cpa, /* logical channel prog addr */
1312 unsigned long user_intparm, /* interruption parameter */
1313 __u8 lpm, /* logical path mask */
1323 * The flag usage is mutually exclusive ...
1325 if ((flag & DOIO_EARLY_NOTIFICATION)
1326 && (flag & DOIO_REPORT_ALL)) {
1331 sprintf (dbf_txt, "stIO%x", irq);
1332 CIO_TRACE_EVENT (4, dbf_txt);
1337 ioinfo[irq]->orb.intparm = (__u32) (long) &ioinfo[irq]->u_intparm;
1338 ioinfo[irq]->orb.fmt = 1;
1340 ioinfo[irq]->orb.pfch = !(flag & DOIO_DENY_PREFETCH);
1341 ioinfo[irq]->orb.spnd = (flag & DOIO_ALLOW_SUSPEND ? TRUE : FALSE);
1342 ioinfo[irq]->orb.ssic = ((flag & DOIO_ALLOW_SUSPEND)
1343 && (flag & DOIO_SUPPRESS_INTER));
1345 if (flag & DOIO_VALID_LPM) {
1346 ioinfo[irq]->orb.lpm = lpm;
1348 ioinfo[irq]->orb.lpm = ioinfo[irq]->opm;
1352 #ifdef CONFIG_ARCH_S390X
1354 * for 64 bit we always support 64 bit IDAWs with 4k page size only
1356 ioinfo[irq]->orb.c64 = 1;
1357 ioinfo[irq]->orb.i2k = 0;
1360 ioinfo[irq]->orb.cpa = (__u32) virt_to_phys (cpa);
1363 * If sync processing was requested we lock the sync ISC, modify the
1364 * device to present interrupts for this ISC only and switch the
1365 * CPU to handle this ISC + the console ISC exclusively.
1367 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1368 ret = enable_cpu_sync_isc (irq);
1376 if (flag & DOIO_DONT_CALL_INTHDLR) {
1377 ioinfo[irq]->ui.flags.repnone = 1;
1382 * Issue "Start subchannel" and process condition code
1384 if (flag & DOIO_USE_DIAG98) {
1385 ioinfo[irq]->orb.key = get_storage_key() >> 4;
1386 ioinfo[irq]->orb.cpa =
1387 (__u32) pfix_get_addr((void *)ioinfo[irq]->orb.cpa);
1388 ccode = diag98 (irq, &(ioinfo[irq]->orb));
1390 ccode = ssch (irq, &(ioinfo[irq]->orb));
1393 sprintf (dbf_txt, "ccode:%d", ccode);
1394 CIO_TRACE_EVENT (4, dbf_txt);
1399 if (!ioinfo[irq]->ui.flags.w4sense) {
1401 * init the device driver specific devstat irb area
1403 * Note : don't clear saved irb info in case of sense !
1405 memset (&((devstat_t *) ioinfo[irq]->irq_desc.dev_id)->
1406 ii.irb, '\0', sizeof (irb_t));
1409 memset (&ioinfo[irq]->devstat.ii.irb, '\0', sizeof (irb_t));
1412 * initialize device status information
1414 ioinfo[irq]->ui.flags.busy = 1;
1415 ioinfo[irq]->ui.flags.doio = 1;
1417 ioinfo[irq]->u_intparm = user_intparm;
1418 ioinfo[irq]->devstat.cstat = 0;
1419 ioinfo[irq]->devstat.dstat = 0;
1420 ioinfo[irq]->devstat.lpum = 0;
1421 ioinfo[irq]->devstat.flag = DEVSTAT_START_FUNCTION;
1422 ioinfo[irq]->devstat.scnt = 0;
1424 ioinfo[irq]->ui.flags.fast = 0;
1425 ioinfo[irq]->ui.flags.repall = 0;
1428 * Check for either early (FAST) notification requests
1429 * or if we are to return all interrupt info.
1430 * Default is to call IRQ handler at secondary status only
1432 if (flag & DOIO_EARLY_NOTIFICATION) {
1433 ioinfo[irq]->ui.flags.fast = 1;
1434 } else if (flag & DOIO_REPORT_ALL) {
1435 ioinfo[irq]->ui.flags.repall = 1;
1440 * If synchronous I/O processing is requested, we have
1441 * to wait for the corresponding interrupt to occur by
1442 * polling the interrupt condition. However, as multiple
1443 * interrupts may be outstanding, we must not just wait
1444 * for the first interrupt, but must poll until ours
1447 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1448 unsigned long psw_mask;
1450 uint64_t time_start;
1458 * We shouldn't perform a TPI loop, waiting for an
1459 * interrupt to occur, but should load a WAIT PSW
1460 * instead. Otherwise we may keep the channel subsystem
1461 * busy, not able to present the interrupt. When our
1462 * sync. interrupt arrived we reset the I/O old PSW to
1463 * its original value.
1469 case 0: /* primary-space */
1470 psw_mask = _IO_PSW_MASK
1471 | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
1473 case 1: /* secondary-space */
1474 psw_mask = _IO_PSW_MASK
1475 | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
1477 case 2: /* access-register */
1478 psw_mask = _IO_PSW_MASK
1479 | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
1481 case 3: /* home-space */
1482 psw_mask = _IO_PSW_MASK
1483 | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
1486 panic ("start_IO() : unexpected "
1487 "address-space-control %d\n", ccode);
1492 * Martin didn't like modifying the new PSW, now we take
1493 * a fast exit in do_IRQ() instead
1495 *(__u32 *) __LC_SYNC_IO_WORD = 1;
1497 asm volatile ("STCK %0":"=m" (time_start));
1499 time_start = time_start >> 32;
1502 if (flag & DOIO_TIMEOUT) {
1503 tpi_info_t tpi_info = { 0, };
1506 if (tpi (&tpi_info) == 1) {
1507 io_sub = tpi_info.irq;
1510 udelay (100); /* usecs */
1515 if (((time_curr >> 32) -
1523 __load_psw_mask (psw_mask);
1527 (__u16 *) __LC_SUBCHANNEL_NR;
1532 ready = s390_process_IRQ (io_sub);
1535 * surrender when retry count's exceeded ...
1537 } while (!((io_sub == irq)
1541 *(__u32 *) __LC_SYNC_IO_WORD = 0;
1550 case 1: /* status pending */
1553 * Don't do an inline processing of pending interrupt conditions
1554 * while doing async. I/O. The interrupt will pop up when we are
1555 * enabled again and the I/O can be retried.
1557 if (!ioinfo[irq]->ui.flags.syncio) {
1562 ioinfo[irq]->devstat.flag = DEVSTAT_START_FUNCTION
1563 | DEVSTAT_STATUS_PENDING;
1566 * initialize the device driver specific devstat irb area
1568 memset (&((devstat_t *) ioinfo[irq]->irq_desc.dev_id)->ii.irb,
1569 '\0', sizeof (irb_t));
1572 * Let the common interrupt handler process the pending status.
1573 * However, we must avoid calling the user action handler, as
1574 * it won't be prepared to handle a pending status during
1575 * do_IO() processing inline. This also implies that process_IRQ
1576 * must terminate synchronously - especially if device sensing
1579 ioinfo[irq]->ui.flags.s_pend = 1;
1580 ioinfo[irq]->ui.flags.busy = 1;
1581 ioinfo[irq]->ui.flags.doio = 1;
1583 s390_process_IRQ (irq);
1585 ioinfo[irq]->ui.flags.s_pend = 0;
1586 ioinfo[irq]->ui.flags.busy = 0;
1587 ioinfo[irq]->ui.flags.doio = 0;
1589 ioinfo[irq]->ui.flags.repall = 0;
1590 ioinfo[irq]->ui.flags.w4final = 0;
1592 ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;
1595 * In multipath mode a condition code 3 implies the last path
1596 * has gone, except we have previously restricted the I/O to
1597 * a particular path. A condition code 1 (0 won't occur)
1598 * results in return code EIO as well as 3 with another path
1599 * than the one used (i.e. path available mask is non-zero).
1601 if (ioinfo[irq]->devstat.ii.irb.scsw.cc == 3) {
1603 if (ioinfo[irq]->opm == 0) {
1605 ioinfo[irq]->ui.flags.oper = 0;
1611 ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
1613 #ifdef CONFIG_DEBUG_IO
1617 stsch (irq, &(ioinfo[irq]->schib));
1620 "s390_start_IO(%04X) - irb for "
1621 "device %04X, after status pending\n",
1622 irq, ioinfo[irq]->devstat.devno);
1624 s390_displayhex (buffer,
1625 &(ioinfo[irq]->devstat.ii.irb),
1629 "s390_start_IO(%04X) - schib for "
1630 "device %04X, after status pending\n",
1631 irq, ioinfo[irq]->devstat.devno);
1633 s390_displayhex (buffer,
1634 &(ioinfo[irq]->schib),
1637 if (ioinfo[irq]->devstat.
1638 flag & DEVSTAT_FLAG_SENSE_AVAIL) {
1640 "s390_start_IO(%04X) "
1641 "- sense data for device %04X,"
1642 " after status pending\n",
1644 ioinfo[irq]->devstat.devno);
1646 s390_displayhex (buffer,
1647 ioinfo[irq]->irq_desc.
1648 dev_id->ii.sense.data,
1649 ioinfo[irq]->irq_desc.
1655 if (cio_debug_initialized) {
1656 stsch (irq, &(ioinfo[irq]->schib));
1658 sprintf(dbf_txt, "sp%x", irq);
1659 CIO_TRACE_EVENT(2, dbf_txt);
1660 CIO_TRACE_EVENT(2, "irb:");
1661 CIO_HEX_EVENT(2, &(ioinfo[irq]->devstat.ii.irb),
1663 CIO_TRACE_EVENT(2, "schib:");
1664 CIO_HEX_EVENT(2, &(ioinfo[irq]->schib),
1667 if (ioinfo[irq]->devstat.
1668 flag & DEVSTAT_FLAG_SENSE_AVAIL) {
1669 CIO_TRACE_EVENT(2, "sense:");
1670 CIO_HEX_EVENT(2, ioinfo[irq]->irq_desc.
1671 dev_id->ii.sense.data,
1672 ioinfo[irq]->irq_desc.
1679 ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
1680 ioinfo[irq]->ui.flags.oper = 1;
1691 default: /* device/path not operational */
1693 if (flag & DOIO_VALID_LPM) {
1694 ioinfo[irq]->opm &= ~lpm;
1696 ioinfo[irq]->opm = 0;
1700 if (ioinfo[irq]->opm == 0) {
1701 ioinfo[irq]->ui.flags.oper = 0;
1702 ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
1708 memcpy (ioinfo[irq]->irq_desc.dev_id,
1709 &(ioinfo[irq]->devstat), sizeof (devstat_t));
1711 #ifdef CONFIG_DEBUG_IO
1713 stsch (irq, &(ioinfo[irq]->schib));
1715 sprintf (buffer, "s390_start_IO(%04X) - schib for "
1716 "device %04X, after 'not oper' status\n",
1717 irq, ioinfo[irq]->devstat.devno);
1719 s390_displayhex (buffer,
1720 &(ioinfo[irq]->schib), sizeof (schib_t));
1722 if (cio_debug_initialized) {
1723 stsch (irq, &(ioinfo[irq]->schib));
1724 sprintf(dbf_txt, "no%x", irq);
1725 CIO_TRACE_EVENT(2, dbf_txt);
1726 CIO_HEX_EVENT(2, &(ioinfo[irq]->schib),
1734 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1735 disable_cpu_sync_isc (irq);
1739 if (flag & DOIO_DONT_CALL_INTHDLR) {
1740 ioinfo[irq]->ui.flags.repnone = 0;
/*
 * do_IO - initiate an I/O request (channel program 'cpa') on subchannel 'irq'.
 *
 * Sanity-checks that a device interrupt handler is registered (and that no
 * free_irq() is currently in progress), traces the request, and - if the
 * device is idle - hands the channel program to s390_start_IO().
 * NOTE(review): interior lines of this routine handle the busy/noio paths;
 * only the visible logic is documented here.
 */
1748 do_IO (int irq, /* IRQ */
1749 ccw1_t * cpa, /* channel program address */
1750 unsigned long user_intparm, /* interruption parameter */
1751 __u8 lpm, /* logical path mask */
1753 { /* flags : see above */
1759 /* handler registered ? or free_irq() in process already ? */
1760 if (!ioinfo[irq]->ui.flags.ready || ioinfo[irq]->ui.flags.unready) {
/* record the request in the CIO trace debug feature */
1765 sprintf (dbf_txt, "doIO%x", irq);
1766 CIO_TRACE_EVENT (4, dbf_txt);
/* I/O is currently suppressed for this subchannel (e.g. path verification) */
1768 if (ioinfo[irq]->ui.flags.noio)
1772 * Note: We ignore the device operational status - if not operational,
1773 * the SSCH will lead to an -ENODEV condition ...
1775 if (!ioinfo[irq]->ui.flags.busy) { /* last I/O completed ? */
1776 ret = s390_start_IO (irq, cpa, user_intparm, lpm, flag);
/*
 * resume_IO - resume a suspended I/O operation on subchannel 'irq'.
 * Only permitted while an operation is active (the 'busy' check below);
 * if the subchannel turns out to be not operational, both the 'oper'
 * and 'busy' state bits are cleared.
 */
1787 * resume suspended I/O operation
1797 sprintf (dbf_txt, "resIO%x", irq);
1798 CIO_TRACE_EVENT (4, dbf_txt);
1801 * We allow for 'resume' requests only for active I/O operations
1803 if (ioinfo[irq]->ui.flags.busy) {
/* trace the condition code of the resume attempt */
1808 sprintf (dbf_txt, "ccode:%d", ccode);
1809 CIO_TRACE_EVENT (4, dbf_txt);
1825 * useless to wait for request completion
1826 * as device is no longer operational !
1828 ioinfo[irq]->ui.flags.oper = 0;
1829 ioinfo[irq]->ui.flags.busy = 0;
1844 * Note: The "intparm" parameter is not used by the halt_IO() function
1845 * itself, as no ORB is built for the HSCH instruction. However,
1846 * it allows the device interrupt handler to associate the upcoming
1847 * interrupt with the halt_IO() request.
/*
 * halt_IO - terminate the current I/O request on subchannel 'irq' via
 * "Halt subchannel", optionally (DOIO_WAIT_FOR_INTERRUPT) waiting
 * synchronously until the resulting interrupt for this subchannel has
 * been processed.
 */
1850 halt_IO (int irq, unsigned long user_intparm, unsigned long flag)
1851 { /* possible DOIO_WAIT_FOR_INTERRUPT */
/* I/O currently suppressed for this subchannel */
1858 if (ioinfo[irq]->ui.flags.noio)
1862 * we only allow for halt_IO if the device has an I/O handler associated
1864 if (!ioinfo[irq]->ui.flags.ready) {
1868 * we ignore the halt_io() request if ending_status was received but
1869 * a SENSE operation is waiting for completion.
1871 if (ioinfo[irq]->ui.flags.w4sense) {
1874 sprintf (dbf_txt, "haltIO%x", irq);
1875 CIO_TRACE_EVENT (2, dbf_txt);
1878 * If sync processing was requested we lock the sync ISC,
1879 * modify the device to present interrupts for this ISC only
1880 * and switch the CPU to handle this ISC + the console ISC
1883 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1884 ret = enable_cpu_sync_isc (irq);
1891 * Issue "Halt subchannel" and process condition code
1895 sprintf (dbf_txt, "ccode:%d", ccode);
1896 CIO_TRACE_EVENT (2, dbf_txt);
/*
 * halt initiated: record the halt state; the devstat area is only
 * (re)initialized if no start-I/O request is still outstanding
 */
1901 ioinfo[irq]->ui.flags.haltio = 1;
1903 if (!ioinfo[irq]->ui.flags.doio) {
1904 ioinfo[irq]->ui.flags.busy = 1;
1905 ioinfo[irq]->u_intparm = user_intparm;
1906 ioinfo[irq]->devstat.cstat = 0;
1907 ioinfo[irq]->devstat.dstat = 0;
1908 ioinfo[irq]->devstat.lpum = 0;
1909 ioinfo[irq]->devstat.flag = DEVSTAT_HALT_FUNCTION;
1910 ioinfo[irq]->devstat.scnt = 0;
1913 ioinfo[irq]->devstat.flag |= DEVSTAT_HALT_FUNCTION;
1918 * If synchronous I/O processing is requested, we have
1919 * to wait for the corresponding interrupt to occur by
1920 * polling the interrupt condition. However, as multiple
1921 * interrupts may be outstanding, we must not just wait
1922 * for the first interrupt, but must poll until ours
1925 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
1928 unsigned long psw_mask;
1934 * We shouldn't perform a TPI loop, waiting for
1935 * an interrupt to occur, but should load a
1936 * WAIT PSW instead. Otherwise we may keep the
1937 * channel subsystem busy, not able to present
1938 * the interrupt. When our sync. interrupt
1939 * arrived we reset the I/O old PSW to its
/* build an I/O-wait PSW mask matching the current address-space-control mode */
1946 case 0: /* primary-space */
1947 psw_mask = _IO_PSW_MASK
1948 | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
1950 case 1: /* secondary-space */
1951 psw_mask = _IO_PSW_MASK
1952 | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
1954 case 2: /* access-register */
1955 psw_mask = _IO_PSW_MASK
1956 | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
1958 case 3: /* home-space */
1959 psw_mask = _IO_PSW_MASK
1960 | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
1963 panic ("halt_IO() : unexpected "
1964 "address-space-control %d\n", ccode);
1969 * Martin didn't like modifying the new PSW, now we take
1970 * a fast exit in do_IRQ() instead
/* flag "CPU in sync. I/O wait" for the fast exit in do_IRQ() */
1972 *(__u32 *) __LC_SYNC_IO_WORD = 1;
1975 __load_psw_mask (psw_mask);
/* woken up: fetch the interrupt source from lowcore and process it;
 * repeat until the interrupt for *our* subchannel ended the request */
1977 io_parm = *(__u32 *) __LC_IO_INT_PARM;
1978 io_sub = (__u32) * (__u16 *) __LC_SUBCHANNEL_NR;
1980 ready = s390_process_IRQ (io_sub);
1982 } while (!((io_sub == irq) && (ready == 1)));
1984 *(__u32 *) __LC_SYNC_IO_WORD = 0;
1991 case 1: /* status pending */
1994 * Don't do an inline processing of pending interrupt conditions
1995 * while doing async. I/O. The interrupt will pop up when we are
1996 * enabled again and the I/O can be retried.
1998 if (!ioinfo[irq]->ui.flags.syncio) {
2003 ioinfo[irq]->devstat.flag |= DEVSTAT_STATUS_PENDING;
2006 * initialize the device driver specific devstat irb area
2008 memset (&ioinfo[irq]->irq_desc.dev_id->ii.irb,
2009 '\0', sizeof (irb_t));
2012 * Let the common interrupt handler process the pending
2013 * status. However, we must avoid calling the user
2014 * action handler, as it won't be prepared to handle
2015 * a pending status during do_IO() processing inline.
2016 * This also implies that s390_process_IRQ must
2017 * terminate synchronously - especially if device
2018 * sensing is required.
2020 ioinfo[irq]->ui.flags.s_pend = 1;
2021 ioinfo[irq]->ui.flags.busy = 1;
2022 ioinfo[irq]->ui.flags.doio = 1;
2024 s390_process_IRQ (irq);
2026 ioinfo[irq]->ui.flags.s_pend = 0;
2027 ioinfo[irq]->ui.flags.busy = 0;
2028 ioinfo[irq]->ui.flags.doio = 0;
2029 ioinfo[irq]->ui.flags.repall = 0;
2030 ioinfo[irq]->ui.flags.w4final = 0;
2032 ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;
2035 * In multipath mode a condition code 3 implies the last
2036 * path has gone, except we have previously restricted
2037 * the I/O to a particular path. A condition code 1
2038 * (0 won't occur) results in return code EIO as well
2039 * as 3 with another path than the one used (i.e. path
2040 * available mask is non-zero).
2042 if (ioinfo[irq]->devstat.ii.irb.scsw.cc == 3) {
2044 ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
2045 ioinfo[irq]->ui.flags.oper = 0;
2048 ioinfo[irq]->devstat.flag &= ~DEVSTAT_NOT_OPER;
2049 ioinfo[irq]->ui.flags.oper = 1;
2060 default: /* device not operational */
/* release the sync ISC again if we acquired it above */
2067 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
2068 disable_cpu_sync_isc (irq);
2076 * Note: The "intparm" parameter is not used by the clear_IO() function
2077 * itself, as no ORB is built for the CSCH instruction. However,
2078 * it allows the device interrupt handler to associate the upcoming
2079 * interrupt with the clear_IO() request.
/*
 * clear_IO - terminate the current I/O request on subchannel 'irq' via
 * "Clear subchannel", optionally (DOIO_WAIT_FOR_INTERRUPT) waiting
 * synchronously until the resulting interrupt has been processed.
 * Structure parallels halt_IO(); CSCH cannot present "status pending"
 * or "busy" condition codes (see the cases near the end).
 */
2082 clear_IO (int irq, unsigned long user_intparm, unsigned long flag)
2083 { /* possible DOIO_WAIT_FOR_INTERRUPT */
2090 if (ioinfo[irq] == INVALID_STORAGE_AREA)
/* I/O currently suppressed for this subchannel */
2093 if (ioinfo[irq]->ui.flags.noio)
2096 * we only allow for clear_IO if the device has an I/O handler associated
2098 if (!ioinfo[irq]->ui.flags.ready)
2101 * we ignore the clear_io() request if ending_status was received but
2102 * a SENSE operation is waiting for completion.
2104 if (ioinfo[irq]->ui.flags.w4sense)
2107 sprintf (dbf_txt, "clearIO%x", irq);
2108 CIO_TRACE_EVENT (2, dbf_txt);
2111 * If sync processing was requested we lock the sync ISC,
2112 * modify the device to present interrupts for this ISC only
2113 * and switch the CPU to handle this ISC + the console ISC
2116 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
2117 ret = enable_cpu_sync_isc (irq);
2124 * Issue "Clear subchannel" and process condition code
2128 sprintf (dbf_txt, "ccode:%d", ccode);
2129 CIO_TRACE_EVENT (2, dbf_txt);
/*
 * clear initiated: record the (halt/clear) state; the devstat area is
 * only (re)initialized if no start-I/O request is still outstanding
 */
2134 ioinfo[irq]->ui.flags.haltio = 1;
2136 if (!ioinfo[irq]->ui.flags.doio) {
2137 ioinfo[irq]->ui.flags.busy = 1;
2138 ioinfo[irq]->u_intparm = user_intparm;
2139 ioinfo[irq]->devstat.cstat = 0;
2140 ioinfo[irq]->devstat.dstat = 0;
2141 ioinfo[irq]->devstat.lpum = 0;
2142 ioinfo[irq]->devstat.flag = DEVSTAT_CLEAR_FUNCTION;
2143 ioinfo[irq]->devstat.scnt = 0;
2146 ioinfo[irq]->devstat.flag |= DEVSTAT_CLEAR_FUNCTION;
2151 * If synchronous I/O processing is requested, we have
2152 * to wait for the corresponding interrupt to occur by
2153 * polling the interrupt condition. However, as multiple
2154 * interrupts may be outstanding, we must not just wait
2155 * for the first interrupt, but must poll until ours
2158 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
2161 unsigned long psw_mask;
2167 * We shouldn't perform a TPI loop, waiting for
2168 * an interrupt to occur, but should load a
2169 * WAIT PSW instead. Otherwise we may keep the
2170 * channel subsystem busy, not able to present
2171 * the interrupt. When our sync. interrupt
2172 * arrived we reset the I/O old PSW to its
/* build an I/O-wait PSW mask matching the current address-space-control mode */
2179 case 0: /* primary-space */
2180 psw_mask = _IO_PSW_MASK
2181 | _PSW_PRIM_SPACE_MODE | _PSW_IO_WAIT;
2183 case 1: /* secondary-space */
2184 psw_mask = _IO_PSW_MASK
2185 | _PSW_SEC_SPACE_MODE | _PSW_IO_WAIT;
2187 case 2: /* access-register */
2188 psw_mask = _IO_PSW_MASK
2189 | _PSW_ACC_REG_MODE | _PSW_IO_WAIT;
2191 case 3: /* home-space */
2192 psw_mask = _IO_PSW_MASK
2193 | _PSW_HOME_SPACE_MODE | _PSW_IO_WAIT;
2196 panic ("clear_IO() : unexpected "
2197 "address-space-control %d\n", ccode);
2202 * Martin didn't like modifying the new PSW, now we take
2203 * a fast exit in do_IRQ() instead
/* flag "CPU in sync. I/O wait" for the fast exit in do_IRQ() */
2205 *(__u32 *) __LC_SYNC_IO_WORD = 1;
2208 __load_psw_mask (psw_mask);
/* woken up: process interrupts until ours ended the request */
2210 io_parm = *(__u32 *) __LC_IO_INT_PARM;
2211 io_sub = (__u32) * (__u16 *) __LC_SUBCHANNEL_NR;
2213 ready = s390_process_IRQ (io_sub);
2215 } while (!((io_sub == irq) && (ready == 1)));
2217 *(__u32 *) __LC_SYNC_IO_WORD = 0;
2224 case 1: /* no status pending for csh */
2228 case 2: /* no busy for csh */
2232 default: /* device not operational */
/* release the sync ISC again if we acquired it above */
2239 if (flag & DOIO_WAIT_FOR_INTERRUPT) {
2240 disable_cpu_sync_isc (irq);
2248 * Function: cancel_IO
2249 * Issues a "Cancel Subchannel" on the specified subchannel
2250 * Note: We don't need any fancy intparms and flags here
2251 * since xsch is executed synchronously.
2252 * Only for common I/O internal use as for now.
2264 sprintf (dbf_txt, "cancelIO%x", irq);
2265 CIO_TRACE_EVENT (2, dbf_txt);
/* trace the XSCH condition code */
2269 sprintf (dbf_txt, "ccode:%d", ccode);
2270 CIO_TRACE_EVENT (2, dbf_txt);
/* map the condition code from xsch to a return value */
2274 case 0: /* success */
2278 case 1: /* status pending */
2283 case 2: /* not applicable */
2287 default: /* not oper */
2295 * do_IRQ() handles all normal I/O device IRQ's (the special
2296 * SMP cross-CPU interrupts have their own specific
2301 do_IRQ (struct pt_regs regs)
2304 * Get interrupt info from lowcore
2306 volatile tpi_info_t *tpi_info = (tpi_info_t *) (__LC_SUBCHANNEL_ID);
2307 int cpu = smp_processor_id ();
2310 * take fast exit if CPU is in sync. I/O state
2312 * Note: we have to turn off the WAIT bit and re-disable
2313 * interrupts prior to return as this was the initial
2314 * entry condition to synchronous I/O.
2316 if (*(__u32 *) __LC_SYNC_IO_WORD) {
2317 regs.psw.mask &= ~(_PSW_WAIT_MASK_BIT | _PSW_IO_MASK_BIT);
2321 #ifdef CONFIG_FAST_IRQ
2323 #endif /* CONFIG_FAST_IRQ */
2326 * Non I/O-subchannel thin interrupts are processed differently
2328 if (tpi_info->adapter_IO == 1 &&
2329 tpi_info->int_type == IO_INTERRUPT_TYPE) {
2330 irq_enter (cpu, -1);
2331 do_adapter_IO (tpi_info->intparm);
2334 unsigned int irq = tpi_info->irq;
2339 * instead of boxing the device, we need to schedule device
2340 * recognition, the interrupt stays pending. We need to
2341 * dynamically allocate an ioinfo structure, etc..
2343 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
2344 return; /* this keeps the device boxed ... */
2347 if (ioinfo[irq]->st) {
2348 /* How can that be? */
2349 printk(KERN_WARNING "Received interrupt on "
2350 "non-IO subchannel %x!\n", irq);
/* normal I/O-subchannel interrupt: process it under the per-irq lock */
2354 irq_enter (cpu, irq);
2355 s390irq_spin_lock (irq);
2356 s390_process_IRQ (irq);
2357 s390irq_spin_unlock (irq);
2358 irq_exit (cpu, irq);
2361 #ifdef CONFIG_FAST_IRQ
2364 * Are more interrupts pending?
2365 * If so, the tpi instruction will update the lowcore
2366 * to hold the info for the next interrupt.
2368 } while (tpi (NULL) != 0);
2370 #endif /* CONFIG_FAST_IRQ */
2376 * s390_process_IRQ() handles status pending situations and interrupts
2378 * Called by : do_IRQ() - for "real" interrupts
2379 * s390_start_IO, halt_IO()
2380 * - status pending cond. after SSCH, or HSCH
2381 * disable_subchannel() - status pending conditions (after MSCH)
2383 * Returns: 0 - no ending status received, no further action taken
2384 * 1 - interrupt handler was called with ending status
2387 s390_process_IRQ (unsigned int irq)
2389 int ccode; /* cond code from tsch() operation */
2390 int irb_cc; /* cond code from irb */
2391 int sdevstat; /* struct devstat size to copy */
2392 unsigned int fctl; /* function control */
2393 unsigned int stctl; /* status control */
2394 unsigned int actl; /* activity control */
2397 int ending_status = 0;
2398 int allow4handler = 1;
/* optional per-CPU interrupt accounting (for /proc statistics) */
2406 if (cio_count_irqs) {
2407 int cpu = smp_processor_id ();
2408 s390_irq_count[cpu]++;
2411 CIO_TRACE_EVENT (3, "procIRQ");
2412 sprintf (dbf_txt, "%x", irq);
2413 CIO_TRACE_EVENT (3, dbf_txt);
2415 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
2416 /* we can't properly process the interrupt ... */
2417 #ifdef CONFIG_DEBUG_IO
2418 printk (KERN_CRIT "s390_process_IRQ(%04X) - got interrupt "
2419 "for non-initialized subchannel!\n", irq);
2420 #endif /* CONFIG_DEBUG_IO */
2422 "s390_process_IRQ(%04X) - got interrupt "
2423 "for non-initialized subchannel!\n",
/* clear the interrupt condition with a throw-away irb */
2425 tsch (irq, p_init_irb);
2430 if (ioinfo[irq]->st) {
/* dp: common I/O layer's devstat; udp: device driver's devstat */
2436 dp = &ioinfo[irq]->devstat;
2437 udp = ioinfo[irq]->irq_desc.dev_id;
2440 * It might be possible that a device was not-oper. at the time
2441 * of free_irq() processing. This means the handler is no longer
2442 * available when the device possibly becomes ready again. In
2443 * this case we perform delayed disable_subchannel() processing.
2445 if (!ioinfo[irq]->ui.flags.ready) {
2446 if (!ioinfo[irq]->ui.flags.d_disable) {
2447 #ifdef CONFIG_DEBUG_IO
2448 printk (KERN_CRIT "s390_process_IRQ(%04X) "
2449 "- no interrupt handler registered "
2450 "for device %04X !\n",
2451 irq, ioinfo[irq]->devstat.devno);
2452 #endif /* CONFIG_DEBUG_IO */
2454 "s390_process_IRQ(%04X) "
2455 "- no interrupt handler "
2456 "registered for device "
2459 ioinfo[irq]->devstat.devno);
2464 * retrieve the i/o interrupt information (irb),
2465 * update the device specific status information
2466 * and possibly call the interrupt handler.
2468 * Note 1: At this time we don't process the resulting
2469 * condition code (ccode) from tsch(), although
2470 * we probably should.
2472 * Note 2: Here we will have to check for channel
2473 * check conditions and call a channel check
2476 * Note 3: If a start function was issued, the interruption
2477 * parameter relates to it. If a halt function was
2478 * issued for an idle device, the intparm must not
2479 * be taken from lowcore, but from the devstat area.
2481 ccode = tsch (irq, &(dp->ii.irb));
2483 sprintf (dbf_txt, "ccode:%d", ccode);
2484 CIO_TRACE_EVENT (3, dbf_txt);
2487 #ifdef CONFIG_DEBUG_IO
2488 printk (KERN_INFO "s390_process_IRQ(%04X) - no status "
2489 "pending...\n", irq);
2490 #endif /* CONFIG_DEBUG_IO */
2492 "s390_process_IRQ(%04X) - no status pending\n",
2494 } else if (ccode == 3) {
2495 #ifdef CONFIG_DEBUG_IO
2496 printk (KERN_WARNING "s390_process_IRQ(%04X) - subchannel "
2497 "is not operational!\n",
2499 #endif /* CONFIG_DEBUG_IO */
2501 "s390_process_IRQ(%04X) - subchannel "
2502 "is not operational!\n",
2507 * We must only accumulate the status if the device is busy already
2509 if (ioinfo[irq]->ui.flags.busy) {
2510 dp->dstat |= dp->ii.irb.scsw.dstat;
2511 dp->cstat |= dp->ii.irb.scsw.cstat;
2512 dp->intparm = ioinfo[irq]->u_intparm;
2515 dp->dstat = dp->ii.irb.scsw.dstat;
2516 dp->cstat = dp->ii.irb.scsw.cstat;
2518 dp->flag = 0; /* reset status flags */
/* remember the last path used (from the extended-status word) */
2523 dp->lpum = dp->ii.irb.esw.esw1.lpum;
2526 * reset device-busy bit if no longer set in irb
2528 if ((dp->dstat & DEV_STAT_BUSY)
2529 && ((dp->ii.irb.scsw.dstat & DEV_STAT_BUSY) == 0)) {
2530 dp->dstat &= ~DEV_STAT_BUSY;
2535 * Save residual count and CCW information in case primary and
2536 * secondary status are presented with different interrupts.
2538 if (dp->ii.irb.scsw.stctl
2539 & (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_INTER_STATUS)) {
2542 * If the subchannel status shows status pending
2543 * and we received a check condition, the count
2544 * information is not meaningful.
2547 if (!((dp->ii.irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)
2548 && (dp->ii.irb.scsw.cstat
2549 & (SCHN_STAT_CHN_DATA_CHK
2550 | SCHN_STAT_CHN_CTRL_CHK
2551 | SCHN_STAT_INTF_CTRL_CHK
2552 | SCHN_STAT_PROG_CHECK
2553 | SCHN_STAT_PROT_CHECK
2554 | SCHN_STAT_CHAIN_CHECK)))) {
2556 dp->rescnt = dp->ii.irb.scsw.count;
2558 dp->rescnt = SENSE_MAX_COUNT;
2561 dp->cpa = dp->ii.irb.scsw.cpa;
/* the scsw condition code drives the main switch below (0/1/3) */
2564 irb_cc = dp->ii.irb.scsw.cc;
2567 * check for any kind of channel or interface control check but don't
2568 * issue the message for the console device
2570 if ((dp->ii.irb.scsw.cstat
2571 & (SCHN_STAT_CHN_DATA_CHK
2572 | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) {
2573 if (irq != cons_dev)
2574 printk (KERN_WARNING
2575 "Channel-Check or Interface-Control-Check "
2577 " ... device %04X on subchannel %04X, dev_stat "
2578 ": %02X sch_stat : %02X\n",
2579 ioinfo[irq]->devstat.devno, irq, dp->dstat,
2583 "Interface-Control-Check received\n");
2585 "... device %04X on subchannel %04X,"
2586 " dev_stat: %02X sch_stat: %02X\n",
2587 ioinfo[irq]->devstat.devno, irq,
2588 dp->dstat, dp->cstat);
/* determine whether concurrent sense data was presented with the irb */
2595 if (dp->ii.irb.scsw.ectl == 0) {
2597 } else if ((dp->ii.irb.scsw.stctl == SCSW_STCTL_STATUS_PEND)
2598 && (dp->ii.irb.scsw.eswf == 0)) {
2600 } else if ((dp->ii.irb.scsw.stctl ==
2601 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_INTER_STATUS))
2602 && ((dp->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) == 0)) {
2605 issense = dp->ii.irb.esw.esw0.erw.cons;
2610 dp->scnt = dp->ii.irb.esw.esw0.erw.scnt;
2611 dp->flag |= DEVSTAT_FLAG_SENSE_AVAIL;
2613 sdevstat = sizeof (devstat_t);
2615 #ifdef CONFIG_DEBUG_IO
2616 if (irq != cons_dev)
2617 printk (KERN_DEBUG "s390_process_IRQ( %04X ) : "
2618 "concurrent sense bytes avail %d\n",
2622 "s390_process_IRQ( %04X ): "
2623 "concurrent sense bytes avail %d\n",
2626 /* don't copy the sense data area ! */
2627 sdevstat = sizeof (devstat_t) - SENSE_MAX_COUNT;
2632 case 1: /* status pending */
/* fall through to the cc 0 processing with the pending flag set */
2634 dp->flag |= DEVSTAT_STATUS_PENDING;
2636 case 0: /* normal i/o interruption */
2638 fctl = dp->ii.irb.scsw.fctl;
2639 stctl = dp->ii.irb.scsw.stctl;
2640 actl = dp->ii.irb.scsw.actl;
2643 sprintf (buffer, "s390_process_IRQ(%04X) - irb for "
2644 "device %04X after channel check "
2645 "or interface control check\n",
2648 s390_displayhex (buffer, &(dp->ii.irb), sizeof (irb_t));
2649 sprintf(dbf_txt, "chk%x", irq);
2650 CIO_TRACE_EVENT(0, dbf_txt);
2651 CIO_HEX_EVENT(0, &(dp->ii.irb), sizeof (irb_t));
2654 ioinfo[irq]->stctl |= stctl;
/* an interrupt counts as 'ending' on secondary status or a sole
 * (possibly alert) status-pending condition */
2656 ending_status = (stctl & SCSW_STCTL_SEC_STATUS)
2658 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
2659 || (stctl == SCSW_STCTL_STATUS_PEND);
2662 * Check for unsolicited interrupts - for debug purposes only
2664 * We only consider an interrupt as unsolicited, if the device was not
2665 * actively in use (busy) and an interrupt other than an ALERT status
2668 * Note: We must not issue a message to the console, if the
2669 * unsolicited interrupt applies to the console device
2672 if (!(stctl & SCSW_STCTL_ALERT_STATUS)
2673 && (ioinfo[irq]->ui.flags.busy == 0)) {
2675 #ifdef CONFIG_DEBUG_IO
2676 if (irq != cons_dev)
2678 "Unsolicited interrupt received for "
2679 "device %04X on subchannel %04X\n"
2680 " ... device status : %02X "
2681 "subchannel status : %02X\n",
2682 dp->devno, irq, dp->dstat, dp->cstat);
2684 sprintf (buffer, "s390_process_IRQ(%04X) - irb for "
2685 "device %04X, ending_status %d\n",
2686 irq, dp->devno, ending_status);
2688 s390_displayhex (buffer, &(dp->ii.irb), sizeof (irb_t));
2691 "Unsolicited interrupt "
2692 "received for device %04X "
2693 "on subchannel %04X\n"
2694 " ... device status : %02X "
2695 "subchannel status : %02X\n",
2697 irq, dp->dstat, dp->cstat);
2698 sprintf(dbf_txt, "uint%x", irq);
2699 CIO_TRACE_EVENT(2, dbf_txt);
2700 CIO_HEX_EVENT(2, &(dp->ii.irb), sizeof (irb_t));
2704 * take fast exit if no handler is available
2706 if (!ioinfo[irq]->ui.flags.ready)
2707 return (ending_status);
2710 * Check whether we must issue a SENSE CCW ourselves if there is no
2711 * concurrent sense facility installed for the subchannel.
2713 * Note: We should check for ioinfo[irq]->ui.flags.consns but VM
2714 * violates the ESA/390 architecture and doesn't present an
2715 * operand exception for virtual devices without concurrent
2716 * sense facility available/supported when enabling the
2717 * concurrent sense facility.
2719 if (((dp->ii.irb.scsw.dstat & DEV_STAT_UNIT_CHECK)
2721 || (ioinfo[irq]->ui.flags.delsense && ending_status)) {
2723 ccw1_t *s_ccw = &ioinfo[irq]->senseccw;
2724 unsigned long s_flag = 0;
2726 if (ending_status) {
2727 /* there is a chance that the command
2728 * that gave us the unit check actually
2729 * was a basic sense, so we must not
2730 * overwrite *udp in that case
2732 if (ioinfo[irq]->ui.flags.w4sense &&
2733 (dp->ii.irb.scsw.dstat & DEV_STAT_UNIT_CHECK)) {
2734 CIO_MSG_EVENT(4,"double unit check irq %04x, dstat %02x,"
2735 "flags %8x\n", irq, dp->ii.irb.scsw.dstat,
2736 ioinfo[irq]->ui.info, ending_status);
2739 * We copy the current status information into the device driver
2740 * status area. Then we can use the local devstat area for device
2741 * sensing. When finally calling the IRQ handler we must not overlay
2742 * the original device status but copy the sense data only.
2744 memcpy (udp, dp, sizeof (devstat_t));
/* build the BASIC SENSE channel program */
2747 s_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
2749 (__u32) virt_to_phys (ioinfo[irq]->
2751 s_ccw->count = SENSE_MAX_COUNT;
2752 s_ccw->flags = CCW_FLAG_SLI;
2755 * If free_irq() or a sync do_IO/s390_start_IO() is in
2756 * process we have to sense synchronously
2758 if (ioinfo[irq]->ui.flags.unready
2759 || ioinfo[irq]->ui.flags.syncio)
2760 s_flag = DOIO_WAIT_FOR_INTERRUPT
2765 s_flag = DOIO_VALID_LPM;
2770 * It does not matter whether this is a sync. or async.
2771 * SENSE request, but we have to assure we don't call
2772 * the irq handler now, but keep the irq in busy state.
2773 * In sync. mode s390_process_IRQ() is called recursively,
2774 * while in async. mode we re-enter do_IRQ() with the
2777 * Note : this may be a delayed sense request !
2781 ioinfo[irq]->ui.flags.fast = 0;
2782 ioinfo[irq]->ui.flags.repall = 0;
2783 ioinfo[irq]->ui.flags.w4final = 0;
2784 ioinfo[irq]->ui.flags.delsense = 0;
2788 dp->rescnt = SENSE_MAX_COUNT;
2790 ioinfo[irq]->ui.flags.w4sense = 1;
2792 ret_io = s390_start_IO (irq, s_ccw, 0xE2C5D5E2, /* = SENSe */
2800 * The device is no longer operational.
2801 * We won't get any sense data.
2803 ioinfo[irq]->ui.flags.w4sense = 0;
2804 ioinfo[irq]->ui.flags.oper = 0;
2805 allow4handler = 1; /* to notify the driver */
2809 * The channel subsystem is either busy, or we have
2810 * a status pending. Retry later.
2812 ioinfo[irq]->ui.flags.w4sense = 0;
2813 ioinfo[irq]->ui.flags.delsense = 1;
2816 printk(KERN_ERR"irq %04X: Unexpected rc %d "
2817 "for BASIC SENSE!\n", irq, ret_io);
2818 ioinfo[irq]->ui.flags.w4sense = 0;
2823 * we received an Unit Check but we have no final
2824 * status yet, therefore we must delay the SENSE
2825 * processing. However, we must not report this
2826 * intermediate status to the device interrupt
2829 ioinfo[irq]->ui.flags.fast = 0;
2830 ioinfo[irq]->ui.flags.repall = 0;
2832 ioinfo[irq]->ui.flags.delsense = 1;
2840 * we allow for the device action handler if .
2841 * - we received ending status
2842 * - the action handler requested to see all interrupts
2843 * - we received an intermediate status
2844 * - fast notification was requested (primary status)
2845 * - unsolicited interrupts
2848 if (allow4handler) {
2849 allow4handler = ending_status
2850 || (ioinfo[irq]->ui.flags.repall)
2851 || (stctl & SCSW_STCTL_INTER_STATUS)
2852 || ((ioinfo[irq]->ui.flags.fast)
2853 && (stctl & SCSW_STCTL_PRIM_STATUS))
2854 || (ioinfo[irq]->ui.flags.oper == 0);
2859 * We used to copy the device status information right before
2860 * calling the device action handler. However, in status
2861 * pending situations during do_IO() or halt_IO(), as well as
2862 * enable_subchannel/disable_subchannel processing we must
2863 * synchronously return the status information and must not
2864 * call the device action handler.
2867 if (allow4handler) {
2869 * if we were waiting for sense data we copy the sense
2870 * bytes only as the original status information was
2871 * saved prior to sense already.
2873 if (ioinfo[irq]->ui.flags.w4sense) {
2876 ioinfo[irq]->devstat.rescnt;
2878 #ifdef CONFIG_DEBUG_IO
2879 if (irq != cons_dev)
2881 "s390_process_IRQ( %04X ) : "
2882 "BASIC SENSE bytes avail %d\n",
2886 "s390_process_IRQ( %04X ): "
2887 "BASIC SENSE bytes avail %d\n",
2889 ioinfo[irq]->ui.flags.w4sense = 0;
2890 udp->flag |= DEVSTAT_FLAG_SENSE_AVAIL;
2891 udp->scnt = sense_count;
2893 if (sense_count > 0) {
2894 memcpy (udp->ii.sense.data,
2895 ioinfo[irq]->sense_data,
2897 } else if (sense_count == 0) {
2898 udp->flag &= ~DEVSTAT_FLAG_SENSE_AVAIL;
2901 ("s390_process_IRQ(%04x) encountered "
2902 "negative sense count\n", irq);
2906 memcpy (udp, dp, sdevstat);
2913 * for status pending situations other than deferred interrupt
2914 * conditions detected by s390_process_IRQ() itself we must not
2915 * call the handler. This will synchronously be reported back
2916 * to the caller instead, e.g. when detected during do_IO().
2918 if (ioinfo[irq]->ui.flags.s_pend
2919 || ioinfo[irq]->ui.flags.unready
2920 || ioinfo[irq]->ui.flags.repnone) {
2921 if (ending_status) {
/* request is complete: drop all per-request state bits */
2923 ioinfo[irq]->ui.flags.busy = 0;
2924 ioinfo[irq]->ui.flags.doio = 0;
2925 ioinfo[irq]->ui.flags.haltio = 0;
2926 ioinfo[irq]->ui.flags.fast = 0;
2927 ioinfo[irq]->ui.flags.repall = 0;
2928 ioinfo[irq]->ui.flags.w4final = 0;
2930 dp->flag |= DEVSTAT_FINAL_STATUS;
2931 udp->flag |= DEVSTAT_FINAL_STATUS;
2940 * Call device action handler if applicable
2942 if (allow4handler) {
2945 * We only reset the busy condition when we are sure that no further
2946 * interrupt is pending for the current I/O request (ending_status).
2948 if (ending_status || !ioinfo[irq]->ui.flags.oper) {
2949 ioinfo[irq]->ui.flags.oper = 1; /* dev IS oper */
2951 ioinfo[irq]->ui.flags.busy = 0;
2952 ioinfo[irq]->ui.flags.doio = 0;
2953 ioinfo[irq]->ui.flags.haltio = 0;
2954 ioinfo[irq]->ui.flags.fast = 0;
2955 ioinfo[irq]->ui.flags.repall = 0;
2956 ioinfo[irq]->ui.flags.w4final = 0;
2958 dp->flag |= DEVSTAT_FINAL_STATUS;
2959 udp->flag |= DEVSTAT_FINAL_STATUS;
2961 if (!ioinfo[irq]->ui.flags.killio)
2962 ioinfo[irq]->irq_desc.handler (irq, udp, NULL);
2965 * reset intparm after final status or we will badly present unsolicited
2966 * interrupts with a intparm value possibly no longer valid.
/* not the final interrupt yet: remember we still wait for it */
2971 ioinfo[irq]->ui.flags.w4final = 1;
2974 * Eventually reset subchannel PCI status and
2975 * set the PCI or SUSPENDED flag in the user
2976 * device status block if appropriate.
2978 if (dp->cstat & SCHN_STAT_PCI) {
2979 udp->flag |= DEVSTAT_PCI;
2980 dp->cstat &= ~SCHN_STAT_PCI;
2983 if (actl & SCSW_ACTL_SUSPENDED) {
2984 udp->flag |= DEVSTAT_SUSPENDED;
2988 ioinfo[irq]->irq_desc.handler (irq, udp, NULL);
2996 case 3: /* device/path not operational */
2998 ioinfo[irq]->ui.flags.busy = 0;
2999 ioinfo[irq]->ui.flags.doio = 0;
3000 ioinfo[irq]->ui.flags.haltio = 0;
3005 if ((dp->ii.irb.scsw.fctl != 0) &&
3006 ((dp->ii.irb.scsw.stctl & SCSW_STCTL_STATUS_PEND) != 0) &&
3007 (((dp->ii.irb.scsw.stctl & SCSW_STCTL_INTER_STATUS) == 0) ||
3008 ((dp->ii.irb.scsw.actl & SCSW_ACTL_SUSPENDED) != 0)))
/* path-not-operational: remove the affected paths from the opm */
3009 if (dp->ii.irb.scsw.pno) {
3010 stsch(irq, &ioinfo[irq]->schib);
3012 ~ioinfo[irq]->schib.pmcw.pnom;
3015 if (ioinfo[irq]->opm == 0) {
3016 ioinfo[irq]->ui.flags.oper = 0;
3020 ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
3021 ioinfo[irq]->devstat.flag |= DEVSTAT_FINAL_STATUS;
3025 * When we find a device "not oper" we save the status
3026 * information into the device status area and call the
3027 * device specific interrupt handler.
3029 * Note: currently we don't have any way to reenable
3030 * the device unless an unsolicited interrupt
3031 * is presented. We don't check for spurious
3032 * interrupts on "not oper" conditions.
3036 ioinfo[irq]->ui.flags.fast = 0;
3037 ioinfo[irq]->ui.flags.repall = 0;
3038 ioinfo[irq]->ui.flags.w4final = 0;
3041 * take fast exit if no handler is available
3043 if (!ioinfo[irq]->ui.flags.ready)
3044 return (ending_status);
3047 * Special case: We got a deferred cc 3 on a basic sense.
3048 * We have to notify the device driver of the former unit
3049 * check, but must not confuse it by calling it with the status
3050 * for the failed basic sense.
3052 if (ioinfo[irq]->ui.flags.w4sense)
3053 ioinfo[irq]->ui.flags.w4sense = 0;
3055 memcpy (udp, &(ioinfo[irq]->devstat), sdevstat);
3057 ioinfo[irq]->devstat.intparm = 0;
3059 if (!ioinfo[irq]->ui.flags.s_pend
3060 && !ioinfo[irq]->ui.flags.repnone
3061 && !ioinfo[irq]->ui.flags.killio) {
3063 ioinfo[irq]->irq_desc.handler (irq, udp, NULL);
/* deferred path verification / killio cleanup after the final status */
3072 if (ending_status &&
3073 ioinfo[irq]->ui.flags.noio &&
3074 !ioinfo[irq]->ui.flags.syncio &&
3075 !ioinfo[irq]->ui.flags.w4sense) {
3076 if(ioinfo[irq]->ui.flags.ready) {
3077 s390_schedule_path_verification(irq);
3079 ioinfo[irq]->ui.flags.killio = 0;
3080 ioinfo[irq]->ui.flags.noio = 0;
3084 return (ending_status);
3088 * Set the special i/o-interruption subclass 7 for the
3089 * device specified by parameter irq. There can only
3090 * be a single device been operated on this special
3091 * isc. This function is aimed being able to check
3092 * on special device interrupts in disabled state,
3093 * without having to delay I/O processing (by queueing)
3094 * for non-console devices.
3096 * Setting of this isc is done by set_cons_dev().
3097 * wait_cons_dev() allows
3098 * to actively wait on an interrupt for this device in
3099 * disabled state. When the interrupt condition is
3100 * encountered, wait_cons_dev() calls do_IRQ() to have
3101 * the console device driver processing the interrupt.
3104 set_cons_dev (int irq)
3115 sprintf (dbf_txt, "scons%x", irq);
3116 CIO_TRACE_EVENT (4, dbf_txt);
3119 * modify the indicated console device to operate
3120 * on special console interrupt subclass 7
3122 ccode = stsch (irq, &(ioinfo[irq]->schib));
/* stsch failed: flag the device as not operational */
3126 ioinfo[irq]->devstat.flag |= DEVSTAT_NOT_OPER;
3128 ioinfo[irq]->schib.pmcw.isc = 7;
3130 ccode = msch (irq, &(ioinfo[irq]->schib));
3138 * enable console I/O-interrupt subclass 7
3140 ctl_set_bit (6, 24);
/*
 * wait_cons_dev() - actively wait, with I/O interrupts disabled,
 * until the outstanding I/O on the console device completes.
 * CR6 is narrowed to console isc 7 only; pending interruptions are
 * fetched by hand via tpi() and handed to s390_process_IRQ().
 * Only valid for the subchannel registered as cons_dev.
 */
3149 wait_cons_dev (int irq)
/* reject any subchannel other than the registered console device */
3155 if (irq != cons_dev)
3158 sprintf (dbf_txt, "wcons%x", irq);
3159 CIO_TRACE_EVENT (4, dbf_txt);
3162 * before entering the spinlock we may already have
3163 * processed the interrupt on a different CPU ...
3165 if (ioinfo[irq]->ui.flags.busy == 1) {
3166 long cr6 __attribute__ ((aligned (8)));
3169 * disable all, but isc 7 (console device)
3171 __ctl_store (cr6, 6, 6);
3174 __ctl_load (cr6, 6, 6);
3177 tpi_info_t tpi_info = { 0, };
/* tpi() == 1 -> an interruption was pending and has been stored */
3178 if (tpi (&tpi_info) == 1) {
3179 s390_process_IRQ (tpi_info.irq);
/* NOTE(review): lock is cycled here, presumably so other CPUs
 * can get at this subchannel - confirm against full source */
3181 s390irq_spin_unlock (irq);
3183 s390irq_spin_lock (irq);
3186 } while (ioinfo[irq]->ui.flags.busy == 1);
3189 * restore previous isc value
3192 __ctl_load (cr6, 6, 6);
/*
 * enable_cpu_sync_isc() - switch subchannel irq onto the synchronous
 * interruption subclass (isc 5) and adjust CR6 so this CPU can process
 * the device's interrupts synchronously. Exclusive ownership of the
 * sync isc is arbitrated through the 'sync_isc' atomic (spin until we
 * own it); recursion is reference-counted in sync_isc_cnt, and only
 * the first enablement runs the STSCH/MSCH path.
 */
3200 enable_cpu_sync_isc (int irq)
3203 long cr6 __attribute__ ((aligned (8)));
3209 sprintf (dbf_txt, "enisc%x", irq);
3210 CIO_TRACE_EVENT (4, dbf_txt);
3212 /* This one spins until it can get the sync_isc lock for irq# irq */
3214 if ((irq <= highest_subchannel) &&
3215 (ioinfo[irq] != INVALID_STORAGE_AREA) &&
3216 (!ioinfo[irq]->st)) {
3217 if (atomic_read (&sync_isc) != irq)
3218 atomic_compare_and_swap_spin (-1, irq, &sync_isc);
3222 if (sync_isc_cnt > 255) { /* fixme : magic number */
3223 panic ("Too many recursive calls to enable_sync_isc");
3227 * we only run the STSCH/MSCH path for the first enablement
3229 else if (sync_isc_cnt == 1) {
3231 ccode = stsch (irq, &(ioinfo[irq]->schib));
3234 ioinfo[irq]->schib.pmcw.isc = 5;
3238 &(ioinfo[irq]->schib));
3243 * enable special isc
3245 __ctl_store (cr6, 6, 6);
3246 /* enable sync isc 5 */
3248 /* disable standard isc 3 */
3250 /* disable console isc 7 */
3252 ioinfo[irq]->ui.flags.syncio = 1;
3253 __ctl_load (cr6, 6, 6);
3260 * process pending status
/* s_pend is raised around the call so the handler knows this is
 * recursive (synchronous) status processing */
3262 ioinfo[irq]->ui.flags.s_pend =
3264 s390_process_IRQ (irq);
3265 ioinfo[irq]->ui.flags.s_pend =
3268 rc = -EIO; /* might be overwritten... */
3277 case 3: /* not oper */
3287 rc = -ENODEV; /* device is not-operational */
/* on failure release the sync isc ownership again */
3292 if (rc) { /* can only happen if stsch/msch fails */
3294 atomic_set (&sync_isc, -1);
3295 } else if (sync_isc_cnt == 1) {
/* paranoia re-check: verify the isc really took effect */
3298 ccode = stsch(irq, &ioinfo[irq]->schib);
3299 if (!ccode && ioinfo[irq]->schib.pmcw.isc != 5) {
3300 ioinfo[irq]->ui.flags.syncio = 0;
3302 atomic_set (&sync_isc, -1);
3306 #ifdef CONFIG_SYNC_ISC_PARANOIA
3307 panic ("enable_sync_isc: called with invalid %x\n", irq);
/*
 * disable_cpu_sync_isc() - counterpart of enable_cpu_sync_isc():
 * return subchannel irq to the standard isc 3 and restore CR6
 * (sync isc off, standard and console isc on). Only the outermost
 * user actually performs the STSCH/MSCH sequence; pending status
 * encountered during msch() is processed synchronously and the
 * operation is retried.
 */
3318 disable_cpu_sync_isc (int irq)
3326 long cr6 __attribute__ ((aligned (8)));
3330 sprintf (dbf_txt, "disisc%x", irq);
3331 CIO_TRACE_EVENT (4, dbf_txt);
3333 if ((irq <= highest_subchannel) &&
3334 (ioinfo[irq] != INVALID_STORAGE_AREA) &&
3335 (!ioinfo[irq]->st)) {
3337 * We disable if we're the top user only, as we may
3338 * run recursively ...
3339 * We must not decrease the count immediately; during
3340 * msch() processing we may face another pending
3341 * status we have to process recursively (sync).
3344 #ifdef CONFIG_SYNC_ISC_PARANOIA
3345 if (atomic_read (&sync_isc) != irq)
3347 ("disable_sync_isc: called for %x while %x locked\n",
3348 irq, atomic_read (&sync_isc));
3351 if (sync_isc_cnt == 1) {
3352 ccode = stsch (irq, &(ioinfo[irq]->schib));
3354 ioinfo[irq]->schib.pmcw.isc = 3;
3360 msch (irq, &(ioinfo[irq]->schib));
3365 * disable special interrupt subclass in CPU
3367 __ctl_store (cr6, 6, 6);
3368 /* disable sync isc 5 */
3370 /* enable standard isc 3 */
3372 /* enable console isc 7 */
3374 __ctl_load (cr6, 6, 6);
/* msch() cc handling: pending status is processed in-line ... */
3379 case 1: /* status pending */
3380 ioinfo[irq]->ui.flags.s_pend =
3382 s390_process_IRQ (irq);
3383 ioinfo[irq]->ui.flags.s_pend =
3391 udelay (100); /* give it time */
3394 default: /* not oper */
/* persistent failure: attempt to terminate the current I/O */
3403 /* try stopping it ... */
3404 if ((ccode) && !clear_pend) {
3405 clear_IO (irq, 0x00004711, 0);
3412 } while (retry1 && ccode);
3414 ioinfo[irq]->ui.flags.syncio = 0;
/* release sync isc ownership */
3417 atomic_set (&sync_isc, -1);
3424 #ifdef CONFIG_SYNC_ISC_PARANOIA
3425 if (atomic_read (&sync_isc) != -1)
3427 ("disable_sync_isc: called with invalid %x while %x locked\n",
3428 irq, atomic_read (&sync_isc));
/*
 * diag210() - issue VM DIAGNOSE code X'210' to obtain virtual
 * device information for the diag210_t block at 'addr'.
 * NOTE(review): caller passes a real (physical) address - see the
 * virt_to_phys() at the call site in VM_virtual_device_info().
 */
3438 int diag210 (diag210_t *addr)
3442 __asm__ __volatile__(
3443 #ifdef CONFIG_ARCH_S390X
3445 " diag %1,0,0x210\n"
3448 " diag %1,0,0x210\n"
3460 * devno - device number
3461 * ps - pointer to sense ID data area
/*
 * VM_virtual_device_info() - when running under VM, use DIAGNOSE
 * X'210' to determine the device type of a virtual device and fill
 * in the caller's senseid_t accordingly (used as fallback when the
 * SenseID CCW yielded nothing). The diag210_t buffer must live in
 * 31-bit addressable storage, hence GFP_DMA / low bootmem below.
 */
3465 VM_virtual_device_info (__u16 devno, senseid_t * ps)
3467 diag210_t *p_diag_data;
3472 CIO_TRACE_EVENT (4, "VMvdinf");
/* before init_IRQ completes the slab allocator is unusable,
 * so fall back to bootmem */
3470 if (init_IRQ_complete) {
3475 p_diag_data = kmalloc (sizeof (diag210_t), GFP_DMA | GFP_ATOMIC);
3477 p_diag_data = alloc_bootmem_low (sizeof (diag210_t));
3483 p_diag_data->vrdcdvno = devno;
3484 p_diag_data->vrdclen = sizeof (diag210_t);
3485 ccode = diag210 ((diag210_t *) virt_to_phys (p_diag_data));
3486 ps->reserved = 0xff;
/* map the VM virtual device class/type to a real CU type */
3488 switch (p_diag_data->vrdcvcla) {
3491 switch (p_diag_data->vrdcvtyp) {
3494 ps->cu_type = 0x3215;
3510 switch (p_diag_data->vrdcvtyp) {
3513 ps->cu_type = 0x5080;
3519 ps->cu_type = 0x2250;
3525 ps->cu_type = 0x3277;
3531 ps->cu_type = 0x3278;
3547 switch (p_diag_data->vrdcvtyp) {
3550 ps->cu_type = 0x3505;
3556 ps->cu_type = 0x2540;
3562 ps->cu_type = 0x2501;
3578 switch (p_diag_data->vrdcvtyp) {
3581 ps->cu_type = 0x3525;
3587 ps->cu_type = 0x2540;
3595 ps->cu_type = 0x3820;
3603 ps->cu_type = 0x3800;
3609 ps->cu_type = 0x4248;
3615 ps->cu_type = 0x4245;
3621 ps->cu_type = 0x3262;
3627 ps->cu_type = 0x3203;
3633 ps->cu_type = 0x3211;
3639 ps->cu_type = 0x1403;
3655 switch (p_diag_data->vrdcvtyp) {
3658 ps->cu_type = 0x3422;
3664 ps->cu_type = 0x3490;
3670 ps->cu_type = 0x3420;
3676 ps->cu_type = 0x3430;
3682 ps->cu_type = 0x3480;
3688 ps->cu_type = 0x3424;
3694 ps->cu_type = 0x9348;
3708 case 02: /* special device class ... */
3710 switch (p_diag_data->vrdcvtyp) {
3711 case 0x20: /* OSA */
3713 ps->cu_type = 0x3088;
3714 ps->cu_model = 0x60;
/* release the diag buffer via the same allocator that provided it */
3735 if (init_IRQ_complete) {
3736 kfree (p_diag_data);
3738 free_bootmem ((unsigned long) p_diag_data, sizeof (diag210_t));
/* diagnostics for unexpected diag 210 results */
3743 printk (KERN_ERR "DIAG X'210' for "
3744 "device %04X returned "
3745 "(cc = %d): vdev class : %02X, "
3746 "vdev type : %04X \n"
3747 " ... rdev class : %02X, rdev type : %04X, "
3748 "rdev model: %02X\n",
3751 p_diag_data->vrdcvcla,
3752 p_diag_data->vrdcvtyp,
3753 p_diag_data->vrdcrccl,
3754 p_diag_data->vrdccrty, p_diag_data->vrdccrmd);
3757 "device %04X returned "
3758 "(cc = %d): vdev class : %02X, "
3759 "vdev type : %04X \n ... "
3760 "rdev class : %02X, rdev type : %04X, "
3761 "rdev model: %02X\n",
3764 p_diag_data->vrdcvcla,
3765 p_diag_data->vrdcvtyp,
3766 p_diag_data->vrdcrccl,
3767 p_diag_data->vrdccrty,
3768 p_diag_data->vrdccrmd);
3774 * This routine returns the characteristics for the device
3775 * specified. Some old devices might not provide the necessary
3776 * command code information during SenseID processing. In this
3777 * case the function returns -EINVAL. Otherwise the function
3778 * allocates a device specific data buffer and provides the
3779 * device characteristics together with the buffer size. It's
3780 * the caller's responsibility to release the kernel memory if
3781 * no longer needed. In case of persistent I/O problems -EBUSY
3784 * The function may be called enabled or disabled. However, the
3785 * caller must have locked the irq it is requesting data for.
3787 * Note : It would have been nice to collect this information
3788 * during init_IRQ() processing but this is not possible
3790 * a) without statically pre-allocating fixed size buffers
3791 * as virtual memory management isn't available yet.
3793 * b) without unnecessarily increasing system startup by
3794 * evaluating devices eventually not used at all.
3797 read_dev_chars (int irq, void **buffer, int length)
3799 unsigned long flags;
/* parameter sanity / device usability checks */
3811 if (!buffer || !length) {
3818 if (ioinfo[irq]->ui.flags.oper == 0) {
3823 if (ioinfo[irq]->ui.flags.unfriendly) {
3824 /* don't even try it */
3828 sprintf (dbf_txt, "rddevch%x", irq);
3829 CIO_TRACE_EVENT (4, dbf_txt);
3832 * Before playing around with irq locks we should assure
3833 * running disabled on (just) our CPU. Sync. I/O requests
3834 * also require to run disabled.
3836 * Note : as no global lock is required, we must not use
3837 * cli(), but __cli() instead.
3839 __save_flags (flags);
3842 rdc_ccw = &ioinfo[irq]->senseccw;
/* no owner yet: temporarily register a dummy handler */
3844 if (!ioinfo[irq]->ui.flags.ready) {
3845 ret = request_irq (irq,
3846 init_IRQ_handler, SA_PROBE, "RDC", &devstat);
3857 rdc_buf = kmalloc (length, GFP_KERNEL);
/* build the Read Device Characteristics channel program */
3867 rdc_ccw->cmd_code = CCW_CMD_RDC;
3868 rdc_ccw->count = length;
3869 rdc_ccw->flags = CCW_FLAG_SLI;
3871 set_normalized_cda (rdc_ccw, rdc_buf);
3874 memset (ioinfo[irq]->irq_desc.dev_id,
3875 '\0', sizeof (devstat_t));
/* intparm 0x00524443 is ASCII "RDC" - identifies this request */
3877 ret = s390_start_IO (irq, rdc_ccw, 0x00524443, /* RDC */
3879 DOIO_WAIT_FOR_INTERRUPT
3881 DOIO_DONT_CALL_INTHDLR);
3884 ioinfo[irq]->irq_desc.dev_id->flag;
3886 clear_normalized_cda (rdc_ccw);
3888 udelay (100); /* wait for recovery */
3894 || (devflag & DEVSTAT_STATUS_PENDING)));
3899 ret = (ret == -ENOMEM) ? -ENOMEM : -EBUSY;
3903 __restore_flags (flags);
3906 * on success we update the user input parms
/* release the temporary handler if we installed one */
3914 free_irq (irq, &devstat);
3919 __restore_flags (flags);
3926 * Read Configuration data
/*
 * read_conf_data() - issue the device's Read Configuration Data
 * command, if the extended SenseID data advertises an RCD CIW.
 * On success *buffer/*length describe a newly allocated data area
 * owned by the caller. 'lpm' optionally restricts the path mask.
 * Returns -EOPNOTSUPP when no extended SenseID data is available.
 */
3929 read_conf_data (int irq, void **buffer, int *length, __u8 lpm)
3931 unsigned long flags;
3934 int found = 0; /* RCD CIW found */
3935 int ret = 0; /* return code */
3941 if (!buffer || !length) {
3943 } else if (ioinfo[irq]->ui.flags.oper == 0) {
3945 } else if (ioinfo[irq]->ui.flags.esid == 0) {
3948 return (-EOPNOTSUPP);
3952 if (ioinfo[irq]->ui.flags.unfriendly) {
3953 /* don't even try it */
3957 sprintf (dbf_txt, "rdconf%x", irq);
3958 CIO_TRACE_EVENT (4, dbf_txt);
3961 * scan for RCD command in extended SenseID data
3964 for (ciw_cnt = 0; (found == 0) && (ciw_cnt < MAX_CIWS); ciw_cnt++) {
3965 if (ioinfo[irq]->senseid.ciw[ciw_cnt].ct == CIW_TYPE_RCD) {
3967 * paranoia check ...
3969 if (ioinfo[irq]->senseid.ciw[ciw_cnt].cmd != 0
3970 && ioinfo[irq]->senseid.ciw[ciw_cnt].count != 0) {
3981 devstat_t devstat; /* inline device status area */
3982 devstat_t *pdevstat;
3985 ccw1_t *rcd_ccw = &ioinfo[irq]->senseccw;
3986 char *rcd_buf = NULL;
3987 int emulated = 0; /* no I/O handler installed */
3988 int retry = 5; /* retry count */
3990 __save_flags (flags);
/* install a dummy handler when nobody owns the device yet */
3993 if (!ioinfo[irq]->ui.flags.ready) {
3994 pdevstat = &devstat;
3995 ret = request_irq (irq,
3997 SA_PROBE, "RCD", pdevstat);
4004 pdevstat = ioinfo[irq]->irq_desc.dev_id;
/* RCD buffer sized by the CIW; bootmem before init_IRQ completes */
4009 if (init_IRQ_complete) {
4011 kmalloc (ioinfo[irq]->senseid.ciw[ciw_cnt].
4012 count, GFP_DMA | GFP_ATOMIC);
4015 alloc_bootmem_low (ioinfo[irq]->senseid.
4016 ciw[ciw_cnt].count);
4020 if (rcd_buf == NULL) {
/* build the single-CCW RCD channel program */
4027 ioinfo[irq]->senseid.ciw[ciw_cnt].
4032 ioinfo[irq]->senseid.ciw[ciw_cnt].
4035 (__u32) virt_to_phys (rcd_buf);
4037 ioinfo[irq]->senseid.ciw[ciw_cnt].
4039 rcd_ccw->flags = CCW_FLAG_SLI;
4041 memset (pdevstat, '\0',
4042 sizeof (devstat_t));
4046 DOIO_WAIT_FOR_INTERRUPT |
4048 DOIO_DONT_CALL_INTHDLR;
4051 DOIO_WAIT_FOR_INTERRUPT |
4052 DOIO_DONT_CALL_INTHDLR;
/* intparm 0x00524344 is ASCII "RCD" */
4056 ret = s390_start_IO (irq, rcd_ccw, 0x00524344, /* == RCD */
4065 (DEVSTAT_STATUS_PENDING |
4067 DEVSTAT_FLAG_SENSE_AVAIL)))
4069 retry = 0; /* we got it ... */
4071 retry--; /* try again ... */
4077 default: /* -EBUSY, -ENODEV, ??? */
4086 __restore_flags (flags);
4089 * on success we update the user input parms
4092 *length = ioinfo[irq]->senseid.ciw[ciw_cnt].count;
/* on failure, release the buffer via the matching allocator */
4095 if (rcd_buf != NULL) {
4096 if (init_IRQ_complete) {
4099 free_bootmem ((unsigned long) rcd_buf,
4100 ioinfo[irq]->senseid.
4101 ciw[ciw_cnt].count);
4113 free_irq (irq, pdevstat);
/* get_dev_info() - compatibility wrapper, delegates to
 * get_dev_info_by_irq(). */
4126 get_dev_info (int irq, s390_dev_info_t * pdi)
4128 return (get_dev_info_by_irq (irq, pdi));
/*
 * get_next_available_irq() - walk the ioinfo list from 'pi' and
 * return the irq of the first operational, non-blacklisted ("not
 * unfriendly") entry; -ENODEV if none is found.
 */
4131 static int __inline__
4132 get_next_available_irq (ioinfo_t * pi)
4134 int ret_val = -ENODEV;
4136 while (pi != NULL) {
4138 && (pi->ui.flags.oper)
4139 && (!pi->ui.flags.unfriendly)) {
/*
 * get_irq_first() - return the first usable irq: the list head if it
 * is an operational, friendly I/O subchannel, otherwise the first
 * usable successor via get_next_available_irq().
 */
4151 get_irq_first (void)
4156 if ((ioinfo_head->ui.flags.oper) &&
4157 (!ioinfo_head->ui.flags.unfriendly) &&
4158 (!ioinfo_head->st)) {
4159 ret_irq = ioinfo_head->irq;
4160 } else if (ioinfo_head->next) {
4161 ret_irq = get_next_available_irq (ioinfo_head->next);
/*
 * get_irq_next() - return the next usable irq after 'irq' in the
 * ioinfo list (operational, friendly, I/O subchannel type), or fall
 * back to scanning with get_next_available_irq().
 */
4176 get_irq_next (int irq)
4180 if (ioinfo[irq] != INVALID_STORAGE_AREA) {
4181 if (ioinfo[irq]->next) {
4182 if ((ioinfo[irq]->next->ui.flags.oper) &&
4183 (!ioinfo[irq]->next->ui.flags.unfriendly) &&
4184 (!ioinfo[irq]->next->st)) {
4185 ret_irq = ioinfo[irq]->next->irq;
4188 get_next_available_irq (ioinfo[irq]->next);
/*
 * get_dev_info_by_irq() - fill the caller's s390_dev_info_t for the
 * given subchannel: device number, status flags and the SenseID
 * data. Devices without usable SenseID data report cu_type 0xFFFF
 * and a status of UNFRIENDLY / UNKNOWN / NOT_OPER respectively.
 */
4204 get_dev_info_by_irq (int irq, s390_dev_info_t * pdi)
4212 pdi->devno = ioinfo[irq]->schib.pmcw.dev;
4215 if (ioinfo[irq]->ui.flags.oper && !ioinfo[irq]->ui.flags.unknown) {
4217 memcpy (&(pdi->sid_data),
4218 &ioinfo[irq]->senseid, sizeof (senseid_t));
4220 } else if (ioinfo[irq]->ui.flags.unfriendly) {
4221 pdi->status = DEVSTAT_UNFRIENDLY_DEV;
4222 memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4223 pdi->sid_data.cu_type = 0xFFFF;
4225 } else if (ioinfo[irq]->ui.flags.unknown) {
4226 pdi->status = DEVSTAT_UNKNOWN_DEV;
4227 memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4228 pdi->sid_data.cu_type = 0xFFFF;
4231 pdi->status = DEVSTAT_NOT_OPER;
4232 memset (&(pdi->sid_data), '\0', sizeof (senseid_t));
4233 pdi->sid_data.cu_type = 0xFFFF;
/* flag devices already claimed by a driver */
4237 if (ioinfo[irq]->ui.flags.ready)
4238 pdi->status |= DEVSTAT_DEVICE_OWNED;
/*
 * get_dev_info_by_devno() - like get_dev_info_by_irq(), but keyed by
 * device number: linearly scan all known subchannels for a matching
 * devno and fill in the caller's s390_dev_info_t.
 */
4244 get_dev_info_by_devno (__u16 devno, s390_dev_info_t * pdi)
4249 if (devno > 0x0000ffff)
4254 for (i = 0; i <= highest_subchannel; i++) {
4256 if ((ioinfo[i] != INVALID_STORAGE_AREA) &&
4258 (ioinfo[i]->schib.pmcw.dev == devno)) {
4263 if (ioinfo[i]->ui.flags.oper
4264 && !ioinfo[i]->ui.flags.unknown) {
4266 memcpy (&(pdi->sid_data),
4267 &ioinfo[i]->senseid,
4268 sizeof (senseid_t));
4270 } else if (ioinfo[i]->ui.flags.unfriendly) {
4271 pdi->status = DEVSTAT_UNFRIENDLY_DEV;
4272 memset (&(pdi->sid_data), '\0',
4273 sizeof (senseid_t));
4274 pdi->sid_data.cu_type = 0xFFFF;
4277 } else if (ioinfo[i]->ui.flags.unknown) {
4278 pdi->status = DEVSTAT_UNKNOWN_DEV;
4280 memset (&(pdi->sid_data),
4281 '\0', sizeof (senseid_t));
4283 pdi->sid_data.cu_type = 0xFFFF;
4285 pdi->status = DEVSTAT_NOT_OPER;
4287 memset (&(pdi->sid_data),
4288 '\0', sizeof (senseid_t));
4290 pdi->sid_data.cu_type = 0xFFFF;
4294 if (ioinfo[i]->ui.flags.ready)
4295 pdi->status |= DEVSTAT_DEVICE_OWNED;
/* unfriendly (boxed) devices keep being scanned past */
4297 if (!ioinfo[i]->ui.flags.unfriendly)
/*
 * get_irq_by_devno() - linear scan of all subchannels for the one
 * whose (valid) device number matches 'devno'.
 */
4311 get_irq_by_devno (__u16 devno)
4316 if (devno <= 0x0000ffff) {
4317 for (i = 0; i <= highest_subchannel; i++) {
4318 if ((ioinfo[i] != INVALID_STORAGE_AREA)
4320 && (ioinfo[i]->schib.pmcw.dev == devno)
4321 && (ioinfo[i]->schib.pmcw.dnv == 1)) {
/*
 * get_devno_by_irq() - reverse lookup: return the device number
 * recorded in the subchannel's PMCW, provided the devno-valid (dnv)
 * bit is set.
 */
4332 get_devno_by_irq (int irq)
4335 if ((irq > highest_subchannel)
4337 || (ioinfo[irq] == INVALID_STORAGE_AREA)) {
/* non-I/O subchannel types carry no device number */
4342 if (ioinfo[irq]->st)
4346 * we don't need to check for the device be operational
4347 * as the initial STSCH will always present the device
4348 * number defined by the IOCDS regardless of the device
4349 * existing or not. However, there could be subchannels
4350 * defined whose device number isn't valid ...
4352 if (ioinfo[irq]->schib.pmcw.dnv)
4353 return (ioinfo[irq]->schib.pmcw.dev);
4359 * s390_device_recognition_irq
4361 * Used for individual device recognition. Issues the device
4362 * independent SenseID command to obtain info about the device type.
4366 s390_device_recognition_irq (int irq)
4371 sprintf (dbf_txt, "devrec%x", irq);
4372 CIO_TRACE_EVENT (4, dbf_txt);
4375 * We issue the SenseID command on I/O subchannels we think are
/* only valid, operational I/O subchannels are sensed */
4378 if ((ioinfo[irq] != INVALID_STORAGE_AREA)
4379 && (!ioinfo[irq]->st)
4380 && (ioinfo[irq]->schib.pmcw.st == 0)
4381 && (ioinfo[irq]->ui.flags.oper == 1)) {
/* take temporary ownership; request path grouping if supported */
4385 if (ioinfo[irq]->ui.flags.pgid_supp)
4386 irq_ret = request_irq (irq,
4388 SA_PROBE | SA_DOPATHGROUP,
4391 irq_ret = request_irq (irq,
4393 SA_PROBE, "INIT", &devstat);
4396 ret = enable_cpu_sync_isc (irq);
4399 ioinfo[irq]->ui.flags.unknown = 0;
4401 memset (&ioinfo[irq]->senseid, '\0',
4402 sizeof (senseid_t));
/* optionally run path verification (SPID) before SenseID */
4404 if (cio_sid_with_pgid) {
4406 ret = s390_DevicePathVerification(irq,0);
4408 if (ret == -EOPNOTSUPP)
4410 * Doesn't prevent us from proceeding
4416 * we'll fallthrough here if we don't want
4417 * to do SPID before SID
4420 ret = s390_SenseID (irq, &ioinfo[irq]->senseid, 0xff);
4421 if (ret == -ETIMEDOUT) {
4422 /* SenseID timed out.
4423 * We consider this device to be
4426 ioinfo[irq]->ui.flags.unfriendly = 1;
4431 * We initially check the configuration data for
4432 * those devices with more than a single path
/* pim != 0x80 means more than one installed path */
4434 if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
4439 read_conf_data (irq,
4443 if (!ret) // on success only ...
4446 #ifdef CONFIG_DEBUG_IO
4448 "RCD for device(%04X)/"
4449 "subchannel(%04X) returns :\n",
4453 s390_displayhex (buffer, prcd,
4456 CIO_TRACE_EVENT(2, "rcddata:");
4457 CIO_HEX_EVENT(2, prcd, lrcd);
4459 if (init_IRQ_complete) {
4462 free_bootmem ((unsigned
/* undo sync isc enablement and temporary ownership */
4472 disable_cpu_sync_isc (irq);
4476 free_irq (irq, &devstat);
4483 * s390_device_recognition_all
4485 * Used for system wide device recognition.
/* Iterate s390_device_recognition_irq() over every known subchannel. */
4489 s390_device_recognition_all (void)
4491 int irq = 0; /* let's start with subchannel 0 ... */
4494 s390_device_recognition_irq (irq);
4498 } while (irq <= highest_subchannel);
4503 * Function: s390_redo_validation
4504 * Look for no longer blacklisted devices
4505 * FIXME: there must be a better way to do this...
/*
 * Re-validate every subchannel that has no ioinfo entry yet (i.e.
 * was previously ignored/blacklisted); newly found operational
 * devices are announced to a registered device driver via its
 * oper_func and get a /proc/deviceinfo entry if procfs is active.
 */
4509 s390_redo_validation (void)
4514 CIO_TRACE_EVENT (0, "redoval");
4517 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
4518 ret = s390_validate_subchannel (irq, 0);
4520 s390_device_recognition_irq (irq);
4521 if (ioinfo[irq]->ui.flags.oper) {
4525 s390_search_devreg (ioinfo[irq]);
4526 if (pdevreg != NULL) {
4527 if (pdevreg->oper_func != NULL)
4528 pdevreg->oper_func (irq,
4533 #ifdef CONFIG_PROC_FS
/* procfs entries are limited to MAX_CIO_PROCFS_ENTRIES */
4534 if (cio_proc_devinfo)
4535 if (irq < MAX_CIO_PROCFS_ENTRIES) {
4536 cio_procfs_device_create (ioinfo
4544 } while (irq <= highest_subchannel);
4549 * s390_trigger_resense
4551 * try to re-sense the device on subchannel irq
4552 * only to be called without interrupt handler
4555 s390_trigger_resense(int irq)
/* refuse while a driver owns the device */
4560 if (ioinfo[irq]->ui.flags.ready) {
4561 printk (KERN_WARNING "s390_trigger_resense(%04X): "
4562 "Device is in use!\n", irq);
4567 * This function is called by dasd if it just executed a "steal lock".
4568 * Therefore, re-initialize the 'unfriendly' flag to 0.
4569 * We run into timeouts if the device is still boxed...
4571 ioinfo[irq]->ui.flags.unfriendly = 0;
4573 s390_device_recognition_irq(irq);
4579 * s390_process_subchannels
4581 * Determines all subchannels available to the system.
/* Validate subchannels 0.. until stsch reports 'no more' (-ENXIO)
 * or the architectural maximum is reached; record the highest one. */
4585 s390_process_subchannels (void)
4588 int irq = 0; /* Evaluate all subchannels starting with 0 ... */
4591 ret = s390_validate_subchannel (irq, 0);
4596 } while ((ret != -ENXIO) && (irq < __MAX_SUBCHANNELS));
/* loop overshot by one */
4598 highest_subchannel = (--irq);
4600 printk (KERN_INFO "Highest subchannel number detected (hex) : %04X\n",
4601 highest_subchannel);
4603 "Highest subchannel number detected "
4604 "(hex) : %04X\n", highest_subchannel);
4608 * s390_validate_subchannel()
4610 * Process the subchannel for the requested irq. Returns 1 for valid
4611 * subchannels, otherwise 0.
/*
 * NOTE(review): the header above predates the visible code - the
 * function appears to return negative errno values (e.g. -ENXIO is
 * checked by s390_process_subchannels); confirm against full source.
 * Performs: stsch, blacklist check, ioinfo allocation + linked-list
 * insertion, operational-path-mask setup, and an msch retry loop to
 * set isc 3 / concurrent sense / multipath mode.
 */
4614 s390_validate_subchannel (int irq, int enable)
4617 int retry; /* retry count for status pending conditions */
4618 int ccode; /* condition code for stsch() only */
4619 int ccode2; /* condition code for other I/O routines */
4625 #endif /* CONFIG_CHSC */
4629 sprintf (dbf_txt, "valsch%x", irq);
4630 CIO_TRACE_EVENT (4, dbf_txt);
4633 * The first subchannel that is not-operational (ccode==3)
4634 * indicates that there aren't any more devices available.
/* use the permanent schib once ioinfo exists, else the init buffer */
4636 if ((init_IRQ_complete)
4637 && (ioinfo[irq] != INVALID_STORAGE_AREA)) {
4638 p_schib = &ioinfo[irq]->schib;
4640 p_schib = p_init_schib;
4645 * If we knew the device before we assume the worst case ...
4647 if (ioinfo[irq] != INVALID_STORAGE_AREA) {
4648 ioinfo[irq]->ui.flags.oper = 0;
4649 ioinfo[irq]->ui.flags.dval = 0;
4653 ccode = stsch (irq, p_schib);
4659 * ... just being curious we check for non I/O subchannels
4661 if (p_schib->pmcw.st) {
4663 printk (KERN_INFO "Subchannel %04X reports "
4664 "non-I/O subchannel type %04X\n",
4665 irq, p_schib->pmcw.st);
4668 "Subchannel %04X reports "
4669 "non-I/O subchannel type %04X\n",
4670 irq, p_schib->pmcw.st);
4672 if (ioinfo[irq] != INVALID_STORAGE_AREA)
4673 ioinfo[irq]->ui.flags.oper = 0;
4677 if ((!p_schib->pmcw.dnv) && (!p_schib->pmcw.st)) {
4680 if (!p_schib->pmcw.st) {
4681 if (is_blacklisted (p_schib->pmcw.dev)) {
4683 * This device must not be known to Linux. So we simply say that
4684 * there is no device and return ENODEV.
4686 #ifdef CONFIG_DEBUG_IO
4688 "Blacklisted device detected at devno %04X\n",
4692 "Blacklisted device detected at devno %04X\n",
/* first sighting: allocate an ioinfo entry (bootmem before
 * init_IRQ completes, slab afterwards) */
4698 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
4699 if (!init_IRQ_complete) {
4700 ioinfo[irq] = (ioinfo_t *)
4701 alloc_bootmem_low (sizeof (ioinfo_t));
4703 ioinfo[irq] = (ioinfo_t *)
4704 kmalloc (sizeof (ioinfo_t), GFP_DMA | GFP_ATOMIC);
4711 memset (ioinfo[irq], '\0', sizeof (ioinfo_t));
4712 memcpy (&ioinfo[irq]->schib, p_init_schib, sizeof (schib_t));
4715 * We have to insert the new ioinfo element
4716 * into the linked list, either at its head,
4717 * its tail or insert it.
4719 if (ioinfo_head == NULL) { /* first element */
4720 ioinfo_head = ioinfo[irq];
4721 ioinfo_tail = ioinfo[irq];
4722 } else if (irq < ioinfo_head->irq) { /* new head */
4723 ioinfo[irq]->next = ioinfo_head;
4724 ioinfo_head->prev = ioinfo[irq];
4725 ioinfo_head = ioinfo[irq];
4726 } else if (irq > ioinfo_tail->irq) { /* new tail */
4727 ioinfo_tail->next = ioinfo[irq];
4728 ioinfo[irq]->prev = ioinfo_tail;
4729 ioinfo_tail = ioinfo[irq];
4730 } else { /* insert element */
4732 ioinfo_t *pi = ioinfo_head;
4734 for (pi = ioinfo_head; pi != NULL; pi = pi->next) {
4736 if (irq < pi->next->irq) {
4737 ioinfo[irq]->next = pi->next;
4738 ioinfo[irq]->prev = pi;
4739 pi->next->prev = ioinfo[irq];
4740 pi->next = ioinfo[irq];
4748 /* initialize some values ... */
4749 ioinfo[irq]->irq = irq;
4750 ioinfo[irq]->st = ioinfo[irq]->schib.pmcw.st;
4751 if (ioinfo[irq]->st)
/* operational path mask = installed & available & operational */
4754 ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
4755 & ioinfo[irq]->schib.pmcw.pam & ioinfo[irq]->schib.pmcw.pom;
4758 if (ioinfo[irq]->opm) {
4759 for (chp=0;chp<=7;chp++) {
4761 if (ioinfo[irq]->opm & mask) {
4763 (ioinfo[irq]->schib.pmcw.chpid[chp],
4765 /* disable using this path */
4766 ioinfo[irq]->opm &= ~mask;
4769 /* This chpid is not available to us */
4770 clear_bit(ioinfo[irq]->schib.pmcw.chpid[chp],
4775 #endif /* CONFIG_CHSC */
4779 "Detected device %04X "
4780 "on subchannel %04X"
4781 " - PIM = %02X, PAM = %02X, POM = %02X\n",
4782 ioinfo[irq]->schib.pmcw.dev,
4784 ioinfo[irq]->schib.pmcw.pim,
4785 ioinfo[irq]->schib.pmcw.pam,
4786 ioinfo[irq]->schib.pmcw.pom);
4790 "Detected device %04X "
4791 "on subchannel %04X"
4793 "PAM = %02X, POM = %02X\n",
4794 ioinfo[irq]->schib.pmcw.dev,
4796 ioinfo[irq]->schib.pmcw.pim,
4797 ioinfo[irq]->schib.pmcw.pam,
4798 ioinfo[irq]->schib.pmcw.pom);
4801 * initialize ioinfo structure
4803 if (!ioinfo[irq]->ui.flags.ready) {
4804 ioinfo[irq]->nopfunc = NULL;
4805 ioinfo[irq]->ui.flags.busy = 0;
4806 ioinfo[irq]->ui.flags.dval = 1;
4807 ioinfo[irq]->devstat.intparm = 0;
4810 ioinfo[irq]->devstat.devno = ioinfo[irq]->schib.pmcw.dev;
4811 ioinfo[irq]->devno = ioinfo[irq]->schib.pmcw.dev;
4814 * We should have at least one CHPID ...
4816 if (ioinfo[irq]->opm) {
4818 * We now have to initially ...
4819 * ... set "interruption subclass"
4820 * ... enable "concurrent sense"
4821 * ... enable "multipath mode" if more than one
4822 * CHPID is available. This is done regardless
4823 * whether multiple paths are available for us.
4825 * Note : we don't enable the device here, this is temporarily
4826 * done during device sensing below.
4828 ioinfo[irq]->schib.pmcw.isc = 3; /* could be smth. else */
4829 ioinfo[irq]->schib.pmcw.csense = 1; /* concurrent sense */
4830 ioinfo[irq]->schib.pmcw.ena = enable;
4831 ioinfo[irq]->schib.pmcw.intparm = ioinfo[irq]->schib.pmcw.dev;
/* multipath mode only when more than one path bit is set */
4833 if ((ioinfo[irq]->opm != 0x80)
4834 && (ioinfo[irq]->opm != 0x40)
4835 && (ioinfo[irq]->opm != 0x20)
4836 && (ioinfo[irq]->opm != 0x10)
4837 && (ioinfo[irq]->opm != 0x08)
4838 && (ioinfo[irq]->opm != 0x04)
4839 && (ioinfo[irq]->opm != 0x02)
4840 && (ioinfo[irq]->opm != 0x01)) {
4841 ioinfo[irq]->schib.pmcw.mp = 1; /* multipath mode */
/* msch retry loop - handle pending status / busy / not-oper */
4848 ccode2 = msch_err (irq, &ioinfo[irq]->schib);
4853 * successful completion
4855 * concurrent sense facility available
4857 ioinfo[irq]->ui.flags.oper = 1;
4858 ioinfo[irq]->ui.flags.consns = 1;
4866 * How can we have a pending status
4867 * as the device is disabled for
4869 * Anyway, process it ...
4871 ioinfo[irq]->ui.flags.s_pend = 1;
4872 s390_process_IRQ (irq);
4873 ioinfo[irq]->ui.flags.s_pend = 0;
4882 * we mark it not-oper as we can't
4883 * properly operate it !
4885 ioinfo[irq]->ui.flags.oper = 0;
4886 udelay (100); /* allow for recovery */
4891 case 3: /* not operational */
4892 ioinfo[irq]->ui.flags.oper = 0;
4898 #define PGMCHK_OPERAND_EXC 0x15
4900 if ((ccode2 & PGMCHK_OPERAND_EXC)
4901 == PGMCHK_OPERAND_EXC) {
4903 * re-issue the modify subchannel without trying to
4904 * enable the concurrent sense facility
4906 ioinfo[irq]->schib.pmcw.csense = 0;
4909 msch_err (irq, &ioinfo[irq]->schib);
4913 " ... msch() (2) failed"
4920 ioinfo[irq]->ui.flags.oper = 0;
4923 ioinfo[irq]->ui.flags.oper = 1;
4932 " ... msch() (1) failed with "
4933 "CC = %X\n", ccode2);
4935 "msch() (1) failed with "
4936 "CC = %X\n", ccode2);
4937 ioinfo[irq]->ui.flags.oper = 0;
4947 } while (ccode2 && retry);
4949 if ((ccode2 != 0) && (ccode2 != 3)
4952 " ... msch() retry count for "
4953 "subchannel %04X exceeded, CC = %d\n",
4956 " ... msch() retry count for "
4957 "subchannel %04X exceeded, CC = %d\n",
4962 /* no path available ... */
4963 ioinfo[irq]->ui.flags.oper = 0;
4974 * Try to obtain the 'control unit'/'device type' information
4975 * associated with the subchannel.
4977 * The function is primarily meant to be called without irq
4978 * action handler in place. However, it also allows for
4979 * use with an action handler in place. If there is already
4980 * an action handler registered, assure it can handle the
4981 * s390_SenseID() related device interrupts - the interruption
4982 * parameter used is 0x00E2C9C4 ( SID ).
/*
 * s390_SenseID() - issue the SENSE ID channel command on each
 * operational path (masked by 'lpm'), cross-check the result by
 * sensing into a second buffer, and fall back to VM DIAG X'210'
 * when running as a guest and no data was obtained.
 */
4985 s390_SenseID (int irq, senseid_t * sid, __u8 lpm)
4987 ccw1_t *sense_ccw; /* ccw area for SenseID command */
4988 senseid_t isid; /* internal sid */
4989 devstat_t devstat; /* required by request_irq() */
4990 __u8 pathmask; /* calculate path mask */
4991 __u8 domask; /* path mask to use */
4992 int inlreq; /* inline request_irq() */
4993 int irq_ret; /* return code */
4994 devstat_t *pdevstat; /* ptr to devstat in use */
4995 int retry; /* retry count */
4996 int io_retry; /* retry indicator */
4998 senseid_t *psid = sid; /* start with the external buffer */
4999 int sbuffer = 0; /* switch SID data buffer */
5003 int failure = 0; /* nothing went wrong yet */
5007 if (ioinfo[irq]->ui.flags.oper == 0) {
5012 if (ioinfo[irq]->ui.flags.unfriendly) {
5013 /* don't even try it */
5017 sprintf (dbf_txt, "snsID%x", irq);
5018 CIO_TRACE_EVENT (4, dbf_txt);
5020 inlreq = 0; /* to make the compiler quiet... */
5022 if (!ioinfo[irq]->ui.flags.ready) {
5024 pdevstat = &devstat;
5027 * Perform SENSE ID command processing. We have to request device
5028 * ownership and provide a dummy I/O handler. We issue sync. I/O
5029 * requests and evaluate the devstat area on return therefore
5030 * we don't need a real I/O handler in place.
5033 request_irq (irq, init_IRQ_handler, SA_PROBE, "SID",
5041 pdevstat = ioinfo[irq]->irq_desc.dev_id;
5049 s390irq_spin_lock (irq);
/* two CCWs needed: optional suspend-multipath-reconnection + SID */
5051 if (init_IRQ_complete) {
5052 sense_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
5054 sense_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t));
5058 s390irq_spin_unlock (irq);
5060 free_irq (irq, &devstat);
5064 /* more than one path installed ? */
5065 if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
5066 sense_ccw[0].cmd_code = CCW_CMD_SUSPEND_RECONN;
5067 sense_ccw[0].cda = 0;
5068 sense_ccw[0].count = 0;
5069 sense_ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
5071 sense_ccw[1].cmd_code = CCW_CMD_SENSE_ID;
5072 sense_ccw[1].cda = (__u32) virt_to_phys (sid);
5073 sense_ccw[1].count = sizeof (senseid_t);
5074 sense_ccw[1].flags = CCW_FLAG_SLI;
5076 sense_ccw[0].cmd_code = CCW_CMD_SENSE_ID;
5077 sense_ccw[0].cda = (__u32) virt_to_phys (sid);
5078 sense_ccw[0].count = sizeof (senseid_t);
5079 sense_ccw[0].flags = CCW_FLAG_SLI;
/* walk all 8 possible paths, restricted to the operational mask */
5083 for (i = 0; (i < 8); i++) {
5084 pathmask = 0x80 >> i;
5086 domask = ioinfo[irq]->opm & pathmask;
5096 memset(psid, 0, sizeof(senseid_t));
5097 psid->cu_type = 0xFFFF; /* initialize fields ... */
5099 retry = 5; /* retry count */
5100 io_retry = 1; /* enable retries */
5103 * We now issue a SenseID request. In case of BUSY,
5104 * STATUS PENDING or non-CMD_REJECT error conditions
5105 * we run simple retries.
5108 memset (pdevstat, '\0', sizeof (devstat_t));
/* intparm 0x00E2C9C4 is EBCDIC "SID" */
5110 irq_ret = s390_start_IO (irq, sense_ccw, 0x00E2C9C4, /* == SID */
5112 DOIO_WAIT_FOR_INTERRUPT
5115 | DOIO_DONT_CALL_INTHDLR);
/* a reserved byte of 0xFF marks valid SenseID data */
5117 if ((psid->cu_type != 0xFFFF)
5118 && (psid->reserved == 0xFF)) {
5119 if (!sbuffer) { /* switch buffers */
5121 * we report back the
5126 if (ioinfo[irq]->schib.pmcw.pim != 0x80) {
5127 sense_ccw[1].cda = (__u32)
5128 virt_to_phys (psid);
5130 sense_ccw[0].cda = (__u32)
5131 virt_to_phys (psid);
5136 * if just the very first
5137 * was requested to be
5138 * sensed disable further
/* residual count below full size implies extended SenseID data */
5148 if (pdevstat->rescnt < (sizeof (senseid_t) - 8)) {
5149 ioinfo[irq]->ui.flags.esid = 1;
5160 if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
5161 #ifdef CONFIG_DEBUG_IO
5163 "SenseID : device %04X on "
5165 "reports pending status, "
5167 ioinfo[irq]->schib.pmcw.dev, irq,
5171 "SenseID : device %04X on "
5173 "reports pending status, "
5176 [irq]->schib.pmcw.dev, irq, retry);
5179 else if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
5181 * if the device doesn't support the SenseID
5182 * command further retries wouldn't help ...
5184 if (pdevstat->ii.sense.data[0]
5185 & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) {
5186 #ifdef CONFIG_DEBUG_IO
5188 "SenseID : device %04X on "
5190 "reports cmd reject or "
5191 "intervention required\n",
5192 ioinfo[irq]->schib.pmcw.dev,
5196 "SenseID : device %04X on "
5198 "reports cmd reject or "
5199 "intervention required\n",
5200 ioinfo[irq]->schib.pmcw.dev,
5204 #ifdef CONFIG_DEBUG_IO
5213 " %02X%02X%02X%02X "
5214 "%02X%02X%02X%02X ...\n",
5215 ioinfo[irq]->schib.pmcw.dev,
5219 pdevstat->ii.sense.data[0],
5220 pdevstat->ii.sense.data[1],
5221 pdevstat->ii.sense.data[2],
5222 pdevstat->ii.sense.data[3],
5223 pdevstat->ii.sense.data[4],
5224 pdevstat->ii.sense.data[5],
5225 pdevstat->ii.sense.data[6],
5226 pdevstat->ii.sense.data[7]);
5235 " %02X%02X%02X%02X "
5236 "%02X%02X%02X%02X ...\n",
5261 } else if ((pdevstat->flag & DEVSTAT_NOT_OPER)
5262 || (irq_ret == -ENODEV)) {
5263 #ifdef CONFIG_DEBUG_IO
5265 "SenseID : path %02X for "
5268 "is 'not operational'\n",
5270 ioinfo[irq]->schib.pmcw.dev, irq);
5273 "SenseID : path %02X for "
5276 "is 'not operational'\n",
5278 ioinfo[irq]->schib.pmcw.dev, irq);
/* drop the dead path from the operational path mask */
5281 ioinfo[irq]->opm &= ~domask;
5284 #ifdef CONFIG_DEBUG_IO
5286 "SenseID : start_IO() for "
5289 "returns %d, retry %d, "
5291 ioinfo[irq]->schib.pmcw.dev,
5292 irq, irq_ret, retry, pdevstat->flag);
5295 "SenseID : start_IO() for "
5298 "returns %d, retry %d, "
5300 ioinfo[irq]->schib.pmcw.dev, irq,
5301 irq_ret, retry, pdevstat->flag);
5303 if (irq_ret == -ETIMEDOUT) {
5307 * Seems we need to cancel the first ssch sometimes...
5308 * On the next try, the ssch will usually be fine.
5311 xret = cancel_IO (irq);
5315 "SenseID: sch canceled "
5316 "successfully for irq %x\n",
5331 if ((failure) && (io_retry)) {
5332 /* reset fields... */
5336 memset(psid, 0, sizeof(senseid_t));
5337 psid->cu_type = 0xFFFF;
5340 } while ((io_retry));
/* release the CCW area via the matching allocator */
5344 if (init_IRQ_complete) {
5347 free_bootmem ((unsigned long) sense_ccw, 2 * sizeof (ccw1_t));
5351 s390irq_spin_unlock (irq);
5354 * If we installed the irq action handler we have to
5358 free_irq (irq, pdevstat);
5361 * if running under VM check there ... perhaps we should do
5362 * only if we suffered a command reject, but it doesn't harm
5364 if ((sid->cu_type == 0xFFFF)
5365 && (MACHINE_IS_VM)) {
5366 VM_virtual_device_info (ioinfo[irq]->schib.pmcw.dev, sid);
5369 if (sid->cu_type == 0xFFFF) {
5371 * SenseID CU-type of 0xffff indicates that no device
5372 * information could be retrieved (pre-init value).
5374 * If we couldn't identify the device type we
5375 * consider the device "not operational".
5377 #ifdef CONFIG_DEBUG_IO
5378 printk (KERN_WARNING
5379 "SenseID : unknown device %04X on subchannel %04X\n",
5380 ioinfo[irq]->schib.pmcw.dev, irq);
5383 "SenseID : unknown device %04X on subchannel %04X\n",
5384 ioinfo[irq]->schib.pmcw.dev, irq);
5385 ioinfo[irq]->ui.flags.unknown = 1;
5390 * Issue device info message if unit was operational .
5392 if (!ioinfo[irq]->ui.flags.unknown) {
5393 if (sid->dev_type != 0) {
5396 "SenseID : device %04X reports: "
5397 "CU Type/Mod = %04X/%02X,"
5398 " Dev Type/Mod = %04X/%02X\n",
5399 ioinfo[irq]->schib.pmcw.dev,
5400 sid->cu_type, sid->cu_model,
5401 sid->dev_type, sid->dev_model);
5403 "SenseID : device %04X reports: "
5404 "CU Type/Mod = %04X/%02X,"
5405 " Dev Type/Mod = %04X/%02X\n",
5415 "SenseID : device %04X reports:"
5416 " Dev Type/Mod = %04X/%02X\n",
5417 ioinfo[irq]->schib.pmcw.dev,
5418 sid->cu_type, sid->cu_model,
5420 "SenseID : device %04X reports:"
5421 " Dev Type/Mod = %04X/%02X\n",
5430 if (!ioinfo[irq]->ui.flags.unknown)
5432 else if (irq_ret != -ETIMEDOUT)
5438 static int __inline__
5439 s390_SetMultiPath (int irq)
5443 cc = stsch (irq, &ioinfo[irq]->schib);
5446 ioinfo[irq]->schib.pmcw.mp = 1; /* multipath mode */
5448 cc = msch (irq, &ioinfo[irq]->schib);
5456 s390_do_path_verification(int irq, __u8 usermask)
5466 sprintf(dbf_txt, "dopv%x", irq);
5467 CIO_TRACE_EVENT(2, dbf_txt);
5469 dev_path = usermask ? usermask : ioinfo[irq]->opm;
5471 if (ioinfo[irq]->ui.flags.pgid == 0) {
5472 memcpy (&ioinfo[irq]->pgid, global_pgid, sizeof (pgid_t));
5473 ioinfo[irq]->ui.flags.pgid = 1;
5476 for (i = 0; i < 8 && !ret; i++) {
5478 domask = dev_path & (0x80>>i);
5483 if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i],
5485 /* Chpid is logically offline, don't do io */
5488 ret = s390_SetPGID (irq, domask);
5491 * For the *first* path we are prepared for recovery
5493 * - If we fail setting the PGID we assume its
5494 * using a different PGID already (VM) we
5497 if (ret == -EOPNOTSUPP && first) {
5500 ret = s390_SensePGID (irq, domask, &pgid);
5505 * Check whether we retrieved
5506 * a reasonable PGID ...
5508 if (pgid.inf.ps.state1 == SNID_STATE1_GROUPED)
5509 memcpy (&ioinfo[irq]->pgid,
5510 &pgid, sizeof (pgid_t));
5511 else /* ungrouped or garbage ... */
5515 ioinfo[irq]->ui.flags.pgid_supp = 0;
5517 #ifdef CONFIG_DEBUG_IO
5518 printk (KERN_WARNING
5519 "PathVerification(%04X) - Device %04X "
5520 "doesn't support path grouping\n",
5521 irq, ioinfo[irq]->schib.pmcw.dev);
5523 CIO_MSG_EVENT(2, "PathVerification(%04X) "
5524 "- Device %04X doesn't "
5525 " support path grouping\n",
5527 ioinfo[irq]->schib.pmcw.dev);
5530 } else if (ret == -EIO) {
5531 #ifdef CONFIG_DEBUG_IO
5532 printk (KERN_ERR "PathVerification(%04X) - I/O error "
5533 "on device %04X\n", irq,
5534 ioinfo[irq]->schib.pmcw.dev);
5537 CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O error "
5538 "on device %04X\n", irq,
5539 ioinfo[irq]->schib.pmcw.dev);
5541 ioinfo[irq]->ui.flags.pgid_supp = 0;
5543 } else if (ret == -ETIMEDOUT) {
5544 #ifdef CONFIG_DEBUG_IO
5545 printk (KERN_ERR "PathVerification(%04X) - I/O timed "
5546 "out on device %04X\n", irq,
5547 ioinfo[irq]->schib.pmcw.dev);
5549 CIO_MSG_EVENT(2, "PathVerification(%04X) - I/O timed "
5550 "out on device %04X\n", irq,
5551 ioinfo[irq]->schib.pmcw.dev);
5553 ioinfo[irq]->ui.flags.pgid_supp = 0;
5555 } else if (ret == -EAGAIN) {
5558 } else if (ret == -EUSERS) {
5560 #ifdef CONFIG_DEBUG_IO
5561 printk (KERN_ERR "PathVerification(%04X) "
5562 "- Device is locked by someone else!\n",
5565 CIO_MSG_EVENT(2, "PathVerification(%04X) "
5566 "- Device is locked by someone else!\n",
5568 } else if (ret == -ENODEV) {
5569 #ifdef CONFIG_DEBUG_IO
5570 printk (KERN_ERR "PathVerification(%04X) "
5571 "- Device %04X is no longer there?!?\n",
5572 irq, ioinfo[irq]->schib.pmcw.dev);
5574 CIO_MSG_EVENT(2, "PathVerification(%04X) "
5575 "- Device %04X is no longer there?!?\n",
5576 irq, ioinfo[irq]->schib.pmcw.dev);
5578 } else if (ret == -EBUSY) {
5580 * The device is busy. Schedule the path verification
5581 * bottom half and we'll hopefully get in next time.
5583 if (!ioinfo[irq]->ui.flags.noio) {
5584 s390_schedule_path_verification(irq);
5586 return -EINPROGRESS;
5588 #ifdef CONFIG_DEBUG_IO
5589 printk (KERN_ERR "PathVerification(%04X) "
5590 "- Unexpected error %d on device %04X\n",
5591 irq, ret, ioinfo[irq]->schib.pmcw.dev);
5593 CIO_MSG_EVENT(2, "PathVerification(%04X) - "
5594 "Unexpected error %d on device %04X\n",
5595 irq, ret, ioinfo[irq]->schib.pmcw.dev);
5597 ioinfo[irq]->ui.flags.pgid_supp = 0;
5600 if (stsch(irq, &ioinfo[irq]->schib) != 0)
5601 /* FIXME: tell driver device is dead. */
5605 * stsch() doesn't always yield the correct pim, pam, and pom
5606 * values, if no device selection has been performed yet.
5607 * However, after complete path verification they are up to date.
5609 ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim &
5610 ioinfo[irq]->schib.pmcw.pam &
5611 ioinfo[irq]->schib.pmcw.pom;
5614 if (ioinfo[irq]->opm) {
5615 for (i=0;i<=7;i++) {
5616 int mask = 0x80 >> i;
5617 if ((ioinfo[irq]->opm & mask) &&
5618 (!test_bit(ioinfo[irq]->schib.pmcw.chpid[i],
5620 /* disable using this path */
5621 ioinfo[irq]->opm &= ~mask;
5624 #endif /* CONFIG_CHSC */
5626 ioinfo[irq]->ui.flags.noio = 0;
5628 /* Eventually wake up the device driver. */
5629 if (ioinfo[irq]->opm != 0) {
5631 pdevreg = s390_search_devreg(ioinfo[irq]);
5633 if (pdevreg && pdevreg->oper_func)
5634 pdevreg->oper_func(irq, pdevreg);
5641 * Device Path Verification
5643 * Path verification is accomplished by checking which paths (CHPIDs) are
5644 * available. Further, a path group ID is set, if possible in multipath
5645 * mode, otherwise in single path mode.
5647 * Note : This function must not be called during normal device recognition,
5648 * but during device driver initiated request_irq() processing only.
5651 s390_DevicePathVerification (int irq, __u8 usermask)
5658 #endif /* CONFIG_CHSC */
5665 sprintf (dbf_txt, "dpver%x", irq);
5666 CIO_TRACE_EVENT (4, dbf_txt);
5668 if (ioinfo[irq]->st)
5672 old_opm = ioinfo[irq]->opm;
5673 #endif /* CONFIG_CHSC */
5674 ccode = stsch (irq, &(ioinfo[irq]->schib));
5679 if (ioinfo[irq]->schib.pmcw.pim == 0x80) {
5681 * no error, just not required for single path only devices
5683 ioinfo[irq]->ui.flags.pgid_supp = 0;
5685 ioinfo[irq]->ui.flags.noio = 0;
5689 * disable if chpid is logically offline
5691 if (!test_bit(ioinfo[irq]->schib.pmcw.chpid[0],
5694 ioinfo[irq]->opm = 0;
5695 ioinfo[irq]->ui.flags.oper = 0;
5697 "No logical path for sch %d...\n",
5700 if (ioinfo[irq]->nopfunc) {
5701 if (ioinfo[irq]->ui.flags.notacccap)
5702 ioinfo[irq]->nopfunc(irq,
5705 not_oper_handler_func_t nopfunc =
5706 ioinfo[irq]->nopfunc;
5707 #ifdef CONFIG_PROC_FS
5708 /* remove procfs entry */
5709 if (cio_proc_devinfo)
5710 cio_procfs_device_remove
5711 (ioinfo[irq]->devno);
5714 ioinfo[irq]->irq_desc.dev_id);
5715 nopfunc(irq, DEVSTAT_DEVICE_GONE);
5722 ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
5723 & ioinfo[irq]->schib.pmcw.pam
5724 & ioinfo[irq]->schib.pmcw.pom;
5726 if (ioinfo[irq]->opm) {
5728 ioinfo[irq]->ui.flags.oper = 1;
5729 pdevreg = s390_search_devreg(ioinfo[irq]);
5731 if (pdevreg && pdevreg->oper_func)
5732 pdevreg->oper_func(irq, pdevreg);
5738 #endif /* CONFIG_CHSC */
5742 ioinfo[irq]->opm = ioinfo[irq]->schib.pmcw.pim
5743 & ioinfo[irq]->schib.pmcw.pam & ioinfo[irq]->schib.pmcw.pom;
5746 if (ioinfo[irq]->opm) {
5747 for (chp=0;chp<=7;chp++) {
5749 if ((ioinfo[irq]->opm & mask)
5750 &&(!test_bit(ioinfo[irq]->schib.pmcw.chpid[chp],
5752 /* disable using this path */
5753 ioinfo[irq]->opm &= ~mask;
5757 #endif /* CONFIG_CHSC */
5759 if (ioinfo[irq]->ui.flags.pgid_supp == 0) {
5761 if (ioinfo[irq]->opm == 0)
5764 ioinfo[irq]->ui.flags.oper = 1;
5765 ioinfo[irq]->ui.flags.noio = 0;
5767 pdevreg = s390_search_devreg(ioinfo[irq]);
5769 if (pdevreg && pdevreg->oper_func)
5770 pdevreg->oper_func(irq, pdevreg);
5775 if (ioinfo[irq]->ui.flags.ready)
5776 return s390_do_path_verification (irq, usermask);
5782 s390_kick_path_verification (unsigned long irq)
5784 long cr6 __attribute__ ((aligned (8)));
5786 atomic_set (&ioinfo[irq]->pver_pending, 0);
5787 /* Do not enter path verification if sync_isc is enabled. */
5788 __ctl_store (cr6, 6, 6);
5789 if (cr6 & 0x04000000) {
5790 s390_schedule_path_verification (irq);
5793 ioinfo[irq]->ui.flags.killio = 0;
5794 s390_DevicePathVerification(irq, 0xff);
5799 s390_schedule_path_verification(unsigned long irq)
5801 /* Protect against rescheduling, when already running */
5802 if (atomic_compare_and_swap (0, 1, &ioinfo[irq]->pver_pending)) {
5807 * Call path verification.
5808 * Note this is always called from inside the i/o layer, so we don't
5809 * need to care about the usermask.
5811 INIT_LIST_HEAD (&ioinfo[irq]->pver_bh.list);
5812 ioinfo[irq]->pver_bh.sync = 0;
5813 ioinfo[irq]->pver_bh.routine = (void*) (void*) s390_kick_path_verification;
5814 ioinfo[irq]->pver_bh.data = (void*) irq;
5815 queue_task (&ioinfo[irq]->pver_bh, &tq_immediate);
5816 mark_bh (IMMEDIATE_BH);
5826 s390_SetPGID (int irq, __u8 lpm)
5828 ccw1_t *spid_ccw; /* ccw area for SPID command */
5829 devstat_t devstat; /* required by request_irq() */
5830 devstat_t *pdevstat = &devstat;
5831 unsigned long flags;
5834 int irq_ret = 0; /* return code */
5835 int retry = 5; /* retry count */
5836 int inlreq = 0; /* inline request_irq() */
5837 int mpath = 1; /* try multi-path first */
5841 if (ioinfo[irq]->ui.flags.oper == 0) {
5846 if (ioinfo[irq]->ui.flags.unfriendly) {
5847 /* don't even try it */
5851 sprintf (dbf_txt, "SPID%x", irq);
5852 CIO_TRACE_EVENT (4, dbf_txt);
5854 if (!ioinfo[irq]->ui.flags.ready) {
5856 * Perform SetPGID command processing. We have to request device
5857 * ownership and provide a dummy I/O handler. We issue sync. I/O
5858 * requests and evaluate the devstat area on return therefore
5859 * we don't need a real I/O handler in place.
5861 irq_ret = request_irq (irq,
5863 SA_PROBE, "SPID", pdevstat);
5868 pdevstat = ioinfo[irq]->irq_desc.dev_id;
5876 s390irq_spin_lock_irqsave (irq, flags);
5878 if (init_IRQ_complete) {
5879 spid_ccw = kmalloc (2 * sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
5881 spid_ccw = alloc_bootmem_low (2 * sizeof (ccw1_t));
5884 s390irq_spin_unlock_irqrestore(irq, flags);
5886 free_irq(irq, pdevstat);
5890 spid_ccw[0].cmd_code = CCW_CMD_SUSPEND_RECONN;
5891 spid_ccw[0].cda = 0;
5892 spid_ccw[0].count = 0;
5893 spid_ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
5895 spid_ccw[1].cmd_code = CCW_CMD_SET_PGID;
5896 spid_ccw[1].cda = (__u32) virt_to_phys (&ioinfo[irq]->pgid);
5897 spid_ccw[1].count = sizeof (pgid_t);
5898 spid_ccw[1].flags = CCW_FLAG_SLI;
5900 ioinfo[irq]->pgid.inf.fc = SPID_FUNC_MULTI_PATH | SPID_FUNC_ESTABLISH;
5903 * We now issue a SetPGID request. In case of BUSY
5904 * or STATUS PENDING conditions we retry 5 times.
5907 memset (pdevstat, '\0', sizeof (devstat_t));
5909 irq_ret = s390_start_IO (irq, spid_ccw, 0xE2D7C9C4, /* == SPID */
5911 DOIO_WAIT_FOR_INTERRUPT
5913 | DOIO_DONT_CALL_INTHDLR
5917 if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
5918 #ifdef CONFIG_DEBUG_IO
5919 printk (KERN_DEBUG "SPID - Device %04X "
5920 "on Subchannel %04X "
5921 "reports pending status, "
5924 ioinfo[irq]->schib.pmcw.dev,
5928 "SPID - Device %04X "
5929 "on Subchannel %04X "
5930 "reports pending status, "
5933 ioinfo[irq]->schib.pmcw.
5934 dev, irq, lpm, retry);
5939 if (pdevstat->flag == (DEVSTAT_START_FUNCTION
5940 | DEVSTAT_FINAL_STATUS)) {
5941 retry = 0; /* successfully set ... */
5943 } else if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
5945 * If the device doesn't support the
5946 * Sense Path Group ID command
5947 * further retries wouldn't help ...
5949 if (pdevstat->ii.sense.
5950 data[0] & SNS0_CMD_REJECT) {
5953 * We now try single path mode.
5954 * Note we must not issue the suspend
5955 * multipath reconnect, or we will get
5956 * a command reject by tapes.
5959 spid_ccw[0].cmd_code =
5961 spid_ccw[0].cda = (__u32)
5962 virt_to_phys (&ioinfo[irq]->pgid);
5968 ioinfo[irq]->pgid.inf.fc =
5969 SPID_FUNC_SINGLE_PATH
5970 | SPID_FUNC_ESTABLISH;
5975 irq_ret = -EOPNOTSUPP;
5980 #ifdef CONFIG_DEBUG_IO
5981 printk (KERN_WARNING
5982 "SPID - device %04X,"
5984 " retry %d, cnt %02d,"
5986 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
5987 ioinfo[irq]->schib.pmcw.
6005 pdevstat->ii.sense.data[7]);
6009 "SPID - device %04X,"
6011 " retry %d, cnt %02d,"
6013 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6040 } else if (pdevstat->flag & DEVSTAT_NOT_OPER) {
6041 /* don't issue warnings during startup unless requested */
6042 if (init_IRQ_complete || cio_notoper_msg) {
6045 "SPID - Device %04X "
6046 "on Subchannel %04X, "
6048 "became 'not operational'\n",
6049 ioinfo[irq]->schib.pmcw.
6053 "SPID - Device %04X "
6054 "on Subchannel %04X, "
6056 "became 'not operational'\n",
6063 ioinfo[irq]->opm &= ~lpm;
6068 } else if (irq_ret == -ETIMEDOUT) {
6070 * SetPGID timed out, so we cancel it before
6075 xret = cancel_IO(irq);
6079 "SetPGID: sch canceled "
6080 "successfully for irq %x\n",
6084 } else if (irq_ret == -EBUSY) {
6085 #ifdef CONFIG_DEBUG_IO
6087 "SPID - device %x, irq %x is busy!\n",
6088 ioinfo[irq]->schib.pmcw.dev, irq);
6089 #endif /* CONFIG_DEBUG_IO */
6091 "SPID - device %x, irq %x is busy!\n",
6092 ioinfo[irq]->schib.pmcw.dev, irq);
6095 } else if (irq_ret != -ENODEV) {
6098 } else if (!pdevstat->flag & DEVSTAT_NOT_OPER) {
6102 /* don't issue warnings during startup unless requested */
6103 if (init_IRQ_complete || cio_notoper_msg) {
6106 "SPID - Device %04X "
6107 "on Subchannel %04X, "
6109 "became 'not operational'\n",
6110 ioinfo[irq]->schib.pmcw.
6114 "SPID - Device %04X "
6115 "on Subchannel %04X, "
6117 "became 'not operational'\n",
6124 ioinfo[irq]->opm &= ~lpm;
6126 if (ioinfo[irq]->opm != 0)
6133 } while (retry > 0);
6135 if (init_IRQ_complete) {
6138 free_bootmem ((unsigned long) spid_ccw, 2 * sizeof (ccw1_t));
6142 s390irq_spin_unlock_irqrestore (irq, flags);
6145 * If we installed the irq action handler we have to
6149 free_irq (irq, pdevstat);
6157 * Sense Path Group ID
6161 s390_SensePGID (int irq, __u8 lpm, pgid_t * pgid)
6163 ccw1_t *snid_ccw; /* ccw area for SNID command */
6164 devstat_t devstat; /* required by request_irq() */
6165 devstat_t *pdevstat = &devstat;
6169 int irq_ret = 0; /* return code */
6170 int retry = 5; /* retry count */
6171 int inlreq = 0; /* inline request_irq() */
6172 unsigned long flags;
6176 if (ioinfo[irq]->ui.flags.oper == 0) {
6181 sprintf (dbf_txt, "SNID%x", irq);
6182 CIO_TRACE_EVENT (4, dbf_txt);
6184 if (!ioinfo[irq]->ui.flags.ready) {
6186 * Perform SENSE PGID command processing. We have to request device
6187 * ownership and provide a dummy I/O handler. We issue sync. I/O
6188 * requests and evaluate the devstat area on return therefore
6189 * we don't need a real I/O handler in place.
6191 irq_ret = request_irq (irq,
6193 SA_PROBE, "SNID", pdevstat);
6199 pdevstat = ioinfo[irq]->irq_desc.dev_id;
6207 s390irq_spin_lock_irqsave (irq, flags);
6209 ioinfo[irq]->ui.flags.unfriendly = 0; /* assume it's friendly... */
6211 if (init_IRQ_complete) {
6212 snid_ccw = kmalloc (sizeof (ccw1_t), GFP_DMA | GFP_ATOMIC);
6213 tmp_pgid = kmalloc (sizeof (pgid_t), GFP_DMA | GFP_ATOMIC);
6215 snid_ccw = alloc_bootmem_low (sizeof (ccw1_t));
6216 tmp_pgid = alloc_bootmem_low (sizeof (pgid_t));
6219 if (!snid_ccw || !tmp_pgid) {
6221 if (init_IRQ_complete)
6224 free_bootmem((unsigned long) snid_ccw, sizeof(ccw1_t));
6227 if (init_IRQ_complete)
6230 free_bootmem((unsigned long) tmp_pgid, sizeof(pgid_t));
6232 s390irq_spin_unlock_irqrestore(irq, flags);
6234 free_irq (irq, pdevstat);
6238 snid_ccw->cmd_code = CCW_CMD_SENSE_PGID;
6239 snid_ccw->cda = (__u32) virt_to_phys (tmp_pgid);
6240 snid_ccw->count = sizeof (pgid_t);
6241 snid_ccw->flags = CCW_FLAG_SLI;
6244 * We now issue a SensePGID request. In case of BUSY
6245 * or STATUS PENDING conditions we retry 5 times.
6248 memset (pdevstat, '\0', sizeof (devstat_t));
6250 irq_ret = s390_start_IO (irq, snid_ccw, 0xE2D5C9C4, /* == SNID */
6252 DOIO_WAIT_FOR_INTERRUPT
6255 | DOIO_DONT_CALL_INTHDLR);
6258 if (pdevstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
6260 * If the device doesn't support the
6261 * Sense Path Group ID command
6262 * further retries wouldn't help ...
6264 if (pdevstat->ii.sense.data[0]
6265 & (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)) {
6267 irq_ret = -EOPNOTSUPP;
6269 #ifdef CONFIG_DEBUG_IO
6270 printk (KERN_WARNING
6271 "SNID - device %04X,"
6274 " retry %d, cnt %02d,"
6276 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6277 ioinfo[irq]->schib.pmcw.
6278 dev, pdevstat->flag,
6279 retry, pdevstat->scnt,
6294 pdevstat->ii.sense.data[7]);
6298 "SNID - device %04X,"
6301 " retry %d, cnt %02d,"
6303 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
6329 } else if (pdevstat->flag & DEVSTAT_NOT_OPER) {
6330 /* don't issue warnings during startup unless requested */
6331 if (init_IRQ_complete || cio_notoper_msg) {
6333 "SNID - Device %04X "
6334 "on Subchannel %04X, "
6336 "became 'not operational'\n",
6337 ioinfo[irq]->schib.pmcw.
6341 "SNID - Device %04X "
6342 "on Subchannel %04X, "
6344 "became 'not operational'\n",
6351 ioinfo[irq]->opm &= ~lpm;
6355 retry = 0; /* success ... */
6358 * Check if device is locked by someone else
6359 * -- we'll fail other commands if that is
6362 if (tmp_pgid->inf.ps.state2 ==
6363 SNID_STATE2_RESVD_ELSE) {
6364 printk (KERN_WARNING
6365 "SNID - Device %04X "
6366 "on Subchannel %04X "
6369 ioinfo[irq]->schib.pmcw.dev,
6372 "SNID - Device %04X "
6373 "on Subchannel %04X "
6380 ioinfo[irq]->ui.flags.unfriendly = 1;
6383 * device is friendly to us :)
6385 ioinfo[irq]->ui.flags.unfriendly = 0;
6387 memcpy(pgid, tmp_pgid, sizeof(pgid_t));
6390 } else if (irq_ret == -ETIMEDOUT) {
6391 #ifdef CONFIG_DEBUG_IO
6392 printk(KERN_INFO "SNID - Operation timed out "
6393 "on Device %04X, Subchannel %04X... "
6395 ioinfo[irq]->schib.pmcw.dev,
6397 #endif /* CONFIG_DEBUG_IO */
6399 "SNID - Operation timed out "
6400 "on Device %04X, Subchannel %04X... "
6402 ioinfo[irq]->schib.pmcw.dev,
6407 } else if (irq_ret != -ENODEV) { /* -EIO, or -EBUSY */
6409 if (pdevstat->flag & DEVSTAT_STATUS_PENDING) {
6410 #ifdef CONFIG_DEBUG_IO
6411 printk (KERN_INFO "SNID - Device %04X "
6412 "on Subchannel %04X "
6413 "reports pending status, "
6415 ioinfo[irq]->schib.pmcw.dev,
6419 "SNID - Device %04X "
6420 "on Subchannel %04X "
6421 "reports pending status, "
6423 ioinfo[irq]->schib.pmcw.
6427 printk (KERN_WARNING "SNID - device %04X,"
6428 " start_io() reports rc : %d, retrying ...\n",
6429 ioinfo[irq]->schib.pmcw.dev, irq_ret);
6431 "SNID - device %04X,"
6432 " start_io() reports rc : %d,"
6434 ioinfo[irq]->schib.pmcw.dev, irq_ret);
6437 } else if (!pdevstat->flag & DEVSTAT_NOT_OPER) {
6441 /* don't issue warnings during startup unless requested */
6442 if (init_IRQ_complete || cio_notoper_msg) {
6445 "SNID - Device %04X "
6446 "on Subchannel %04X, "
6448 "became 'not operational'\n",
6449 ioinfo[irq]->schib.pmcw.
6453 "SNID - Device %04X "
6454 "on Subchannel %04X, "
6456 "became 'not operational'\n",
6463 ioinfo[irq]->opm &= ~lpm;
6465 if (ioinfo[irq]->opm != 0)
6472 } while (retry > 0);
6474 if (init_IRQ_complete) {
6478 free_bootmem ((unsigned long) snid_ccw, sizeof (ccw1_t));
6479 free_bootmem ((unsigned long) tmp_pgid, sizeof (pgid_t));
6483 s390irq_spin_unlock_irqrestore (irq, flags);
6486 * If we installed the irq action handler we have to
6490 free_irq (irq, pdevstat);
6496 s390_process_subchannel_source (int irq)
6504 * If the device isn't known yet
6505 * we can't lock it ...
6507 if (ioinfo[irq] != INVALID_STORAGE_AREA) {
6508 s390irq_spin_lock (irq);
6511 if (!ioinfo[irq]->st) {
6512 dev_oper = ioinfo[irq]->ui.flags.oper;
6514 if (ioinfo[irq]->ui.flags.dval)
6515 dev_no = ioinfo[irq]->devno;
6517 is_owned = ioinfo[irq]->ui.flags.ready;
6521 #ifdef CONFIG_DEBUG_CRW
6523 "do_crw_pending : subchannel validation - start ...\n");
6525 CIO_CRW_EVENT(4, "subchannel validation - start\n");
6526 s390_validate_subchannel (irq, is_owned);
6528 if (irq > highest_subchannel)
6529 highest_subchannel = irq;
6531 #ifdef CONFIG_DEBUG_CRW
6532 printk (KERN_DEBUG "do_crw_pending : subchannel validation - done\n");
6534 CIO_CRW_EVENT(4, "subchannel validation - done\n");
6536 * After the validate processing
6537 * the ioinfo control block
6538 * should be allocated ...
6541 s390irq_spin_unlock (irq);
6544 if (ioinfo[irq] != INVALID_STORAGE_AREA) {
6545 #ifdef CONFIG_DEBUG_CRW
6546 printk (KERN_DEBUG "do_crw_pending : ioinfo at "
6547 #ifdef CONFIG_ARCH_S390X
6548 "%08lX\n", (unsigned long) ioinfo[irq]
6549 #else /* CONFIG_ARCH_S390X */
6550 "%08X\n", (unsigned) ioinfo[irq]
6551 #endif /* CONFIG_ARCH_S390X */
6554 #ifdef CONFIG_ARCH_S390X
6555 CIO_CRW_EVENT(4, "ioinfo at %08lX\n",
6556 (unsigned long)ioinfo[irq]);
6557 #else /* CONFIG_ARCH_S390X */
6558 CIO_CRW_EVENT(4, "ioinfo at %08X\n",
6559 (unsigned)ioinfo[irq]);
6560 #endif /* CONFIG_ARCH_S390X */
6562 if (ioinfo[irq]->st)
6565 if (ioinfo[irq]->ui.flags.oper == 0) {
6566 not_oper_handler_func_t nopfunc = ioinfo[irq]->nopfunc;
6567 #ifdef CONFIG_PROC_FS
6568 /* remove procfs entry */
6569 if (cio_proc_devinfo)
6570 cio_procfs_device_remove (dev_no);
6573 * If the device has gone
6574 * call not oper handler
6577 && (nopfunc != NULL)) {
6579 free_irq (irq, ioinfo[irq]->irq_desc.dev_id);
6580 nopfunc (irq, DEVSTAT_DEVICE_GONE);
6584 #ifdef CONFIG_DEBUG_CRW
6586 "do_crw_pending : device "
6587 "recognition - start ...\n");
6590 "device recognition - start\n");
6591 s390_device_recognition_irq (irq);
6593 #ifdef CONFIG_DEBUG_CRW
6595 "do_crw_pending : device "
6596 "recognition - done\n");
6599 "device recognition - done\n");
6601 * the device became operational
6603 if (dev_oper == 0) {
6606 pdevreg = s390_search_devreg (ioinfo[irq]);
6608 if (pdevreg && pdevreg->oper_func)
6609 pdevreg->oper_func(irq, pdevreg);
6611 #ifdef CONFIG_PROC_FS
6612 /* add new procfs entry */
6613 if (cio_proc_devinfo)
6614 if (highest_subchannel <
6615 MAX_CIO_PROCFS_ENTRIES) {
6616 cio_procfs_device_create
6617 (ioinfo[irq]->devno);
6622 * ... it is and was operational, but
6623 * the devno may have changed
6625 else if ((ioinfo[irq]->devno != dev_no)
6626 && (ioinfo[irq]->nopfunc != NULL)) {
6627 #ifdef CONFIG_PROC_FS
6628 int devno_old = ioinfo[irq]->devno;
6630 ioinfo[irq]->nopfunc (irq, DEVSTAT_REVALIDATE);
6631 #ifdef CONFIG_PROC_FS
6632 /* remove old entry, add new */
6633 if (cio_proc_devinfo) {
6634 cio_procfs_device_remove (devno_old);
6635 cio_procfs_device_create
6636 (ioinfo[irq]->devno);
6641 #ifdef CONFIG_PROC_FS
6642 /* get rid of dead procfs entries */
6643 if (cio_proc_devinfo)
6644 cio_procfs_device_purge ();
6651 chsc_get_sch_desc_irq(int irq)
6656 spin_lock(&chsc_lock_ssd);
6659 chsc_area_ssd = kmalloc(sizeof(chsc_area_t),GFP_KERNEL);
6661 if (!chsc_area_ssd) {
6662 printk( KERN_CRIT "No memory to determine sch descriptions...\n");
6663 spin_unlock(&chsc_lock_ssd);
6667 memset(chsc_area_ssd, 0, sizeof(chsc_area_t));
6669 chsc_area_ssd->request_block.command_code1=0x0010;
6670 chsc_area_ssd->request_block.command_code2=0x0004;
6671 chsc_area_ssd->request_block.request_block_data.ssd_req.f_sch=irq;
6672 chsc_area_ssd->request_block.request_block_data.ssd_req.l_sch=irq;
6674 ccode = chsc(chsc_area_ssd);
6675 #ifdef CONFIG_DEBUG_CHSC
6677 printk( KERN_DEBUG "chsc returned with ccode = %d\n",ccode);
6678 #endif /* CONFIG_DEBUG_CHSC */
6680 if (chsc_area_ssd->response_block.response_code == 0x0003) {
6681 #ifdef CONFIG_DEBUG_CHSC
6682 printk( KERN_WARNING "Error in chsc request block!\n");
6683 #endif /* CONFIG_DEBUG_CHSC */
6684 CIO_CRW_EVENT( 2, "Error in chsc request block!\n");
6685 spin_unlock(&chsc_lock_ssd);
6688 } else if (chsc_area_ssd->response_block.response_code == 0x0004) {
6689 #ifdef CONFIG_DEBUG_CHSC
6690 printk( KERN_WARNING "Model does not provide ssd\n");
6691 #endif /* CONFIG_DEBUG_CHSC */
6692 CIO_CRW_EVENT( 2, "Model does not provide ssd\n");
6693 spin_unlock(&chsc_lock_ssd);
6696 } else if (chsc_area_ssd->response_block.response_code == 0x0002) {
6697 #ifdef CONFIG_DEBUG_CHSC
6698 printk( KERN_WARNING "chsc: Invalid command!\n");
6699 #endif /* CONFIG_DEBUG_CHSC */
6701 "chsc: Invalid command!\n");
6704 } else if (chsc_area_ssd->response_block.response_code == 0x0001) {
6707 switch (chsc_area_ssd->response_block.response_block_data.ssd_res.st) {
6709 case 0: /* I/O subchannel */
6712 * All fields have meaning
6714 #ifdef CONFIG_DEBUG_CHSC
6717 "ssd: sch %x is I/O subchannel\n",
6719 #endif /* CONFIG_DEBUG_CHSC */
6721 "ssd: sch %x is I/O subchannel\n",
6724 if (ioinfo[irq] == INVALID_STORAGE_AREA)
6725 /* FIXME: we should do device rec. here... */
6728 ioinfo[irq]->ssd_info.valid = 1;
6729 ioinfo[irq]->ssd_info.type = 0;
6732 chsc_area_ssd->response_block.
6733 response_block_data.ssd_res.path_mask &
6734 chsc_area_ssd->response_block.
6735 response_block_data.ssd_res.fla_valid_mask) {
6737 if (chsc_area_ssd->response_block.
6738 response_block_data.ssd_res.chpid[j])
6740 if (!test_and_set_bit
6741 (chsc_area_ssd->response_block.
6742 response_block_data.
6747 (chsc_area_ssd->response_block.
6748 response_block_data.
6752 set_bit(chsc_area_ssd->response_block.
6753 response_block_data.
6757 ioinfo[irq]->ssd_info.chpid[j] =
6758 chsc_area_ssd->response_block.
6759 response_block_data.ssd_res.chpid[j];
6760 ioinfo[irq]->ssd_info.fla[j] =
6761 chsc_area_ssd->response_block.
6762 response_block_data.ssd_res.fla[j];
6767 case 1: /* CHSC subchannel */
6770 * Only sch_val, st and sch have meaning
6772 #ifdef CONFIG_DEBUG_CHSC
6775 "ssd: sch %x is chsc subchannel\n",
6777 #endif /* CONFIG_DEBUG_CHSC */
6779 "ssd: sch %x is chsc subchannel\n",
6782 if (ioinfo[irq] == INVALID_STORAGE_AREA)
6783 /* FIXME: we should do device rec. here... */
6786 ioinfo[irq]->ssd_info.valid = 1;
6787 ioinfo[irq]->ssd_info.type = 1;
6790 case 2: /* Message subchannel */
6793 * All fields except unit_addr have meaning
6795 #ifdef CONFIG_DEBUG_CHSC
6798 "ssd: sch %x is message subchannel\n",
6802 "ssd: sch %x is message subchannel\n",
6805 if (ioinfo[irq] == INVALID_STORAGE_AREA)
6806 /* FIXME: we should do device rec. here... */
6809 ioinfo[irq]->ssd_info.valid = 1;
6810 ioinfo[irq]->ssd_info.type = 2;
6813 chsc_area_ssd->response_block.
6814 response_block_data.ssd_res.path_mask &
6815 chsc_area_ssd->response_block.
6816 response_block_data.ssd_res.fla_valid_mask) {
6817 if (chsc_area_ssd->response_block.
6818 response_block_data.ssd_res.chpid[j])
6820 if (!test_and_set_bit
6821 (chsc_area_ssd->response_block.
6822 response_block_data.
6827 (chsc_area_ssd->response_block.
6828 response_block_data.
6832 set_bit(chsc_area_ssd->response_block.
6833 response_block_data.
6837 ioinfo[irq]->ssd_info.chpid[j] =
6838 chsc_area_ssd->response_block.
6839 response_block_data.ssd_res.chpid[j];
6840 ioinfo[irq]->ssd_info.fla[j] =
6841 chsc_area_ssd->response_block.
6842 response_block_data.ssd_res.fla[j];
6847 case 3: /* ADM subchannel */
6850 * Only sch_val, st and sch have meaning
6852 #ifdef CONFIG_DEBUG_CHSC
6855 "ssd: sch %x is ADM subchannel\n",
6857 #endif /* CONFIG_DEBUG_CHSC */
6859 "ssd: sch %x is ADM subchannel\n",
6862 if (ioinfo[irq] == INVALID_STORAGE_AREA)
6863 /* FIXME: we should do device rec. here... */
6866 ioinfo[irq]->ssd_info.valid = 1;
6867 ioinfo[irq]->ssd_info.type = 3;
6870 default: /* uhm, that looks strange... */
6871 #ifdef CONFIG_DEBUG_CHSC
6874 "Strange subchannel type %d for sch %x\n",
6875 chsc_area_ssd->response_block.
6876 response_block_data.ssd_res.st,
6878 #endif /* CONFIG_DEBUG_CHSC */
6880 "Strange subchannel type %d for "
6882 chsc_area_ssd->response_block.
6883 response_block_data.ssd_res.st,
6886 spin_unlock(&chsc_lock_ssd);
6890 spin_unlock(&chsc_lock_ssd);
6900 chsc_get_sch_descriptions( void )
6906 CIO_TRACE_EVENT( 4, "gsdesc");
6909 * get information about chpids and link addresses
6910 * by executing the chsc command 'store subchannel description'
6913 if (init_IRQ_complete) {
6915 for (irq=0; irq<=highest_subchannel; irq++) {
6918 * retrieve information for each sch
6920 err = chsc_get_sch_desc_irq(irq);
6922 if (!cio_chsc_err_msg) {
6924 "chsc_get_sch_descriptions:"
6925 " Error %d while doing chsc; "
6927 "some machine checks may "
6935 cio_chsc_desc_avail = 1;
6941 "Error: chsc_get_sch_descriptions called before "
6942 "initialization complete\n");
6948 __initcall(chsc_get_sch_descriptions);
6951 __check_for_io_and_kill(int irq, __u8 mask, int fatal)
6953 schib_t *schib = &ioinfo[irq]->schib;
6956 if (schib->scsw.actl & SCSW_ACTL_DEVACT) {
6957 if ((ioinfo[irq]->opm != mask) ||
6959 ret = CIO_PATHGONE_WAIT4INT;
6961 if ((schib->scsw.actl & SCSW_ACTL_SCHACT) &&
6962 (schib->pmcw.lpum == mask) &&
6965 /* Kill the IO. It won't complete. */
6966 ioinfo[irq]->ui.flags.noio = 0;
6967 ioinfo[irq]->ui.flags.killio = 1;
6968 cc = clear_IO(irq, 0xD2C9D3D3, 0);
6970 /* Eek, can't kill io. */
6976 ioinfo[irq]->ui.flags.killio = 0;
6977 s390irq_spin_unlock(irq);
6978 if ((cc == -ENODEV) &&
6979 (ioinfo[irq]->nopfunc)) {
6980 ioinfo[irq]->ui.flags.oper = 0;
6981 ioinfo[irq]->nopfunc(irq,
6982 DEVSTAT_DEVICE_GONE);
6984 ret = CIO_PATHGONE_DEVGONE;
6986 ret |= CIO_PATHGONE_WAIT4INT;
6988 ioinfo[irq]->ui.flags.noio = 1;
6989 ret |= CIO_PATHGONE_IOERR;
6992 } else if (schib->scsw.actl & (SCSW_ACTL_CLEAR_PEND |
6993 SCSW_ACTL_HALT_PEND |
6994 SCSW_ACTL_START_PEND |
6995 SCSW_ACTL_RESUME_PEND)) {
6996 if ((schib->pmcw.lpum != mask) ||
6998 ret = CIO_PATHGONE_WAIT4INT;
7001 /* Cancel the i/o. */
7002 cc = cancel_IO(irq);
7005 /* i/o cancelled; we can do path verif. */
7006 ret = CIO_PATHGONE_IOERR;
7009 /* Status pending, we'll get an interrupt */
7010 ret = CIO_PATHGONE_WAIT4INT;
7014 * There is either not only the start function
7015 * specified or we are subchannel active.
7018 ioinfo[irq]->ui.flags.noio = 0;
7019 ioinfo[irq]->ui.flags.killio = 1;
7020 cc = clear_IO(irq, 0xD2C9D3D3, 0);
7022 /* Eek, can't kill io. */
7028 ioinfo[irq]->ui.flags.killio = 0;
7029 s390irq_spin_unlock(irq);
7030 if ((cc == -ENODEV) &&
7031 (ioinfo[irq]->nopfunc)) {
7032 ioinfo[irq]->nopfunc(irq,
7033 DEVSTAT_DEVICE_GONE);
7034 ioinfo[irq]->ui.flags.oper = 0;
7036 ret = CIO_PATHGONE_DEVGONE;
7038 ret = CIO_PATHGONE_WAIT4INT
7039 | CIO_PATHGONE_IOERR;
7040 ioinfo[irq]->ui.flags.noio = 1;
7043 default: /* -ENODEV */
7044 s390irq_spin_unlock(irq);
7045 if (ioinfo[irq]->nopfunc) {
7046 ioinfo[irq]->ui.flags.oper = 0;
7047 ioinfo[irq]->nopfunc(irq,
7048 DEVSTAT_DEVICE_GONE);
7050 ret = CIO_PATHGONE_DEVGONE;
/*
 * s390_do_chpid_processing - react to a link incident on channel path chpid.
 *
 * Drops chpid from the available (chpids) and known (chpids_known) masks,
 * then walks all known I/O subchannels; every subchannel that uses this
 * chpid gets outstanding I/O killed, the driver notified, the path removed
 * from the operational path mask and path verification scheduled.
 *
 * NOTE(review): this extract is incomplete - declarations, braces and some
 * statements are missing from the visible chunk.
 */
7058 s390_do_chpid_processing( __u8 chpid)
7065 sprintf(dbf_txt, "chpr%x", chpid);
7066 CIO_TRACE_EVENT( 2, dbf_txt);
7069 * TODO: the chpid may be not the chpid with the link incident,
7070 * but the chpid the report came in through. How to handle???
/* Path is no longer usable; forget we ever saw it. */
7072 clear_bit(chpid, &chpids);
7073 if (!test_and_clear_bit(chpid, &chpids_known)) {
7074 #ifdef CONFIG_DEBUG_CHSC
/* NOTE(review): pr_debug() already supplies the debug log level;
 * the explicit KERN_DEBUG prefix here is redundant and ends up as
 * literal text in the message. */
7075 pr_debug(KERN_DEBUG"Got link incident for unknown chpid %x\n",
7077 #endif /* CONFIG_DEBUG_CHSC */
7078 return; /* we didn't know the chpid anyway */
7081 for (irq=0;irq<=highest_subchannel;irq++) {
7084 if (ioinfo[irq] == INVALID_STORAGE_AREA)
7085 continue; /* we don't know the device anyway */
7086 if (ioinfo[irq]->st)
7087 continue; /* non-io subchannel */
7088 schib = &ioinfo[irq]->schib;
/* Check each of the (up to 8) paths of this subchannel. */
7089 for (j=0; j<8;j++) {
7090 int mask = 0x80 >> j;
7094 if (schib->pmcw.chpid[j] != chpid)
/* stsch failure means the device itself vanished. */
7097 if (stsch(irq, schib) != 0) {
7098 ioinfo[irq]->ui.flags.oper = 0;
7099 if (ioinfo[irq]->nopfunc)
7100 ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE);
7104 s390irq_spin_lock(irq);
/* Block new I/O while we sort out the path situation. */
7106 ioinfo[irq]->ui.flags.noio = 1;
7108 /* Do we still expect an interrupt for outstanding io? */
7109 if (ioinfo[irq]->ui.flags.busy) {
7110 int rck = __check_for_io_and_kill(irq, mask, 1);
7111 if (rck & CIO_PATHGONE_WAIT4INT)
7113 if (rck & CIO_PATHGONE_IOERR)
7115 if (rck & CIO_PATHGONE_DEVGONE)
7119 s390irq_spin_unlock(irq);
7122 * Tell the device driver not to disturb us.
7123 * If the driver is not capable of handling
7124 * DEVSTAT_NOT_ACC, it doesn't want path grouping anyway.
7126 if (ioinfo[irq]->ui.flags.ready &&
7127 schib->pmcw.pim != 0x80 &&
7128 ioinfo[irq]->nopfunc &&
7129 ioinfo[irq]->ui.flags.notacccap) {
7131 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR);
7133 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
/* Remove the dead path from the operational path mask. */
7136 ioinfo[irq]->opm &= ~mask;
7142 * Always schedule the path verification, even if opm=0.
7143 * Reason: We can't rely on stsch() to return latest&greatest
7144 * values, if a device selections hasn't been performed, and
7145 * we might miss a path we didn't get a mchk for.
7147 if (ioinfo[irq]->ui.flags.ready)
7148 s390_schedule_path_verification(irq);
/* Re-enable I/O; any kill-in-progress flag is cleared too. */
7150 ioinfo[irq]->ui.flags.noio = 0;
7151 ioinfo[irq]->ui.flags.killio = 0;
/*
 * s390_do_res_acc_processing - I/O resources may have become accessible
 * again via chpid (and, depending on info, link address fla).
 *
 * info selects how precisely the affected devices can be identified:
 *   CHSC_SEI_ACC_CHPID        - only the chpid is known (scan everything)
 *   CHSC_SEI_ACC_LINKADDR     - chpid + (masked) link address
 *   CHSC_SEI_ACC_FULLLINKADDR - chpid + full link address (CU image)
 * Affected/possibly-new devices are (re)validated, recognized and get a
 * path verification scheduled.
 *
 * NOTE(review): extract is incomplete - e.g. the adjustment of fla_mask
 * for the LINKADDR case is not visible here; do not assume fla_mask stays
 * 0xffff on all paths.
 */
7160 s390_do_res_acc_processing( __u8 chpid, __u16 fla, int info)
7165 __u32 fla_mask = 0xffff;
7169 sprintf(dbf_txt, "accpr%x", chpid);
7170 CIO_TRACE_EVENT( 2, dbf_txt);
7171 if (info != CHSC_SEI_ACC_CHPID) {
7172 sprintf(dbf_txt, "fla%x", fla);
7173 CIO_TRACE_EVENT( 2, dbf_txt);
7175 sprintf(dbf_txt, "info:%d", info);
7176 CIO_TRACE_EVENT( 2, dbf_txt);
7179 * I/O resources may have become accessible.
7180 * Scan through all subchannels that may be concerned and
7181 * do a validation on those.
7182 * The more information we have (info), the less scanning
7183 * will we have to do.
/* We need the chsc subchannel descriptions to map chpids to devices. */
7186 if (!cio_chsc_desc_avail)
7187 chsc_get_sch_descriptions();
7189 if (!cio_chsc_desc_avail) {
7191 * Something went wrong...
7193 #ifdef CONFIG_DEBUG_CRW
7194 printk( KERN_WARNING
7195 "Error: Could not retrieve subchannel descriptions, "
7196 "will not process css machine check...\n");
7197 #endif /* CONFIG_DEBUG_CRW */
7199 "Error: Could not retrieve subchannel descriptions, "
7200 "will not process css machine check...\n");
/* A logically offline (varied off) chpid stays out of use. */
7204 if (!test_bit(chpid, &chpids_logical)) {
7205 #ifdef CONFIG_DEBUG_CHSC
7206 printk(KERN_DEBUG"chpid %x is logically offline, "
7207 "skipping res acc processing\n", chpid);
7208 #endif /* CONFIG_DEBUG_CHSC */
7209 return; /* no need to do the rest */
7213 case CHSC_SEI_ACC_CHPID: /*
7214 * worst case, we only know about the chpid
7215 * the devices are attached to
7217 #ifdef CONFIG_DEBUG_CHSC
7218 printk( KERN_DEBUG "Looking at chpid %x...\n", chpid);
7219 #endif /* CONFIG_DEBUG_CHSC */
7221 for (irq=0; irq<__MAX_SUBCHANNELS; irq++) {
7223 if((ioinfo[irq] != INVALID_STORAGE_AREA)
7224 && (ioinfo[irq]->st != 0))
7227 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
7229 * We don't know the device yet, but since a path
7230 * may be available now to the device we'll have
7231 * to do recognition again.
7232 * Since we don't have any idea about which chpid
7233 * that beast may be on we'll have to do a stsch
7234 * on all devices, grr...
7238 valret = s390_validate_subchannel(irq,0);
7239 if (valret == -ENXIO) {
7243 if (irq > highest_subchannel)
7244 highest_subchannel = irq;
7246 s390_device_recognition_irq(irq);
/* Known device: check whether any of its paths is our chpid. */
7250 for (chp=0;chp<=7;chp++) {
7254 * check if chpid is in information
7257 if ((!ioinfo[irq]->ssd_info.valid) ||
7258 (ioinfo[irq]->ssd_info.chpid[chp] != chpid))
7261 /* Tell the device driver not to disturb us. */
7262 if (ioinfo[irq]->ui.flags.ready &&
7263 ioinfo[irq]->schib.pmcw.pim != 0x80 &&
7264 ioinfo[irq]->nopfunc &&
7265 ioinfo[irq]->ui.flags.notacccap)
7266 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7268 ioinfo[irq]->ui.flags.noio = 1;
7270 /* Do we still expect an interrupt for outstanding io? */
7271 if (ioinfo[irq]->ui.flags.busy)
7272 /* Wait for interrupt. */
7275 if (ioinfo[irq]->ui.flags.ready) {
7276 s390_schedule_path_verification(irq);
7278 ioinfo[irq]->ui.flags.noio = 0;
7285 case CHSC_SEI_ACC_LINKADDR: /*
7286 * better, we know the link determined by
7287 * the link address and the chpid
7292 case CHSC_SEI_ACC_FULLLINKADDR: /*
7293 * best case, we know the CU image
7294 * by chpid and full link address
7297 #ifdef CONFIG_DEBUG_CHSC
7298 printk( KERN_DEBUG "Looking at chpid %x, link addr %x...\n",
7300 #endif /* CONFIG_DEBUG_CHSC */
7302 for (irq=0; irq<__MAX_SUBCHANNELS; irq++) {
7305 * Walk through all subchannels and
7306 * look if our chpid and our (masked) link
7307 * address are in somewhere
7308 * Do a stsch for the found subchannels and
7309 * perform path grouping
7311 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
7312 /* The full program again (see above), grr... */
7315 valret = s390_validate_subchannel(irq,0);
7316 if (valret == -ENXIO) {
7320 if (irq > highest_subchannel)
7321 highest_subchannel = irq;
7323 s390_device_recognition_irq(irq);
7326 if (ioinfo[irq]->st != 0)
7329 /* Update our ssd_info */
7330 if (chsc_get_sch_desc_irq(irq))
/* Match both the chpid and the (masked) link address. */
7334 if ((ioinfo[irq]->ssd_info.chpid[j] != chpid) ||
7335 ((ioinfo[irq]->ssd_info.fla[j]&fla_mask) != fla))
7338 /* Tell the device driver not to disturb us. */
7339 if (ioinfo[irq]->ui.flags.ready &&
7340 ioinfo[irq]->schib.pmcw.pim != 0x80 &&
7341 ioinfo[irq]->nopfunc &&
7342 ioinfo[irq]->ui.flags.notacccap)
7343 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7345 ioinfo[irq]->ui.flags.noio = 1;
7347 /* Do we still expect an interrupt for outstanding io? */
7348 if (ioinfo[irq]->ui.flags.busy)
7349 /* Wait for interrupt. */
7352 if (ioinfo[irq]->ui.flags.ready) {
7353 s390_schedule_path_verification(irq);
7355 ioinfo[irq]->ui.flags.noio = 0;
/*
 * __get_chpid_from_lir - extract the chpid from a link incident record.
 *
 * data points to a LIR as delivered by chsc store-event-information.
 * Validity checks on the incident qualifier and the first incident-node
 * descriptor word precede the extraction; the error-return statements for
 * the invalid cases sit on lines not visible in this extract.
 * Returns the chpid taken from byte 3 of indesc[0] on success.
 */
7369 __get_chpid_from_lir(void *data)
7375 /* incident-node descriptor */
7377 /* attached-node descriptor */
7379 /* incident-specific information */
7383 lir = (struct lir*) data;
7384 if (!(lir->iq&0x80))
7385 /* NULL link incident record */
7387 if (!(lir->indesc[0]&0xc0000000))
7388 /* node descriptor not valid */
7390 if (!(lir->indesc[0]&0x10000000))
7391 /* don't handle device-type nodes - FIXME */
7393 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
7395 return (u16) (lir->indesc[0]&0x000000ff);
/*
 * s390_process_css - process a channel subsystem machine check.
 *
 * Issues a chsc store-event-information request (command 0x0010/0x000E),
 * validates the response code, then dispatches on the content code:
 * link incidents go to s390_do_chpid_processing(), resource accessibility
 * events to s390_do_res_acc_processing(). The event area is allocated
 * lazily (kmalloc after init_IRQ, bootmem before) and protected by
 * chsc_lock_sei for the whole request/response cycle.
 *
 * NOTE(review): extract is incomplete - loop construct around the
 * "further event information pending" handling and several braces are
 * not visible here.
 */
7399 s390_process_css( void )
7402 int ccode, do_sei, chpid;
7404 CIO_TRACE_EVENT( 2, "prcss");
7406 spin_lock(&chsc_lock_sei);
/* Lazily allocate the chsc event area; allocator depends on boot phase. */
7408 if (!chsc_area_sei) {
7409 if (init_IRQ_complete)
7410 chsc_area_sei = kmalloc(sizeof(chsc_area_t),GFP_KERNEL);
7412 chsc_area_sei = alloc_bootmem(sizeof(chsc_area_t));
7415 if (!chsc_area_sei) {
7417 "No memory to store event information...\n");
7418 spin_unlock(&chsc_lock_sei);
7429 * build the chsc request block for store event information
7432 memset(chsc_area_sei,0,sizeof(chsc_area_t));
7433 chsc_area_sei->request_block.command_code1=0x0010;
7434 chsc_area_sei->request_block.command_code2=0x000E;
7436 ccode = chsc(chsc_area_sei);
7442 /* for debug purposes, check for problems */
7443 if (chsc_area_sei->response_block.response_code == 0x0003) {
7444 #ifdef CONFIG_DEBUG_CHSC
7445 printk( KERN_WARNING
7446 "s390_process_css: error in chsc request block!\n");
7447 #endif /* CONFIG_DEBUG_CHSC */
7449 "s390_process_css: "
7450 "error in chsc request block!\n");
7453 if (chsc_area_sei->response_block.response_code == 0x0005) {
7454 #ifdef CONFIG_DEBUG_CHSC
7455 printk( KERN_WARNING
7456 "s390_process_css: no event information stored\n");
7457 #endif /* CONFIG_DEBUG_CHSC */
7459 "s390_process_css: "
7460 "no event information stored\n");
7463 if (chsc_area_sei->response_block.response_code == 0x0002) {
7464 #ifdef CONFIG_DEBUG_CHSC
7465 printk( KERN_WARNING
7466 "s390_process_css: invalid command!\n");
7467 #endif /* CONFIG_DEBUG_CHSC */
7469 "s390_process_css: "
7470 "invalid command!\n");
/* 0x0001 is the only success code; anything else is bailed on. */
7473 if (chsc_area_sei->response_block.response_code != 0x0001) {
7474 #ifdef CONFIG_DEBUG_CHSC
7475 printk( KERN_WARNING
7476 "s390_process_css: unknown response code %d\n",
7477 chsc_area_sei->response_block.response_code);
7478 #endif /* CONFIG_DEBUG_CHSC */
7480 "s390_process_css: unknown response "
7482 chsc_area_sei->response_block.response_code);
7486 #ifdef CONFIG_DEBUG_CHSC
7488 "s390_process_css: "
7489 "event information successfully stored\n");
7490 #endif /* CONFIG_DEBUG_CHSC */
7492 "s390_process_css: "
7493 "event information successfully stored\n");
7495 /* Check if there is more event information pending. */
7496 if (chsc_area_sei->response_block.response_block_data.
7497 sei_res.flags & 0x80) {
7498 #ifdef CONFIG_DEBUG_CHSC
7499 printk(KERN_INFO"s390_process_css: further event "
7500 "information pending...\n");
7501 #endif /* CONFIG_DEBUG_CHSC */
7502 CIO_CRW_EVENT( 2, "further event information pending\n");
7507 /* Check if we might have lost some information. */
7508 if (chsc_area_sei->response_block.response_block_data.
7509 sei_res.flags & 0x40) {
7510 #ifdef CONFIG_DEBUG_CHSC
7511 printk(KERN_ERR"s390_process_css: Event information has "
7512 "been lost due to overflow!\n");
7513 #endif /* CONFIG_DEBUG_CHSC */
7514 CIO_CRW_EVENT( 2, "Event information has "
7515 "been lost due to overflow!\n");
/* Reporting-source code 4 means "chpid"; ignore everything else. */
7518 if (chsc_area_sei->response_block.
7519 response_block_data.sei_res.rs != 4) {
7520 #ifdef CONFIG_DEBUG_CHSC
7522 "s390_process_css: "
7523 "reporting source (%04X) isn't a chpid!\n",
7524 chsc_area_sei->response_block.
7525 response_block_data.sei_res.rsid);
7526 #endif /* CONFIG_DEBUG_CHSC */
7528 "s390_process_css: "
7529 "reporting source (%04X) isn't a chpid!\n",
7530 chsc_area_sei->response_block.
7531 response_block_data.sei_res.rsid);
7535 /* which kind of information was stored? */
7536 switch (chsc_area_sei->response_block.
7537 response_block_data.sei_res.cc) {
7538 case 1: /* link incident*/
7539 #ifdef CONFIG_DEBUG_CHSC
7541 "s390_process_css: "
7542 "channel subsystem reports link incident,"
7543 " source is chpid %x\n",
7544 chsc_area_sei->response_block.
7545 response_block_data.sei_res.rsid);
7546 #endif /* CONFIG_DEBUG_CHSC */
7548 "s390_process_css: "
7549 "channel subsystem reports "
7551 "source is chpid %x\n",
7552 chsc_area_sei->response_block.
7553 response_block_data.sei_res.rsid);
7555 chpid = __get_chpid_from_lir(chsc_area_sei->response_block.
7556 response_block_data.sei_res.
7559 s390_do_chpid_processing(chpid);
7562 case 2: /* i/o resource accessibiliy */
7563 #ifdef CONFIG_DEBUG_CHSC
7565 "s390_process_css: channel subsystem "
7566 "reports some I/O devices "
7567 "may have become accessible\n");
7568 #endif /* CONFIG_DEBUG_CHSC */
7570 "s390_process_css: "
7571 "channel subsystem reports "
7573 "may have become accessible\n");
7574 #ifdef CONFIG_DEBUG_CHSC
7576 "Data received after sei: \n");
7578 "Validity flags: %x\n",
7579 chsc_area_sei->response_block.
7580 response_block_data.sei_res.vf);
7581 #endif /* CONFIG_DEBUG_CHSC */
/* Validity flags choose the precision of the accessibility info:
 * chpid only, chpid + link address, or chpid + full link address. */
7582 if ((chsc_area_sei->response_block.
7583 response_block_data.sei_res.vf&0x80)
7585 #ifdef CONFIG_DEBUG_CHSC
7586 printk( KERN_DEBUG "chpid: %x\n",
7587 chsc_area_sei->response_block.
7588 response_block_data.sei_res.rsid);
7589 #endif /* CONFIG_DEBUG_CHSC */
7590 s390_do_res_acc_processing
7591 (chsc_area_sei->response_block.
7592 response_block_data.sei_res.rsid,
7594 CHSC_SEI_ACC_CHPID);
7595 } else if ((chsc_area_sei->response_block.
7596 response_block_data.sei_res.vf&0xc0)
7598 #ifdef CONFIG_DEBUG_CHSC
7600 "chpid: %x link addr: %x\n",
7601 chsc_area_sei->response_block.
7602 response_block_data.sei_res.rsid,
7603 chsc_area_sei->response_block.
7604 response_block_data.sei_res.fla);
7605 #endif /* CONFIG_DEBUG_CHSC */
7606 s390_do_res_acc_processing
7607 (chsc_area_sei->response_block.
7608 response_block_data.sei_res.rsid,
7609 chsc_area_sei->response_block.
7610 response_block_data.sei_res.fla,
7611 CHSC_SEI_ACC_LINKADDR);
7612 } else if ((chsc_area_sei->response_block.
7613 response_block_data.sei_res.vf & 0xc0)
7615 #ifdef CONFIG_DEBUG_CHSC
7618 "full link addr: %x\n",
7619 chsc_area_sei->response_block.
7620 response_block_data.sei_res.rsid,
7621 chsc_area_sei->response_block.
7622 response_block_data.sei_res.fla);
7623 #endif /* CONFIG_DEBUG_CHSC */
7624 s390_do_res_acc_processing
7625 (chsc_area_sei->response_block.
7626 response_block_data.sei_res.rsid,
7627 chsc_area_sei->response_block.
7628 response_block_data.sei_res.fla,
7629 CHSC_SEI_ACC_FULLLINKADDR);
7631 #ifdef CONFIG_DEBUG_CHSC
7632 printk( KERN_DEBUG "\n");
7633 #endif /* CONFIG_DEBUG_CHSC */
7637 default: /* other stuff */
7638 #ifdef CONFIG_DEBUG_CHSC
7640 "s390_process_css: event %d\n",
7641 chsc_area_sei->response_block.
7642 response_block_data.sei_res.cc);
7643 #endif /* CONFIG_DEBUG_CHSC */
7645 "s390_process_css: event %d\n",
7646 chsc_area_sei->response_block.
7647 response_block_data.sei_res.cc);
7654 spin_unlock(&chsc_lock_sei);
/*
 * __process_chp_gone - one subchannel's reaction to a vanished chpid.
 *
 * For each path of irq that matches chpid: refresh the schib (device is
 * marked gone if stsch fails), kill outstanding I/O, notify the driver
 * with DEVSTAT_NOT_ACC(_ERR) and schedule path verification.
 * Called from s390_process_chp_source() for "path gone" CRWs.
 *
 * NOTE(review): extract is incomplete (loop head over paths, mask setup
 * and some braces are not visible here).
 */
7659 __process_chp_gone(int irq, int chpid)
7661 schib_t *schib = &ioinfo[irq]->schib;
7669 if (schib->pmcw.chpid[i] != chpid)
/* stsch failure => the whole device is gone, not just the path. */
7672 if (stsch(irq, schib) != 0) {
7673 ioinfo[irq]->ui.flags.oper = 0;
7674 if (ioinfo[irq]->nopfunc)
7675 ioinfo[irq]->nopfunc(irq, DEVSTAT_DEVICE_GONE);
7679 s390irq_spin_lock(irq);
7681 ioinfo[irq]->ui.flags.noio = 1;
7683 /* Do we still expect an interrupt for outstanding io? */
7684 if (ioinfo[irq]->ui.flags.busy) {
7685 int rck = __check_for_io_and_kill(irq, mask, 1);
7686 if (rck & CIO_PATHGONE_WAIT4INT)
7688 if (rck & CIO_PATHGONE_IOERR)
7690 if (rck & CIO_PATHGONE_DEVGONE)
7694 s390irq_spin_unlock(irq);
7696 /* Tell the device driver not to disturb us. */
7697 if (ioinfo[irq]->ui.flags.ready &&
7698 schib->pmcw.pim != 0x80 &&
7699 ioinfo[irq]->nopfunc &&
7700 ioinfo[irq]->ui.flags.notacccap) {
7702 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC_ERR);
7704 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7710 if (ioinfo[irq]->ui.flags.ready) {
7711 s390_schedule_path_verification(irq);
7713 ioinfo[irq]->ui.flags.noio = 0;
7714 ioinfo[irq]->ui.flags.killio = 0;
/*
 * __process_chp_come - one subchannel's reaction to a re-appearing chpid.
 *
 * For each path of irq matching chpid: quiesce the driver, let any
 * outstanding I/O finish via interrupt, then schedule path verification
 * to pick up the newly available path. Counterpart of __process_chp_gone.
 *
 * NOTE(review): extract is incomplete (loop head over paths and some
 * braces are not visible here).
 */
7722 __process_chp_come(int irq, int chpid)
7724 schib_t *schib = &ioinfo[irq]->schib;
7729 if (schib->pmcw.chpid[i] != chpid)
7732 /* Tell the device driver not to disturb us. */
7733 if (ioinfo[irq]->ui.flags.ready &&
7734 schib->pmcw.pim != 0x80 &&
7735 ioinfo[irq]->nopfunc &&
7736 ioinfo[irq]->ui.flags.notacccap)
7737 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
7739 ioinfo[irq]->ui.flags.noio = 1;
7741 /* Do we still expect an interrupt for outstanding io? */
7742 if (ioinfo[irq]->ui.flags.busy)
7743 /* Wait for interrupt. */
7746 if (ioinfo[irq]->ui.flags.ready)
7747 s390_schedule_path_verification(irq);
7749 ioinfo[irq]->ui.flags.noio = 0;
/*
 * s390_process_chp_source - handle a channel-path CRW.
 *
 * onoff selects the direction: path gone (clear chpid from the available
 * mask, run __process_chp_gone on all known I/O subchannels) or path come
 * (mark chpid available/known, validate/recognize devices that may have
 * become reachable and run __process_chp_come on known ones).
 *
 * NOTE(review): extract is incomplete - the onoff branching and several
 * braces are not visible here.
 */
7756 s390_process_chp_source(int chpid, int onoff)
7762 sprintf(dbf_txt, "prchp%x", chpid);
7763 CIO_TRACE_EVENT(2, dbf_txt);
7767 clear_bit(chpid, &chpids);
7769 set_bit(chpid, &chpids);
7770 set_bit(chpid, &chpids_known);
7772 #endif /* CONFIG_CHSC */
/* Path gone: only already-known subchannels can be affected. */
7775 for (irq=0;irq<=highest_subchannel;irq++) {
7777 if ((ioinfo[irq] == INVALID_STORAGE_AREA)
7778 || (ioinfo[irq]->st != 0))
7781 __process_chp_gone(irq, chpid);
/* Path come: new devices may be reachable, so scan the full range. */
7786 for (irq=0;irq<__MAX_SUBCHANNELS;irq++) {
7788 if (ioinfo[irq] == INVALID_STORAGE_AREA) {
7789 ret = s390_validate_subchannel(irq,0);
7791 if (irq > highest_subchannel)
7792 highest_subchannel = irq;
7793 #ifdef CONFIG_DEBUG_CRW
7794 printk(KERN_DEBUG"process_chp_source: Found "
7795 "device on irq %x\n", irq);
7796 #endif /* CONFIG_DEBUG_CRW */
7797 CIO_CRW_EVENT(4, "Found device on irq %x\n",
7799 s390_device_recognition_irq(irq);
7801 } else if (ioinfo[irq]->st == 0) {
7802 ret = stsch(irq, &ioinfo[irq]->schib);
7809 /* We're through. */
7815 __process_chp_come(irq, chpid);
7821 * s390_do_crw_pending
7823 * Called by the machine check handler to process CRW pending
7824 * conditions. It may be a single CRW, or CRWs may be chained.
7826 * Note : we currently process CRWs for subchannel source only
/*
 * s390_do_crw_pending - walk a (possibly chained) list of channel report
 * words and dispatch each to the matching handler by reporting-source
 * code: subchannel CRWs go to s390_process_subchannel_source(), channel
 * path CRWs to s390_process_chp_source() (erc decides come vs. gone);
 * monitoring facility, configuration-alert and channel subsystem sources
 * are logged (css handling lines are not visible in this extract).
 */
7829 s390_do_crw_pending (crwe_t * pcrwe)
7834 #ifdef CONFIG_DEBUG_CRW
7835 printk (KERN_DEBUG "do_crw_pending : starting ...\n");
7837 CIO_CRW_EVENT( 2, "do_crw_pending: starting\n");
7838 while (pcrwe != NULL) {
7840 switch (pcrwe->crw.rsc) {
/* rsc = subchannel: rsid is the subchannel (irq) number. */
7843 irq = pcrwe->crw.rsid;
7845 #ifdef CONFIG_DEBUG_CRW
7846 printk (KERN_NOTICE "do_crw_pending : source is "
7847 "subchannel %04X\n", irq);
7849 CIO_CRW_EVENT(2, "source is subchannel %04X\n",
7851 s390_process_subchannel_source (irq);
7855 case CRW_RSC_MONITOR:
7857 #ifdef CONFIG_DEBUG_CRW
7858 printk (KERN_NOTICE "do_crw_pending : source is "
7859 "monitoring facility\n");
7861 CIO_CRW_EVENT(2, "source is monitoring facility\n");
/* rsc = channel path: rsid is the chpid; erc tells come/gone. */
7866 chpid = pcrwe->crw.rsid;
7868 #ifdef CONFIG_DEBUG_CRW
7869 printk (KERN_NOTICE "do_crw_pending : source is "
7870 "channel path %02X\n", chpid);
7872 CIO_CRW_EVENT(2, "source is channel path %02X\n",
7874 switch (pcrwe->crw.erc) {
7875 case CRW_ERC_IPARM: /* Path has come. */
7876 s390_process_chp_source(chpid, 1);
7878 case CRW_ERC_PERRI: /* Path has gone. */
7879 s390_process_chp_source(chpid, 0);
7882 #ifdef CONFIG_DEBUG_CRW
7883 printk(KERN_WARNING"do_crw_pending: don't "
7884 "know how to handle erc=%x\n",
7886 #endif /* CONFIG_DEBUG_CRW */
7887 CIO_CRW_EVENT(0, "don't know how to handle "
7888 "erc=%x\n", pcrwe->crw.erc);
7892 case CRW_RSC_CONFIG:
7894 #ifdef CONFIG_DEBUG_CRW
7895 printk (KERN_NOTICE "do_crw_pending : source is "
7896 "configuration-alert facility\n");
7898 CIO_CRW_EVENT(2, "source is configuration-alert facility\n");
7903 #ifdef CONFIG_DEBUG_CRW
7904 printk (KERN_NOTICE "do_crw_pending : source is "
7905 "channel subsystem\n");
7907 CIO_CRW_EVENT(2, "source is channel subsystem\n");
7915 #ifdef CONFIG_DEBUG_CRW
7917 "do_crw_pending : unknown source\n");
7919 CIO_CRW_EVENT( 2, "unknown source\n");
/* Advance to the next chained CRW element. */
7924 pcrwe = pcrwe->crwe_next;
7928 #ifdef CONFIG_DEBUG_CRW
7929 printk (KERN_DEBUG "do_crw_pending : done\n");
7931 CIO_CRW_EVENT(2, "do_crw_pending: done\n");
7935 /* added by Holger Smolinski for reipl support in reipl.S */
7936 extern void do_reipl (int);
/*
 * Re-IPL support: releases all device-owned interrupts, then re-IPLs
 * from subchannel sch via do_reipl() (assembly, reipl.S); under VM the
 * "IPL" CP command is used instead. The function signature line is not
 * visible in this extract - presumably `reipl(int sch)`, TODO confirm.
 */
7941 s390_dev_info_t dev_info;
7943 for (i = 0; i <= highest_subchannel; i++) {
/* Free every irq a driver still owns so the re-IPL starts clean. */
7944 if (get_dev_info_by_irq (i, &dev_info) == 0
7945 && (dev_info.status & DEVSTAT_DEVICE_OWNED)) {
7946 free_irq (i, ioinfo[i]->irq_desc.dev_id);
7950 cpcmd ("IPL", NULL, 0);
/* 0x10000 flags a valid subchannel number for the reipl code. */
7952 do_reipl (0x10000 | sch);
7956 * Function: cio_debug_init
7957 * Initializes three debug logs (under /proc/s390dbf) for common I/O:
7958 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
7959 * - cio_trace logs the calling of different functions
7960 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
7961 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
/*
 * cio_debug_init - register the three common-I/O s390dbf debug logs
 * (cio_msg, cio_trace, cio_crw) with their views and level 6; marks
 * the facility initialized. Registered as an initcall.
 * Failure paths (debug_register returning NULL) are on lines not
 * visible in this extract.
 */
7964 cio_debug_init (void)
7968 cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16 * sizeof (long));
7969 if (cio_debug_msg_id != NULL) {
7970 debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
7971 debug_set_level (cio_debug_msg_id, 6);
7975 cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
7976 if (cio_debug_trace_id != NULL) {
7977 debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
7978 debug_set_level (cio_debug_trace_id, 6);
7982 cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16 * sizeof (long));
7983 if (cio_debug_crw_id != NULL) {
7984 debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
7985 debug_set_level (cio_debug_crw_id, 6);
7991 cio_debug_initialized = 1;
7995 __initcall (cio_debug_init);
7997 #ifdef CONFIG_PROC_FS
8000 * Function: cio_parse_chpids_proc_parameters
8001 * parse the stuff piped to /proc/chpids
/*
 * cio_parse_chpids_proc_parameters - parse "on <chpid>" / "off <chpid>"
 * written to /proc/chpids and vary the chpid logically online/offline
 * via s390_vary_chpid().
 *
 * The skip loops differ deliberately: 3 iterations for "on " vs. 4 for
 * "off " - presumably advancing buf past the keyword one character per
 * iteration (advance statements not visible in this extract; confirm).
 */
8004 cio_parse_chpids_proc_parameters(char* buf)
8010 if (strstr(buf, "on ")) {
8011 for (i=0; i<3; i++) {
8014 cp = blacklist_strtoul(buf, &buf);
/* chpid status is needed before we can vary anything. */
8016 chsc_get_sch_descriptions();
8017 if (!cio_chsc_desc_avail) {
8018 printk(KERN_ERR "Could not get chpid status, "
8019 "vary on/off not available\n");
/* Only vary online if it is not already in the available mask. */
8023 if (!test_bit(cp, &chpids)) {
8024 ret = s390_vary_chpid(cp, 1);
8025 if (ret == -EINVAL) {
8026 #ifdef CONFIG_DEBUG_CHSC
8027 printk(KERN_ERR "/proc/chpids: "
8028 "Invalid chpid specified\n");
8029 #else /* CONFIG_DEBUG_CHSC */
8030 printk(KERN_DEBUG "/proc/chpids: "
8031 "Invalid chpid specified\n");
8032 #endif /* CONFIG_DEBUG_CHSC */
8033 } else if (ret == 0) {
8034 printk(KERN_INFO "/proc/chpids: "
8035 "Varied chpid %x logically online\n",
8039 printk(KERN_ERR "/proc/chpids: chpid %x is "
8043 } else if (strstr(buf, "off ")) {
8044 for (i=0; i<4; i++) {
8047 cp = blacklist_strtoul(buf, &buf);
8049 chsc_get_sch_descriptions();
8050 if (!cio_chsc_desc_avail) {
8051 printk(KERN_ERR "Could not get chpid status, "
8052 "vary on/off not available\n");
/* Only vary offline if the chpid is currently available. */
8056 if (test_bit(cp, &chpids)) {
8057 ret = s390_vary_chpid(cp, 0);
8058 if (ret == -EINVAL) {
8059 #ifdef CONFIG_DEBUG_CHSC
8060 printk(KERN_ERR "/proc/chpids: "
8061 "Invalid chpid specified\n");
8062 #else /* CONFIG_DEBUG_CHSC */
8063 printk(KERN_DEBUG "/proc/chpids: "
8064 "Invalid chpid specified\n");
8065 #endif /* CONFIG_DEBUG_CHSC */
8066 } else if (ret == 0) {
8067 printk(KERN_INFO "/proc/chpids: "
8068 "Varied chpid %x logically offline\n",
8072 printk(KERN_ERR "/proc/chpids: "
8073 "chpid %x is already offline\n",
8077 printk(KERN_ERR "/proc/chpids: Parse error; "
8078 "try using '{on,off} <chpid>'\n");
/*
 * __vary_chpid_offline - take chpid logically offline for subchannel irq.
 *
 * Unlike __process_chp_gone the path still physically exists, so pending
 * I/O is checked with __check_for_io_and_kill(..., 0) (no forced kill)
 * under the irq lock with interrupts disabled; the driver is quiesced
 * and path verification scheduled. Helper for s390_vary_chpid().
 *
 * NOTE(review): extract is incomplete (loop over paths / mask setup not
 * visible here).
 */
8083 __vary_chpid_offline(int irq, int chpid)
8085 schib_t *schib = &ioinfo[irq]->schib;
8091 unsigned long flags;
/* Vary uses the ssd (chsc) chpid info, not the schib pmcw array. */
8093 if (ioinfo[irq]->ssd_info.chpid[i] != chpid)
8096 s390irq_spin_lock_irqsave(irq, flags);
8098 ioinfo[irq]->ui.flags.noio = 1;
8100 /* Hmm, the path is not really gone... */
8101 if (ioinfo[irq]->ui.flags.busy) {
8102 if (__check_for_io_and_kill(irq, mask, 0) != 0)
8106 s390irq_spin_unlock_irqrestore(irq, flags);
8108 /* Tell the device driver not to disturb us. */
8109 if (ioinfo[irq]->ui.flags.ready &&
8110 schib->pmcw.pim != 0x80 &&
8111 ioinfo[irq]->nopfunc &&
8112 ioinfo[irq]->ui.flags.notacccap)
8113 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
8118 if (ioinfo[irq]->ui.flags.ready)
8119 s390_schedule_path_verification(irq);
8121 ioinfo[irq]->ui.flags.noio = 0;
/*
 * __vary_chpid_online - bring chpid logically online for subchannel irq.
 *
 * Quiesces the driver, waits out busy I/O via interrupt and schedules
 * path verification to pick the path up again. Helper for
 * s390_vary_chpid(); counterpart of __vary_chpid_offline.
 *
 * NOTE(review): extract is incomplete (loop over paths and some braces
 * are not visible here).
 */
8129 __vary_chpid_online(int irq, int chpid)
8131 schib_t *schib = &ioinfo[irq]->schib;
8136 if (schib->pmcw.chpid[i] != chpid)
8139 /* Tell the device driver not to disturb us. */
8140 if (ioinfo[irq]->ui.flags.ready &&
8141 schib->pmcw.pim != 0x80 &&
8142 ioinfo[irq]->nopfunc &&
8143 ioinfo[irq]->ui.flags.notacccap)
8144 ioinfo[irq]->nopfunc(irq, DEVSTAT_NOT_ACC);
8146 ioinfo[irq]->ui.flags.noio = 1;
8148 /* Do we still expect an interrupt for outstanding io? */
8149 if (ioinfo[irq]->ui.flags.busy)
8150 /* Wait for interrupt. */
8153 s390_schedule_path_verification(irq);
8161 * Function: s390_vary_chpid
8162 * Varies the specified chpid online or offline
/*
 * s390_vary_chpid - vary channel path chpid logically online (on != 0)
 * or offline (on == 0).
 *
 * Rejects unknown chpids and no-op requests, flips the chpids_logical /
 * chpids bits, then redoes path verification on every known I/O
 * subchannel connected to the chpid via the per-irq vary helpers.
 * Returns 0 on success, -EINVAL on bad input (return statements sit on
 * lines not visible in this extract).
 *
 * NOTE(review): `chpid <= 0` can never be true for negative values since
 * chpid is __u8, and it rejects chpid 0, which is a valid channel path
 * id - the intended range check deserves a second look.
 */
8165 s390_vary_chpid( __u8 chpid, int on)
8170 if ((chpid <=0) || (chpid >= NR_CHPIDS))
8173 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
8174 CIO_TRACE_EVENT( 2, dbf_text);
8176 if (!test_bit(chpid, &chpids_known)) {
8177 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
/* Varying in the already-current direction is refused. */
8181 if (on && test_bit(chpid, &chpids_logical)) {
8182 printk(KERN_ERR "chpid %02X already logically online\n",
8187 if (!on && !test_bit(chpid, &chpids_logical)) {
8188 printk(KERN_ERR "chpid %02X already logically offline\n",
8194 set_bit(chpid, &chpids_logical);
8195 set_bit(chpid, &chpids);
8198 clear_bit(chpid, &chpids_logical);
8199 clear_bit(chpid, &chpids);
8203 * Redo PathVerification on the devices the chpid connects to
8206 for (irq=0;irq<=highest_subchannel;irq++) {
8208 if (ioinfo[irq] == INVALID_STORAGE_AREA)
8211 if (ioinfo[irq]->st)
8214 if (!ioinfo[irq]->ssd_info.valid)
8218 __vary_chpid_online(irq, chpid);
8220 __vary_chpid_offline(irq, chpid);
8226 #endif /* CONFIG_CHSC */
8229 * Display info on subchannels in /proc/subchannels.
8230 * Adapted from procfs stuff in dasd.c by Cornelia Huck, 02/28/01.
/*
 * MIN - return the smaller of two values.
 *
 * Implemented as a GNU statement expression (this file is built with
 * gcc) so each argument is evaluated exactly once.  The naive form
 * ((a)<(b)?(a):(b)) evaluates the selected argument twice, which is
 * wrong for arguments with side effects such as MIN(i++, n).
 * Works for mixed operand types (the callers here compare a size_t
 * request length against a stored buffer length).
 */
#define MIN(a, b) ({ __typeof__(a) _min_a = (a); \
		__typeof__(b) _min_b = (b); \
		_min_a < _min_b ? _min_a : _min_b; })
/* proc directory entry backing /proc/subchannels (set up in chan_proc_init). */
8240 static struct proc_dir_entry *chan_subch_entry;
/*
 * chan_subch_open - open handler for /proc/subchannels.
 *
 * Renders one formatted table line per operational I/O subchannel
 * (devno, irq, device and CU type/model, in-use flag, PIM/PAM/POM,
 * chpids) into a vmalloc'ed buffer stored in file->private_data, to be
 * served by chan_subch_read and freed by chan_subch_close.
 * Buffer is sized at 128 bytes per subchannel plus a header.
 */
8243 chan_subch_open (struct inode *inode, struct file *file)
8252 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8254 printk (KERN_WARNING "No memory available for data\n");
8257 file->private_data = (void *) info;
8260 size += (highest_subchannel + 1) * 128;
8261 info->data = (char *) vmalloc (size);
8263 if (size && info->data == NULL) {
8264 printk (KERN_WARNING "No memory available for data\n");
8269 len += sprintf (info->data + len,
8270 "Device sch. Dev Type/Model CU in use PIM PAM POM CHPIDs\n");
8271 len += sprintf (info->data + len,
8272 "---------------------------------------------------------------------\n");
/* Only known, valid, operational I/O subchannels are listed. */
8274 for (i = 0; i <= highest_subchannel; i++) {
8275 if (!((ioinfo[i] == NULL) || (ioinfo[i] == INVALID_STORAGE_AREA)
8276 || (ioinfo[i]->st )|| !(ioinfo[i]->ui.flags.oper))) {
8278 sprintf (info->data + len, "%04X %04X ",
8279 ioinfo[i]->schib.pmcw.dev, i);
/* Device type 0 means sense-id gave CU info only. */
8280 if (ioinfo[i]->senseid.dev_type != 0) {
8281 len += sprintf (info->data + len,
8282 "%04X/%02X %04X/%02X",
8283 ioinfo[i]->senseid.dev_type,
8284 ioinfo[i]->senseid.dev_model,
8285 ioinfo[i]->senseid.cu_type,
8286 ioinfo[i]->senseid.cu_model);
8288 len += sprintf (info->data + len,
8290 ioinfo[i]->senseid.cu_type,
8291 ioinfo[i]->senseid.cu_model);
8293 if (ioinfo[i]->ui.flags.ready) {
8294 len += sprintf (info->data + len, " yes ");
8296 len += sprintf (info->data + len, " ");
8298 len += sprintf (info->data + len,
8300 ioinfo[i]->schib.pmcw.pim,
8301 ioinfo[i]->schib.pmcw.pam,
8302 ioinfo[i]->schib.pmcw.pom);
8303 for (j = 0; j < 8; j++) {
8304 len += sprintf (info->data + len,
8306 ioinfo[i]->schib.pmcw.chpid[j]);
8308 len += sprintf (info->data + len, " ");
8311 len += sprintf (info->data + len, "\n");
/*
 * chan_subch_close - release handler for /proc/subchannels; frees the
 * snapshot buffer built by chan_subch_open (freeing of the tempinfo_t
 * itself is on lines not visible in this extract).
 */
8323 chan_subch_close (struct inode *inode, struct file *file)
8323 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8327 vfree (p_info->data);
/*
 * chan_subch_read - read handler for /proc/subchannels; copies the
 * requested slice of the buffer prepared at open time to user space
 * and advances *offset. Out-of-range offsets yield EOF (return on a
 * line not visible in this extract).
 */
8335 chan_subch_read (struct file *file, char *user_buf, size_t user_len,
8339 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8340 loff_t pos = *offset;
8342 if (pos < 0 || pos >= p_info->len) {
8345 len = MIN (user_len, (p_info->len - pos));
8346 if (copy_to_user (user_buf, &(p_info->data[pos]), len))
8348 *offset = pos + len;
/* file_operations for /proc/subchannels (GNU-style labeled initializers). */
8353 static struct file_operations chan_subch_file_ops = {
8354 read:chan_subch_read, open:chan_subch_open, release:chan_subch_close,
/*
 * chan_proc_init - register /proc/subchannels at boot (initcall).
 * NOTE(review): the create_proc_entry() result is dereferenced without
 * a NULL check - an allocation failure here would oops.
 */
8358 chan_proc_init (void)
8361 create_proc_entry ("subchannels", S_IFREG | S_IRUGO, &proc_root);
8362 chan_subch_entry->proc_fops = &chan_subch_file_ops;
8367 __initcall (chan_proc_init);
/* chan_proc_cleanup - unregister /proc/subchannels. */
8370 chan_proc_cleanup (void)
8372 remove_proc_entry ("subchannels", &proc_root);
8376 * Display device specific information under /proc/deviceinfo/<devno>
8377 */ static struct proc_dir_entry *cio_procfs_deviceinfo_root = NULL;
8380 * cio_procfs_device_list holds all devno-specific procfs directories
/*
 * Per-device procfs bookkeeping: one cio_procfs_entry_t per devno with
 * its directory and the sensedata/in_use/chpids file entries (the
 * opening `typedef struct {` and the devno member are on lines not
 * visible in this extract).
 */
8385 struct proc_dir_entry *cio_device_entry;
8386 struct proc_dir_entry *cio_sensedata_entry;
8387 struct proc_dir_entry *cio_in_use_entry;
8388 struct proc_dir_entry *cio_chpid_entry;
8389 } cio_procfs_entry_t;
/* Singly-linked list of per-device entries, kept sorted by devno
 * (see the insertion walk in cio_procfs_device_create). */
8391 typedef struct _cio_procfs_device {
8392 struct _cio_procfs_device *next;
8393 cio_procfs_entry_t *entry;
8394 } cio_procfs_device_t;
8396 cio_procfs_device_t *cio_procfs_device_list = NULL;
/*
 * cio_device_entry_close - shared release handler for the per-device
 * /proc/deviceinfo files; frees the buffer built by the open handler.
 */
8403 cio_device_entry_close (struct inode *inode, struct file *file)
8406 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8410 vfree (p_info->data);
/*
 * cio_device_entry_read - shared read handler for the per-device
 * /proc/deviceinfo files; serves the open-time snapshot, mirroring
 * chan_subch_read.
 */
8418 cio_device_entry_read (struct file *file, char *user_buf, size_t user_len,
8422 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8423 loff_t pos = *offset;
8425 if (pos < 0 || pos >= p_info->len) {
8428 len = MIN (user_len, (p_info->len - pos));
8429 if (copy_to_user (user_buf, &(p_info->data[pos]), len))
8431 *offset = pos + len;
/*
 * cio_sensedata_entry_open - open handler for
 * /proc/deviceinfo/<devno>/sensedata.
 *
 * Recovers the devno from the parent directory name, maps it to an irq
 * and renders the sense-id data (device and CU type/model) into a
 * vmalloc'ed buffer for cio_device_entry_read.
 * NOTE(review): the kmalloc of devno_str is not checked for NULL before
 * the memset/memcpy below.
 */
8437 cio_sensedata_entry_open (struct inode *inode, struct file *file)
8447 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8449 printk (KERN_WARNING "No memory available for data\n");
8452 file->private_data = (void *) info;
8454 info->data = (char *) vmalloc (size);
8455 if (size && info->data == NULL) {
8456 printk (KERN_WARNING "No memory available for data\n");
/* The devno is the name of the parent /proc/deviceinfo/<devno> dir. */
8460 devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8461 memset (devno_str, 0, 6 * sizeof (char));
8463 file->f_dentry->d_parent->d_name.name,
8464 strlen (file->f_dentry->d_parent->d_name.name) +
8466 devno = simple_strtoul (devno_str, &devno_str, 16);
8467 irq = get_irq_by_devno (devno);
8470 sprintf (info->data + len,
/* dev_type == 0: sense-id only delivered control unit data. */
8472 if (ioinfo[irq]->senseid.dev_type == 0) {
8474 sprintf (info->data + len,
8476 ioinfo[irq]->senseid.
8478 ioinfo[irq]->senseid.
8482 sprintf (info->data + len,
8484 ioinfo[irq]->senseid.
8486 ioinfo[irq]->senseid.
8489 sprintf (info->data + len,
8490 "CU Type/Mod: %04X/%02X\n",
8491 ioinfo[irq]->senseid.
8493 ioinfo[irq]->senseid.
/*
 * cio_in_use_entry_open - open handler for
 * /proc/deviceinfo/<devno>/in_use; reports "yes"/"no" depending on the
 * subchannel's ready flag. Same devno-from-directory-name scheme (and
 * the same unchecked kmalloc) as cio_sensedata_entry_open.
 */
8505 cio_in_use_entry_open (struct inode *inode, struct file *file)
8515 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8517 printk (KERN_WARNING "No memory available for data\n");
8520 file->private_data = (void *) info;
8522 info->data = (char *) vmalloc (size);
8523 if (size && info->data == NULL) {
8524 printk (KERN_WARNING "No memory available for data\n");
8528 devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8529 memset (devno_str, 0, 6 * sizeof (char));
8531 file->f_dentry->d_parent->d_name.name,
8532 strlen (file->f_dentry->d_parent->d_name.name) +
8534 devno = simple_strtoul (devno_str, &devno_str, 16);
8535 irq = get_irq_by_devno (devno);
8538 sprintf (info->data + len, "%s\n",
8539 ioinfo[irq]->ui.flags.
8540 ready ? "yes" : "no");
/*
 * cio_chpid_entry_open - open handler for
 * /proc/deviceinfo/<devno>/chpids; lists the 8 chpids from the
 * subchannel's pmcw, one per line. Same devno-from-directory-name
 * scheme (and the same unchecked kmalloc) as the sibling open handlers.
 */
8550 cio_chpid_entry_open (struct inode *inode, struct file *file)
8561 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8563 printk (KERN_WARNING "No memory available for data\n");
8566 file->private_data = (void *) info;
8568 info->data = (char *) vmalloc (size);
8569 if (size && info->data == NULL) {
8570 printk (KERN_WARNING "No memory available for data\n");
8574 devno_str = kmalloc (6 * sizeof (char), GFP_KERNEL);
8575 memset (devno_str, 0, 6 * sizeof (char));
8577 file->f_dentry->d_parent->d_name.name,
8578 strlen (file->f_dentry->d_parent->d_name.name) +
8580 devno = simple_strtoul (devno_str, &devno_str, 16);
8581 irq = get_irq_by_devno (devno);
8583 for (i = 0; i < 8; i++) {
8585 sprintf (info->data + len,
8588 sprintf (info->data + len, "%02X\n",
8589 ioinfo[irq]->schib.pmcw.
/*
 * file_operations for the three per-device proc files; read and release
 * are shared, only the open handler (which builds the content) differs.
 */
8600 static struct file_operations cio_sensedata_entry_file_ops = {
8601 read:cio_device_entry_read, open:cio_sensedata_entry_open,
8602 release:cio_device_entry_close,
8605 static struct file_operations cio_in_use_entry_file_ops = {
8606 read:cio_device_entry_read, open:cio_in_use_entry_open,
8607 release:cio_device_entry_close,
8610 static struct file_operations cio_chpid_entry_file_ops = {
8611 read:cio_device_entry_read, open:cio_chpid_entry_open,
8612 release:cio_device_entry_close,
8616 * Function: cio_procfs_device_create
8617 * create procfs entry for given device number
8618 * and insert it into list
/*
 * cio_procfs_device_create - create /proc/deviceinfo/<devno> with its
 * sensedata, in_use and chpids files, and insert the bookkeeping node
 * into cio_procfs_device_list sorted by devno.
 *
 * NOTE(review): the create_proc_entry() results are dereferenced
 * without NULL checks when setting proc_fops; an allocation failure
 * there would oops. On the kmalloc failure paths only the proc dir is
 * removed - presumably `entry` is freed on lines not visible in this
 * extract; verify no leak.
 */
8621 cio_procfs_device_create (int devno)
8623 cio_procfs_entry_t *entry;
8624 cio_procfs_device_t *tmp;
8625 cio_procfs_device_t *where;
8630 /* create the directory entry */
8632 (cio_procfs_entry_t *) kmalloc (sizeof (cio_procfs_entry_t),
8635 entry->devno = devno;
8636 sprintf (buf, "%x", devno);
8637 entry->cio_device_entry =
8638 proc_mkdir (buf, cio_procfs_deviceinfo_root);
8640 if (entry->cio_device_entry) {
8641 tmp = (cio_procfs_device_t *)
8642 kmalloc (sizeof (cio_procfs_device_t), GFP_KERNEL);
/* Insert sorted by devno: empty list, or walk to insertion point. */
8646 if (cio_procfs_device_list == NULL) {
8647 cio_procfs_device_list = tmp;
8650 where = cio_procfs_device_list;
8651 i = where->entry->devno;
8653 && (where->next != NULL)) {
8654 where = where->next;
8655 i = where->entry->devno;
8657 if (where->next == NULL) {
8661 tmp->next = where->next;
8665 /* create the different entries */
8666 entry->cio_sensedata_entry =
8667 create_proc_entry ("sensedata",
8669 entry->cio_device_entry);
8670 entry->cio_sensedata_entry->proc_fops =
8671 &cio_sensedata_entry_file_ops;
8672 entry->cio_in_use_entry =
8673 create_proc_entry ("in_use",
8675 entry->cio_device_entry);
8676 entry->cio_in_use_entry->proc_fops =
8677 &cio_in_use_entry_file_ops;
8678 entry->cio_chpid_entry =
8679 create_proc_entry ("chpids",
8681 entry->cio_device_entry);
8682 entry->cio_chpid_entry->proc_fops =
8683 &cio_chpid_entry_file_ops;
8685 printk (KERN_WARNING
8686 "Error, could not allocate procfs structure!\n");
8687 remove_proc_entry (buf,
8688 cio_procfs_deviceinfo_root);
8693 printk (KERN_WARNING
8694 "Error, could not allocate procfs structure!\n");
8700 printk (KERN_WARNING
8701 "Error, could not allocate procfs structure!\n");
/*
 * cio_procfs_device_remove - remove the /proc/deviceinfo/<devno>/ subtree
 * for the given device number and unlink it from cio_procfs_device_list.
 */
8708 * Function: cio_procfs_device_remove
8709 * remove procfs entry for given device number
8712 cio_procfs_device_remove (int devno)
8715 cio_procfs_device_t *tmp;
8716 cio_procfs_device_t *prev = NULL;
8718 tmp = cio_procfs_device_list;
8720 if (tmp->entry->devno == devno)
/* Remove the per-device files before removing the directory itself. */
8728 remove_proc_entry ("sensedata", tmp->entry->cio_device_entry);
8729 remove_proc_entry ("in_use", tmp->entry->cio_device_entry);
/*
 * BUGFIX: the entry is created as "chpids" in cio_procfs_device_create
 * (create_proc_entry ("chpids", ...)); removing "chpid" here never
 * matched, leaking the "chpids" proc entry on device removal.
 */
8730 remove_proc_entry ("chpids", tmp->entry->cio_device_entry);
8731 sprintf (buf, "%x", devno);
8732 remove_proc_entry (buf, cio_procfs_deviceinfo_root);
/* Unlink tmp from the singly-linked device list. */
8734 if (tmp == cio_procfs_device_list) {
8735 cio_procfs_device_list = tmp->next;
8737 prev->next = tmp->next;
/*
 * cio_procfs_device_purge - drop /proc/deviceinfo entries for devices
 * that are no longer operational (ui.flags.oper cleared).
 */
8749 * Function: cio_procfs_purge
8750 * purge /proc/deviceinfo of entries for gone devices
8754 cio_procfs_device_purge (void)
/* Scan every known subchannel; skip invalid ioinfo slots. */
8758 for (i = 0; i <= highest_subchannel; i++) {
8759 if (ioinfo[i] != INVALID_STORAGE_AREA) {
8760 if (!ioinfo[i]->ui.flags.oper)
8761 cio_procfs_device_remove (ioinfo[i]->devno);
/*
 * cio_procfs_create - initcall that creates /proc/deviceinfo/ and one
 * subdirectory per operational device, capped at MAX_CIO_PROCFS_ENTRIES.
 * Only runs when the cio_proc_devinfo flag is set.
 */
8768 * Function: cio_procfs_create
8769 * create /proc/deviceinfo/ and subdirs for the devices
8772 cio_procfs_create (void)
8776 if (cio_proc_devinfo) {
8778 cio_procfs_deviceinfo_root =
8779 proc_mkdir ("deviceinfo", &proc_root);
/* Warn once if there are more subchannels than we allow entries for. */
8781 if (highest_subchannel >= MAX_CIO_PROCFS_ENTRIES) {
8783 "Warning: Not enough inodes for creating all "
8784 "entries under /proc/deviceinfo/. "
8785 "Not every device will get an entry.\n");
8788 for (irq = 0; irq <= highest_subchannel; irq++) {
8789 if (irq >= MAX_CIO_PROCFS_ENTRIES)
8791 if (ioinfo[irq] != INVALID_STORAGE_AREA) {
/* Only operational devices get an entry; -ENOMEM is non-fatal. */
8792 if (ioinfo[irq]->ui.flags.oper)
8793 if (cio_procfs_device_create
8794 (ioinfo[irq]->devno) == -ENOMEM) {
8796 "Out of memory while creating "
8797 "entries in /proc/deviceinfo/, "
8798 "not all devices might show up\n");
8809 __initcall (cio_procfs_create);
8812 * Entry /proc/cio_ignore to display blacklisted ranges of devices.
8813 * un-ignore devices by piping to /proc/cio_ignore:
8814 * free all frees all blacklisted devices, free <range>,<range>,...
8815 * frees specified ranges of devnos
8816 * add <range>,<range>,... will add a range of devices to blacklist -
8817 * but only for devices not already known
8820 static struct proc_dir_entry *cio_ignore_proc_entry;
/*
 * open handler for /proc/cio_ignore: snapshot the blacklist bitmap
 * (bl_dev) into a vmalloc'd text buffer hung off file->private_data.
 * Buffer is sized from nr_ignored (6 bytes per devno).
 */
8822 cio_ignore_proc_open (struct inode *inode, struct file *file)
8831 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8833 printk (KERN_WARNING "No memory available for data\n");
8836 file->private_data = (void *) info;
8837 size += nr_ignored * 6;
8838 info->data = (char *) vmalloc (size);
8839 if (size && info->data == NULL) {
8840 printk (KERN_WARNING "No memory available for data\n");
/* Walk the bitmap under blacklist_lock, coalescing consecutive set
 * bits into "from-to" ranges in the output. */
8844 spin_lock_irqsave (&blacklist_lock, flags);
8845 for (i = 0; i <= highest_ignored; i++)
8846 if (test_bit (i, &bl_dev)) {
8848 sprintf (info->data + len, "%04x ",
8850 for (j = i; (j <= highest_ignored)
8851 && (test_bit (j, &bl_dev)); j++) ;
8855 sprintf (info->data + len,
8857 len += sprintf (info->data + len, "\n");
8860 spin_unlock_irqrestore (&blacklist_lock, flags);
/*
 * release handler for /proc/cio_ignore: free the snapshot buffer
 * allocated in cio_ignore_proc_open.
 */
8868 cio_ignore_proc_close (struct inode *inode, struct file *file)
8871 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8875 vfree (p_info->data);
/*
 * read handler for /proc/cio_ignore: copy out the text snapshot built
 * at open time, honouring the file offset.
 */
8883 cio_ignore_proc_read (struct file *file, char *user_buf, size_t user_len,
8887 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8888 loff_t pos = *offset;
8890 if (pos < 0 || pos >= p_info->len) {
/*
 * Consistency fix: use the snapshotted 'pos' instead of re-reading
 * *offset (matching cio_chpids_proc_read). Behavior is unchanged
 * since *offset is not modified before this point.
 */
8893 len = MIN (user_len, (p_info->len - pos));
8894 if (copy_to_user (user_buf, &(p_info->data[pos]), len))
8896 (*offset) = pos + len;
/*
 * write handler for /proc/cio_ignore: copy the user command ("free ..."
 * / "add ...") into a bounded, NUL-terminated kernel buffer and hand it
 * to blacklist_parse_proc_parameters().
 */
8902 cio_ignore_proc_write (struct file *file, const char *user_buf,
8903 size_t user_len, loff_t * offset)
/* Cap input at 64k to bound the vmalloc below. */
8907 if(user_len > 65536)
8910 buffer = vmalloc (user_len + 1);
8914 if (copy_from_user (buffer, user_buf, user_len)) {
8918 buffer[user_len] = '\0';
8919 #ifdef CONFIG_DEBUG_IO
8920 printk (KERN_DEBUG "/proc/cio_ignore: '%s'\n", buffer);
8921 #endif /* CONFIG_DEBUG_IO */
8923 blacklist_parse_proc_parameters (buffer);
/*
 * File operations for /proc/cio_ignore (readable snapshot, writable
 * command interface).
 * NOTE(review): the closing "};" of this initializer was missing in this
 * copy of the file; restored so the definition terminates properly.
 */
8929 static struct file_operations cio_ignore_proc_file_ops = {
8930 read:cio_ignore_proc_read, open:cio_ignore_proc_open,
8931 write:cio_ignore_proc_write, release:cio_ignore_proc_close,
};
/*
 * initcall: register /proc/cio_ignore (mode 0644).
 * NOTE(review): the result of create_proc_entry() is dereferenced on the
 * next statement without a NULL check — an allocation failure here would
 * oops; confirm against full source whether a check was elided.
 */
8935 cio_ignore_proc_init (void)
8937 cio_ignore_proc_entry =
8938 create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
8940 cio_ignore_proc_entry->proc_fops = &cio_ignore_proc_file_ops;
8945 __initcall (cio_ignore_proc_init);
8948 * Entry /proc/irq_count
8949 * display how many irqs have occured per cpu...
8952 static struct proc_dir_entry *cio_irq_proc_entry;
/*
 * open handler for /proc/irq_count: format the per-CPU interrupt
 * counters (s390_irq_count[]) into a vmalloc'd buffer hung off
 * file->private_data; 16 bytes reserved per CPU line.
 */
8955 cio_irq_proc_open (struct inode *inode, struct file *file)
8963 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
8965 printk (KERN_WARNING "No memory available for data\n");
8968 file->private_data = (void *) info;
8969 size += NR_CPUS * 16;
8970 info->data = (char *) vmalloc (size);
8971 if (size && info->data == NULL) {
8972 printk (KERN_WARNING "No memory available for data\n");
/* One hex line per CPU that has seen at least one interrupt. */
8976 for (i = 0; i < NR_CPUS; i++) {
8977 if (s390_irq_count[i] != 0)
8979 sprintf (info->data + len, "%lx\n",
/*
 * release handler for /proc/irq_count: free the snapshot buffer
 * allocated in cio_irq_proc_open.
 */
8989 cio_irq_proc_close (struct inode *inode, struct file *file)
8992 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
8996 vfree (p_info->data);
/*
 * read handler for /proc/irq_count: copy out the text snapshot built
 * at open time, honouring the file offset.
 */
9004 cio_irq_proc_read (struct file *file, char *user_buf, size_t user_len,
9008 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9009 loff_t pos = *offset;
9011 if (pos < 0 || pos >= p_info->len) {
/*
 * Consistency fix: use the snapshotted 'pos' instead of re-reading
 * *offset (matching cio_chpids_proc_read). Behavior is unchanged
 * since *offset is not modified before this point.
 */
9014 len = MIN (user_len, (p_info->len - pos));
9015 if (copy_to_user (user_buf, &(p_info->data[pos]), len))
9017 (*offset) = pos + len;
/*
 * File operations for the read-only /proc/irq_count entry.
 * NOTE(review): the closing "};" of this initializer was missing in this
 * copy of the file; restored so the definition terminates properly.
 */
9022 static struct file_operations cio_irq_proc_file_ops = {
9023 read:cio_irq_proc_read, open:cio_irq_proc_open,
9024 release:cio_irq_proc_close,
};
/*
 * initcall: zero the per-CPU counters and register read-only
 * /proc/irq_count, but only when irq counting (cio_count_irqs) is on.
 * NOTE(review): create_proc_entry() result is dereferenced without a
 * NULL check — same pattern as cio_ignore_proc_init; confirm.
 */
9028 cio_irq_proc_init (void)
9033 if (cio_count_irqs) {
9034 for (i = 0; i < NR_CPUS; i++)
9035 s390_irq_count[i] = 0;
9036 cio_irq_proc_entry =
9037 create_proc_entry ("irq_count", S_IFREG | S_IRUGO,
9039 cio_irq_proc_entry->proc_fops = &cio_irq_proc_file_ops;
9045 __initcall (cio_irq_proc_init);
9050 * /proc/chpids to display available chpids
9051 * vary chpids on/off by piping to it
9054 static struct proc_dir_entry *cio_chpids_proc_entry;
/*
 * open handler for /proc/chpids: build a text snapshot of all known
 * channel paths and their state (online/logically offline) into a
 * vmalloc'd buffer on file->private_data; 16 bytes per chpid line.
 */
9057 cio_chpids_proc_open(struct inode *inode, struct file *file)
/* Fetch subchannel descriptions lazily if not yet available. */
9065 if (!cio_chsc_desc_avail) {
9067 * We have not yet retrieved the link addresses,
9070 chsc_get_sch_descriptions();
9074 info = (tempinfo_t *) vmalloc(sizeof(tempinfo_t));
9076 printk( KERN_WARNING "No memory available for data\n");
9079 file->private_data = (void *) info;
9080 size += NR_CHPIDS * 16;
9081 info->data = (char *) vmalloc(size);
9082 if ( size && info->data == NULL) {
9083 printk( KERN_WARNING "No memory available for data\n");
9087 /* update our stuff */
9088 chsc_get_sch_descriptions();
9089 if (!cio_chsc_desc_avail) {
9090 len += sprintf(info->data+len, "no info available\n");
/* Three states per known chpid: not in 'chpids' bitmap, logically
 * varied on (chpids_logical), or the remaining case. */
9094 for (i=0;i<NR_CHPIDS;i++) {
9095 if (test_bit(i, &chpids_known)) {
9096 if (!test_bit(i, &chpids))
9097 len += sprintf(info->data+len,
9100 else if (test_bit(i, &chpids_logical))
9101 len += sprintf(info->data+len,
9105 len += sprintf(info->data+len,
/*
 * release handler for /proc/chpids: free the snapshot buffer allocated
 * in cio_chpids_proc_open.
 */
9120 cio_chpids_proc_close(struct inode *inode, struct file *file)
9123 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9127 vfree( p_info->data );
/*
 * read handler for /proc/chpids: copy out the text snapshot built at
 * open time, honouring the file offset. Uses the snapshotted 'pos'
 * consistently (the other *_proc_read handlers should match this).
 */
9135 cio_chpids_proc_read( struct file *file, char *user_buf, size_t user_len, loff_t * offset)
9138 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
9139 loff_t pos = *offset;
9141 if (pos < 0 || pos >= p_info->len) {
9144 len = MIN(user_len, (p_info->len - pos));
9145 if (copy_to_user( user_buf, &(p_info->data[pos]), len))
9147 *offset = pos + len;
/*
 * write handler for /proc/chpids: copy the user vary-on/off command
 * into a bounded, NUL-terminated kernel buffer and hand it to
 * cio_parse_chpids_proc_parameters().
 */
9153 cio_chpids_proc_write (struct file *file, const char *user_buf,
9154 size_t user_len, loff_t * offset)
/* Cap input at 64k to bound the vmalloc below. */
9158 if(user_len > 65536)
9161 buffer = vmalloc (user_len + 1);
9165 if (copy_from_user (buffer, user_buf, user_len)) {
9169 buffer[user_len]='\0';
/* NOTE(review): the #ifdef matching the #endif below is not visible in
 * this extract; presumably a debug-IO guard around the printk — confirm
 * against full source. */
9171 printk("/proc/chpids: '%s'\n", buffer);
9172 #endif /* CIO_DEBUG_IO */
9174 cio_parse_chpids_proc_parameters(buffer);
/*
 * File operations for /proc/chpids (readable snapshot, writable vary
 * command interface).
 * NOTE(review): the closing "};" of this initializer was missing in this
 * copy of the file; restored so the definition terminates properly.
 */
9180 static struct file_operations cio_chpids_proc_file_ops =
9182 read:cio_chpids_proc_read,
9183 open:cio_chpids_proc_open,
9184 write:cio_chpids_proc_write,
9185 release:cio_chpids_proc_close,
};
/*
 * initcall: register /proc/chpids (mode 0644).
 * NOTE(review): create_proc_entry() result is dereferenced without a
 * NULL check — same pattern as the other *_proc_init functions; confirm.
 */
9189 cio_chpids_proc_init(void)
9192 cio_chpids_proc_entry = create_proc_entry("chpids", S_IFREG|S_IRUGO|S_IWUSR, &proc_root);
9193 cio_chpids_proc_entry->proc_fops = &cio_chpids_proc_file_ops;
9200 __initcall(cio_chpids_proc_init);
9202 /* end of procfs stuff */
/*
 * s390_get_schib - return a pointer to the subchannel information block
 * for 'irq', or (presumably NULL — return lines elided in this extract)
 * when irq is out of range, the ioinfo slot is invalid, or ->st is set.
 */
9206 s390_get_schib (int irq)
9208 if ((irq > highest_subchannel) || (irq < 0))
9210 if (ioinfo[irq] == INVALID_STORAGE_AREA)
9212 if (ioinfo[irq]->st)
9214 return &ioinfo[irq]->schib;
/*
 * s390_set_private_data - attach caller-owned opaque data to the ioinfo
 * entry for 'irq'.
 * NOTE(review): unlike s390_get_private_data below, no range/validity
 * checks on irq are visible here — they may have been elided from this
 * extract; confirm against full source before relying on this.
 */
9219 s390_set_private_data(int irq, void *data)
9223 ioinfo[irq]->private_data = data;
/*
 * s390_get_private_data - retrieve the opaque data previously stored by
 * s390_set_private_data, after validating irq range, the ioinfo slot,
 * and the ->st flag (error-return lines elided in this extract).
 */
9229 s390_get_private_data(int irq)
9231 if ((irq > highest_subchannel) || (irq < 0))
9233 if (ioinfo[irq] == INVALID_STORAGE_AREA)
9235 if (ioinfo[irq]->st)
9237 return ioinfo[irq]->private_data;
/* Symbols exported to modules: the common-I/O entry points (do_IO and
 * friends), device/irq lookup helpers, and the private-data accessors
 * defined above. */
9240 EXPORT_SYMBOL (halt_IO);
9241 EXPORT_SYMBOL (clear_IO);
9242 EXPORT_SYMBOL (do_IO);
9243 EXPORT_SYMBOL (resume_IO);
9244 EXPORT_SYMBOL (ioinfo);
9245 EXPORT_SYMBOL (diag210);
9246 EXPORT_SYMBOL (get_dev_info_by_irq);
9247 EXPORT_SYMBOL (get_dev_info_by_devno);
9248 EXPORT_SYMBOL (get_irq_by_devno);
9249 EXPORT_SYMBOL (get_devno_by_irq);
9250 EXPORT_SYMBOL (get_irq_first);
9251 EXPORT_SYMBOL (get_irq_next);
9252 EXPORT_SYMBOL (read_conf_data);
9253 EXPORT_SYMBOL (read_dev_chars);
9254 EXPORT_SYMBOL (s390_request_irq_special);
9255 EXPORT_SYMBOL (s390_get_schib);
9256 EXPORT_SYMBOL (s390_register_adapter_interrupt);
9257 EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
9258 EXPORT_SYMBOL (s390_set_private_data);
9259 EXPORT_SYMBOL (s390_get_private_data);
9260 EXPORT_SYMBOL (s390_trigger_resense);