OSDN Git Service

Merge branch 'sched/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[uclinux-h8/linux.git] / drivers / char / ipmi / ipmi_msghandler.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * ipmi_msghandler.c
4  *
5  * Incoming and outgoing message routing for an IPMI interface.
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  */
13
14 #define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
15 #define dev_fmt pr_fmt
16
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/poll.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/spinlock.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/ipmi.h>
26 #include <linux/ipmi_smi.h>
27 #include <linux/notifier.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/interrupt.h>
32 #include <linux/moduleparam.h>
33 #include <linux/workqueue.h>
34 #include <linux/uuid.h>
35
36 #define IPMI_DRIVER_VERSION "39.2"
37
38 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
39 static int ipmi_init_msghandler(void);
40 static void smi_recv_tasklet(unsigned long);
41 static void handle_new_recv_msgs(struct ipmi_smi *intf);
42 static void need_waiter(struct ipmi_smi *intf);
43 static int handle_one_recv_msg(struct ipmi_smi *intf,
44                                struct ipmi_smi_msg *msg);
45
46 #ifdef DEBUG
47 static void ipmi_debug_msg(const char *title, unsigned char *data,
48                            unsigned int len)
49 {
50         int i, pos;
51         char buf[100];
52
53         pos = snprintf(buf, sizeof(buf), "%s: ", title);
54         for (i = 0; i < len; i++)
55                 pos += snprintf(buf + pos, sizeof(buf) - pos,
56                                 " %2.2x", data[i]);
57         pr_debug("%s\n", buf);
58 }
59 #else
60 static void ipmi_debug_msg(const char *title, unsigned char *data,
61                            unsigned int len)
62 { }
63 #endif
64
65 static int initialized;
66
67 enum ipmi_panic_event_op {
68         IPMI_SEND_PANIC_EVENT_NONE,
69         IPMI_SEND_PANIC_EVENT,
70         IPMI_SEND_PANIC_EVENT_STRING
71 };
72 #ifdef CONFIG_IPMI_PANIC_STRING
73 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
74 #elif defined(CONFIG_IPMI_PANIC_EVENT)
75 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
76 #else
77 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
78 #endif
79 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
80
81 static int panic_op_write_handler(const char *val,
82                                   const struct kernel_param *kp)
83 {
84         char valcp[16];
85         char *s;
86
87         strncpy(valcp, val, 15);
88         valcp[15] = '\0';
89
90         s = strstrip(valcp);
91
92         if (strcmp(s, "none") == 0)
93                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
94         else if (strcmp(s, "event") == 0)
95                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
96         else if (strcmp(s, "string") == 0)
97                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
98         else
99                 return -EINVAL;
100
101         return 0;
102 }
103
104 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
105 {
106         switch (ipmi_send_panic_event) {
107         case IPMI_SEND_PANIC_EVENT_NONE:
108                 strcpy(buffer, "none");
109                 break;
110
111         case IPMI_SEND_PANIC_EVENT:
112                 strcpy(buffer, "event");
113                 break;
114
115         case IPMI_SEND_PANIC_EVENT_STRING:
116                 strcpy(buffer, "string");
117                 break;
118
119         default:
120                 strcpy(buffer, "???");
121                 break;
122         }
123
124         return strlen(buffer);
125 }
126
127 static const struct kernel_param_ops panic_op_ops = {
128         .set = panic_op_write_handler,
129         .get = panic_op_read_handler
130 };
131 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
132 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
133
134
135 #define MAX_EVENTS_IN_QUEUE     25
136
137 /* Remain in auto-maintenance mode for this amount of time (in ms). */
138 static unsigned long maintenance_mode_timeout_ms = 30000;
139 module_param(maintenance_mode_timeout_ms, ulong, 0644);
140 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
141                  "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
142
143 /*
144  * Don't let a message sit in a queue forever, always time it with at lest
145  * the max message timer.  This is in milliseconds.
146  */
147 #define MAX_MSG_TIMEOUT         60000
148
149 /*
150  * Timeout times below are in milliseconds, and are done off a 1
151  * second timer.  So setting the value to 1000 would mean anything
152  * between 0 and 1000ms.  So really the only reasonable minimum
153  * setting it 2000ms, which is between 1 and 2 seconds.
154  */
155
156 /* The default timeout for message retries. */
157 static unsigned long default_retry_ms = 2000;
158 module_param(default_retry_ms, ulong, 0644);
159 MODULE_PARM_DESC(default_retry_ms,
160                  "The time (milliseconds) between retry sends");
161
162 /* The default timeout for maintenance mode message retries. */
163 static unsigned long default_maintenance_retry_ms = 3000;
164 module_param(default_maintenance_retry_ms, ulong, 0644);
165 MODULE_PARM_DESC(default_maintenance_retry_ms,
166                  "The time (milliseconds) between retry sends in maintenance mode");
167
168 /* The default maximum number of retries */
169 static unsigned int default_max_retries = 4;
170 module_param(default_max_retries, uint, 0644);
171 MODULE_PARM_DESC(default_max_retries,
172                  "The time (milliseconds) between retry sends in maintenance mode");
173
174 /* Call every ~1000 ms. */
175 #define IPMI_TIMEOUT_TIME       1000
176
177 /* How many jiffies does it take to get to the timeout time. */
178 #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
179
180 /*
181  * Request events from the queue every second (this is the number of
182  * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
183  * future, IPMI will add a way to know immediately if an event is in
184  * the queue and this silliness can go away.
185  */
186 #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
187
188 /* How long should we cache dynamic device IDs? */
189 #define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
190
191 /*
192  * The main "user" data structure.
193  */
194 struct ipmi_user {
195         struct list_head link;
196
197         /*
198          * Set to NULL when the user is destroyed, a pointer to myself
199          * so srcu_dereference can be used on it.
200          */
201         struct ipmi_user *self;
202         struct srcu_struct release_barrier;
203
204         struct kref refcount;
205
206         /* The upper layer that handles receive messages. */
207         const struct ipmi_user_hndl *handler;
208         void             *handler_data;
209
210         /* The interface this user is bound to. */
211         struct ipmi_smi *intf;
212
213         /* Does this interface receive IPMI events? */
214         bool gets_events;
215 };
216
217 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
218         __acquires(user->release_barrier)
219 {
220         struct ipmi_user *ruser;
221
222         *index = srcu_read_lock(&user->release_barrier);
223         ruser = srcu_dereference(user->self, &user->release_barrier);
224         if (!ruser)
225                 srcu_read_unlock(&user->release_barrier, *index);
226         return ruser;
227 }
228
229 static void release_ipmi_user(struct ipmi_user *user, int index)
230 {
231         srcu_read_unlock(&user->release_barrier, index);
232 }
233
234 struct cmd_rcvr {
235         struct list_head link;
236
237         struct ipmi_user *user;
238         unsigned char netfn;
239         unsigned char cmd;
240         unsigned int  chans;
241
242         /*
243          * This is used to form a linked lised during mass deletion.
244          * Since this is in an RCU list, we cannot use the link above
245          * or change any data until the RCU period completes.  So we
246          * use this next variable during mass deletion so we can have
247          * a list and don't have to wait and restart the search on
248          * every individual deletion of a command.
249          */
250         struct cmd_rcvr *next;
251 };
252
253 struct seq_table {
254         unsigned int         inuse : 1;
255         unsigned int         broadcast : 1;
256
257         unsigned long        timeout;
258         unsigned long        orig_timeout;
259         unsigned int         retries_left;
260
261         /*
262          * To verify on an incoming send message response that this is
263          * the message that the response is for, we keep a sequence id
264          * and increment it every time we send a message.
265          */
266         long                 seqid;
267
268         /*
269          * This is held so we can properly respond to the message on a
270          * timeout, and it is used to hold the temporary data for
271          * retransmission, too.
272          */
273         struct ipmi_recv_msg *recv_msg;
274 };
275
276 /*
277  * Store the information in a msgid (long) to allow us to find a
278  * sequence table entry from the msgid.
279  */
280 #define STORE_SEQ_IN_MSGID(seq, seqid) \
281         ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
282
283 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
284         do {                                                            \
285                 seq = (((msgid) >> 26) & 0x3f);                         \
286                 seqid = ((msgid) & 0x3ffffff);                          \
287         } while (0)
288
289 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
290
291 #define IPMI_MAX_CHANNELS       16
292 struct ipmi_channel {
293         unsigned char medium;
294         unsigned char protocol;
295 };
296
297 struct ipmi_channel_set {
298         struct ipmi_channel c[IPMI_MAX_CHANNELS];
299 };
300
301 struct ipmi_my_addrinfo {
302         /*
303          * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
304          * but may be changed by the user.
305          */
306         unsigned char address;
307
308         /*
309          * My LUN.  This should generally stay the SMS LUN, but just in
310          * case...
311          */
312         unsigned char lun;
313 };
314
315 /*
316  * Note that the product id, manufacturer id, guid, and device id are
317  * immutable in this structure, so dyn_mutex is not required for
318  * accessing those.  If those change on a BMC, a new BMC is allocated.
319  */
320 struct bmc_device {
321         struct platform_device pdev;
322         struct list_head       intfs; /* Interfaces on this BMC. */
323         struct ipmi_device_id  id;
324         struct ipmi_device_id  fetch_id;
325         int                    dyn_id_set;
326         unsigned long          dyn_id_expiry;
327         struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
328         guid_t                 guid;
329         guid_t                 fetch_guid;
330         int                    dyn_guid_set;
331         struct kref            usecount;
332         struct work_struct     remove_work;
333 };
334 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
335
336 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
337                              struct ipmi_device_id *id,
338                              bool *guid_set, guid_t *guid);
339
340 /*
341  * Various statistics for IPMI, these index stats[] in the ipmi_smi
342  * structure.
343  */
344 enum ipmi_stat_indexes {
345         /* Commands we got from the user that were invalid. */
346         IPMI_STAT_sent_invalid_commands = 0,
347
348         /* Commands we sent to the MC. */
349         IPMI_STAT_sent_local_commands,
350
351         /* Responses from the MC that were delivered to a user. */
352         IPMI_STAT_handled_local_responses,
353
354         /* Responses from the MC that were not delivered to a user. */
355         IPMI_STAT_unhandled_local_responses,
356
357         /* Commands we sent out to the IPMB bus. */
358         IPMI_STAT_sent_ipmb_commands,
359
360         /* Commands sent on the IPMB that had errors on the SEND CMD */
361         IPMI_STAT_sent_ipmb_command_errs,
362
363         /* Each retransmit increments this count. */
364         IPMI_STAT_retransmitted_ipmb_commands,
365
366         /*
367          * When a message times out (runs out of retransmits) this is
368          * incremented.
369          */
370         IPMI_STAT_timed_out_ipmb_commands,
371
372         /*
373          * This is like above, but for broadcasts.  Broadcasts are
374          * *not* included in the above count (they are expected to
375          * time out).
376          */
377         IPMI_STAT_timed_out_ipmb_broadcasts,
378
379         /* Responses I have sent to the IPMB bus. */
380         IPMI_STAT_sent_ipmb_responses,
381
382         /* The response was delivered to the user. */
383         IPMI_STAT_handled_ipmb_responses,
384
385         /* The response had invalid data in it. */
386         IPMI_STAT_invalid_ipmb_responses,
387
388         /* The response didn't have anyone waiting for it. */
389         IPMI_STAT_unhandled_ipmb_responses,
390
391         /* Commands we sent out to the IPMB bus. */
392         IPMI_STAT_sent_lan_commands,
393
394         /* Commands sent on the IPMB that had errors on the SEND CMD */
395         IPMI_STAT_sent_lan_command_errs,
396
397         /* Each retransmit increments this count. */
398         IPMI_STAT_retransmitted_lan_commands,
399
400         /*
401          * When a message times out (runs out of retransmits) this is
402          * incremented.
403          */
404         IPMI_STAT_timed_out_lan_commands,
405
406         /* Responses I have sent to the IPMB bus. */
407         IPMI_STAT_sent_lan_responses,
408
409         /* The response was delivered to the user. */
410         IPMI_STAT_handled_lan_responses,
411
412         /* The response had invalid data in it. */
413         IPMI_STAT_invalid_lan_responses,
414
415         /* The response didn't have anyone waiting for it. */
416         IPMI_STAT_unhandled_lan_responses,
417
418         /* The command was delivered to the user. */
419         IPMI_STAT_handled_commands,
420
421         /* The command had invalid data in it. */
422         IPMI_STAT_invalid_commands,
423
424         /* The command didn't have anyone waiting for it. */
425         IPMI_STAT_unhandled_commands,
426
427         /* Invalid data in an event. */
428         IPMI_STAT_invalid_events,
429
430         /* Events that were received with the proper format. */
431         IPMI_STAT_events,
432
433         /* Retransmissions on IPMB that failed. */
434         IPMI_STAT_dropped_rexmit_ipmb_commands,
435
436         /* Retransmissions on LAN that failed. */
437         IPMI_STAT_dropped_rexmit_lan_commands,
438
439         /* This *must* remain last, add new values above this. */
440         IPMI_NUM_STATS
441 };
442
443
444 #define IPMI_IPMB_NUM_SEQ       64
445 struct ipmi_smi {
446         /* What interface number are we? */
447         int intf_num;
448
449         struct kref refcount;
450
451         /* Set when the interface is being unregistered. */
452         bool in_shutdown;
453
454         /* Used for a list of interfaces. */
455         struct list_head link;
456
457         /*
458          * The list of upper layers that are using me.  seq_lock write
459          * protects this.  Read protection is with srcu.
460          */
461         struct list_head users;
462         struct srcu_struct users_srcu;
463
464         /* Used for wake ups at startup. */
465         wait_queue_head_t waitq;
466
467         /*
468          * Prevents the interface from being unregistered when the
469          * interface is used by being looked up through the BMC
470          * structure.
471          */
472         struct mutex bmc_reg_mutex;
473
474         struct bmc_device tmp_bmc;
475         struct bmc_device *bmc;
476         bool bmc_registered;
477         struct list_head bmc_link;
478         char *my_dev_name;
479         bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
480         struct work_struct bmc_reg_work;
481
482         const struct ipmi_smi_handlers *handlers;
483         void                     *send_info;
484
485         /* Driver-model device for the system interface. */
486         struct device          *si_dev;
487
488         /*
489          * A table of sequence numbers for this interface.  We use the
490          * sequence numbers for IPMB messages that go out of the
491          * interface to match them up with their responses.  A routine
492          * is called periodically to time the items in this list.
493          */
494         spinlock_t       seq_lock;
495         struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
496         int curr_seq;
497
498         /*
499          * Messages queued for delivery.  If delivery fails (out of memory
500          * for instance), They will stay in here to be processed later in a
501          * periodic timer interrupt.  The tasklet is for handling received
502          * messages directly from the handler.
503          */
504         spinlock_t       waiting_rcv_msgs_lock;
505         struct list_head waiting_rcv_msgs;
506         atomic_t         watchdog_pretimeouts_to_deliver;
507         struct tasklet_struct recv_tasklet;
508
509         spinlock_t             xmit_msgs_lock;
510         struct list_head       xmit_msgs;
511         struct ipmi_smi_msg    *curr_msg;
512         struct list_head       hp_xmit_msgs;
513
514         /*
515          * The list of command receivers that are registered for commands
516          * on this interface.
517          */
518         struct mutex     cmd_rcvrs_mutex;
519         struct list_head cmd_rcvrs;
520
521         /*
522          * Events that were queues because no one was there to receive
523          * them.
524          */
525         spinlock_t       events_lock; /* For dealing with event stuff. */
526         struct list_head waiting_events;
527         unsigned int     waiting_events_count; /* How many events in queue? */
528         char             delivering_events;
529         char             event_msg_printed;
530         atomic_t         event_waiters;
531         unsigned int     ticks_to_req_ev;
532         int              last_needs_timer;
533
534         /*
535          * The event receiver for my BMC, only really used at panic
536          * shutdown as a place to store this.
537          */
538         unsigned char event_receiver;
539         unsigned char event_receiver_lun;
540         unsigned char local_sel_device;
541         unsigned char local_event_generator;
542
543         /* For handling of maintenance mode. */
544         int maintenance_mode;
545         bool maintenance_mode_enable;
546         int auto_maintenance_timeout;
547         spinlock_t maintenance_mode_lock; /* Used in a timer... */
548
549         /*
550          * If we are doing maintenance on something on IPMB, extend
551          * the timeout time to avoid timeouts writing firmware and
552          * such.
553          */
554         int ipmb_maintenance_mode_timeout;
555
556         /*
557          * A cheap hack, if this is non-null and a message to an
558          * interface comes in with a NULL user, call this routine with
559          * it.  Note that the message will still be freed by the
560          * caller.  This only works on the system interface.
561          *
562          * Protected by bmc_reg_mutex.
563          */
564         void (*null_user_handler)(struct ipmi_smi *intf,
565                                   struct ipmi_recv_msg *msg);
566
567         /*
568          * When we are scanning the channels for an SMI, this will
569          * tell which channel we are scanning.
570          */
571         int curr_channel;
572
573         /* Channel information */
574         struct ipmi_channel_set *channel_list;
575         unsigned int curr_working_cset; /* First index into the following. */
576         struct ipmi_channel_set wchannels[2];
577         struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
578         bool channels_ready;
579
580         atomic_t stats[IPMI_NUM_STATS];
581
582         /*
583          * run_to_completion duplicate of smb_info, smi_info
584          * and ipmi_serial_info structures. Used to decrease numbers of
585          * parameters passed by "low" level IPMI code.
586          */
587         int run_to_completion;
588 };
589 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
590
591 static void __get_guid(struct ipmi_smi *intf);
592 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
593 static int __ipmi_bmc_register(struct ipmi_smi *intf,
594                                struct ipmi_device_id *id,
595                                bool guid_set, guid_t *guid, int intf_num);
596 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
597
598
599 /**
600  * The driver model view of the IPMI messaging driver.
601  */
602 static struct platform_driver ipmidriver = {
603         .driver = {
604                 .name = "ipmi",
605                 .bus = &platform_bus_type
606         }
607 };
608 /*
609  * This mutex keeps us from adding the same BMC twice.
610  */
611 static DEFINE_MUTEX(ipmidriver_mutex);
612
613 static LIST_HEAD(ipmi_interfaces);
614 static DEFINE_MUTEX(ipmi_interfaces_mutex);
615 DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
616
617 /*
618  * List of watchers that want to know when smi's are added and deleted.
619  */
620 static LIST_HEAD(smi_watchers);
621 static DEFINE_MUTEX(smi_watchers_mutex);
622
623 #define ipmi_inc_stat(intf, stat) \
624         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
625 #define ipmi_get_stat(intf, stat) \
626         ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
627
628 static const char * const addr_src_to_str[] = {
629         "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
630         "device-tree", "platform"
631 };
632
633 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
634 {
635         if (src >= SI_LAST)
636                 src = 0; /* Invalid */
637         return addr_src_to_str[src];
638 }
639 EXPORT_SYMBOL(ipmi_addr_src_to_str);
640
641 static int is_lan_addr(struct ipmi_addr *addr)
642 {
643         return addr->addr_type == IPMI_LAN_ADDR_TYPE;
644 }
645
646 static int is_ipmb_addr(struct ipmi_addr *addr)
647 {
648         return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
649 }
650
651 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
652 {
653         return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
654 }
655
656 static void free_recv_msg_list(struct list_head *q)
657 {
658         struct ipmi_recv_msg *msg, *msg2;
659
660         list_for_each_entry_safe(msg, msg2, q, link) {
661                 list_del(&msg->link);
662                 ipmi_free_recv_msg(msg);
663         }
664 }
665
666 static void free_smi_msg_list(struct list_head *q)
667 {
668         struct ipmi_smi_msg *msg, *msg2;
669
670         list_for_each_entry_safe(msg, msg2, q, link) {
671                 list_del(&msg->link);
672                 ipmi_free_smi_msg(msg);
673         }
674 }
675
676 static void clean_up_interface_data(struct ipmi_smi *intf)
677 {
678         int              i;
679         struct cmd_rcvr  *rcvr, *rcvr2;
680         struct list_head list;
681
682         tasklet_kill(&intf->recv_tasklet);
683
684         free_smi_msg_list(&intf->waiting_rcv_msgs);
685         free_recv_msg_list(&intf->waiting_events);
686
687         /*
688          * Wholesale remove all the entries from the list in the
689          * interface and wait for RCU to know that none are in use.
690          */
691         mutex_lock(&intf->cmd_rcvrs_mutex);
692         INIT_LIST_HEAD(&list);
693         list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
694         mutex_unlock(&intf->cmd_rcvrs_mutex);
695
696         list_for_each_entry_safe(rcvr, rcvr2, &list, link)
697                 kfree(rcvr);
698
699         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
700                 if ((intf->seq_table[i].inuse)
701                                         && (intf->seq_table[i].recv_msg))
702                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
703         }
704 }
705
706 static void intf_free(struct kref *ref)
707 {
708         struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
709
710         clean_up_interface_data(intf);
711         kfree(intf);
712 }
713
714 struct watcher_entry {
715         int              intf_num;
716         struct ipmi_smi  *intf;
717         struct list_head link;
718 };
719
720 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
721 {
722         struct ipmi_smi *intf;
723         int index;
724
725         mutex_lock(&smi_watchers_mutex);
726
727         list_add(&watcher->link, &smi_watchers);
728
729         index = srcu_read_lock(&ipmi_interfaces_srcu);
730         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
731                 int intf_num = READ_ONCE(intf->intf_num);
732
733                 if (intf_num == -1)
734                         continue;
735                 watcher->new_smi(intf_num, intf->si_dev);
736         }
737         srcu_read_unlock(&ipmi_interfaces_srcu, index);
738
739         mutex_unlock(&smi_watchers_mutex);
740
741         return 0;
742 }
743 EXPORT_SYMBOL(ipmi_smi_watcher_register);
744
745 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
746 {
747         mutex_lock(&smi_watchers_mutex);
748         list_del(&watcher->link);
749         mutex_unlock(&smi_watchers_mutex);
750         return 0;
751 }
752 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
753
754 /*
755  * Must be called with smi_watchers_mutex held.
756  */
757 static void
758 call_smi_watchers(int i, struct device *dev)
759 {
760         struct ipmi_smi_watcher *w;
761
762         mutex_lock(&smi_watchers_mutex);
763         list_for_each_entry(w, &smi_watchers, link) {
764                 if (try_module_get(w->owner)) {
765                         w->new_smi(i, dev);
766                         module_put(w->owner);
767                 }
768         }
769         mutex_unlock(&smi_watchers_mutex);
770 }
771
772 static int
773 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
774 {
775         if (addr1->addr_type != addr2->addr_type)
776                 return 0;
777
778         if (addr1->channel != addr2->channel)
779                 return 0;
780
781         if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
782                 struct ipmi_system_interface_addr *smi_addr1
783                     = (struct ipmi_system_interface_addr *) addr1;
784                 struct ipmi_system_interface_addr *smi_addr2
785                     = (struct ipmi_system_interface_addr *) addr2;
786                 return (smi_addr1->lun == smi_addr2->lun);
787         }
788
789         if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
790                 struct ipmi_ipmb_addr *ipmb_addr1
791                     = (struct ipmi_ipmb_addr *) addr1;
792                 struct ipmi_ipmb_addr *ipmb_addr2
793                     = (struct ipmi_ipmb_addr *) addr2;
794
795                 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
796                         && (ipmb_addr1->lun == ipmb_addr2->lun));
797         }
798
799         if (is_lan_addr(addr1)) {
800                 struct ipmi_lan_addr *lan_addr1
801                         = (struct ipmi_lan_addr *) addr1;
802                 struct ipmi_lan_addr *lan_addr2
803                     = (struct ipmi_lan_addr *) addr2;
804
805                 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
806                         && (lan_addr1->local_SWID == lan_addr2->local_SWID)
807                         && (lan_addr1->session_handle
808                             == lan_addr2->session_handle)
809                         && (lan_addr1->lun == lan_addr2->lun));
810         }
811
812         return 1;
813 }
814
815 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
816 {
817         if (len < sizeof(struct ipmi_system_interface_addr))
818                 return -EINVAL;
819
820         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
821                 if (addr->channel != IPMI_BMC_CHANNEL)
822                         return -EINVAL;
823                 return 0;
824         }
825
826         if ((addr->channel == IPMI_BMC_CHANNEL)
827             || (addr->channel >= IPMI_MAX_CHANNELS)
828             || (addr->channel < 0))
829                 return -EINVAL;
830
831         if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
832                 if (len < sizeof(struct ipmi_ipmb_addr))
833                         return -EINVAL;
834                 return 0;
835         }
836
837         if (is_lan_addr(addr)) {
838                 if (len < sizeof(struct ipmi_lan_addr))
839                         return -EINVAL;
840                 return 0;
841         }
842
843         return -EINVAL;
844 }
845 EXPORT_SYMBOL(ipmi_validate_addr);
846
847 unsigned int ipmi_addr_length(int addr_type)
848 {
849         if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
850                 return sizeof(struct ipmi_system_interface_addr);
851
852         if ((addr_type == IPMI_IPMB_ADDR_TYPE)
853                         || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
854                 return sizeof(struct ipmi_ipmb_addr);
855
856         if (addr_type == IPMI_LAN_ADDR_TYPE)
857                 return sizeof(struct ipmi_lan_addr);
858
859         return 0;
860 }
861 EXPORT_SYMBOL(ipmi_addr_length);
862
863 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
864 {
865         int rv = 0;
866
867         if (!msg->user) {
868                 /* Special handling for NULL users. */
869                 if (intf->null_user_handler) {
870                         intf->null_user_handler(intf, msg);
871                 } else {
872                         /* No handler, so give up. */
873                         rv = -EINVAL;
874                 }
875                 ipmi_free_recv_msg(msg);
876         } else if (!oops_in_progress) {
877                 /*
878                  * If we are running in the panic context, calling the
879                  * receive handler doesn't much meaning and has a deadlock
880                  * risk.  At this moment, simply skip it in that case.
881                  */
882                 int index;
883                 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
884
885                 if (user) {
886                         user->handler->ipmi_recv_hndl(msg, user->handler_data);
887                         release_ipmi_user(msg->user, index);
888                 } else {
889                         /* User went away, give up. */
890                         ipmi_free_recv_msg(msg);
891                         rv = -EINVAL;
892                 }
893         }
894
895         return rv;
896 }
897
898 static void deliver_local_response(struct ipmi_smi *intf,
899                                    struct ipmi_recv_msg *msg)
900 {
901         if (deliver_response(intf, msg))
902                 ipmi_inc_stat(intf, unhandled_local_responses);
903         else
904                 ipmi_inc_stat(intf, handled_local_responses);
905 }
906
907 static void deliver_err_response(struct ipmi_smi *intf,
908                                  struct ipmi_recv_msg *msg, int err)
909 {
910         msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
911         msg->msg_data[0] = err;
912         msg->msg.netfn |= 1; /* Convert to a response. */
913         msg->msg.data_len = 1;
914         msg->msg.data = msg->msg_data;
915         deliver_local_response(intf, msg);
916 }
917
918 /*
919  * Find the next sequence number not being used and add the given
920  * message with the given timeout to the sequence table.  This must be
921  * called with the interface's seq_lock held.
922  */
923 static int intf_next_seq(struct ipmi_smi      *intf,
924                          struct ipmi_recv_msg *recv_msg,
925                          unsigned long        timeout,
926                          int                  retries,
927                          int                  broadcast,
928                          unsigned char        *seq,
929                          long                 *seqid)
930 {
931         int          rv = 0;
932         unsigned int i;
933
934         if (timeout == 0)
935                 timeout = default_retry_ms;
936         if (retries < 0)
937                 retries = default_max_retries;
938
939         for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
940                                         i = (i+1)%IPMI_IPMB_NUM_SEQ) {
941                 if (!intf->seq_table[i].inuse)
942                         break;
943         }
944
945         if (!intf->seq_table[i].inuse) {
946                 intf->seq_table[i].recv_msg = recv_msg;
947
948                 /*
949                  * Start with the maximum timeout, when the send response
950                  * comes in we will start the real timer.
951                  */
952                 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
953                 intf->seq_table[i].orig_timeout = timeout;
954                 intf->seq_table[i].retries_left = retries;
955                 intf->seq_table[i].broadcast = broadcast;
956                 intf->seq_table[i].inuse = 1;
957                 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
958                 *seq = i;
959                 *seqid = intf->seq_table[i].seqid;
960                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
961                 need_waiter(intf);
962         } else {
963                 rv = -EAGAIN;
964         }
965
966         return rv;
967 }
968
969 /*
970  * Return the receive message for the given sequence number and
971  * release the sequence number so it can be reused.  Some other data
972  * is passed in to be sure the message matches up correctly (to help
973  * guard against message coming in after their timeout and the
974  * sequence number being reused).
975  */
976 static int intf_find_seq(struct ipmi_smi      *intf,
977                          unsigned char        seq,
978                          short                channel,
979                          unsigned char        cmd,
980                          unsigned char        netfn,
981                          struct ipmi_addr     *addr,
982                          struct ipmi_recv_msg **recv_msg)
983 {
984         int           rv = -ENODEV;
985         unsigned long flags;
986
987         if (seq >= IPMI_IPMB_NUM_SEQ)
988                 return -EINVAL;
989
990         spin_lock_irqsave(&intf->seq_lock, flags);
991         if (intf->seq_table[seq].inuse) {
992                 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
993
994                 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
995                                 && (msg->msg.netfn == netfn)
996                                 && (ipmi_addr_equal(addr, &msg->addr))) {
997                         *recv_msg = msg;
998                         intf->seq_table[seq].inuse = 0;
999                         rv = 0;
1000                 }
1001         }
1002         spin_unlock_irqrestore(&intf->seq_lock, flags);
1003
1004         return rv;
1005 }
1006
1007
1008 /* Start the timer for a specific sequence table entry. */
1009 static int intf_start_seq_timer(struct ipmi_smi *intf,
1010                                 long       msgid)
1011 {
1012         int           rv = -ENODEV;
1013         unsigned long flags;
1014         unsigned char seq;
1015         unsigned long seqid;
1016
1017
1018         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1019
1020         spin_lock_irqsave(&intf->seq_lock, flags);
1021         /*
1022          * We do this verification because the user can be deleted
1023          * while a message is outstanding.
1024          */
1025         if ((intf->seq_table[seq].inuse)
1026                                 && (intf->seq_table[seq].seqid == seqid)) {
1027                 struct seq_table *ent = &intf->seq_table[seq];
1028                 ent->timeout = ent->orig_timeout;
1029                 rv = 0;
1030         }
1031         spin_unlock_irqrestore(&intf->seq_lock, flags);
1032
1033         return rv;
1034 }
1035
1036 /* Got an error for the send message for a specific sequence number. */
1037 static int intf_err_seq(struct ipmi_smi *intf,
1038                         long         msgid,
1039                         unsigned int err)
1040 {
1041         int                  rv = -ENODEV;
1042         unsigned long        flags;
1043         unsigned char        seq;
1044         unsigned long        seqid;
1045         struct ipmi_recv_msg *msg = NULL;
1046
1047
1048         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1049
1050         spin_lock_irqsave(&intf->seq_lock, flags);
1051         /*
1052          * We do this verification because the user can be deleted
1053          * while a message is outstanding.
1054          */
1055         if ((intf->seq_table[seq].inuse)
1056                                 && (intf->seq_table[seq].seqid == seqid)) {
1057                 struct seq_table *ent = &intf->seq_table[seq];
1058
1059                 ent->inuse = 0;
1060                 msg = ent->recv_msg;
1061                 rv = 0;
1062         }
1063         spin_unlock_irqrestore(&intf->seq_lock, flags);
1064
1065         if (msg)
1066                 deliver_err_response(intf, msg, err);
1067
1068         return rv;
1069 }
1070
1071
1072 int ipmi_create_user(unsigned int          if_num,
1073                      const struct ipmi_user_hndl *handler,
1074                      void                  *handler_data,
1075                      struct ipmi_user      **user)
1076 {
1077         unsigned long flags;
1078         struct ipmi_user *new_user;
1079         int           rv = 0, index;
1080         struct ipmi_smi *intf;
1081
1082         /*
1083          * There is no module usecount here, because it's not
1084          * required.  Since this can only be used by and called from
1085          * other modules, they will implicitly use this module, and
1086          * thus this can't be removed unless the other modules are
1087          * removed.
1088          */
1089
1090         if (handler == NULL)
1091                 return -EINVAL;
1092
1093         /*
1094          * Make sure the driver is actually initialized, this handles
1095          * problems with initialization order.
1096          */
1097         if (!initialized) {
1098                 rv = ipmi_init_msghandler();
1099                 if (rv)
1100                         return rv;
1101
1102                 /*
1103                  * The init code doesn't return an error if it was turned
1104                  * off, but it won't initialize.  Check that.
1105                  */
1106                 if (!initialized)
1107                         return -ENODEV;
1108         }
1109
1110         new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1111         if (!new_user)
1112                 return -ENOMEM;
1113
1114         index = srcu_read_lock(&ipmi_interfaces_srcu);
1115         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1116                 if (intf->intf_num == if_num)
1117                         goto found;
1118         }
1119         /* Not found, return an error */
1120         rv = -EINVAL;
1121         goto out_kfree;
1122
1123  found:
1124         rv = init_srcu_struct(&new_user->release_barrier);
1125         if (rv)
1126                 goto out_kfree;
1127
1128         /* Note that each existing user holds a refcount to the interface. */
1129         kref_get(&intf->refcount);
1130
1131         kref_init(&new_user->refcount);
1132         new_user->handler = handler;
1133         new_user->handler_data = handler_data;
1134         new_user->intf = intf;
1135         new_user->gets_events = false;
1136
1137         rcu_assign_pointer(new_user->self, new_user);
1138         spin_lock_irqsave(&intf->seq_lock, flags);
1139         list_add_rcu(&new_user->link, &intf->users);
1140         spin_unlock_irqrestore(&intf->seq_lock, flags);
1141         if (handler->ipmi_watchdog_pretimeout) {
1142                 /* User wants pretimeouts, so make sure to watch for them. */
1143                 if (atomic_inc_return(&intf->event_waiters) == 1)
1144                         need_waiter(intf);
1145         }
1146         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1147         *user = new_user;
1148         return 0;
1149
1150 out_kfree:
1151         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1152         kfree(new_user);
1153         return rv;
1154 }
1155 EXPORT_SYMBOL(ipmi_create_user);
1156
1157 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1158 {
1159         int rv, index;
1160         struct ipmi_smi *intf;
1161
1162         index = srcu_read_lock(&ipmi_interfaces_srcu);
1163         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1164                 if (intf->intf_num == if_num)
1165                         goto found;
1166         }
1167         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1168
1169         /* Not found, return an error */
1170         return -EINVAL;
1171
1172 found:
1173         if (!intf->handlers->get_smi_info)
1174                 rv = -ENOTTY;
1175         else
1176                 rv = intf->handlers->get_smi_info(intf->send_info, data);
1177         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1178
1179         return rv;
1180 }
1181 EXPORT_SYMBOL(ipmi_get_smi_info);
1182
1183 static void free_user(struct kref *ref)
1184 {
1185         struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1186         kfree(user);
1187 }
1188
1189 static void _ipmi_destroy_user(struct ipmi_user *user)
1190 {
1191         struct ipmi_smi  *intf = user->intf;
1192         int              i;
1193         unsigned long    flags;
1194         struct cmd_rcvr  *rcvr;
1195         struct cmd_rcvr  *rcvrs = NULL;
1196
1197         if (!acquire_ipmi_user(user, &i)) {
1198                 /*
1199                  * The user has already been cleaned up, just make sure
1200                  * nothing is using it and return.
1201                  */
1202                 synchronize_srcu(&user->release_barrier);
1203                 return;
1204         }
1205
1206         rcu_assign_pointer(user->self, NULL);
1207         release_ipmi_user(user, i);
1208
1209         synchronize_srcu(&user->release_barrier);
1210
1211         if (user->handler->shutdown)
1212                 user->handler->shutdown(user->handler_data);
1213
1214         if (user->handler->ipmi_watchdog_pretimeout)
1215                 atomic_dec(&intf->event_waiters);
1216
1217         if (user->gets_events)
1218                 atomic_dec(&intf->event_waiters);
1219
1220         /* Remove the user from the interface's sequence table. */
1221         spin_lock_irqsave(&intf->seq_lock, flags);
1222         list_del_rcu(&user->link);
1223
1224         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1225                 if (intf->seq_table[i].inuse
1226                     && (intf->seq_table[i].recv_msg->user == user)) {
1227                         intf->seq_table[i].inuse = 0;
1228                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1229                 }
1230         }
1231         spin_unlock_irqrestore(&intf->seq_lock, flags);
1232
1233         /*
1234          * Remove the user from the command receiver's table.  First
1235          * we build a list of everything (not using the standard link,
1236          * since other things may be using it till we do
1237          * synchronize_srcu()) then free everything in that list.
1238          */
1239         mutex_lock(&intf->cmd_rcvrs_mutex);
1240         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1241                 if (rcvr->user == user) {
1242                         list_del_rcu(&rcvr->link);
1243                         rcvr->next = rcvrs;
1244                         rcvrs = rcvr;
1245                 }
1246         }
1247         mutex_unlock(&intf->cmd_rcvrs_mutex);
1248         synchronize_rcu();
1249         while (rcvrs) {
1250                 rcvr = rcvrs;
1251                 rcvrs = rcvr->next;
1252                 kfree(rcvr);
1253         }
1254
1255         kref_put(&intf->refcount, intf_free);
1256 }
1257
1258 int ipmi_destroy_user(struct ipmi_user *user)
1259 {
1260         _ipmi_destroy_user(user);
1261
1262         cleanup_srcu_struct(&user->release_barrier);
1263         kref_put(&user->refcount, free_user);
1264
1265         return 0;
1266 }
1267 EXPORT_SYMBOL(ipmi_destroy_user);
1268
1269 int ipmi_get_version(struct ipmi_user *user,
1270                      unsigned char *major,
1271                      unsigned char *minor)
1272 {
1273         struct ipmi_device_id id;
1274         int rv, index;
1275
1276         user = acquire_ipmi_user(user, &index);
1277         if (!user)
1278                 return -ENODEV;
1279
1280         rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1281         if (!rv) {
1282                 *major = ipmi_version_major(&id);
1283                 *minor = ipmi_version_minor(&id);
1284         }
1285         release_ipmi_user(user, index);
1286
1287         return rv;
1288 }
1289 EXPORT_SYMBOL(ipmi_get_version);
1290
1291 int ipmi_set_my_address(struct ipmi_user *user,
1292                         unsigned int  channel,
1293                         unsigned char address)
1294 {
1295         int index, rv = 0;
1296
1297         user = acquire_ipmi_user(user, &index);
1298         if (!user)
1299                 return -ENODEV;
1300
1301         if (channel >= IPMI_MAX_CHANNELS)
1302                 rv = -EINVAL;
1303         else
1304                 user->intf->addrinfo[channel].address = address;
1305         release_ipmi_user(user, index);
1306
1307         return rv;
1308 }
1309 EXPORT_SYMBOL(ipmi_set_my_address);
1310
1311 int ipmi_get_my_address(struct ipmi_user *user,
1312                         unsigned int  channel,
1313                         unsigned char *address)
1314 {
1315         int index, rv = 0;
1316
1317         user = acquire_ipmi_user(user, &index);
1318         if (!user)
1319                 return -ENODEV;
1320
1321         if (channel >= IPMI_MAX_CHANNELS)
1322                 rv = -EINVAL;
1323         else
1324                 *address = user->intf->addrinfo[channel].address;
1325         release_ipmi_user(user, index);
1326
1327         return rv;
1328 }
1329 EXPORT_SYMBOL(ipmi_get_my_address);
1330
1331 int ipmi_set_my_LUN(struct ipmi_user *user,
1332                     unsigned int  channel,
1333                     unsigned char LUN)
1334 {
1335         int index, rv = 0;
1336
1337         user = acquire_ipmi_user(user, &index);
1338         if (!user)
1339                 return -ENODEV;
1340
1341         if (channel >= IPMI_MAX_CHANNELS)
1342                 rv = -EINVAL;
1343         else
1344                 user->intf->addrinfo[channel].lun = LUN & 0x3;
1345         release_ipmi_user(user, index);
1346
1347         return rv;
1348 }
1349 EXPORT_SYMBOL(ipmi_set_my_LUN);
1350
1351 int ipmi_get_my_LUN(struct ipmi_user *user,
1352                     unsigned int  channel,
1353                     unsigned char *address)
1354 {
1355         int index, rv = 0;
1356
1357         user = acquire_ipmi_user(user, &index);
1358         if (!user)
1359                 return -ENODEV;
1360
1361         if (channel >= IPMI_MAX_CHANNELS)
1362                 rv = -EINVAL;
1363         else
1364                 *address = user->intf->addrinfo[channel].lun;
1365         release_ipmi_user(user, index);
1366
1367         return rv;
1368 }
1369 EXPORT_SYMBOL(ipmi_get_my_LUN);
1370
1371 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1372 {
1373         int mode, index;
1374         unsigned long flags;
1375
1376         user = acquire_ipmi_user(user, &index);
1377         if (!user)
1378                 return -ENODEV;
1379
1380         spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1381         mode = user->intf->maintenance_mode;
1382         spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1383         release_ipmi_user(user, index);
1384
1385         return mode;
1386 }
1387 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1388
1389 static void maintenance_mode_update(struct ipmi_smi *intf)
1390 {
1391         if (intf->handlers->set_maintenance_mode)
1392                 intf->handlers->set_maintenance_mode(
1393                         intf->send_info, intf->maintenance_mode_enable);
1394 }
1395
1396 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1397 {
1398         int rv = 0, index;
1399         unsigned long flags;
1400         struct ipmi_smi *intf = user->intf;
1401
1402         user = acquire_ipmi_user(user, &index);
1403         if (!user)
1404                 return -ENODEV;
1405
1406         spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1407         if (intf->maintenance_mode != mode) {
1408                 switch (mode) {
1409                 case IPMI_MAINTENANCE_MODE_AUTO:
1410                         intf->maintenance_mode_enable
1411                                 = (intf->auto_maintenance_timeout > 0);
1412                         break;
1413
1414                 case IPMI_MAINTENANCE_MODE_OFF:
1415                         intf->maintenance_mode_enable = false;
1416                         break;
1417
1418                 case IPMI_MAINTENANCE_MODE_ON:
1419                         intf->maintenance_mode_enable = true;
1420                         break;
1421
1422                 default:
1423                         rv = -EINVAL;
1424                         goto out_unlock;
1425                 }
1426                 intf->maintenance_mode = mode;
1427
1428                 maintenance_mode_update(intf);
1429         }
1430  out_unlock:
1431         spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1432         release_ipmi_user(user, index);
1433
1434         return rv;
1435 }
1436 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1437
1438 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1439 {
1440         unsigned long        flags;
1441         struct ipmi_smi      *intf = user->intf;
1442         struct ipmi_recv_msg *msg, *msg2;
1443         struct list_head     msgs;
1444         int index;
1445
1446         user = acquire_ipmi_user(user, &index);
1447         if (!user)
1448                 return -ENODEV;
1449
1450         INIT_LIST_HEAD(&msgs);
1451
1452         spin_lock_irqsave(&intf->events_lock, flags);
1453         if (user->gets_events == val)
1454                 goto out;
1455
1456         user->gets_events = val;
1457
1458         if (val) {
1459                 if (atomic_inc_return(&intf->event_waiters) == 1)
1460                         need_waiter(intf);
1461         } else {
1462                 atomic_dec(&intf->event_waiters);
1463         }
1464
1465         if (intf->delivering_events)
1466                 /*
1467                  * Another thread is delivering events for this, so
1468                  * let it handle any new events.
1469                  */
1470                 goto out;
1471
1472         /* Deliver any queued events. */
1473         while (user->gets_events && !list_empty(&intf->waiting_events)) {
1474                 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1475                         list_move_tail(&msg->link, &msgs);
1476                 intf->waiting_events_count = 0;
1477                 if (intf->event_msg_printed) {
1478                         dev_warn(intf->si_dev, "Event queue no longer full\n");
1479                         intf->event_msg_printed = 0;
1480                 }
1481
1482                 intf->delivering_events = 1;
1483                 spin_unlock_irqrestore(&intf->events_lock, flags);
1484
1485                 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1486                         msg->user = user;
1487                         kref_get(&user->refcount);
1488                         deliver_local_response(intf, msg);
1489                 }
1490
1491                 spin_lock_irqsave(&intf->events_lock, flags);
1492                 intf->delivering_events = 0;
1493         }
1494
1495  out:
1496         spin_unlock_irqrestore(&intf->events_lock, flags);
1497         release_ipmi_user(user, index);
1498
1499         return 0;
1500 }
1501 EXPORT_SYMBOL(ipmi_set_gets_events);
1502
1503 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1504                                       unsigned char netfn,
1505                                       unsigned char cmd,
1506                                       unsigned char chan)
1507 {
1508         struct cmd_rcvr *rcvr;
1509
1510         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1511                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1512                                         && (rcvr->chans & (1 << chan)))
1513                         return rcvr;
1514         }
1515         return NULL;
1516 }
1517
1518 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1519                                  unsigned char netfn,
1520                                  unsigned char cmd,
1521                                  unsigned int  chans)
1522 {
1523         struct cmd_rcvr *rcvr;
1524
1525         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1526                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1527                                         && (rcvr->chans & chans))
1528                         return 0;
1529         }
1530         return 1;
1531 }
1532
1533 int ipmi_register_for_cmd(struct ipmi_user *user,
1534                           unsigned char netfn,
1535                           unsigned char cmd,
1536                           unsigned int  chans)
1537 {
1538         struct ipmi_smi *intf = user->intf;
1539         struct cmd_rcvr *rcvr;
1540         int rv = 0, index;
1541
1542         user = acquire_ipmi_user(user, &index);
1543         if (!user)
1544                 return -ENODEV;
1545
1546         rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1547         if (!rcvr) {
1548                 rv = -ENOMEM;
1549                 goto out_release;
1550         }
1551         rcvr->cmd = cmd;
1552         rcvr->netfn = netfn;
1553         rcvr->chans = chans;
1554         rcvr->user = user;
1555
1556         mutex_lock(&intf->cmd_rcvrs_mutex);
1557         /* Make sure the command/netfn is not already registered. */
1558         if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1559                 rv = -EBUSY;
1560                 goto out_unlock;
1561         }
1562
1563         if (atomic_inc_return(&intf->event_waiters) == 1)
1564                 need_waiter(intf);
1565
1566         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1567
1568 out_unlock:
1569         mutex_unlock(&intf->cmd_rcvrs_mutex);
1570         if (rv)
1571                 kfree(rcvr);
1572 out_release:
1573         release_ipmi_user(user, index);
1574
1575         return rv;
1576 }
1577 EXPORT_SYMBOL(ipmi_register_for_cmd);
1578
1579 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1580                             unsigned char netfn,
1581                             unsigned char cmd,
1582                             unsigned int  chans)
1583 {
1584         struct ipmi_smi *intf = user->intf;
1585         struct cmd_rcvr *rcvr;
1586         struct cmd_rcvr *rcvrs = NULL;
1587         int i, rv = -ENOENT, index;
1588
1589         user = acquire_ipmi_user(user, &index);
1590         if (!user)
1591                 return -ENODEV;
1592
1593         mutex_lock(&intf->cmd_rcvrs_mutex);
1594         for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1595                 if (((1 << i) & chans) == 0)
1596                         continue;
1597                 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1598                 if (rcvr == NULL)
1599                         continue;
1600                 if (rcvr->user == user) {
1601                         rv = 0;
1602                         rcvr->chans &= ~chans;
1603                         if (rcvr->chans == 0) {
1604                                 list_del_rcu(&rcvr->link);
1605                                 rcvr->next = rcvrs;
1606                                 rcvrs = rcvr;
1607                         }
1608                 }
1609         }
1610         mutex_unlock(&intf->cmd_rcvrs_mutex);
1611         synchronize_rcu();
1612         release_ipmi_user(user, index);
1613         while (rcvrs) {
1614                 atomic_dec(&intf->event_waiters);
1615                 rcvr = rcvrs;
1616                 rcvrs = rcvr->next;
1617                 kfree(rcvr);
1618         }
1619
1620         return rv;
1621 }
1622 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1623
1624 static unsigned char
1625 ipmb_checksum(unsigned char *data, int size)
1626 {
1627         unsigned char csum = 0;
1628
1629         for (; size > 0; size--, data++)
1630                 csum += *data;
1631
1632         return -csum;
1633 }
1634
1635 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1636                                    struct kernel_ipmi_msg *msg,
1637                                    struct ipmi_ipmb_addr *ipmb_addr,
1638                                    long                  msgid,
1639                                    unsigned char         ipmb_seq,
1640                                    int                   broadcast,
1641                                    unsigned char         source_address,
1642                                    unsigned char         source_lun)
1643 {
1644         int i = broadcast;
1645
1646         /* Format the IPMB header data. */
1647         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1648         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1649         smi_msg->data[2] = ipmb_addr->channel;
1650         if (broadcast)
1651                 smi_msg->data[3] = 0;
1652         smi_msg->data[i+3] = ipmb_addr->slave_addr;
1653         smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1654         smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1655         smi_msg->data[i+6] = source_address;
1656         smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1657         smi_msg->data[i+8] = msg->cmd;
1658
1659         /* Now tack on the data to the message. */
1660         if (msg->data_len > 0)
1661                 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1662         smi_msg->data_size = msg->data_len + 9;
1663
1664         /* Now calculate the checksum and tack it on. */
1665         smi_msg->data[i+smi_msg->data_size]
1666                 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1667
1668         /*
1669          * Add on the checksum size and the offset from the
1670          * broadcast.
1671          */
1672         smi_msg->data_size += 1 + i;
1673
1674         smi_msg->msgid = msgid;
1675 }
1676
1677 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1678                                   struct kernel_ipmi_msg *msg,
1679                                   struct ipmi_lan_addr  *lan_addr,
1680                                   long                  msgid,
1681                                   unsigned char         ipmb_seq,
1682                                   unsigned char         source_lun)
1683 {
1684         /* Format the IPMB header data. */
1685         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1686         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1687         smi_msg->data[2] = lan_addr->channel;
1688         smi_msg->data[3] = lan_addr->session_handle;
1689         smi_msg->data[4] = lan_addr->remote_SWID;
1690         smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1691         smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1692         smi_msg->data[7] = lan_addr->local_SWID;
1693         smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1694         smi_msg->data[9] = msg->cmd;
1695
1696         /* Now tack on the data to the message. */
1697         if (msg->data_len > 0)
1698                 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1699         smi_msg->data_size = msg->data_len + 10;
1700
1701         /* Now calculate the checksum and tack it on. */
1702         smi_msg->data[smi_msg->data_size]
1703                 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1704
1705         /*
1706          * Add on the checksum size and the offset from the
1707          * broadcast.
1708          */
1709         smi_msg->data_size += 1;
1710
1711         smi_msg->msgid = msgid;
1712 }
1713
1714 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1715                                              struct ipmi_smi_msg *smi_msg,
1716                                              int priority)
1717 {
1718         if (intf->curr_msg) {
1719                 if (priority > 0)
1720                         list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1721                 else
1722                         list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1723                 smi_msg = NULL;
1724         } else {
1725                 intf->curr_msg = smi_msg;
1726         }
1727
1728         return smi_msg;
1729 }
1730
1731
1732 static void smi_send(struct ipmi_smi *intf,
1733                      const struct ipmi_smi_handlers *handlers,
1734                      struct ipmi_smi_msg *smi_msg, int priority)
1735 {
1736         int run_to_completion = intf->run_to_completion;
1737
1738         if (run_to_completion) {
1739                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1740         } else {
1741                 unsigned long flags;
1742
1743                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1744                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1745                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1746         }
1747
1748         if (smi_msg)
1749                 handlers->sender(intf->send_info, smi_msg);
1750 }
1751
1752 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1753 {
1754         return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1755                  && ((msg->cmd == IPMI_COLD_RESET_CMD)
1756                      || (msg->cmd == IPMI_WARM_RESET_CMD)))
1757                 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1758 }
1759
1760 static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1761                               struct ipmi_addr       *addr,
1762                               long                   msgid,
1763                               struct kernel_ipmi_msg *msg,
1764                               struct ipmi_smi_msg    *smi_msg,
1765                               struct ipmi_recv_msg   *recv_msg,
1766                               int                    retries,
1767                               unsigned int           retry_time_ms)
1768 {
1769         struct ipmi_system_interface_addr *smi_addr;
1770
1771         if (msg->netfn & 1)
1772                 /* Responses are not allowed to the SMI. */
1773                 return -EINVAL;
1774
1775         smi_addr = (struct ipmi_system_interface_addr *) addr;
1776         if (smi_addr->lun > 3) {
1777                 ipmi_inc_stat(intf, sent_invalid_commands);
1778                 return -EINVAL;
1779         }
1780
1781         memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1782
1783         if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1784             && ((msg->cmd == IPMI_SEND_MSG_CMD)
1785                 || (msg->cmd == IPMI_GET_MSG_CMD)
1786                 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1787                 /*
1788                  * We don't let the user do these, since we manage
1789                  * the sequence numbers.
1790                  */
1791                 ipmi_inc_stat(intf, sent_invalid_commands);
1792                 return -EINVAL;
1793         }
1794
1795         if (is_maintenance_mode_cmd(msg)) {
1796                 unsigned long flags;
1797
1798                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1799                 intf->auto_maintenance_timeout
1800                         = maintenance_mode_timeout_ms;
1801                 if (!intf->maintenance_mode
1802                     && !intf->maintenance_mode_enable) {
1803                         intf->maintenance_mode_enable = true;
1804                         maintenance_mode_update(intf);
1805                 }
1806                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1807                                        flags);
1808         }
1809
1810         if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1811                 ipmi_inc_stat(intf, sent_invalid_commands);
1812                 return -EMSGSIZE;
1813         }
1814
1815         smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1816         smi_msg->data[1] = msg->cmd;
1817         smi_msg->msgid = msgid;
1818         smi_msg->user_data = recv_msg;
1819         if (msg->data_len > 0)
1820                 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1821         smi_msg->data_size = msg->data_len + 2;
1822         ipmi_inc_stat(intf, sent_local_commands);
1823
1824         return 0;
1825 }
1826
1827 static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1828                            struct ipmi_addr       *addr,
1829                            long                   msgid,
1830                            struct kernel_ipmi_msg *msg,
1831                            struct ipmi_smi_msg    *smi_msg,
1832                            struct ipmi_recv_msg   *recv_msg,
1833                            unsigned char          source_address,
1834                            unsigned char          source_lun,
1835                            int                    retries,
1836                            unsigned int           retry_time_ms)
1837 {
1838         struct ipmi_ipmb_addr *ipmb_addr;
1839         unsigned char ipmb_seq;
1840         long seqid;
1841         int broadcast = 0;
1842         struct ipmi_channel *chans;
1843         int rv = 0;
1844
1845         if (addr->channel >= IPMI_MAX_CHANNELS) {
1846                 ipmi_inc_stat(intf, sent_invalid_commands);
1847                 return -EINVAL;
1848         }
1849
1850         chans = READ_ONCE(intf->channel_list)->c;
1851
1852         if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1853                 ipmi_inc_stat(intf, sent_invalid_commands);
1854                 return -EINVAL;
1855         }
1856
1857         if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1858                 /*
1859                  * Broadcasts add a zero at the beginning of the
1860                  * message, but otherwise is the same as an IPMB
1861                  * address.
1862                  */
1863                 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1864                 broadcast = 1;
1865                 retries = 0; /* Don't retry broadcasts. */
1866         }
1867
1868         /*
1869          * 9 for the header and 1 for the checksum, plus
1870          * possibly one for the broadcast.
1871          */
1872         if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1873                 ipmi_inc_stat(intf, sent_invalid_commands);
1874                 return -EMSGSIZE;
1875         }
1876
1877         ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1878         if (ipmb_addr->lun > 3) {
1879                 ipmi_inc_stat(intf, sent_invalid_commands);
1880                 return -EINVAL;
1881         }
1882
1883         memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1884
1885         if (recv_msg->msg.netfn & 0x1) {
1886                 /*
1887                  * It's a response, so use the user's sequence
1888                  * from msgid.
1889                  */
1890                 ipmi_inc_stat(intf, sent_ipmb_responses);
1891                 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1892                                 msgid, broadcast,
1893                                 source_address, source_lun);
1894
1895                 /*
1896                  * Save the receive message so we can use it
1897                  * to deliver the response.
1898                  */
1899                 smi_msg->user_data = recv_msg;
1900         } else {
1901                 /* It's a command, so get a sequence for it. */
1902                 unsigned long flags;
1903
1904                 spin_lock_irqsave(&intf->seq_lock, flags);
1905
1906                 if (is_maintenance_mode_cmd(msg))
1907                         intf->ipmb_maintenance_mode_timeout =
1908                                 maintenance_mode_timeout_ms;
1909
1910                 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1911                         /* Different default in maintenance mode */
1912                         retry_time_ms = default_maintenance_retry_ms;
1913
1914                 /*
1915                  * Create a sequence number with a 1 second
1916                  * timeout and 4 retries.
1917                  */
1918                 rv = intf_next_seq(intf,
1919                                    recv_msg,
1920                                    retry_time_ms,
1921                                    retries,
1922                                    broadcast,
1923                                    &ipmb_seq,
1924                                    &seqid);
1925                 if (rv)
1926                         /*
1927                          * We have used up all the sequence numbers,
1928                          * probably, so abort.
1929                          */
1930                         goto out_err;
1931
1932                 ipmi_inc_stat(intf, sent_ipmb_commands);
1933
1934                 /*
1935                  * Store the sequence number in the message,
1936                  * so that when the send message response
1937                  * comes back we can start the timer.
1938                  */
1939                 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1940                                 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1941                                 ipmb_seq, broadcast,
1942                                 source_address, source_lun);
1943
1944                 /*
1945                  * Copy the message into the recv message data, so we
1946                  * can retransmit it later if necessary.
1947                  */
1948                 memcpy(recv_msg->msg_data, smi_msg->data,
1949                        smi_msg->data_size);
1950                 recv_msg->msg.data = recv_msg->msg_data;
1951                 recv_msg->msg.data_len = smi_msg->data_size;
1952
1953                 /*
1954                  * We don't unlock until here, because we need
1955                  * to copy the completed message into the
1956                  * recv_msg before we release the lock.
1957                  * Otherwise, race conditions may bite us.  I
1958                  * know that's pretty paranoid, but I prefer
1959                  * to be correct.
1960                  */
1961 out_err:
1962                 spin_unlock_irqrestore(&intf->seq_lock, flags);
1963         }
1964
1965         return rv;
1966 }
1967
1968 static int i_ipmi_req_lan(struct ipmi_smi        *intf,
1969                           struct ipmi_addr       *addr,
1970                           long                   msgid,
1971                           struct kernel_ipmi_msg *msg,
1972                           struct ipmi_smi_msg    *smi_msg,
1973                           struct ipmi_recv_msg   *recv_msg,
1974                           unsigned char          source_lun,
1975                           int                    retries,
1976                           unsigned int           retry_time_ms)
1977 {
1978         struct ipmi_lan_addr  *lan_addr;
1979         unsigned char ipmb_seq;
1980         long seqid;
1981         struct ipmi_channel *chans;
1982         int rv = 0;
1983
1984         if (addr->channel >= IPMI_MAX_CHANNELS) {
1985                 ipmi_inc_stat(intf, sent_invalid_commands);
1986                 return -EINVAL;
1987         }
1988
1989         chans = READ_ONCE(intf->channel_list)->c;
1990
1991         if ((chans[addr->channel].medium
1992                                 != IPMI_CHANNEL_MEDIUM_8023LAN)
1993                         && (chans[addr->channel].medium
1994                             != IPMI_CHANNEL_MEDIUM_ASYNC)) {
1995                 ipmi_inc_stat(intf, sent_invalid_commands);
1996                 return -EINVAL;
1997         }
1998
1999         /* 11 for the header and 1 for the checksum. */
2000         if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2001                 ipmi_inc_stat(intf, sent_invalid_commands);
2002                 return -EMSGSIZE;
2003         }
2004
2005         lan_addr = (struct ipmi_lan_addr *) addr;
2006         if (lan_addr->lun > 3) {
2007                 ipmi_inc_stat(intf, sent_invalid_commands);
2008                 return -EINVAL;
2009         }
2010
2011         memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2012
2013         if (recv_msg->msg.netfn & 0x1) {
2014                 /*
2015                  * It's a response, so use the user's sequence
2016                  * from msgid.
2017                  */
2018                 ipmi_inc_stat(intf, sent_lan_responses);
2019                 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2020                                msgid, source_lun);
2021
2022                 /*
2023                  * Save the receive message so we can use it
2024                  * to deliver the response.
2025                  */
2026                 smi_msg->user_data = recv_msg;
2027         } else {
2028                 /* It's a command, so get a sequence for it. */
2029                 unsigned long flags;
2030
2031                 spin_lock_irqsave(&intf->seq_lock, flags);
2032
2033                 /*
2034                  * Create a sequence number with a 1 second
2035                  * timeout and 4 retries.
2036                  */
2037                 rv = intf_next_seq(intf,
2038                                    recv_msg,
2039                                    retry_time_ms,
2040                                    retries,
2041                                    0,
2042                                    &ipmb_seq,
2043                                    &seqid);
2044                 if (rv)
2045                         /*
2046                          * We have used up all the sequence numbers,
2047                          * probably, so abort.
2048                          */
2049                         goto out_err;
2050
2051                 ipmi_inc_stat(intf, sent_lan_commands);
2052
2053                 /*
2054                  * Store the sequence number in the message,
2055                  * so that when the send message response
2056                  * comes back we can start the timer.
2057                  */
2058                 format_lan_msg(smi_msg, msg, lan_addr,
2059                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2060                                ipmb_seq, source_lun);
2061
2062                 /*
2063                  * Copy the message into the recv message data, so we
2064                  * can retransmit it later if necessary.
2065                  */
2066                 memcpy(recv_msg->msg_data, smi_msg->data,
2067                        smi_msg->data_size);
2068                 recv_msg->msg.data = recv_msg->msg_data;
2069                 recv_msg->msg.data_len = smi_msg->data_size;
2070
2071                 /*
2072                  * We don't unlock until here, because we need
2073                  * to copy the completed message into the
2074                  * recv_msg before we release the lock.
2075                  * Otherwise, race conditions may bite us.  I
2076                  * know that's pretty paranoid, but I prefer
2077                  * to be correct.
2078                  */
2079 out_err:
2080                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2081         }
2082
2083         return rv;
2084 }
2085
2086 /*
2087  * Separate from ipmi_request so that the user does not have to be
2088  * supplied in certain circumstances (mainly at panic time).  If
2089  * messages are supplied, they will be freed, even if an error
2090  * occurs.
2091  */
2092 static int i_ipmi_request(struct ipmi_user     *user,
2093                           struct ipmi_smi      *intf,
2094                           struct ipmi_addr     *addr,
2095                           long                 msgid,
2096                           struct kernel_ipmi_msg *msg,
2097                           void                 *user_msg_data,
2098                           void                 *supplied_smi,
2099                           struct ipmi_recv_msg *supplied_recv,
2100                           int                  priority,
2101                           unsigned char        source_address,
2102                           unsigned char        source_lun,
2103                           int                  retries,
2104                           unsigned int         retry_time_ms)
2105 {
2106         struct ipmi_smi_msg *smi_msg;
2107         struct ipmi_recv_msg *recv_msg;
2108         int rv = 0;
2109
2110         if (supplied_recv)
2111                 recv_msg = supplied_recv;
2112         else {
2113                 recv_msg = ipmi_alloc_recv_msg();
2114                 if (recv_msg == NULL) {
2115                         rv = -ENOMEM;
2116                         goto out;
2117                 }
2118         }
2119         recv_msg->user_msg_data = user_msg_data;
2120
2121         if (supplied_smi)
2122                 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2123         else {
2124                 smi_msg = ipmi_alloc_smi_msg();
2125                 if (smi_msg == NULL) {
2126                         ipmi_free_recv_msg(recv_msg);
2127                         rv = -ENOMEM;
2128                         goto out;
2129                 }
2130         }
2131
2132         rcu_read_lock();
2133         if (intf->in_shutdown) {
2134                 rv = -ENODEV;
2135                 goto out_err;
2136         }
2137
2138         recv_msg->user = user;
2139         if (user)
2140                 /* The put happens when the message is freed. */
2141                 kref_get(&user->refcount);
2142         recv_msg->msgid = msgid;
2143         /*
2144          * Store the message to send in the receive message so timeout
2145          * responses can get the proper response data.
2146          */
2147         recv_msg->msg = *msg;
2148
2149         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2150                 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2151                                         recv_msg, retries, retry_time_ms);
2152         } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2153                 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2154                                      source_address, source_lun,
2155                                      retries, retry_time_ms);
2156         } else if (is_lan_addr(addr)) {
2157                 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2158                                     source_lun, retries, retry_time_ms);
2159         } else {
2160             /* Unknown address type. */
2161                 ipmi_inc_stat(intf, sent_invalid_commands);
2162                 rv = -EINVAL;
2163         }
2164
2165         if (rv) {
2166 out_err:
2167                 ipmi_free_smi_msg(smi_msg);
2168                 ipmi_free_recv_msg(recv_msg);
2169         } else {
2170                 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2171
2172                 smi_send(intf, intf->handlers, smi_msg, priority);
2173         }
2174         rcu_read_unlock();
2175
2176 out:
2177         return rv;
2178 }
2179
2180 static int check_addr(struct ipmi_smi  *intf,
2181                       struct ipmi_addr *addr,
2182                       unsigned char    *saddr,
2183                       unsigned char    *lun)
2184 {
2185         if (addr->channel >= IPMI_MAX_CHANNELS)
2186                 return -EINVAL;
2187         *lun = intf->addrinfo[addr->channel].lun;
2188         *saddr = intf->addrinfo[addr->channel].address;
2189         return 0;
2190 }
2191
2192 int ipmi_request_settime(struct ipmi_user *user,
2193                          struct ipmi_addr *addr,
2194                          long             msgid,
2195                          struct kernel_ipmi_msg  *msg,
2196                          void             *user_msg_data,
2197                          int              priority,
2198                          int              retries,
2199                          unsigned int     retry_time_ms)
2200 {
2201         unsigned char saddr = 0, lun = 0;
2202         int rv, index;
2203
2204         if (!user)
2205                 return -EINVAL;
2206
2207         user = acquire_ipmi_user(user, &index);
2208         if (!user)
2209                 return -ENODEV;
2210
2211         rv = check_addr(user->intf, addr, &saddr, &lun);
2212         if (!rv)
2213                 rv = i_ipmi_request(user,
2214                                     user->intf,
2215                                     addr,
2216                                     msgid,
2217                                     msg,
2218                                     user_msg_data,
2219                                     NULL, NULL,
2220                                     priority,
2221                                     saddr,
2222                                     lun,
2223                                     retries,
2224                                     retry_time_ms);
2225
2226         release_ipmi_user(user, index);
2227         return rv;
2228 }
2229 EXPORT_SYMBOL(ipmi_request_settime);
2230
2231 int ipmi_request_supply_msgs(struct ipmi_user     *user,
2232                              struct ipmi_addr     *addr,
2233                              long                 msgid,
2234                              struct kernel_ipmi_msg *msg,
2235                              void                 *user_msg_data,
2236                              void                 *supplied_smi,
2237                              struct ipmi_recv_msg *supplied_recv,
2238                              int                  priority)
2239 {
2240         unsigned char saddr = 0, lun = 0;
2241         int rv, index;
2242
2243         if (!user)
2244                 return -EINVAL;
2245
2246         user = acquire_ipmi_user(user, &index);
2247         if (!user)
2248                 return -ENODEV;
2249
2250         rv = check_addr(user->intf, addr, &saddr, &lun);
2251         if (!rv)
2252                 rv = i_ipmi_request(user,
2253                                     user->intf,
2254                                     addr,
2255                                     msgid,
2256                                     msg,
2257                                     user_msg_data,
2258                                     supplied_smi,
2259                                     supplied_recv,
2260                                     priority,
2261                                     saddr,
2262                                     lun,
2263                                     -1, 0);
2264
2265         release_ipmi_user(user, index);
2266         return rv;
2267 }
2268 EXPORT_SYMBOL(ipmi_request_supply_msgs);
2269
2270 static void bmc_device_id_handler(struct ipmi_smi *intf,
2271                                   struct ipmi_recv_msg *msg)
2272 {
2273         int rv;
2274
2275         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2276                         || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2277                         || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2278                 dev_warn(intf->si_dev,
2279                          "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2280                          msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2281                 return;
2282         }
2283
2284         rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2285                         msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2286         if (rv) {
2287                 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2288                 intf->bmc->dyn_id_set = 0;
2289         } else {
2290                 /*
2291                  * Make sure the id data is available before setting
2292                  * dyn_id_set.
2293                  */
2294                 smp_wmb();
2295                 intf->bmc->dyn_id_set = 1;
2296         }
2297
2298         wake_up(&intf->waitq);
2299 }
2300
2301 static int
2302 send_get_device_id_cmd(struct ipmi_smi *intf)
2303 {
2304         struct ipmi_system_interface_addr si;
2305         struct kernel_ipmi_msg msg;
2306
2307         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2308         si.channel = IPMI_BMC_CHANNEL;
2309         si.lun = 0;
2310
2311         msg.netfn = IPMI_NETFN_APP_REQUEST;
2312         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2313         msg.data = NULL;
2314         msg.data_len = 0;
2315
2316         return i_ipmi_request(NULL,
2317                               intf,
2318                               (struct ipmi_addr *) &si,
2319                               0,
2320                               &msg,
2321                               intf,
2322                               NULL,
2323                               NULL,
2324                               0,
2325                               intf->addrinfo[0].address,
2326                               intf->addrinfo[0].lun,
2327                               -1, 0);
2328 }
2329
2330 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2331 {
2332         int rv;
2333
2334         bmc->dyn_id_set = 2;
2335
2336         intf->null_user_handler = bmc_device_id_handler;
2337
2338         rv = send_get_device_id_cmd(intf);
2339         if (rv)
2340                 return rv;
2341
2342         wait_event(intf->waitq, bmc->dyn_id_set != 2);
2343
2344         if (!bmc->dyn_id_set)
2345                 rv = -EIO; /* Something went wrong in the fetch. */
2346
2347         /* dyn_id_set makes the id data available. */
2348         smp_rmb();
2349
2350         intf->null_user_handler = NULL;
2351
2352         return rv;
2353 }
2354
2355 /*
2356  * Fetch the device id for the bmc/interface.  You must pass in either
2357  * bmc or intf, this code will get the other one.  If the data has
2358  * been recently fetched, this will just use the cached data.  Otherwise
2359  * it will run a new fetch.
2360  *
2361  * Except for the first time this is called (in ipmi_register_smi()),
2362  * this will always return good data;
2363  */
2364 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2365                                struct ipmi_device_id *id,
2366                                bool *guid_set, guid_t *guid, int intf_num)
2367 {
2368         int rv = 0;
2369         int prev_dyn_id_set, prev_guid_set;
2370         bool intf_set = intf != NULL;
2371
2372         if (!intf) {
2373                 mutex_lock(&bmc->dyn_mutex);
2374 retry_bmc_lock:
2375                 if (list_empty(&bmc->intfs)) {
2376                         mutex_unlock(&bmc->dyn_mutex);
2377                         return -ENOENT;
2378                 }
2379                 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2380                                         bmc_link);
2381                 kref_get(&intf->refcount);
2382                 mutex_unlock(&bmc->dyn_mutex);
2383                 mutex_lock(&intf->bmc_reg_mutex);
2384                 mutex_lock(&bmc->dyn_mutex);
2385                 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2386                                              bmc_link)) {
2387                         mutex_unlock(&intf->bmc_reg_mutex);
2388                         kref_put(&intf->refcount, intf_free);
2389                         goto retry_bmc_lock;
2390                 }
2391         } else {
2392                 mutex_lock(&intf->bmc_reg_mutex);
2393                 bmc = intf->bmc;
2394                 mutex_lock(&bmc->dyn_mutex);
2395                 kref_get(&intf->refcount);
2396         }
2397
2398         /* If we have a valid and current ID, just return that. */
2399         if (intf->in_bmc_register ||
2400             (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2401                 goto out_noprocessing;
2402
2403         prev_guid_set = bmc->dyn_guid_set;
2404         __get_guid(intf);
2405
2406         prev_dyn_id_set = bmc->dyn_id_set;
2407         rv = __get_device_id(intf, bmc);
2408         if (rv)
2409                 goto out;
2410
2411         /*
2412          * The guid, device id, manufacturer id, and product id should
2413          * not change on a BMC.  If it does we have to do some dancing.
2414          */
2415         if (!intf->bmc_registered
2416             || (!prev_guid_set && bmc->dyn_guid_set)
2417             || (!prev_dyn_id_set && bmc->dyn_id_set)
2418             || (prev_guid_set && bmc->dyn_guid_set
2419                 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2420             || bmc->id.device_id != bmc->fetch_id.device_id
2421             || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2422             || bmc->id.product_id != bmc->fetch_id.product_id) {
2423                 struct ipmi_device_id id = bmc->fetch_id;
2424                 int guid_set = bmc->dyn_guid_set;
2425                 guid_t guid;
2426
2427                 guid = bmc->fetch_guid;
2428                 mutex_unlock(&bmc->dyn_mutex);
2429
2430                 __ipmi_bmc_unregister(intf);
2431                 /* Fill in the temporary BMC for good measure. */
2432                 intf->bmc->id = id;
2433                 intf->bmc->dyn_guid_set = guid_set;
2434                 intf->bmc->guid = guid;
2435                 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2436                         need_waiter(intf); /* Retry later on an error. */
2437                 else
2438                         __scan_channels(intf, &id);
2439
2440
2441                 if (!intf_set) {
2442                         /*
2443                          * We weren't given the interface on the
2444                          * command line, so restart the operation on
2445                          * the next interface for the BMC.
2446                          */
2447                         mutex_unlock(&intf->bmc_reg_mutex);
2448                         mutex_lock(&bmc->dyn_mutex);
2449                         goto retry_bmc_lock;
2450                 }
2451
2452                 /* We have a new BMC, set it up. */
2453                 bmc = intf->bmc;
2454                 mutex_lock(&bmc->dyn_mutex);
2455                 goto out_noprocessing;
2456         } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2457                 /* Version info changes, scan the channels again. */
2458                 __scan_channels(intf, &bmc->fetch_id);
2459
2460         bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2461
2462 out:
2463         if (rv && prev_dyn_id_set) {
2464                 rv = 0; /* Ignore failures if we have previous data. */
2465                 bmc->dyn_id_set = prev_dyn_id_set;
2466         }
2467         if (!rv) {
2468                 bmc->id = bmc->fetch_id;
2469                 if (bmc->dyn_guid_set)
2470                         bmc->guid = bmc->fetch_guid;
2471                 else if (prev_guid_set)
2472                         /*
2473                          * The guid used to be valid and it failed to fetch,
2474                          * just use the cached value.
2475                          */
2476                         bmc->dyn_guid_set = prev_guid_set;
2477         }
2478 out_noprocessing:
2479         if (!rv) {
2480                 if (id)
2481                         *id = bmc->id;
2482
2483                 if (guid_set)
2484                         *guid_set = bmc->dyn_guid_set;
2485
2486                 if (guid && bmc->dyn_guid_set)
2487                         *guid =  bmc->guid;
2488         }
2489
2490         mutex_unlock(&bmc->dyn_mutex);
2491         mutex_unlock(&intf->bmc_reg_mutex);
2492
2493         kref_put(&intf->refcount, intf_free);
2494         return rv;
2495 }
2496
2497 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2498                              struct ipmi_device_id *id,
2499                              bool *guid_set, guid_t *guid)
2500 {
2501         return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2502 }
2503
2504 static ssize_t device_id_show(struct device *dev,
2505                               struct device_attribute *attr,
2506                               char *buf)
2507 {
2508         struct bmc_device *bmc = to_bmc_device(dev);
2509         struct ipmi_device_id id;
2510         int rv;
2511
2512         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2513         if (rv)
2514                 return rv;
2515
2516         return snprintf(buf, 10, "%u\n", id.device_id);
2517 }
2518 static DEVICE_ATTR_RO(device_id);
2519
2520 static ssize_t provides_device_sdrs_show(struct device *dev,
2521                                          struct device_attribute *attr,
2522                                          char *buf)
2523 {
2524         struct bmc_device *bmc = to_bmc_device(dev);
2525         struct ipmi_device_id id;
2526         int rv;
2527
2528         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2529         if (rv)
2530                 return rv;
2531
2532         return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2533 }
2534 static DEVICE_ATTR_RO(provides_device_sdrs);
2535
2536 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2537                              char *buf)
2538 {
2539         struct bmc_device *bmc = to_bmc_device(dev);
2540         struct ipmi_device_id id;
2541         int rv;
2542
2543         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2544         if (rv)
2545                 return rv;
2546
2547         return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2548 }
2549 static DEVICE_ATTR_RO(revision);
2550
2551 static ssize_t firmware_revision_show(struct device *dev,
2552                                       struct device_attribute *attr,
2553                                       char *buf)
2554 {
2555         struct bmc_device *bmc = to_bmc_device(dev);
2556         struct ipmi_device_id id;
2557         int rv;
2558
2559         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2560         if (rv)
2561                 return rv;
2562
2563         return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2564                         id.firmware_revision_2);
2565 }
2566 static DEVICE_ATTR_RO(firmware_revision);
2567
2568 static ssize_t ipmi_version_show(struct device *dev,
2569                                  struct device_attribute *attr,
2570                                  char *buf)
2571 {
2572         struct bmc_device *bmc = to_bmc_device(dev);
2573         struct ipmi_device_id id;
2574         int rv;
2575
2576         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2577         if (rv)
2578                 return rv;
2579
2580         return snprintf(buf, 20, "%u.%u\n",
2581                         ipmi_version_major(&id),
2582                         ipmi_version_minor(&id));
2583 }
2584 static DEVICE_ATTR_RO(ipmi_version);
2585
2586 static ssize_t add_dev_support_show(struct device *dev,
2587                                     struct device_attribute *attr,
2588                                     char *buf)
2589 {
2590         struct bmc_device *bmc = to_bmc_device(dev);
2591         struct ipmi_device_id id;
2592         int rv;
2593
2594         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2595         if (rv)
2596                 return rv;
2597
2598         return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2599 }
2600 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2601                    NULL);
2602
2603 static ssize_t manufacturer_id_show(struct device *dev,
2604                                     struct device_attribute *attr,
2605                                     char *buf)
2606 {
2607         struct bmc_device *bmc = to_bmc_device(dev);
2608         struct ipmi_device_id id;
2609         int rv;
2610
2611         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2612         if (rv)
2613                 return rv;
2614
2615         return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2616 }
2617 static DEVICE_ATTR_RO(manufacturer_id);
2618
2619 static ssize_t product_id_show(struct device *dev,
2620                                struct device_attribute *attr,
2621                                char *buf)
2622 {
2623         struct bmc_device *bmc = to_bmc_device(dev);
2624         struct ipmi_device_id id;
2625         int rv;
2626
2627         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2628         if (rv)
2629                 return rv;
2630
2631         return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2632 }
2633 static DEVICE_ATTR_RO(product_id);
2634
2635 static ssize_t aux_firmware_rev_show(struct device *dev,
2636                                      struct device_attribute *attr,
2637                                      char *buf)
2638 {
2639         struct bmc_device *bmc = to_bmc_device(dev);
2640         struct ipmi_device_id id;
2641         int rv;
2642
2643         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2644         if (rv)
2645                 return rv;
2646
2647         return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2648                         id.aux_firmware_revision[3],
2649                         id.aux_firmware_revision[2],
2650                         id.aux_firmware_revision[1],
2651                         id.aux_firmware_revision[0]);
2652 }
2653 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2654
2655 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2656                          char *buf)
2657 {
2658         struct bmc_device *bmc = to_bmc_device(dev);
2659         bool guid_set;
2660         guid_t guid;
2661         int rv;
2662
2663         rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2664         if (rv)
2665                 return rv;
2666         if (!guid_set)
2667                 return -ENOENT;
2668
2669         return snprintf(buf, 38, "%pUl\n", guid.b);
2670 }
2671 static DEVICE_ATTR_RO(guid);
2672
2673 static struct attribute *bmc_dev_attrs[] = {
2674         &dev_attr_device_id.attr,
2675         &dev_attr_provides_device_sdrs.attr,
2676         &dev_attr_revision.attr,
2677         &dev_attr_firmware_revision.attr,
2678         &dev_attr_ipmi_version.attr,
2679         &dev_attr_additional_device_support.attr,
2680         &dev_attr_manufacturer_id.attr,
2681         &dev_attr_product_id.attr,
2682         &dev_attr_aux_firmware_revision.attr,
2683         &dev_attr_guid.attr,
2684         NULL
2685 };
2686
2687 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2688                                        struct attribute *attr, int idx)
2689 {
2690         struct device *dev = kobj_to_dev(kobj);
2691         struct bmc_device *bmc = to_bmc_device(dev);
2692         umode_t mode = attr->mode;
2693         int rv;
2694
2695         if (attr == &dev_attr_aux_firmware_revision.attr) {
2696                 struct ipmi_device_id id;
2697
2698                 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2699                 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2700         }
2701         if (attr == &dev_attr_guid.attr) {
2702                 bool guid_set;
2703
2704                 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2705                 return (!rv && guid_set) ? mode : 0;
2706         }
2707         return mode;
2708 }
2709
2710 static const struct attribute_group bmc_dev_attr_group = {
2711         .attrs          = bmc_dev_attrs,
2712         .is_visible     = bmc_dev_attr_is_visible,
2713 };
2714
2715 static const struct attribute_group *bmc_dev_attr_groups[] = {
2716         &bmc_dev_attr_group,
2717         NULL
2718 };
2719
2720 static const struct device_type bmc_device_type = {
2721         .groups         = bmc_dev_attr_groups,
2722 };
2723
2724 static int __find_bmc_guid(struct device *dev, void *data)
2725 {
2726         guid_t *guid = data;
2727         struct bmc_device *bmc;
2728         int rv;
2729
2730         if (dev->type != &bmc_device_type)
2731                 return 0;
2732
2733         bmc = to_bmc_device(dev);
2734         rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2735         if (rv)
2736                 rv = kref_get_unless_zero(&bmc->usecount);
2737         return rv;
2738 }
2739
2740 /*
2741  * Returns with the bmc's usecount incremented, if it is non-NULL.
2742  */
2743 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2744                                              guid_t *guid)
2745 {
2746         struct device *dev;
2747         struct bmc_device *bmc = NULL;
2748
2749         dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2750         if (dev) {
2751                 bmc = to_bmc_device(dev);
2752                 put_device(dev);
2753         }
2754         return bmc;
2755 }
2756
2757 struct prod_dev_id {
2758         unsigned int  product_id;
2759         unsigned char device_id;
2760 };
2761
2762 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2763 {
2764         struct prod_dev_id *cid = data;
2765         struct bmc_device *bmc;
2766         int rv;
2767
2768         if (dev->type != &bmc_device_type)
2769                 return 0;
2770
2771         bmc = to_bmc_device(dev);
2772         rv = (bmc->id.product_id == cid->product_id
2773               && bmc->id.device_id == cid->device_id);
2774         if (rv)
2775                 rv = kref_get_unless_zero(&bmc->usecount);
2776         return rv;
2777 }
2778
2779 /*
2780  * Returns with the bmc's usecount incremented, if it is non-NULL.
2781  */
2782 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2783         struct device_driver *drv,
2784         unsigned int product_id, unsigned char device_id)
2785 {
2786         struct prod_dev_id id = {
2787                 .product_id = product_id,
2788                 .device_id = device_id,
2789         };
2790         struct device *dev;
2791         struct bmc_device *bmc = NULL;
2792
2793         dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2794         if (dev) {
2795                 bmc = to_bmc_device(dev);
2796                 put_device(dev);
2797         }
2798         return bmc;
2799 }
2800
2801 static DEFINE_IDA(ipmi_bmc_ida);
2802
2803 static void
2804 release_bmc_device(struct device *dev)
2805 {
2806         kfree(to_bmc_device(dev));
2807 }
2808
2809 static void cleanup_bmc_work(struct work_struct *work)
2810 {
2811         struct bmc_device *bmc = container_of(work, struct bmc_device,
2812                                               remove_work);
2813         int id = bmc->pdev.id; /* Unregister overwrites id */
2814
2815         platform_device_unregister(&bmc->pdev);
2816         ida_simple_remove(&ipmi_bmc_ida, id);
2817 }
2818
2819 static void
2820 cleanup_bmc_device(struct kref *ref)
2821 {
2822         struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2823
2824         /*
2825          * Remove the platform device in a work queue to avoid issues
2826          * with removing the device attributes while reading a device
2827          * attribute.
2828          */
2829         schedule_work(&bmc->remove_work);
2830 }
2831
2832 /*
2833  * Must be called with intf->bmc_reg_mutex held.
2834  */
2835 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2836 {
2837         struct bmc_device *bmc = intf->bmc;
2838
2839         if (!intf->bmc_registered)
2840                 return;
2841
2842         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2843         sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2844         kfree(intf->my_dev_name);
2845         intf->my_dev_name = NULL;
2846
2847         mutex_lock(&bmc->dyn_mutex);
2848         list_del(&intf->bmc_link);
2849         mutex_unlock(&bmc->dyn_mutex);
2850         intf->bmc = &intf->tmp_bmc;
2851         kref_put(&bmc->usecount, cleanup_bmc_device);
2852         intf->bmc_registered = false;
2853 }
2854
2855 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2856 {
2857         mutex_lock(&intf->bmc_reg_mutex);
2858         __ipmi_bmc_unregister(intf);
2859         mutex_unlock(&intf->bmc_reg_mutex);
2860 }
2861
2862 /*
2863  * Must be called with intf->bmc_reg_mutex held.
2864  */
2865 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2866                                struct ipmi_device_id *id,
2867                                bool guid_set, guid_t *guid, int intf_num)
2868 {
2869         int               rv;
2870         struct bmc_device *bmc;
2871         struct bmc_device *old_bmc;
2872
2873         /*
2874          * platform_device_register() can cause bmc_reg_mutex to
2875          * be claimed because of the is_visible functions of
2876          * the attributes.  Eliminate possible recursion and
2877          * release the lock.
2878          */
2879         intf->in_bmc_register = true;
2880         mutex_unlock(&intf->bmc_reg_mutex);
2881
2882         /*
2883          * Try to find if there is an bmc_device struct
2884          * representing the interfaced BMC already
2885          */
2886         mutex_lock(&ipmidriver_mutex);
2887         if (guid_set)
2888                 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2889         else
2890                 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2891                                                     id->product_id,
2892                                                     id->device_id);
2893
2894         /*
2895          * If there is already an bmc_device, free the new one,
2896          * otherwise register the new BMC device
2897          */
2898         if (old_bmc) {
2899                 bmc = old_bmc;
2900                 /*
2901                  * Note: old_bmc already has usecount incremented by
2902                  * the BMC find functions.
2903                  */
2904                 intf->bmc = old_bmc;
2905                 mutex_lock(&bmc->dyn_mutex);
2906                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2907                 mutex_unlock(&bmc->dyn_mutex);
2908
2909                 dev_info(intf->si_dev,
2910                          "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2911                          bmc->id.manufacturer_id,
2912                          bmc->id.product_id,
2913                          bmc->id.device_id);
2914         } else {
2915                 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2916                 if (!bmc) {
2917                         rv = -ENOMEM;
2918                         goto out;
2919                 }
2920                 INIT_LIST_HEAD(&bmc->intfs);
2921                 mutex_init(&bmc->dyn_mutex);
2922                 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
2923
2924                 bmc->id = *id;
2925                 bmc->dyn_id_set = 1;
2926                 bmc->dyn_guid_set = guid_set;
2927                 bmc->guid = *guid;
2928                 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2929
2930                 bmc->pdev.name = "ipmi_bmc";
2931
2932                 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2933                 if (rv < 0)
2934                         goto out;
2935                 bmc->pdev.dev.driver = &ipmidriver.driver;
2936                 bmc->pdev.id = rv;
2937                 bmc->pdev.dev.release = release_bmc_device;
2938                 bmc->pdev.dev.type = &bmc_device_type;
2939                 kref_init(&bmc->usecount);
2940
2941                 intf->bmc = bmc;
2942                 mutex_lock(&bmc->dyn_mutex);
2943                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2944                 mutex_unlock(&bmc->dyn_mutex);
2945
2946                 rv = platform_device_register(&bmc->pdev);
2947                 if (rv) {
2948                         dev_err(intf->si_dev,
2949                                 "Unable to register bmc device: %d\n",
2950                                 rv);
2951                         goto out_list_del;
2952                 }
2953
2954                 dev_info(intf->si_dev,
2955                          "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2956                          bmc->id.manufacturer_id,
2957                          bmc->id.product_id,
2958                          bmc->id.device_id);
2959         }
2960
2961         /*
2962          * create symlink from system interface device to bmc device
2963          * and back.
2964          */
2965         rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
2966         if (rv) {
2967                 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
2968                 goto out_put_bmc;
2969         }
2970
2971         if (intf_num == -1)
2972                 intf_num = intf->intf_num;
2973         intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
2974         if (!intf->my_dev_name) {
2975                 rv = -ENOMEM;
2976                 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
2977                         rv);
2978                 goto out_unlink1;
2979         }
2980
2981         rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
2982                                intf->my_dev_name);
2983         if (rv) {
2984                 kfree(intf->my_dev_name);
2985                 intf->my_dev_name = NULL;
2986                 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
2987                         rv);
2988                 goto out_free_my_dev_name;
2989         }
2990
2991         intf->bmc_registered = true;
2992
2993 out:
2994         mutex_unlock(&ipmidriver_mutex);
2995         mutex_lock(&intf->bmc_reg_mutex);
2996         intf->in_bmc_register = false;
2997         return rv;
2998
2999
3000 out_free_my_dev_name:
3001         kfree(intf->my_dev_name);
3002         intf->my_dev_name = NULL;
3003
3004 out_unlink1:
3005         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3006
3007 out_put_bmc:
3008         mutex_lock(&bmc->dyn_mutex);
3009         list_del(&intf->bmc_link);
3010         mutex_unlock(&bmc->dyn_mutex);
3011         intf->bmc = &intf->tmp_bmc;
3012         kref_put(&bmc->usecount, cleanup_bmc_device);
3013         goto out;
3014
3015 out_list_del:
3016         mutex_lock(&bmc->dyn_mutex);
3017         list_del(&intf->bmc_link);
3018         mutex_unlock(&bmc->dyn_mutex);
3019         intf->bmc = &intf->tmp_bmc;
3020         put_device(&bmc->pdev.dev);
3021         goto out;
3022 }
3023
3024 static int
3025 send_guid_cmd(struct ipmi_smi *intf, int chan)
3026 {
3027         struct kernel_ipmi_msg            msg;
3028         struct ipmi_system_interface_addr si;
3029
3030         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3031         si.channel = IPMI_BMC_CHANNEL;
3032         si.lun = 0;
3033
3034         msg.netfn = IPMI_NETFN_APP_REQUEST;
3035         msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3036         msg.data = NULL;
3037         msg.data_len = 0;
3038         return i_ipmi_request(NULL,
3039                               intf,
3040                               (struct ipmi_addr *) &si,
3041                               0,
3042                               &msg,
3043                               intf,
3044                               NULL,
3045                               NULL,
3046                               0,
3047                               intf->addrinfo[0].address,
3048                               intf->addrinfo[0].lun,
3049                               -1, 0);
3050 }
3051
3052 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3053 {
3054         struct bmc_device *bmc = intf->bmc;
3055
3056         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3057             || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3058             || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3059                 /* Not for me */
3060                 return;
3061
3062         if (msg->msg.data[0] != 0) {
3063                 /* Error from getting the GUID, the BMC doesn't have one. */
3064                 bmc->dyn_guid_set = 0;
3065                 goto out;
3066         }
3067
3068         if (msg->msg.data_len < 17) {
3069                 bmc->dyn_guid_set = 0;
3070                 dev_warn(intf->si_dev,
3071                          "The GUID response from the BMC was too short, it was %d but should have been 17.  Assuming GUID is not available.\n",
3072                          msg->msg.data_len);
3073                 goto out;
3074         }
3075
3076         memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
3077         /*
3078          * Make sure the guid data is available before setting
3079          * dyn_guid_set.
3080          */
3081         smp_wmb();
3082         bmc->dyn_guid_set = 1;
3083  out:
3084         wake_up(&intf->waitq);
3085 }
3086
3087 static void __get_guid(struct ipmi_smi *intf)
3088 {
3089         int rv;
3090         struct bmc_device *bmc = intf->bmc;
3091
3092         bmc->dyn_guid_set = 2;
3093         intf->null_user_handler = guid_handler;
3094         rv = send_guid_cmd(intf, 0);
3095         if (rv)
3096                 /* Send failed, no GUID available. */
3097                 bmc->dyn_guid_set = 0;
3098
3099         wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3100
3101         /* dyn_guid_set makes the guid data available. */
3102         smp_rmb();
3103
3104         intf->null_user_handler = NULL;
3105 }
3106
3107 static int
3108 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3109 {
3110         struct kernel_ipmi_msg            msg;
3111         unsigned char                     data[1];
3112         struct ipmi_system_interface_addr si;
3113
3114         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3115         si.channel = IPMI_BMC_CHANNEL;
3116         si.lun = 0;
3117
3118         msg.netfn = IPMI_NETFN_APP_REQUEST;
3119         msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3120         msg.data = data;
3121         msg.data_len = 1;
3122         data[0] = chan;
3123         return i_ipmi_request(NULL,
3124                               intf,
3125                               (struct ipmi_addr *) &si,
3126                               0,
3127                               &msg,
3128                               intf,
3129                               NULL,
3130                               NULL,
3131                               0,
3132                               intf->addrinfo[0].address,
3133                               intf->addrinfo[0].lun,
3134                               -1, 0);
3135 }
3136
3137 static void
3138 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3139 {
3140         int rv = 0;
3141         int ch;
3142         unsigned int set = intf->curr_working_cset;
3143         struct ipmi_channel *chans;
3144
3145         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3146             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3147             && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3148                 /* It's the one we want */
3149                 if (msg->msg.data[0] != 0) {
3150                         /* Got an error from the channel, just go on. */
3151
3152                         if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3153                                 /*
3154                                  * If the MC does not support this
3155                                  * command, that is legal.  We just
3156                                  * assume it has one IPMB at channel
3157                                  * zero.
3158                                  */
3159                                 intf->wchannels[set].c[0].medium
3160                                         = IPMI_CHANNEL_MEDIUM_IPMB;
3161                                 intf->wchannels[set].c[0].protocol
3162                                         = IPMI_CHANNEL_PROTOCOL_IPMB;
3163
3164                                 intf->channel_list = intf->wchannels + set;
3165                                 intf->channels_ready = true;
3166                                 wake_up(&intf->waitq);
3167                                 goto out;
3168                         }
3169                         goto next_channel;
3170                 }
3171                 if (msg->msg.data_len < 4) {
3172                         /* Message not big enough, just go on. */
3173                         goto next_channel;
3174                 }
3175                 ch = intf->curr_channel;
3176                 chans = intf->wchannels[set].c;
3177                 chans[ch].medium = msg->msg.data[2] & 0x7f;
3178                 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3179
3180  next_channel:
3181                 intf->curr_channel++;
3182                 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3183                         intf->channel_list = intf->wchannels + set;
3184                         intf->channels_ready = true;
3185                         wake_up(&intf->waitq);
3186                 } else {
3187                         intf->channel_list = intf->wchannels + set;
3188                         intf->channels_ready = true;
3189                         rv = send_channel_info_cmd(intf, intf->curr_channel);
3190                 }
3191
3192                 if (rv) {
3193                         /* Got an error somehow, just give up. */
3194                         dev_warn(intf->si_dev,
3195                                  "Error sending channel information for channel %d: %d\n",
3196                                  intf->curr_channel, rv);
3197
3198                         intf->channel_list = intf->wchannels + set;
3199                         intf->channels_ready = true;
3200                         wake_up(&intf->waitq);
3201                 }
3202         }
3203  out:
3204         return;
3205 }
3206
3207 /*
3208  * Must be holding intf->bmc_reg_mutex to call this.
3209  */
3210 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3211 {
3212         int rv;
3213
3214         if (ipmi_version_major(id) > 1
3215                         || (ipmi_version_major(id) == 1
3216                             && ipmi_version_minor(id) >= 5)) {
3217                 unsigned int set;
3218
3219                 /*
3220                  * Start scanning the channels to see what is
3221                  * available.
3222                  */
3223                 set = !intf->curr_working_cset;
3224                 intf->curr_working_cset = set;
3225                 memset(&intf->wchannels[set], 0,
3226                        sizeof(struct ipmi_channel_set));
3227
3228                 intf->null_user_handler = channel_handler;
3229                 intf->curr_channel = 0;
3230                 rv = send_channel_info_cmd(intf, 0);
3231                 if (rv) {
3232                         dev_warn(intf->si_dev,
3233                                  "Error sending channel information for channel 0, %d\n",
3234                                  rv);
3235                         return -EIO;
3236                 }
3237
3238                 /* Wait for the channel info to be read. */
3239                 wait_event(intf->waitq, intf->channels_ready);
3240                 intf->null_user_handler = NULL;
3241         } else {
3242                 unsigned int set = intf->curr_working_cset;
3243
3244                 /* Assume a single IPMB channel at zero. */
3245                 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3246                 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3247                 intf->channel_list = intf->wchannels + set;
3248                 intf->channels_ready = true;
3249         }
3250
3251         return 0;
3252 }
3253
3254 static void ipmi_poll(struct ipmi_smi *intf)
3255 {
3256         if (intf->handlers->poll)
3257                 intf->handlers->poll(intf->send_info);
3258         /* In case something came in */
3259         handle_new_recv_msgs(intf);
3260 }
3261
3262 void ipmi_poll_interface(struct ipmi_user *user)
3263 {
3264         ipmi_poll(user->intf);
3265 }
3266 EXPORT_SYMBOL(ipmi_poll_interface);
3267
3268 static void redo_bmc_reg(struct work_struct *work)
3269 {
3270         struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3271                                              bmc_reg_work);
3272
3273         if (!intf->in_shutdown)
3274                 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3275
3276         kref_put(&intf->refcount, intf_free);
3277 }
3278
3279 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3280                       void                     *send_info,
3281                       struct device            *si_dev,
3282                       unsigned char            slave_addr)
3283 {
3284         int              i, j;
3285         int              rv;
3286         struct ipmi_smi *intf, *tintf;
3287         struct list_head *link;
3288         struct ipmi_device_id id;
3289
3290         /*
3291          * Make sure the driver is actually initialized, this handles
3292          * problems with initialization order.
3293          */
3294         if (!initialized) {
3295                 rv = ipmi_init_msghandler();
3296                 if (rv)
3297                         return rv;
3298                 /*
3299                  * The init code doesn't return an error if it was turned
3300                  * off, but it won't initialize.  Check that.
3301                  */
3302                 if (!initialized)
3303                         return -ENODEV;
3304         }
3305
3306         intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3307         if (!intf)
3308                 return -ENOMEM;
3309
3310         rv = init_srcu_struct(&intf->users_srcu);
3311         if (rv) {
3312                 kfree(intf);
3313                 return rv;
3314         }
3315
3316
3317         intf->bmc = &intf->tmp_bmc;
3318         INIT_LIST_HEAD(&intf->bmc->intfs);
3319         mutex_init(&intf->bmc->dyn_mutex);
3320         INIT_LIST_HEAD(&intf->bmc_link);
3321         mutex_init(&intf->bmc_reg_mutex);
3322         intf->intf_num = -1; /* Mark it invalid for now. */
3323         kref_init(&intf->refcount);
3324         INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3325         intf->si_dev = si_dev;
3326         for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3327                 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3328                 intf->addrinfo[j].lun = 2;
3329         }
3330         if (slave_addr != 0)
3331                 intf->addrinfo[0].address = slave_addr;
3332         INIT_LIST_HEAD(&intf->users);
3333         intf->handlers = handlers;
3334         intf->send_info = send_info;
3335         spin_lock_init(&intf->seq_lock);
3336         for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3337                 intf->seq_table[j].inuse = 0;
3338                 intf->seq_table[j].seqid = 0;
3339         }
3340         intf->curr_seq = 0;
3341         spin_lock_init(&intf->waiting_rcv_msgs_lock);
3342         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3343         tasklet_init(&intf->recv_tasklet,
3344                      smi_recv_tasklet,
3345                      (unsigned long) intf);
3346         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3347         spin_lock_init(&intf->xmit_msgs_lock);
3348         INIT_LIST_HEAD(&intf->xmit_msgs);
3349         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3350         spin_lock_init(&intf->events_lock);
3351         atomic_set(&intf->event_waiters, 0);
3352         intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3353         INIT_LIST_HEAD(&intf->waiting_events);
3354         intf->waiting_events_count = 0;
3355         mutex_init(&intf->cmd_rcvrs_mutex);
3356         spin_lock_init(&intf->maintenance_mode_lock);
3357         INIT_LIST_HEAD(&intf->cmd_rcvrs);
3358         init_waitqueue_head(&intf->waitq);
3359         for (i = 0; i < IPMI_NUM_STATS; i++)
3360                 atomic_set(&intf->stats[i], 0);
3361
3362         mutex_lock(&ipmi_interfaces_mutex);
3363         /* Look for a hole in the numbers. */
3364         i = 0;
3365         link = &ipmi_interfaces;
3366         list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3367                 if (tintf->intf_num != i) {
3368                         link = &tintf->link;
3369                         break;
3370                 }
3371                 i++;
3372         }
3373         /* Add the new interface in numeric order. */
3374         if (i == 0)
3375                 list_add_rcu(&intf->link, &ipmi_interfaces);
3376         else
3377                 list_add_tail_rcu(&intf->link, link);
3378
3379         rv = handlers->start_processing(send_info, intf);
3380         if (rv)
3381                 goto out_err;
3382
3383         rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3384         if (rv) {
3385                 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3386                 goto out_err_started;
3387         }
3388
3389         mutex_lock(&intf->bmc_reg_mutex);
3390         rv = __scan_channels(intf, &id);
3391         mutex_unlock(&intf->bmc_reg_mutex);
3392         if (rv)
3393                 goto out_err_bmc_reg;
3394
3395         /*
3396          * Keep memory order straight for RCU readers.  Make
3397          * sure everything else is committed to memory before
3398          * setting intf_num to mark the interface valid.
3399          */
3400         smp_wmb();
3401         intf->intf_num = i;
3402         mutex_unlock(&ipmi_interfaces_mutex);
3403
3404         /* After this point the interface is legal to use. */
3405         call_smi_watchers(i, intf->si_dev);
3406
3407         return 0;
3408
3409  out_err_bmc_reg:
3410         ipmi_bmc_unregister(intf);
3411  out_err_started:
3412         if (intf->handlers->shutdown)
3413                 intf->handlers->shutdown(intf->send_info);
3414  out_err:
3415         list_del_rcu(&intf->link);
3416         mutex_unlock(&ipmi_interfaces_mutex);
3417         synchronize_srcu(&ipmi_interfaces_srcu);
3418         cleanup_srcu_struct(&intf->users_srcu);
3419         kref_put(&intf->refcount, intf_free);
3420
3421         return rv;
3422 }
3423 EXPORT_SYMBOL(ipmi_register_smi);
3424
3425 static void deliver_smi_err_response(struct ipmi_smi *intf,
3426                                      struct ipmi_smi_msg *msg,
3427                                      unsigned char err)
3428 {
3429         msg->rsp[0] = msg->data[0] | 4;
3430         msg->rsp[1] = msg->data[1];
3431         msg->rsp[2] = err;
3432         msg->rsp_size = 3;
3433         /* It's an error, so it will never requeue, no need to check return. */
3434         handle_one_recv_msg(intf, msg);
3435 }
3436
3437 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3438 {
3439         int              i;
3440         struct seq_table *ent;
3441         struct ipmi_smi_msg *msg;
3442         struct list_head *entry;
3443         struct list_head tmplist;
3444
3445         /* Clear out our transmit queues and hold the messages. */
3446         INIT_LIST_HEAD(&tmplist);
3447         list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3448         list_splice_tail(&intf->xmit_msgs, &tmplist);
3449
3450         /* Current message first, to preserve order */
3451         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3452                 /* Wait for the message to clear out. */
3453                 schedule_timeout(1);
3454         }
3455
3456         /* No need for locks, the interface is down. */
3457
3458         /*
3459          * Return errors for all pending messages in queue and in the
3460          * tables waiting for remote responses.
3461          */
3462         while (!list_empty(&tmplist)) {
3463                 entry = tmplist.next;
3464                 list_del(entry);
3465                 msg = list_entry(entry, struct ipmi_smi_msg, link);
3466                 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3467         }
3468
3469         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3470                 ent = &intf->seq_table[i];
3471                 if (!ent->inuse)
3472                         continue;
3473                 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3474         }
3475 }
3476
3477 void ipmi_unregister_smi(struct ipmi_smi *intf)
3478 {
3479         struct ipmi_smi_watcher *w;
3480         int intf_num = intf->intf_num, index;
3481
3482         mutex_lock(&ipmi_interfaces_mutex);
3483         intf->intf_num = -1;
3484         intf->in_shutdown = true;
3485         list_del_rcu(&intf->link);
3486         mutex_unlock(&ipmi_interfaces_mutex);
3487         synchronize_srcu(&ipmi_interfaces_srcu);
3488
3489         /* At this point no users can be added to the interface. */
3490
3491         /*
3492          * Call all the watcher interfaces to tell them that
3493          * an interface is going away.
3494          */
3495         mutex_lock(&smi_watchers_mutex);
3496         list_for_each_entry(w, &smi_watchers, link)
3497                 w->smi_gone(intf_num);
3498         mutex_unlock(&smi_watchers_mutex);
3499
3500         index = srcu_read_lock(&intf->users_srcu);
3501         while (!list_empty(&intf->users)) {
3502                 struct ipmi_user *user =
3503                         container_of(list_next_rcu(&intf->users),
3504                                      struct ipmi_user, link);
3505
3506                 _ipmi_destroy_user(user);
3507         }
3508         srcu_read_unlock(&intf->users_srcu, index);
3509
3510         if (intf->handlers->shutdown)
3511                 intf->handlers->shutdown(intf->send_info);
3512
3513         cleanup_smi_msgs(intf);
3514
3515         ipmi_bmc_unregister(intf);
3516
3517         cleanup_srcu_struct(&intf->users_srcu);
3518         kref_put(&intf->refcount, intf_free);
3519 }
3520 EXPORT_SYMBOL(ipmi_unregister_smi);
3521
3522 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3523                                    struct ipmi_smi_msg *msg)
3524 {
3525         struct ipmi_ipmb_addr ipmb_addr;
3526         struct ipmi_recv_msg  *recv_msg;
3527
3528         /*
3529          * This is 11, not 10, because the response must contain a
3530          * completion code.
3531          */
3532         if (msg->rsp_size < 11) {
3533                 /* Message not big enough, just ignore it. */
3534                 ipmi_inc_stat(intf, invalid_ipmb_responses);
3535                 return 0;
3536         }
3537
3538         if (msg->rsp[2] != 0) {
3539                 /* An error getting the response, just ignore it. */
3540                 return 0;
3541         }
3542
3543         ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3544         ipmb_addr.slave_addr = msg->rsp[6];
3545         ipmb_addr.channel = msg->rsp[3] & 0x0f;
3546         ipmb_addr.lun = msg->rsp[7] & 3;
3547
3548         /*
3549          * It's a response from a remote entity.  Look up the sequence
3550          * number and handle the response.
3551          */
3552         if (intf_find_seq(intf,
3553                           msg->rsp[7] >> 2,
3554                           msg->rsp[3] & 0x0f,
3555                           msg->rsp[8],
3556                           (msg->rsp[4] >> 2) & (~1),
3557                           (struct ipmi_addr *) &ipmb_addr,
3558                           &recv_msg)) {
3559                 /*
3560                  * We were unable to find the sequence number,
3561                  * so just nuke the message.
3562                  */
3563                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3564                 return 0;
3565         }
3566
3567         memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3568         /*
3569          * The other fields matched, so no need to set them, except
3570          * for netfn, which needs to be the response that was
3571          * returned, not the request value.
3572          */
3573         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3574         recv_msg->msg.data = recv_msg->msg_data;
3575         recv_msg->msg.data_len = msg->rsp_size - 10;
3576         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3577         if (deliver_response(intf, recv_msg))
3578                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3579         else
3580                 ipmi_inc_stat(intf, handled_ipmb_responses);
3581
3582         return 0;
3583 }
3584
3585 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3586                                    struct ipmi_smi_msg *msg)
3587 {
3588         struct cmd_rcvr          *rcvr;
3589         int                      rv = 0;
3590         unsigned char            netfn;
3591         unsigned char            cmd;
3592         unsigned char            chan;
3593         struct ipmi_user         *user = NULL;
3594         struct ipmi_ipmb_addr    *ipmb_addr;
3595         struct ipmi_recv_msg     *recv_msg;
3596
3597         if (msg->rsp_size < 10) {
3598                 /* Message not big enough, just ignore it. */
3599                 ipmi_inc_stat(intf, invalid_commands);
3600                 return 0;
3601         }
3602
3603         if (msg->rsp[2] != 0) {
3604                 /* An error getting the response, just ignore it. */
3605                 return 0;
3606         }
3607
3608         netfn = msg->rsp[4] >> 2;
3609         cmd = msg->rsp[8];
3610         chan = msg->rsp[3] & 0xf;
3611
3612         rcu_read_lock();
3613         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3614         if (rcvr) {
3615                 user = rcvr->user;
3616                 kref_get(&user->refcount);
3617         } else
3618                 user = NULL;
3619         rcu_read_unlock();
3620
3621         if (user == NULL) {
3622                 /* We didn't find a user, deliver an error response. */
3623                 ipmi_inc_stat(intf, unhandled_commands);
3624
3625                 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3626                 msg->data[1] = IPMI_SEND_MSG_CMD;
3627                 msg->data[2] = msg->rsp[3];
3628                 msg->data[3] = msg->rsp[6];
3629                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3630                 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3631                 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3632                 /* rqseq/lun */
3633                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3634                 msg->data[8] = msg->rsp[8]; /* cmd */
3635                 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3636                 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3637                 msg->data_size = 11;
3638
3639                 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3640
3641                 rcu_read_lock();
3642                 if (!intf->in_shutdown) {
3643                         smi_send(intf, intf->handlers, msg, 0);
3644                         /*
3645                          * We used the message, so return the value
3646                          * that causes it to not be freed or
3647                          * queued.
3648                          */
3649                         rv = -1;
3650                 }
3651                 rcu_read_unlock();
3652         } else {
3653                 recv_msg = ipmi_alloc_recv_msg();
3654                 if (!recv_msg) {
3655                         /*
3656                          * We couldn't allocate memory for the
3657                          * message, so requeue it for handling
3658                          * later.
3659                          */
3660                         rv = 1;
3661                         kref_put(&user->refcount, free_user);
3662                 } else {
3663                         /* Extract the source address from the data. */
3664                         ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3665                         ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3666                         ipmb_addr->slave_addr = msg->rsp[6];
3667                         ipmb_addr->lun = msg->rsp[7] & 3;
3668                         ipmb_addr->channel = msg->rsp[3] & 0xf;
3669
3670                         /*
3671                          * Extract the rest of the message information
3672                          * from the IPMB header.
3673                          */
3674                         recv_msg->user = user;
3675                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3676                         recv_msg->msgid = msg->rsp[7] >> 2;
3677                         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3678                         recv_msg->msg.cmd = msg->rsp[8];
3679                         recv_msg->msg.data = recv_msg->msg_data;
3680
3681                         /*
3682                          * We chop off 10, not 9 bytes because the checksum
3683                          * at the end also needs to be removed.
3684                          */
3685                         recv_msg->msg.data_len = msg->rsp_size - 10;
3686                         memcpy(recv_msg->msg_data, &msg->rsp[9],
3687                                msg->rsp_size - 10);
3688                         if (deliver_response(intf, recv_msg))
3689                                 ipmi_inc_stat(intf, unhandled_commands);
3690                         else
3691                                 ipmi_inc_stat(intf, handled_commands);
3692                 }
3693         }
3694
3695         return rv;
3696 }
3697
3698 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3699                                   struct ipmi_smi_msg *msg)
3700 {
3701         struct ipmi_lan_addr  lan_addr;
3702         struct ipmi_recv_msg  *recv_msg;
3703
3704
3705         /*
3706          * This is 13, not 12, because the response must contain a
3707          * completion code.
3708          */
3709         if (msg->rsp_size < 13) {
3710                 /* Message not big enough, just ignore it. */
3711                 ipmi_inc_stat(intf, invalid_lan_responses);
3712                 return 0;
3713         }
3714
3715         if (msg->rsp[2] != 0) {
3716                 /* An error getting the response, just ignore it. */
3717                 return 0;
3718         }
3719
3720         lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3721         lan_addr.session_handle = msg->rsp[4];
3722         lan_addr.remote_SWID = msg->rsp[8];
3723         lan_addr.local_SWID = msg->rsp[5];
3724         lan_addr.channel = msg->rsp[3] & 0x0f;
3725         lan_addr.privilege = msg->rsp[3] >> 4;
3726         lan_addr.lun = msg->rsp[9] & 3;
3727
3728         /*
3729          * It's a response from a remote entity.  Look up the sequence
3730          * number and handle the response.
3731          */
3732         if (intf_find_seq(intf,
3733                           msg->rsp[9] >> 2,
3734                           msg->rsp[3] & 0x0f,
3735                           msg->rsp[10],
3736                           (msg->rsp[6] >> 2) & (~1),
3737                           (struct ipmi_addr *) &lan_addr,
3738                           &recv_msg)) {
3739                 /*
3740                  * We were unable to find the sequence number,
3741                  * so just nuke the message.
3742                  */
3743                 ipmi_inc_stat(intf, unhandled_lan_responses);
3744                 return 0;
3745         }
3746
3747         memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3748         /*
3749          * The other fields matched, so no need to set them, except
3750          * for netfn, which needs to be the response that was
3751          * returned, not the request value.
3752          */
3753         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3754         recv_msg->msg.data = recv_msg->msg_data;
3755         recv_msg->msg.data_len = msg->rsp_size - 12;
3756         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3757         if (deliver_response(intf, recv_msg))
3758                 ipmi_inc_stat(intf, unhandled_lan_responses);
3759         else
3760                 ipmi_inc_stat(intf, handled_lan_responses);
3761
3762         return 0;
3763 }
3764
3765 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3766                                   struct ipmi_smi_msg *msg)
3767 {
3768         struct cmd_rcvr          *rcvr;
3769         int                      rv = 0;
3770         unsigned char            netfn;
3771         unsigned char            cmd;
3772         unsigned char            chan;
3773         struct ipmi_user         *user = NULL;
3774         struct ipmi_lan_addr     *lan_addr;
3775         struct ipmi_recv_msg     *recv_msg;
3776
3777         if (msg->rsp_size < 12) {
3778                 /* Message not big enough, just ignore it. */
3779                 ipmi_inc_stat(intf, invalid_commands);
3780                 return 0;
3781         }
3782
3783         if (msg->rsp[2] != 0) {
3784                 /* An error getting the response, just ignore it. */
3785                 return 0;
3786         }
3787
3788         netfn = msg->rsp[6] >> 2;
3789         cmd = msg->rsp[10];
3790         chan = msg->rsp[3] & 0xf;
3791
3792         rcu_read_lock();
3793         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3794         if (rcvr) {
3795                 user = rcvr->user;
3796                 kref_get(&user->refcount);
3797         } else
3798                 user = NULL;
3799         rcu_read_unlock();
3800
3801         if (user == NULL) {
3802                 /* We didn't find a user, just give up. */
3803                 ipmi_inc_stat(intf, unhandled_commands);
3804
3805                 /*
3806                  * Don't do anything with these messages, just allow
3807                  * them to be freed.
3808                  */
3809                 rv = 0;
3810         } else {
3811                 recv_msg = ipmi_alloc_recv_msg();
3812                 if (!recv_msg) {
3813                         /*
3814                          * We couldn't allocate memory for the
3815                          * message, so requeue it for handling later.
3816                          */
3817                         rv = 1;
3818                         kref_put(&user->refcount, free_user);
3819                 } else {
3820                         /* Extract the source address from the data. */
3821                         lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3822                         lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3823                         lan_addr->session_handle = msg->rsp[4];
3824                         lan_addr->remote_SWID = msg->rsp[8];
3825                         lan_addr->local_SWID = msg->rsp[5];
3826                         lan_addr->lun = msg->rsp[9] & 3;
3827                         lan_addr->channel = msg->rsp[3] & 0xf;
3828                         lan_addr->privilege = msg->rsp[3] >> 4;
3829
3830                         /*
3831                          * Extract the rest of the message information
3832                          * from the IPMB header.
3833                          */
3834                         recv_msg->user = user;
3835                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3836                         recv_msg->msgid = msg->rsp[9] >> 2;
3837                         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3838                         recv_msg->msg.cmd = msg->rsp[10];
3839                         recv_msg->msg.data = recv_msg->msg_data;
3840
3841                         /*
3842                          * We chop off 12, not 11 bytes because the checksum
3843                          * at the end also needs to be removed.
3844                          */
3845                         recv_msg->msg.data_len = msg->rsp_size - 12;
3846                         memcpy(recv_msg->msg_data, &msg->rsp[11],
3847                                msg->rsp_size - 12);
3848                         if (deliver_response(intf, recv_msg))
3849                                 ipmi_inc_stat(intf, unhandled_commands);
3850                         else
3851                                 ipmi_inc_stat(intf, handled_commands);
3852                 }
3853         }
3854
3855         return rv;
3856 }
3857
3858 /*
3859  * This routine will handle "Get Message" command responses with
3860  * channels that use an OEM Medium. The message format belongs to
3861  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3862  * Chapter 22, sections 22.6 and 22.24 for more details.
3863  */
3864 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3865                                   struct ipmi_smi_msg *msg)
3866 {
3867         struct cmd_rcvr       *rcvr;
3868         int                   rv = 0;
3869         unsigned char         netfn;
3870         unsigned char         cmd;
3871         unsigned char         chan;
3872         struct ipmi_user *user = NULL;
3873         struct ipmi_system_interface_addr *smi_addr;
3874         struct ipmi_recv_msg  *recv_msg;
3875
3876         /*
3877          * We expect the OEM SW to perform error checking
3878          * so we just do some basic sanity checks
3879          */
3880         if (msg->rsp_size < 4) {
3881                 /* Message not big enough, just ignore it. */
3882                 ipmi_inc_stat(intf, invalid_commands);
3883                 return 0;
3884         }
3885
3886         if (msg->rsp[2] != 0) {
3887                 /* An error getting the response, just ignore it. */
3888                 return 0;
3889         }
3890
3891         /*
3892          * This is an OEM Message so the OEM needs to know how
3893          * handle the message. We do no interpretation.
3894          */
3895         netfn = msg->rsp[0] >> 2;
3896         cmd = msg->rsp[1];
3897         chan = msg->rsp[3] & 0xf;
3898
3899         rcu_read_lock();
3900         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3901         if (rcvr) {
3902                 user = rcvr->user;
3903                 kref_get(&user->refcount);
3904         } else
3905                 user = NULL;
3906         rcu_read_unlock();
3907
3908         if (user == NULL) {
3909                 /* We didn't find a user, just give up. */
3910                 ipmi_inc_stat(intf, unhandled_commands);
3911
3912                 /*
3913                  * Don't do anything with these messages, just allow
3914                  * them to be freed.
3915                  */
3916
3917                 rv = 0;
3918         } else {
3919                 recv_msg = ipmi_alloc_recv_msg();
3920                 if (!recv_msg) {
3921                         /*
3922                          * We couldn't allocate memory for the
3923                          * message, so requeue it for handling
3924                          * later.
3925                          */
3926                         rv = 1;
3927                         kref_put(&user->refcount, free_user);
3928                 } else {
3929                         /*
3930                          * OEM Messages are expected to be delivered via
3931                          * the system interface to SMS software.  We might
3932                          * need to visit this again depending on OEM
3933                          * requirements
3934                          */
3935                         smi_addr = ((struct ipmi_system_interface_addr *)
3936                                     &recv_msg->addr);
3937                         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3938                         smi_addr->channel = IPMI_BMC_CHANNEL;
3939                         smi_addr->lun = msg->rsp[0] & 3;
3940
3941                         recv_msg->user = user;
3942                         recv_msg->user_msg_data = NULL;
3943                         recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3944                         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3945                         recv_msg->msg.cmd = msg->rsp[1];
3946                         recv_msg->msg.data = recv_msg->msg_data;
3947
3948                         /*
3949                          * The message starts at byte 4 which follows the
3950                          * the Channel Byte in the "GET MESSAGE" command
3951                          */
3952                         recv_msg->msg.data_len = msg->rsp_size - 4;
3953                         memcpy(recv_msg->msg_data, &msg->rsp[4],
3954                                msg->rsp_size - 4);
3955                         if (deliver_response(intf, recv_msg))
3956                                 ipmi_inc_stat(intf, unhandled_commands);
3957                         else
3958                                 ipmi_inc_stat(intf, handled_commands);
3959                 }
3960         }
3961
3962         return rv;
3963 }
3964
3965 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3966                                      struct ipmi_smi_msg  *msg)
3967 {
3968         struct ipmi_system_interface_addr *smi_addr;
3969
3970         recv_msg->msgid = 0;
3971         smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
3972         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3973         smi_addr->channel = IPMI_BMC_CHANNEL;
3974         smi_addr->lun = msg->rsp[0] & 3;
3975         recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3976         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3977         recv_msg->msg.cmd = msg->rsp[1];
3978         memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
3979         recv_msg->msg.data = recv_msg->msg_data;
3980         recv_msg->msg.data_len = msg->rsp_size - 3;
3981 }
3982
3983 static int handle_read_event_rsp(struct ipmi_smi *intf,
3984                                  struct ipmi_smi_msg *msg)
3985 {
3986         struct ipmi_recv_msg *recv_msg, *recv_msg2;
3987         struct list_head     msgs;
3988         struct ipmi_user     *user;
3989         int rv = 0, deliver_count = 0, index;
3990         unsigned long        flags;
3991
3992         if (msg->rsp_size < 19) {
3993                 /* Message is too small to be an IPMB event. */
3994                 ipmi_inc_stat(intf, invalid_events);
3995                 return 0;
3996         }
3997
3998         if (msg->rsp[2] != 0) {
3999                 /* An error getting the event, just ignore it. */
4000                 return 0;
4001         }
4002
4003         INIT_LIST_HEAD(&msgs);
4004
4005         spin_lock_irqsave(&intf->events_lock, flags);
4006
4007         ipmi_inc_stat(intf, events);
4008
4009         /*
4010          * Allocate and fill in one message for every user that is
4011          * getting events.
4012          */
4013         index = srcu_read_lock(&intf->users_srcu);
4014         list_for_each_entry_rcu(user, &intf->users, link) {
4015                 if (!user->gets_events)
4016                         continue;
4017
4018                 recv_msg = ipmi_alloc_recv_msg();
4019                 if (!recv_msg) {
4020                         rcu_read_unlock();
4021                         list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4022                                                  link) {
4023                                 list_del(&recv_msg->link);
4024                                 ipmi_free_recv_msg(recv_msg);
4025                         }
4026                         /*
4027                          * We couldn't allocate memory for the
4028                          * message, so requeue it for handling
4029                          * later.
4030                          */
4031                         rv = 1;
4032                         goto out;
4033                 }
4034
4035                 deliver_count++;
4036
4037                 copy_event_into_recv_msg(recv_msg, msg);
4038                 recv_msg->user = user;
4039                 kref_get(&user->refcount);
4040                 list_add_tail(&recv_msg->link, &msgs);
4041         }
4042         srcu_read_unlock(&intf->users_srcu, index);
4043
4044         if (deliver_count) {
4045                 /* Now deliver all the messages. */
4046                 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4047                         list_del(&recv_msg->link);
4048                         deliver_local_response(intf, recv_msg);
4049                 }
4050         } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4051                 /*
4052                  * No one to receive the message, put it in queue if there's
4053                  * not already too many things in the queue.
4054                  */
4055                 recv_msg = ipmi_alloc_recv_msg();
4056                 if (!recv_msg) {
4057                         /*
4058                          * We couldn't allocate memory for the
4059                          * message, so requeue it for handling
4060                          * later.
4061                          */
4062                         rv = 1;
4063                         goto out;
4064                 }
4065
4066                 copy_event_into_recv_msg(recv_msg, msg);
4067                 list_add_tail(&recv_msg->link, &intf->waiting_events);
4068                 intf->waiting_events_count++;
4069         } else if (!intf->event_msg_printed) {
4070                 /*
4071                  * There's too many things in the queue, discard this
4072                  * message.
4073                  */
4074                 dev_warn(intf->si_dev,
4075                          "Event queue full, discarding incoming events\n");
4076                 intf->event_msg_printed = 1;
4077         }
4078
4079  out:
4080         spin_unlock_irqrestore(&intf->events_lock, flags);
4081
4082         return rv;
4083 }
4084
4085 static int handle_bmc_rsp(struct ipmi_smi *intf,
4086                           struct ipmi_smi_msg *msg)
4087 {
4088         struct ipmi_recv_msg *recv_msg;
4089         struct ipmi_system_interface_addr *smi_addr;
4090
4091         recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4092         if (recv_msg == NULL) {
4093                 dev_warn(intf->si_dev,
4094                          "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4095                 return 0;
4096         }
4097
4098         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4099         recv_msg->msgid = msg->msgid;
4100         smi_addr = ((struct ipmi_system_interface_addr *)
4101                     &recv_msg->addr);
4102         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4103         smi_addr->channel = IPMI_BMC_CHANNEL;
4104         smi_addr->lun = msg->rsp[0] & 3;
4105         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4106         recv_msg->msg.cmd = msg->rsp[1];
4107         memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4108         recv_msg->msg.data = recv_msg->msg_data;
4109         recv_msg->msg.data_len = msg->rsp_size - 2;
4110         deliver_local_response(intf, recv_msg);
4111
4112         return 0;
4113 }
4114
4115 /*
4116  * Handle a received message.  Return 1 if the message should be requeued,
4117  * 0 if the message should be freed, or -1 if the message should not
4118  * be freed or requeued.
4119  */
4120 static int handle_one_recv_msg(struct ipmi_smi *intf,
4121                                struct ipmi_smi_msg *msg)
4122 {
4123         int requeue;
4124         int chan;
4125
4126         ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4127         if (msg->rsp_size < 2) {
4128                 /* Message is too small to be correct. */
4129                 dev_warn(intf->si_dev,
4130                          "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4131                          (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4132
4133                 /* Generate an error response for the message. */
4134                 msg->rsp[0] = msg->data[0] | (1 << 2);
4135                 msg->rsp[1] = msg->data[1];
4136                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4137                 msg->rsp_size = 3;
4138         } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4139                    || (msg->rsp[1] != msg->data[1])) {
4140                 /*
4141                  * The NetFN and Command in the response is not even
4142                  * marginally correct.
4143                  */
4144                 dev_warn(intf->si_dev,
4145                          "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4146                          (msg->data[0] >> 2) | 1, msg->data[1],
4147                          msg->rsp[0] >> 2, msg->rsp[1]);
4148
4149                 /* Generate an error response for the message. */
4150                 msg->rsp[0] = msg->data[0] | (1 << 2);
4151                 msg->rsp[1] = msg->data[1];
4152                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4153                 msg->rsp_size = 3;
4154         }
4155
4156         if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4157             && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4158             && (msg->user_data != NULL)) {
4159                 /*
4160                  * It's a response to a response we sent.  For this we
4161                  * deliver a send message response to the user.
4162                  */
4163                 struct ipmi_recv_msg *recv_msg = msg->user_data;
4164
4165                 requeue = 0;
4166                 if (msg->rsp_size < 2)
4167                         /* Message is too small to be correct. */
4168                         goto out;
4169
4170                 chan = msg->data[2] & 0x0f;
4171                 if (chan >= IPMI_MAX_CHANNELS)
4172                         /* Invalid channel number */
4173                         goto out;
4174
4175                 if (!recv_msg)
4176                         goto out;
4177
4178                 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4179                 recv_msg->msg.data = recv_msg->msg_data;
4180                 recv_msg->msg.data_len = 1;
4181                 recv_msg->msg_data[0] = msg->rsp[2];
4182                 deliver_local_response(intf, recv_msg);
4183         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4184                    && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4185                 struct ipmi_channel   *chans;
4186
4187                 /* It's from the receive queue. */
4188                 chan = msg->rsp[3] & 0xf;
4189                 if (chan >= IPMI_MAX_CHANNELS) {
4190                         /* Invalid channel number */
4191                         requeue = 0;
4192                         goto out;
4193                 }
4194
4195                 /*
4196                  * We need to make sure the channels have been initialized.
4197                  * The channel_handler routine will set the "curr_channel"
4198                  * equal to or greater than IPMI_MAX_CHANNELS when all the
4199                  * channels for this interface have been initialized.
4200                  */
4201                 if (!intf->channels_ready) {
4202                         requeue = 0; /* Throw the message away */
4203                         goto out;
4204                 }
4205
4206                 chans = READ_ONCE(intf->channel_list)->c;
4207
4208                 switch (chans[chan].medium) {
4209                 case IPMI_CHANNEL_MEDIUM_IPMB:
4210                         if (msg->rsp[4] & 0x04) {
4211                                 /*
4212                                  * It's a response, so find the
4213                                  * requesting message and send it up.
4214                                  */
4215                                 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4216                         } else {
4217                                 /*
4218                                  * It's a command to the SMS from some other
4219                                  * entity.  Handle that.
4220                                  */
4221                                 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4222                         }
4223                         break;
4224
4225                 case IPMI_CHANNEL_MEDIUM_8023LAN:
4226                 case IPMI_CHANNEL_MEDIUM_ASYNC:
4227                         if (msg->rsp[6] & 0x04) {
4228                                 /*
4229                                  * It's a response, so find the
4230                                  * requesting message and send it up.
4231                                  */
4232                                 requeue = handle_lan_get_msg_rsp(intf, msg);
4233                         } else {
4234                                 /*
4235                                  * It's a command to the SMS from some other
4236                                  * entity.  Handle that.
4237                                  */
4238                                 requeue = handle_lan_get_msg_cmd(intf, msg);
4239                         }
4240                         break;
4241
4242                 default:
4243                         /* Check for OEM Channels.  Clients had better
4244                            register for these commands. */
4245                         if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4246                             && (chans[chan].medium
4247                                 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4248                                 requeue = handle_oem_get_msg_cmd(intf, msg);
4249                         } else {
4250                                 /*
4251                                  * We don't handle the channel type, so just
4252                                  * free the message.
4253                                  */
4254                                 requeue = 0;
4255                         }
4256                 }
4257
4258         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4259                    && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4260                 /* It's an asynchronous event. */
4261                 requeue = handle_read_event_rsp(intf, msg);
4262         } else {
4263                 /* It's a response from the local BMC. */
4264                 requeue = handle_bmc_rsp(intf, msg);
4265         }
4266
4267  out:
4268         return requeue;
4269 }
4270
4271 /*
4272  * If there are messages in the queue or pretimeouts, handle them.
4273  */
4274 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4275 {
4276         struct ipmi_smi_msg  *smi_msg;
4277         unsigned long        flags = 0;
4278         int                  rv;
4279         int                  run_to_completion = intf->run_to_completion;
4280
4281         /* See if any waiting messages need to be processed. */
4282         if (!run_to_completion)
4283                 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4284         while (!list_empty(&intf->waiting_rcv_msgs)) {
4285                 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4286                                      struct ipmi_smi_msg, link);
4287                 list_del(&smi_msg->link);
4288                 if (!run_to_completion)
4289                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4290                                                flags);
4291                 rv = handle_one_recv_msg(intf, smi_msg);
4292                 if (!run_to_completion)
4293                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4294                 if (rv > 0) {
4295                         /*
4296                          * To preserve message order, quit if we
4297                          * can't handle a message.  Add the message
4298                          * back at the head, this is safe because this
4299                          * tasklet is the only thing that pulls the
4300                          * messages.
4301                          */
4302                         list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4303                         break;
4304                 } else {
4305                         if (rv == 0)
4306                                 /* Message handled */
4307                                 ipmi_free_smi_msg(smi_msg);
4308                         /* If rv < 0, fatal error, del but don't free. */
4309                 }
4310         }
4311         if (!run_to_completion)
4312                 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4313
4314         /*
4315          * If the pretimout count is non-zero, decrement one from it and
4316          * deliver pretimeouts to all the users.
4317          */
4318         if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4319                 struct ipmi_user *user;
4320                 int index;
4321
4322                 index = srcu_read_lock(&intf->users_srcu);
4323                 list_for_each_entry_rcu(user, &intf->users, link) {
4324                         if (user->handler->ipmi_watchdog_pretimeout)
4325                                 user->handler->ipmi_watchdog_pretimeout(
4326                                         user->handler_data);
4327                 }
4328                 srcu_read_unlock(&intf->users_srcu, index);
4329         }
4330 }
4331
4332 static void smi_recv_tasklet(unsigned long val)
4333 {
4334         unsigned long flags = 0; /* keep us warning-free. */
4335         struct ipmi_smi *intf = (struct ipmi_smi *) val;
4336         int run_to_completion = intf->run_to_completion;
4337         struct ipmi_smi_msg *newmsg = NULL;
4338
4339         /*
4340          * Start the next message if available.
4341          *
4342          * Do this here, not in the actual receiver, because we may deadlock
4343          * because the lower layer is allowed to hold locks while calling
4344          * message delivery.
4345          */
4346
4347         rcu_read_lock();
4348
4349         if (!run_to_completion)
4350                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4351         if (intf->curr_msg == NULL && !intf->in_shutdown) {
4352                 struct list_head *entry = NULL;
4353
4354                 /* Pick the high priority queue first. */
4355                 if (!list_empty(&intf->hp_xmit_msgs))
4356                         entry = intf->hp_xmit_msgs.next;
4357                 else if (!list_empty(&intf->xmit_msgs))
4358                         entry = intf->xmit_msgs.next;
4359
4360                 if (entry) {
4361                         list_del(entry);
4362                         newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4363                         intf->curr_msg = newmsg;
4364                 }
4365         }
4366         if (!run_to_completion)
4367                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4368         if (newmsg)
4369                 intf->handlers->sender(intf->send_info, newmsg);
4370
4371         rcu_read_unlock();
4372
4373         handle_new_recv_msgs(intf);
4374 }
4375
4376 /* Handle a new message from the lower layer. */
4377 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4378                            struct ipmi_smi_msg *msg)
4379 {
4380         unsigned long flags = 0; /* keep us warning-free. */
4381         int run_to_completion = intf->run_to_completion;
4382
4383         if ((msg->data_size >= 2)
4384             && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4385             && (msg->data[1] == IPMI_SEND_MSG_CMD)
4386             && (msg->user_data == NULL)) {
4387
4388                 if (intf->in_shutdown)
4389                         goto free_msg;
4390
4391                 /*
4392                  * This is the local response to a command send, start
4393                  * the timer for these.  The user_data will not be
4394                  * NULL if this is a response send, and we will let
4395                  * response sends just go through.
4396                  */
4397
4398                 /*
4399                  * Check for errors, if we get certain errors (ones
4400                  * that mean basically we can try again later), we
4401                  * ignore them and start the timer.  Otherwise we
4402                  * report the error immediately.
4403                  */
4404                 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4405                     && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4406                     && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4407                     && (msg->rsp[2] != IPMI_BUS_ERR)
4408                     && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4409                         int ch = msg->rsp[3] & 0xf;
4410                         struct ipmi_channel *chans;
4411
4412                         /* Got an error sending the message, handle it. */
4413
4414                         chans = READ_ONCE(intf->channel_list)->c;
4415                         if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4416                             || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4417                                 ipmi_inc_stat(intf, sent_lan_command_errs);
4418                         else
4419                                 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4420                         intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4421                 } else
4422                         /* The message was sent, start the timer. */
4423                         intf_start_seq_timer(intf, msg->msgid);
4424
4425 free_msg:
4426                 ipmi_free_smi_msg(msg);
4427         } else {
4428                 /*
4429                  * To preserve message order, we keep a queue and deliver from
4430                  * a tasklet.
4431                  */
4432                 if (!run_to_completion)
4433                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4434                 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4435                 if (!run_to_completion)
4436                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4437                                                flags);
4438         }
4439
4440         if (!run_to_completion)
4441                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4442         /*
4443          * We can get an asynchronous event or receive message in addition
4444          * to commands we send.
4445          */
4446         if (msg == intf->curr_msg)
4447                 intf->curr_msg = NULL;
4448         if (!run_to_completion)
4449                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4450
4451         if (run_to_completion)
4452                 smi_recv_tasklet((unsigned long) intf);
4453         else
4454                 tasklet_schedule(&intf->recv_tasklet);
4455 }
4456 EXPORT_SYMBOL(ipmi_smi_msg_received);
4457
4458 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4459 {
4460         if (intf->in_shutdown)
4461                 return;
4462
4463         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4464         tasklet_schedule(&intf->recv_tasklet);
4465 }
4466 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4467
4468 static struct ipmi_smi_msg *
4469 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4470                   unsigned char seq, long seqid)
4471 {
4472         struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4473         if (!smi_msg)
4474                 /*
4475                  * If we can't allocate the message, then just return, we
4476                  * get 4 retries, so this should be ok.
4477                  */
4478                 return NULL;
4479
4480         memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4481         smi_msg->data_size = recv_msg->msg.data_len;
4482         smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4483
4484         ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4485
4486         return smi_msg;
4487 }
4488
4489 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4490                               struct list_head *timeouts,
4491                               unsigned long timeout_period,
4492                               int slot, unsigned long *flags,
4493                               unsigned int *waiting_msgs)
4494 {
4495         struct ipmi_recv_msg *msg;
4496
4497         if (intf->in_shutdown)
4498                 return;
4499
4500         if (!ent->inuse)
4501                 return;
4502
4503         if (timeout_period < ent->timeout) {
4504                 ent->timeout -= timeout_period;
4505                 (*waiting_msgs)++;
4506                 return;
4507         }
4508
4509         if (ent->retries_left == 0) {
4510                 /* The message has used all its retries. */
4511                 ent->inuse = 0;
4512                 msg = ent->recv_msg;
4513                 list_add_tail(&msg->link, timeouts);
4514                 if (ent->broadcast)
4515                         ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4516                 else if (is_lan_addr(&ent->recv_msg->addr))
4517                         ipmi_inc_stat(intf, timed_out_lan_commands);
4518                 else
4519                         ipmi_inc_stat(intf, timed_out_ipmb_commands);
4520         } else {
4521                 struct ipmi_smi_msg *smi_msg;
4522                 /* More retries, send again. */
4523
4524                 (*waiting_msgs)++;
4525
4526                 /*
4527                  * Start with the max timer, set to normal timer after
4528                  * the message is sent.
4529                  */
4530                 ent->timeout = MAX_MSG_TIMEOUT;
4531                 ent->retries_left--;
4532                 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4533                                             ent->seqid);
4534                 if (!smi_msg) {
4535                         if (is_lan_addr(&ent->recv_msg->addr))
4536                                 ipmi_inc_stat(intf,
4537                                               dropped_rexmit_lan_commands);
4538                         else
4539                                 ipmi_inc_stat(intf,
4540                                               dropped_rexmit_ipmb_commands);
4541                         return;
4542                 }
4543
4544                 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4545
4546                 /*
4547                  * Send the new message.  We send with a zero
4548                  * priority.  It timed out, I doubt time is that
4549                  * critical now, and high priority messages are really
4550                  * only for messages to the local MC, which don't get
4551                  * resent.
4552                  */
4553                 if (intf->handlers) {
4554                         if (is_lan_addr(&ent->recv_msg->addr))
4555                                 ipmi_inc_stat(intf,
4556                                               retransmitted_lan_commands);
4557                         else
4558                                 ipmi_inc_stat(intf,
4559                                               retransmitted_ipmb_commands);
4560
4561                         smi_send(intf, intf->handlers, smi_msg, 0);
4562                 } else
4563                         ipmi_free_smi_msg(smi_msg);
4564
4565                 spin_lock_irqsave(&intf->seq_lock, *flags);
4566         }
4567 }
4568
4569 static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
4570                                          unsigned long timeout_period)
4571 {
4572         struct list_head     timeouts;
4573         struct ipmi_recv_msg *msg, *msg2;
4574         unsigned long        flags;
4575         int                  i;
4576         unsigned int         waiting_msgs = 0;
4577
4578         if (!intf->bmc_registered) {
4579                 kref_get(&intf->refcount);
4580                 if (!schedule_work(&intf->bmc_reg_work)) {
4581                         kref_put(&intf->refcount, intf_free);
4582                         waiting_msgs++;
4583                 }
4584         }
4585
4586         /*
4587          * Go through the seq table and find any messages that
4588          * have timed out, putting them in the timeouts
4589          * list.
4590          */
4591         INIT_LIST_HEAD(&timeouts);
4592         spin_lock_irqsave(&intf->seq_lock, flags);
4593         if (intf->ipmb_maintenance_mode_timeout) {
4594                 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4595                         intf->ipmb_maintenance_mode_timeout = 0;
4596                 else
4597                         intf->ipmb_maintenance_mode_timeout -= timeout_period;
4598         }
4599         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4600                 check_msg_timeout(intf, &intf->seq_table[i],
4601                                   &timeouts, timeout_period, i,
4602                                   &flags, &waiting_msgs);
4603         spin_unlock_irqrestore(&intf->seq_lock, flags);
4604
4605         list_for_each_entry_safe(msg, msg2, &timeouts, link)
4606                 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4607
4608         /*
4609          * Maintenance mode handling.  Check the timeout
4610          * optimistically before we claim the lock.  It may
4611          * mean a timeout gets missed occasionally, but that
4612          * only means the timeout gets extended by one period
4613          * in that case.  No big deal, and it avoids the lock
4614          * most of the time.
4615          */
4616         if (intf->auto_maintenance_timeout > 0) {
4617                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4618                 if (intf->auto_maintenance_timeout > 0) {
4619                         intf->auto_maintenance_timeout
4620                                 -= timeout_period;
4621                         if (!intf->maintenance_mode
4622                             && (intf->auto_maintenance_timeout <= 0)) {
4623                                 intf->maintenance_mode_enable = false;
4624                                 maintenance_mode_update(intf);
4625                         }
4626                 }
4627                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4628                                        flags);
4629         }
4630
4631         tasklet_schedule(&intf->recv_tasklet);
4632
4633         return waiting_msgs;
4634 }
4635
4636 static void ipmi_request_event(struct ipmi_smi *intf)
4637 {
4638         /* No event requests when in maintenance mode. */
4639         if (intf->maintenance_mode_enable)
4640                 return;
4641
4642         if (!intf->in_shutdown)
4643                 intf->handlers->request_events(intf->send_info);
4644 }
4645
4646 static struct timer_list ipmi_timer;
4647
4648 static atomic_t stop_operation;
4649
4650 static void ipmi_timeout(struct timer_list *unused)
4651 {
4652         struct ipmi_smi *intf;
4653         int nt = 0, index;
4654
4655         if (atomic_read(&stop_operation))
4656                 return;
4657
4658         index = srcu_read_lock(&ipmi_interfaces_srcu);
4659         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4660                 int lnt = 0;
4661
4662                 if (atomic_read(&intf->event_waiters)) {
4663                         intf->ticks_to_req_ev--;
4664                         if (intf->ticks_to_req_ev == 0) {
4665                                 ipmi_request_event(intf);
4666                                 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4667                         }
4668                         lnt++;
4669                 }
4670
4671                 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4672
4673                 lnt = !!lnt;
4674                 if (lnt != intf->last_needs_timer &&
4675                                         intf->handlers->set_need_watch)
4676                         intf->handlers->set_need_watch(intf->send_info, lnt);
4677                 intf->last_needs_timer = lnt;
4678
4679                 nt += lnt;
4680         }
4681         srcu_read_unlock(&ipmi_interfaces_srcu, index);
4682
4683         if (nt)
4684                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4685 }
4686
4687 static void need_waiter(struct ipmi_smi *intf)
4688 {
4689         /* Racy, but worst case we start the timer twice. */
4690         if (!timer_pending(&ipmi_timer))
4691                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4692 }
4693
4694 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4695 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4696
4697 static void free_smi_msg(struct ipmi_smi_msg *msg)
4698 {
4699         atomic_dec(&smi_msg_inuse_count);
4700         kfree(msg);
4701 }
4702
4703 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4704 {
4705         struct ipmi_smi_msg *rv;
4706         rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4707         if (rv) {
4708                 rv->done = free_smi_msg;
4709                 rv->user_data = NULL;
4710                 atomic_inc(&smi_msg_inuse_count);
4711         }
4712         return rv;
4713 }
4714 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4715
4716 static void free_recv_msg(struct ipmi_recv_msg *msg)
4717 {
4718         atomic_dec(&recv_msg_inuse_count);
4719         kfree(msg);
4720 }
4721
4722 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4723 {
4724         struct ipmi_recv_msg *rv;
4725
4726         rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4727         if (rv) {
4728                 rv->user = NULL;
4729                 rv->done = free_recv_msg;
4730                 atomic_inc(&recv_msg_inuse_count);
4731         }
4732         return rv;
4733 }
4734
4735 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4736 {
4737         if (msg->user)
4738                 kref_put(&msg->user->refcount, free_user);
4739         msg->done(msg);
4740 }
4741 EXPORT_SYMBOL(ipmi_free_recv_msg);
4742
4743 static atomic_t panic_done_count = ATOMIC_INIT(0);
4744
4745 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4746 {
4747         atomic_dec(&panic_done_count);
4748 }
4749
4750 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4751 {
4752         atomic_dec(&panic_done_count);
4753 }
4754
4755 /*
4756  * Inside a panic, send a message and wait for a response.
4757  */
4758 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4759                                         struct ipmi_addr *addr,
4760                                         struct kernel_ipmi_msg *msg)
4761 {
4762         struct ipmi_smi_msg  smi_msg;
4763         struct ipmi_recv_msg recv_msg;
4764         int rv;
4765
4766         smi_msg.done = dummy_smi_done_handler;
4767         recv_msg.done = dummy_recv_done_handler;
4768         atomic_add(2, &panic_done_count);
4769         rv = i_ipmi_request(NULL,
4770                             intf,
4771                             addr,
4772                             0,
4773                             msg,
4774                             intf,
4775                             &smi_msg,
4776                             &recv_msg,
4777                             0,
4778                             intf->addrinfo[0].address,
4779                             intf->addrinfo[0].lun,
4780                             0, 1); /* Don't retry, and don't wait. */
4781         if (rv)
4782                 atomic_sub(2, &panic_done_count);
4783         else if (intf->handlers->flush_messages)
4784                 intf->handlers->flush_messages(intf->send_info);
4785
4786         while (atomic_read(&panic_done_count) != 0)
4787                 ipmi_poll(intf);
4788 }
4789
4790 static void event_receiver_fetcher(struct ipmi_smi *intf,
4791                                    struct ipmi_recv_msg *msg)
4792 {
4793         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4794             && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4795             && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4796             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4797                 /* A get event receiver command, save it. */
4798                 intf->event_receiver = msg->msg.data[1];
4799                 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4800         }
4801 }
4802
4803 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4804 {
4805         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4806             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4807             && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4808             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4809                 /*
4810                  * A get device id command, save if we are an event
4811                  * receiver or generator.
4812                  */
4813                 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4814                 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4815         }
4816 }
4817
4818 static void send_panic_events(struct ipmi_smi *intf, char *str)
4819 {
4820         struct kernel_ipmi_msg msg;
4821         unsigned char data[16];
4822         struct ipmi_system_interface_addr *si;
4823         struct ipmi_addr addr;
4824         char *p = str;
4825         struct ipmi_ipmb_addr *ipmb;
4826         int j;
4827
4828         if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4829                 return;
4830
4831         si = (struct ipmi_system_interface_addr *) &addr;
4832         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4833         si->channel = IPMI_BMC_CHANNEL;
4834         si->lun = 0;
4835
4836         /* Fill in an event telling that we have failed. */
4837         msg.netfn = 0x04; /* Sensor or Event. */
4838         msg.cmd = 2; /* Platform event command. */
4839         msg.data = data;
4840         msg.data_len = 8;
4841         data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4842         data[1] = 0x03; /* This is for IPMI 1.0. */
4843         data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4844         data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4845         data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4846
4847         /*
4848          * Put a few breadcrumbs in.  Hopefully later we can add more things
4849          * to make the panic events more useful.
4850          */
4851         if (str) {
4852                 data[3] = str[0];
4853                 data[6] = str[1];
4854                 data[7] = str[2];
4855         }
4856
4857         /* Send the event announcing the panic. */
4858         ipmi_panic_request_and_wait(intf, &addr, &msg);
4859
4860         /*
4861          * On every interface, dump a bunch of OEM event holding the
4862          * string.
4863          */
4864         if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4865                 return;
4866
4867         /*
4868          * intf_num is used as an marker to tell if the
4869          * interface is valid.  Thus we need a read barrier to
4870          * make sure data fetched before checking intf_num
4871          * won't be used.
4872          */
4873         smp_rmb();
4874
4875         /*
4876          * First job here is to figure out where to send the
4877          * OEM events.  There's no way in IPMI to send OEM
4878          * events using an event send command, so we have to
4879          * find the SEL to put them in and stick them in
4880          * there.
4881          */
4882
4883         /* Get capabilities from the get device id. */
4884         intf->local_sel_device = 0;
4885         intf->local_event_generator = 0;
4886         intf->event_receiver = 0;
4887
4888         /* Request the device info from the local MC. */
4889         msg.netfn = IPMI_NETFN_APP_REQUEST;
4890         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4891         msg.data = NULL;
4892         msg.data_len = 0;
4893         intf->null_user_handler = device_id_fetcher;
4894         ipmi_panic_request_and_wait(intf, &addr, &msg);
4895
4896         if (intf->local_event_generator) {
4897                 /* Request the event receiver from the local MC. */
4898                 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4899                 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4900                 msg.data = NULL;
4901                 msg.data_len = 0;
4902                 intf->null_user_handler = event_receiver_fetcher;
4903                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4904         }
4905         intf->null_user_handler = NULL;
4906
4907         /*
4908          * Validate the event receiver.  The low bit must not
4909          * be 1 (it must be a valid IPMB address), it cannot
4910          * be zero, and it must not be my address.
4911          */
4912         if (((intf->event_receiver & 1) == 0)
4913             && (intf->event_receiver != 0)
4914             && (intf->event_receiver != intf->addrinfo[0].address)) {
4915                 /*
4916                  * The event receiver is valid, send an IPMB
4917                  * message.
4918                  */
4919                 ipmb = (struct ipmi_ipmb_addr *) &addr;
4920                 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4921                 ipmb->channel = 0; /* FIXME - is this right? */
4922                 ipmb->lun = intf->event_receiver_lun;
4923                 ipmb->slave_addr = intf->event_receiver;
4924         } else if (intf->local_sel_device) {
4925                 /*
4926                  * The event receiver was not valid (or was
4927                  * me), but I am an SEL device, just dump it
4928                  * in my SEL.
4929                  */
4930                 si = (struct ipmi_system_interface_addr *) &addr;
4931                 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4932                 si->channel = IPMI_BMC_CHANNEL;
4933                 si->lun = 0;
4934         } else
4935                 return; /* No where to send the event. */
4936
4937         msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4938         msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4939         msg.data = data;
4940         msg.data_len = 16;
4941
4942         j = 0;
4943         while (*p) {
4944                 int size = strlen(p);
4945
4946                 if (size > 11)
4947                         size = 11;
4948                 data[0] = 0;
4949                 data[1] = 0;
4950                 data[2] = 0xf0; /* OEM event without timestamp. */
4951                 data[3] = intf->addrinfo[0].address;
4952                 data[4] = j++; /* sequence # */
4953                 /*
4954                  * Always give 11 bytes, so strncpy will fill
4955                  * it with zeroes for me.
4956                  */
4957                 strncpy(data+5, p, 11);
4958                 p += size;
4959
4960                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4961         }
4962 }
4963
4964 static int has_panicked;
4965
4966 static int panic_event(struct notifier_block *this,
4967                        unsigned long         event,
4968                        void                  *ptr)
4969 {
4970         struct ipmi_smi *intf;
4971         struct ipmi_user *user;
4972
4973         if (has_panicked)
4974                 return NOTIFY_DONE;
4975         has_panicked = 1;
4976
4977         /* For every registered interface, set it to run to completion. */
4978         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4979                 if (!intf->handlers || intf->intf_num == -1)
4980                         /* Interface is not ready. */
4981                         continue;
4982
4983                 if (!intf->handlers->poll)
4984                         continue;
4985
4986                 /*
4987                  * If we were interrupted while locking xmit_msgs_lock or
4988                  * waiting_rcv_msgs_lock, the corresponding list may be
4989                  * corrupted.  In this case, drop items on the list for
4990                  * the safety.
4991                  */
4992                 if (!spin_trylock(&intf->xmit_msgs_lock)) {
4993                         INIT_LIST_HEAD(&intf->xmit_msgs);
4994                         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
4995                 } else
4996                         spin_unlock(&intf->xmit_msgs_lock);
4997
4998                 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
4999                         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5000                 else
5001                         spin_unlock(&intf->waiting_rcv_msgs_lock);
5002
5003                 intf->run_to_completion = 1;
5004                 if (intf->handlers->set_run_to_completion)
5005                         intf->handlers->set_run_to_completion(intf->send_info,
5006                                                               1);
5007
5008                 list_for_each_entry_rcu(user, &intf->users, link) {
5009                         if (user->handler->ipmi_panic_handler)
5010                                 user->handler->ipmi_panic_handler(
5011                                         user->handler_data);
5012                 }
5013
5014                 send_panic_events(intf, ptr);
5015         }
5016
5017         return NOTIFY_DONE;
5018 }
5019
5020 static struct notifier_block panic_block = {
5021         .notifier_call  = panic_event,
5022         .next           = NULL,
5023         .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5024 };
5025
5026 static int ipmi_init_msghandler(void)
5027 {
5028         int rv;
5029
5030         if (initialized)
5031                 return 0;
5032
5033         rv = driver_register(&ipmidriver.driver);
5034         if (rv) {
5035                 pr_err("Could not register IPMI driver\n");
5036                 return rv;
5037         }
5038
5039         pr_info("version " IPMI_DRIVER_VERSION "\n");
5040
5041         timer_setup(&ipmi_timer, ipmi_timeout, 0);
5042         mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5043
5044         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5045
5046         initialized = 1;
5047
5048         return 0;
5049 }
5050
5051 static int __init ipmi_init_msghandler_mod(void)
5052 {
5053         ipmi_init_msghandler();
5054         return 0;
5055 }
5056
5057 static void __exit cleanup_ipmi(void)
5058 {
5059         int count;
5060
5061         if (!initialized)
5062                 return;
5063
5064         atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
5065
5066         /*
5067          * This can't be called if any interfaces exist, so no worry
5068          * about shutting down the interfaces.
5069          */
5070
5071         /*
5072          * Tell the timer to stop, then wait for it to stop.  This
5073          * avoids problems with race conditions removing the timer
5074          * here.
5075          */
5076         atomic_inc(&stop_operation);
5077         del_timer_sync(&ipmi_timer);
5078
5079         driver_unregister(&ipmidriver.driver);
5080
5081         initialized = 0;
5082
5083         /* Check for buffer leaks. */
5084         count = atomic_read(&smi_msg_inuse_count);
5085         if (count != 0)
5086                 pr_warn("SMI message count %d at exit\n", count);
5087         count = atomic_read(&recv_msg_inuse_count);
5088         if (count != 0)
5089                 pr_warn("recv message count %d at exit\n", count);
5090 }
5091 module_exit(cleanup_ipmi);
5092
5093 module_init(ipmi_init_msghandler_mod);
5094 MODULE_LICENSE("GPL");
5095 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5096 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5097                    " interface.");
5098 MODULE_VERSION(IPMI_DRIVER_VERSION);
5099 MODULE_SOFTDEP("post: ipmi_devintf");