1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * ipmi_msghandler.c
4  *
5  * Incoming and outgoing message routing for an IPMI interface.
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  */
13
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/poll.h>
17 #include <linux/sched.h>
18 #include <linux/seq_file.h>
19 #include <linux/spinlock.h>
20 #include <linux/mutex.h>
21 #include <linux/slab.h>
22 #include <linux/ipmi.h>
23 #include <linux/ipmi_smi.h>
24 #include <linux/notifier.h>
25 #include <linux/init.h>
26 #include <linux/proc_fs.h>
27 #include <linux/rcupdate.h>
28 #include <linux/interrupt.h>
29 #include <linux/moduleparam.h>
30 #include <linux/workqueue.h>
31 #include <linux/uuid.h>
32 #include <linux/nospec.h>
33
34 #define PFX "IPMI message handler: "
35
36 #define IPMI_DRIVER_VERSION "39.2"
37
38 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
39 static int ipmi_init_msghandler(void);
40 static void smi_recv_tasklet(unsigned long);
41 static void handle_new_recv_msgs(struct ipmi_smi *intf);
42 static void need_waiter(struct ipmi_smi *intf);
43 static int handle_one_recv_msg(struct ipmi_smi *intf,
44                                struct ipmi_smi_msg *msg);
45
46 #ifdef DEBUG
47 static void ipmi_debug_msg(const char *title, unsigned char *data,
48                            unsigned int len)
49 {
50         int i, pos;
51         char buf[100];
52
53         pos = snprintf(buf, sizeof(buf), "%s: ", title);
54         for (i = 0; i < len; i++)
55                 pos += snprintf(buf + pos, sizeof(buf) - pos,
56                                 " %2.2x", data[i]);
57         pr_debug("%s\n", buf);
58 }
59 #else
60 static void ipmi_debug_msg(const char *title, unsigned char *data,
61                            unsigned int len)
62 { }
63 #endif
64
65 static bool initialized;
66 static bool drvregistered;
67
68 enum ipmi_panic_event_op {
69         IPMI_SEND_PANIC_EVENT_NONE,
70         IPMI_SEND_PANIC_EVENT,
71         IPMI_SEND_PANIC_EVENT_STRING
72 };
73 #ifdef CONFIG_IPMI_PANIC_STRING
74 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
75 #elif defined(CONFIG_IPMI_PANIC_EVENT)
76 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
77 #else
78 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
79 #endif
80 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
81
82 static int panic_op_write_handler(const char *val,
83                                   const struct kernel_param *kp)
84 {
85         char valcp[16];
86         char *s;
87
88         strncpy(valcp, val, 15);
89         valcp[15] = '\0';
90
91         s = strstrip(valcp);
92
93         if (strcmp(s, "none") == 0)
94                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
95         else if (strcmp(s, "event") == 0)
96                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
97         else if (strcmp(s, "string") == 0)
98                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
99         else
100                 return -EINVAL;
101
102         return 0;
103 }
104
105 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
106 {
107         switch (ipmi_send_panic_event) {
108         case IPMI_SEND_PANIC_EVENT_NONE:
109                 strcpy(buffer, "none");
110                 break;
111
112         case IPMI_SEND_PANIC_EVENT:
113                 strcpy(buffer, "event");
114                 break;
115
116         case IPMI_SEND_PANIC_EVENT_STRING:
117                 strcpy(buffer, "string");
118                 break;
119
120         default:
121                 strcpy(buffer, "???");
122                 break;
123         }
124
125         return strlen(buffer);
126 }
127
128 static const struct kernel_param_ops panic_op_ops = {
129         .set = panic_op_write_handler,
130         .get = panic_op_read_handler
131 };
132 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
133 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
134
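/*
 * Illustrative example (not part of the driver): the parameter above is
 * exposed like any other module parameter, so, assuming the handler is
 * built as the ipmi_msghandler module, the panic behavior can be chosen
 * at load time or changed later through sysfs, e.g.:
 *
 *   modprobe ipmi_msghandler panic_op=string
 *   echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */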
135
136 #define MAX_EVENTS_IN_QUEUE     25
137
138 /* Remain in auto-maintenance mode for this amount of time (in ms). */
139 static unsigned long maintenance_mode_timeout_ms = 30000;
140 module_param(maintenance_mode_timeout_ms, ulong, 0644);
141 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
142                  "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
143
144 /*
145  * Don't let a message sit in a queue forever, always time it with at least
146  * the max message timer.  This is in milliseconds.
147  */
148 #define MAX_MSG_TIMEOUT         60000
149
150 /*
151  * Timeout times below are in milliseconds, and are done off a 1
152  * second timer.  So setting the value to 1000 would mean anything
153  * between 0 and 1000ms.  So really the only reasonable minimum
154  * setting is 2000ms, which is between 1 and 2 seconds.
155  */
156
157 /* The default timeout for message retries. */
158 static unsigned long default_retry_ms = 2000;
159 module_param(default_retry_ms, ulong, 0644);
160 MODULE_PARM_DESC(default_retry_ms,
161                  "The time (milliseconds) between retry sends");
162
163 /* The default timeout for maintenance mode message retries. */
164 static unsigned long default_maintenance_retry_ms = 3000;
165 module_param(default_maintenance_retry_ms, ulong, 0644);
166 MODULE_PARM_DESC(default_maintenance_retry_ms,
167                  "The time (milliseconds) between retry sends in maintenance mode");
168
169 /* The default maximum number of retries */
170 static unsigned int default_max_retries = 4;
171 module_param(default_max_retries, uint, 0644);
172 MODULE_PARM_DESC(default_max_retries,
173                  "The default maximum number of retries before a message send fails");
174
175 /* Call every ~1000 ms. */
176 #define IPMI_TIMEOUT_TIME       1000
177
178 /* How many jiffies does it take to get to the timeout time. */
179 #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
180
181 /*
182  * Request events from the queue every second (this is the number of
183  * IPMI_TIMEOUT_TIME intervals between event requests).  Hopefully, in the
184  * future, IPMI will add a way to know immediately if an event is in
185  * the queue and this silliness can go away.
186  */
187 #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
188
189 /* How long should we cache dynamic device IDs? */
190 #define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
191
192 /*
193  * The main "user" data structure.
194  */
195 struct ipmi_user {
196         struct list_head link;
197
198         /*
199          * Set to NULL when the user is destroyed, a pointer to myself
200          * so srcu_dereference can be used on it.
201          */
202         struct ipmi_user *self;
203         struct srcu_struct release_barrier;
204
205         struct kref refcount;
206
207         /* The upper layer that handles receive messages. */
208         const struct ipmi_user_hndl *handler;
209         void             *handler_data;
210
211         /* The interface this user is bound to. */
212         struct ipmi_smi *intf;
213
214         /* Does this interface receive IPMI events? */
215         bool gets_events;
216
217         /* Free must run in process context for RCU cleanup. */
218         struct work_struct remove_work;
219 };
220
221 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
222         __acquires(user->release_barrier)
223 {
224         struct ipmi_user *ruser;
225
226         *index = srcu_read_lock(&user->release_barrier);
227         ruser = srcu_dereference(user->self, &user->release_barrier);
228         if (!ruser)
229                 srcu_read_unlock(&user->release_barrier, *index);
230         return ruser;
231 }
232
233 static void release_ipmi_user(struct ipmi_user *user, int index)
234 {
235         srcu_read_unlock(&user->release_barrier, index);
236 }
237
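/*
 * Typical usage pattern for the two helpers above (an illustrative
 * sketch, mirroring how the code later in this file uses them):
 *
 *   int index;
 *
 *   user = acquire_ipmi_user(user, &index);
 *   if (!user)
 *           return -ENODEV;
 *   ...use user->intf, user->handler, etc...
 *   release_ipmi_user(user, index);
 *
 * The SRCU read lock taken in acquire_ipmi_user() is what keeps the
 * user structure from being torn down while the caller is using it.
 */
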
238 struct cmd_rcvr {
239         struct list_head link;
240
241         struct ipmi_user *user;
242         unsigned char netfn;
243         unsigned char cmd;
244         unsigned int  chans;
245
246         /*
246          * This is used to form a linked list during mass deletion.
248          * Since this is in an RCU list, we cannot use the link above
249          * or change any data until the RCU period completes.  So we
250          * use this next variable during mass deletion so we can have
251          * a list and don't have to wait and restart the search on
252          * every individual deletion of a command.
253          */
254         struct cmd_rcvr *next;
255 };
256
257 struct seq_table {
258         unsigned int         inuse : 1;
259         unsigned int         broadcast : 1;
260
261         unsigned long        timeout;
262         unsigned long        orig_timeout;
263         unsigned int         retries_left;
264
265         /*
266          * To verify on an incoming send message response that this is
267          * the message that the response is for, we keep a sequence id
268          * and increment it every time we send a message.
269          */
270         long                 seqid;
271
272         /*
273          * This is held so we can properly respond to the message on a
274          * timeout, and it is used to hold the temporary data for
275          * retransmission, too.
276          */
277         struct ipmi_recv_msg *recv_msg;
278 };
279
280 /*
281  * Store the information in a msgid (long) to allow us to find a
282  * sequence table entry from the msgid.
283  */
284 #define STORE_SEQ_IN_MSGID(seq, seqid) \
285         ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
286
287 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
288         do {                                                            \
289                 seq = (((msgid) >> 26) & 0x3f);                         \
290                 seqid = ((msgid) & 0x3ffffff);                          \
291         } while (0)
292
293 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
294
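/*
 * Illustrative sketch (not used by the driver itself): packing a
 * sequence table index and sequence id into a msgid and recovering
 * them with the macros above:
 *
 *   unsigned char seq;
 *   long seqid, msgid;
 *
 *   msgid = STORE_SEQ_IN_MSGID(5, 0x123456);
 *   GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *
 * seq is now 5 and seqid is 0x123456; the values are masked to 6 and
 * 26 bits respectively, which matches NEXT_SEQID's wraparound.
 */
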
295 #define IPMI_MAX_CHANNELS       16
296 struct ipmi_channel {
297         unsigned char medium;
298         unsigned char protocol;
299 };
300
301 struct ipmi_channel_set {
302         struct ipmi_channel c[IPMI_MAX_CHANNELS];
303 };
304
305 struct ipmi_my_addrinfo {
306         /*
307          * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
308          * but may be changed by the user.
309          */
310         unsigned char address;
311
312         /*
313          * My LUN.  This should generally stay the SMS LUN, but just in
314          * case...
315          */
316         unsigned char lun;
317 };
318
319 /*
320  * Note that the product id, manufacturer id, guid, and device id are
321  * immutable in this structure, so dyn_mutex is not required for
322  * accessing those.  If those change on a BMC, a new BMC is allocated.
323  */
324 struct bmc_device {
325         struct platform_device pdev;
326         struct list_head       intfs; /* Interfaces on this BMC. */
327         struct ipmi_device_id  id;
328         struct ipmi_device_id  fetch_id;
329         int                    dyn_id_set;
330         unsigned long          dyn_id_expiry;
331         struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
332         guid_t                 guid;
333         guid_t                 fetch_guid;
334         int                    dyn_guid_set;
335         struct kref            usecount;
336         struct work_struct     remove_work;
337 };
338 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
339
340 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
341                              struct ipmi_device_id *id,
342                              bool *guid_set, guid_t *guid);
343
344 /*
345  * Various statistics for IPMI, these index stats[] in the ipmi_smi
346  * structure.
347  */
348 enum ipmi_stat_indexes {
349         /* Commands we got from the user that were invalid. */
350         IPMI_STAT_sent_invalid_commands = 0,
351
352         /* Commands we sent to the MC. */
353         IPMI_STAT_sent_local_commands,
354
355         /* Responses from the MC that were delivered to a user. */
356         IPMI_STAT_handled_local_responses,
357
358         /* Responses from the MC that were not delivered to a user. */
359         IPMI_STAT_unhandled_local_responses,
360
361         /* Commands we sent out to the IPMB bus. */
362         IPMI_STAT_sent_ipmb_commands,
363
364         /* Commands sent on the IPMB that had errors on the SEND CMD */
365         IPMI_STAT_sent_ipmb_command_errs,
366
367         /* Each retransmit increments this count. */
368         IPMI_STAT_retransmitted_ipmb_commands,
369
370         /*
371          * When a message times out (runs out of retransmits) this is
372          * incremented.
373          */
374         IPMI_STAT_timed_out_ipmb_commands,
375
376         /*
377          * This is like above, but for broadcasts.  Broadcasts are
378          * *not* included in the above count (they are expected to
379          * time out).
380          */
381         IPMI_STAT_timed_out_ipmb_broadcasts,
382
383         /* Responses I have sent to the IPMB bus. */
384         IPMI_STAT_sent_ipmb_responses,
385
386         /* The response was delivered to the user. */
387         IPMI_STAT_handled_ipmb_responses,
388
389         /* The response had invalid data in it. */
390         IPMI_STAT_invalid_ipmb_responses,
391
392         /* The response didn't have anyone waiting for it. */
393         IPMI_STAT_unhandled_ipmb_responses,
394
395         /* Commands we sent out over the LAN. */
396         IPMI_STAT_sent_lan_commands,
397
398         /* Commands sent over the LAN that had errors on the SEND CMD */
399         IPMI_STAT_sent_lan_command_errs,
400
401         /* Each retransmit increments this count. */
402         IPMI_STAT_retransmitted_lan_commands,
403
404         /*
405          * When a message times out (runs out of retransmits) this is
406          * incremented.
407          */
408         IPMI_STAT_timed_out_lan_commands,
409
410         /* Responses I have sent over the LAN. */
411         IPMI_STAT_sent_lan_responses,
412
413         /* The response was delivered to the user. */
414         IPMI_STAT_handled_lan_responses,
415
416         /* The response had invalid data in it. */
417         IPMI_STAT_invalid_lan_responses,
418
419         /* The response didn't have anyone waiting for it. */
420         IPMI_STAT_unhandled_lan_responses,
421
422         /* The command was delivered to the user. */
423         IPMI_STAT_handled_commands,
424
425         /* The command had invalid data in it. */
426         IPMI_STAT_invalid_commands,
427
428         /* The command didn't have anyone waiting for it. */
429         IPMI_STAT_unhandled_commands,
430
431         /* Invalid data in an event. */
432         IPMI_STAT_invalid_events,
433
434         /* Events that were received with the proper format. */
435         IPMI_STAT_events,
436
437         /* Retransmissions on IPMB that failed. */
438         IPMI_STAT_dropped_rexmit_ipmb_commands,
439
440         /* Retransmissions on LAN that failed. */
441         IPMI_STAT_dropped_rexmit_lan_commands,
442
443         /* This *must* remain last, add new values above this. */
444         IPMI_NUM_STATS
445 };
446
447
448 #define IPMI_IPMB_NUM_SEQ       64
449 struct ipmi_smi {
450         struct module *owner;
451
452         /* What interface number are we? */
453         int intf_num;
454
455         struct kref refcount;
456
457         /* Set when the interface is being unregistered. */
458         bool in_shutdown;
459
460         /* Used for a list of interfaces. */
461         struct list_head link;
462
463         /*
464          * The list of upper layers that are using me.  seq_lock write
465          * protects this.  Read protection is with srcu.
466          */
467         struct list_head users;
468         struct srcu_struct users_srcu;
469
470         /* Used for wake ups at startup. */
471         wait_queue_head_t waitq;
472
473         /*
474          * Prevents the interface from being unregistered when the
475          * interface is used by being looked up through the BMC
476          * structure.
477          */
478         struct mutex bmc_reg_mutex;
479
480         struct bmc_device tmp_bmc;
481         struct bmc_device *bmc;
482         bool bmc_registered;
483         struct list_head bmc_link;
484         char *my_dev_name;
485         bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
486         struct work_struct bmc_reg_work;
487
488         const struct ipmi_smi_handlers *handlers;
489         void                     *send_info;
490
491         /* Driver-model device for the system interface. */
492         struct device          *si_dev;
493
494         /*
495          * A table of sequence numbers for this interface.  We use the
496          * sequence numbers for IPMB messages that go out of the
497          * interface to match them up with their responses.  A routine
498          * is called periodically to time the items in this list.
499          */
500         spinlock_t       seq_lock;
501         struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
502         int curr_seq;
503
504         /*
505          * Messages queued for delivery.  If delivery fails (out of memory
506          * for instance), they will stay in here to be processed later in a
507          * periodic timer interrupt.  The tasklet is for handling received
508          * messages directly from the handler.
509          */
510         spinlock_t       waiting_rcv_msgs_lock;
511         struct list_head waiting_rcv_msgs;
512         atomic_t         watchdog_pretimeouts_to_deliver;
513         struct tasklet_struct recv_tasklet;
514
515         spinlock_t             xmit_msgs_lock;
516         struct list_head       xmit_msgs;
517         struct ipmi_smi_msg    *curr_msg;
518         struct list_head       hp_xmit_msgs;
519
520         /*
521          * The list of command receivers that are registered for commands
522          * on this interface.
523          */
524         struct mutex     cmd_rcvrs_mutex;
525         struct list_head cmd_rcvrs;
526
527         /*
528          * Events that were queued because no one was there to receive
529          * them.
530          */
531         spinlock_t       events_lock; /* For dealing with event stuff. */
532         struct list_head waiting_events;
533         unsigned int     waiting_events_count; /* How many events in queue? */
534         char             delivering_events;
535         char             event_msg_printed;
536         atomic_t         event_waiters;
537         unsigned int     ticks_to_req_ev;
538         int              last_needs_timer;
539
540         /*
541          * The event receiver for my BMC, only really used at panic
542          * shutdown as a place to store this.
543          */
544         unsigned char event_receiver;
545         unsigned char event_receiver_lun;
546         unsigned char local_sel_device;
547         unsigned char local_event_generator;
548
549         /* For handling of maintenance mode. */
550         int maintenance_mode;
551         bool maintenance_mode_enable;
552         int auto_maintenance_timeout;
553         spinlock_t maintenance_mode_lock; /* Used in a timer... */
554
555         /*
556          * If we are doing maintenance on something on IPMB, extend
557          * the timeout time to avoid timeouts writing firmware and
558          * such.
559          */
560         int ipmb_maintenance_mode_timeout;
561
562         /*
563          * A cheap hack: if this is non-null and a message to an
564          * interface comes in with a NULL user, call this routine with
565          * it.  Note that the message will still be freed by the
566          * caller.  This only works on the system interface.
567          *
568          * Protected by bmc_reg_mutex.
569          */
570         void (*null_user_handler)(struct ipmi_smi *intf,
571                                   struct ipmi_recv_msg *msg);
572
573         /*
574          * When we are scanning the channels for an SMI, this will
575          * tell which channel we are scanning.
576          */
577         int curr_channel;
578
579         /* Channel information */
580         struct ipmi_channel_set *channel_list;
581         unsigned int curr_working_cset; /* First index into the following. */
582         struct ipmi_channel_set wchannels[2];
583         struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
584         bool channels_ready;
585
586         atomic_t stats[IPMI_NUM_STATS];
587
588         /*
589          * run_to_completion duplicate of smb_info, smi_info
590          * and ipmi_serial_info structures. Used to decrease numbers of
591          * parameters passed by "low" level IPMI code.
592          */
593         int run_to_completion;
594 };
595 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
596
597 static void __get_guid(struct ipmi_smi *intf);
598 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
599 static int __ipmi_bmc_register(struct ipmi_smi *intf,
600                                struct ipmi_device_id *id,
601                                bool guid_set, guid_t *guid, int intf_num);
602 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
603
604
605 /*
606  * The driver model view of the IPMI messaging driver.
607  */
608 static struct platform_driver ipmidriver = {
609         .driver = {
610                 .name = "ipmi",
611                 .bus = &platform_bus_type
612         }
613 };
614 /*
615  * This mutex keeps us from adding the same BMC twice.
616  */
617 static DEFINE_MUTEX(ipmidriver_mutex);
618
619 static LIST_HEAD(ipmi_interfaces);
620 static DEFINE_MUTEX(ipmi_interfaces_mutex);
621 struct srcu_struct ipmi_interfaces_srcu;
622
623 /*
624  * List of watchers that want to know when smi's are added and deleted.
625  */
626 static LIST_HEAD(smi_watchers);
627 static DEFINE_MUTEX(smi_watchers_mutex);
628
629 #define ipmi_inc_stat(intf, stat) \
630         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
631 #define ipmi_get_stat(intf, stat) \
632         ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
633
634 static const char * const addr_src_to_str[] = {
635         "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
636         "device-tree", "platform"
637 };
638
639 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
640 {
641         if (src >= SI_LAST)
642                 src = 0; /* Invalid */
643         return addr_src_to_str[src];
644 }
645 EXPORT_SYMBOL(ipmi_addr_src_to_str);
646
647 static int is_lan_addr(struct ipmi_addr *addr)
648 {
649         return addr->addr_type == IPMI_LAN_ADDR_TYPE;
650 }
651
652 static int is_ipmb_addr(struct ipmi_addr *addr)
653 {
654         return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
655 }
656
657 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
658 {
659         return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
660 }
661
662 static void free_recv_msg_list(struct list_head *q)
663 {
664         struct ipmi_recv_msg *msg, *msg2;
665
666         list_for_each_entry_safe(msg, msg2, q, link) {
667                 list_del(&msg->link);
668                 ipmi_free_recv_msg(msg);
669         }
670 }
671
672 static void free_smi_msg_list(struct list_head *q)
673 {
674         struct ipmi_smi_msg *msg, *msg2;
675
676         list_for_each_entry_safe(msg, msg2, q, link) {
677                 list_del(&msg->link);
678                 ipmi_free_smi_msg(msg);
679         }
680 }
681
682 static void clean_up_interface_data(struct ipmi_smi *intf)
683 {
684         int              i;
685         struct cmd_rcvr  *rcvr, *rcvr2;
686         struct list_head list;
687
688         tasklet_kill(&intf->recv_tasklet);
689
690         free_smi_msg_list(&intf->waiting_rcv_msgs);
691         free_recv_msg_list(&intf->waiting_events);
692
693         /*
694          * Wholesale remove all the entries from the list in the
695          * interface and wait for RCU to know that none are in use.
696          */
697         mutex_lock(&intf->cmd_rcvrs_mutex);
698         INIT_LIST_HEAD(&list);
699         list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
700         mutex_unlock(&intf->cmd_rcvrs_mutex);
701
702         list_for_each_entry_safe(rcvr, rcvr2, &list, link)
703                 kfree(rcvr);
704
705         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
706                 if ((intf->seq_table[i].inuse)
707                                         && (intf->seq_table[i].recv_msg))
708                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
709         }
710 }
711
712 static void intf_free(struct kref *ref)
713 {
714         struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
715
716         clean_up_interface_data(intf);
717         kfree(intf);
718 }
719
720 struct watcher_entry {
721         int              intf_num;
722         struct ipmi_smi  *intf;
723         struct list_head link;
724 };
725
726 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
727 {
728         struct ipmi_smi *intf;
729         int index, rv;
730
731         /*
732          * Make sure the driver is actually initialized, this handles
733          * problems with initialization order.
734          */
735         rv = ipmi_init_msghandler();
736         if (rv)
737                 return rv;
738
739         mutex_lock(&smi_watchers_mutex);
740
741         list_add(&watcher->link, &smi_watchers);
742
743         index = srcu_read_lock(&ipmi_interfaces_srcu);
744         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
745                 int intf_num = READ_ONCE(intf->intf_num);
746
747                 if (intf_num == -1)
748                         continue;
749                 watcher->new_smi(intf_num, intf->si_dev);
750         }
751         srcu_read_unlock(&ipmi_interfaces_srcu, index);
752
753         mutex_unlock(&smi_watchers_mutex);
754
755         return 0;
756 }
757 EXPORT_SYMBOL(ipmi_smi_watcher_register);
758
759 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
760 {
761         mutex_lock(&smi_watchers_mutex);
762         list_del(&watcher->link);
763         mutex_unlock(&smi_watchers_mutex);
764         return 0;
765 }
766 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
767
768 /*
769  * Must be called with smi_watchers_mutex held.
770  */
771 static void
772 call_smi_watchers(int i, struct device *dev)
773 {
774         struct ipmi_smi_watcher *w;
775
776         mutex_lock(&smi_watchers_mutex);
777         list_for_each_entry(w, &smi_watchers, link) {
778                 if (try_module_get(w->owner)) {
779                         w->new_smi(i, dev);
780                         module_put(w->owner);
781                 }
782         }
783         mutex_unlock(&smi_watchers_mutex);
784 }
785
786 static int
787 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
788 {
789         if (addr1->addr_type != addr2->addr_type)
790                 return 0;
791
792         if (addr1->channel != addr2->channel)
793                 return 0;
794
795         if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
796                 struct ipmi_system_interface_addr *smi_addr1
797                     = (struct ipmi_system_interface_addr *) addr1;
798                 struct ipmi_system_interface_addr *smi_addr2
799                     = (struct ipmi_system_interface_addr *) addr2;
800                 return (smi_addr1->lun == smi_addr2->lun);
801         }
802
803         if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
804                 struct ipmi_ipmb_addr *ipmb_addr1
805                     = (struct ipmi_ipmb_addr *) addr1;
806                 struct ipmi_ipmb_addr *ipmb_addr2
807                     = (struct ipmi_ipmb_addr *) addr2;
808
809                 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
810                         && (ipmb_addr1->lun == ipmb_addr2->lun));
811         }
812
813         if (is_lan_addr(addr1)) {
814                 struct ipmi_lan_addr *lan_addr1
815                         = (struct ipmi_lan_addr *) addr1;
816                 struct ipmi_lan_addr *lan_addr2
817                     = (struct ipmi_lan_addr *) addr2;
818
819                 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
820                         && (lan_addr1->local_SWID == lan_addr2->local_SWID)
821                         && (lan_addr1->session_handle
822                             == lan_addr2->session_handle)
823                         && (lan_addr1->lun == lan_addr2->lun));
824         }
825
826         return 1;
827 }
828
829 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
830 {
831         if (len < sizeof(struct ipmi_system_interface_addr))
832                 return -EINVAL;
833
834         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
835                 if (addr->channel != IPMI_BMC_CHANNEL)
836                         return -EINVAL;
837                 return 0;
838         }
839
840         if ((addr->channel == IPMI_BMC_CHANNEL)
841             || (addr->channel >= IPMI_MAX_CHANNELS)
842             || (addr->channel < 0))
843                 return -EINVAL;
844
845         if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
846                 if (len < sizeof(struct ipmi_ipmb_addr))
847                         return -EINVAL;
848                 return 0;
849         }
850
851         if (is_lan_addr(addr)) {
852                 if (len < sizeof(struct ipmi_lan_addr))
853                         return -EINVAL;
854                 return 0;
855         }
856
857         return -EINVAL;
858 }
859 EXPORT_SYMBOL(ipmi_validate_addr);
860
861 unsigned int ipmi_addr_length(int addr_type)
862 {
863         if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
864                 return sizeof(struct ipmi_system_interface_addr);
865
866         if ((addr_type == IPMI_IPMB_ADDR_TYPE)
867                         || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
868                 return sizeof(struct ipmi_ipmb_addr);
869
870         if (addr_type == IPMI_LAN_ADDR_TYPE)
871                 return sizeof(struct ipmi_lan_addr);
872
873         return 0;
874 }
875 EXPORT_SYMBOL(ipmi_addr_length);
876
877 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
878 {
879         int rv = 0;
880
881         if (!msg->user) {
882                 /* Special handling for NULL users. */
883                 if (intf->null_user_handler) {
884                         intf->null_user_handler(intf, msg);
885                 } else {
886                         /* No handler, so give up. */
887                         rv = -EINVAL;
888                 }
889                 ipmi_free_recv_msg(msg);
890         } else if (!oops_in_progress) {
891                 /*
892                  * If we are running in the panic context, calling the
893                  * receive handler doesn't much meaning and has a deadlock
894                  * risk.  At this moment, simply skip it in that case.
895                  */
896                 int index;
897                 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
898
899                 if (user) {
900                         user->handler->ipmi_recv_hndl(msg, user->handler_data);
901                         release_ipmi_user(user, index);
902                 } else {
903                         /* User went away, give up. */
904                         ipmi_free_recv_msg(msg);
905                         rv = -EINVAL;
906                 }
907         }
908
909         return rv;
910 }
911
912 static void deliver_local_response(struct ipmi_smi *intf,
913                                    struct ipmi_recv_msg *msg)
914 {
915         if (deliver_response(intf, msg))
916                 ipmi_inc_stat(intf, unhandled_local_responses);
917         else
918                 ipmi_inc_stat(intf, handled_local_responses);
919 }
920
921 static void deliver_err_response(struct ipmi_smi *intf,
922                                  struct ipmi_recv_msg *msg, int err)
923 {
924         msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
925         msg->msg_data[0] = err;
926         msg->msg.netfn |= 1; /* Convert to a response. */
927         msg->msg.data_len = 1;
928         msg->msg.data = msg->msg_data;
929         deliver_local_response(intf, msg);
930 }
931
932 /*
933  * Find the next sequence number not being used and add the given
934  * message with the given timeout to the sequence table.  This must be
935  * called with the interface's seq_lock held.
936  */
937 static int intf_next_seq(struct ipmi_smi      *intf,
938                          struct ipmi_recv_msg *recv_msg,
939                          unsigned long        timeout,
940                          int                  retries,
941                          int                  broadcast,
942                          unsigned char        *seq,
943                          long                 *seqid)
944 {
945         int          rv = 0;
946         unsigned int i;
947
948         if (timeout == 0)
949                 timeout = default_retry_ms;
950         if (retries < 0)
951                 retries = default_max_retries;
952
953         for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
954                                         i = (i+1)%IPMI_IPMB_NUM_SEQ) {
955                 if (!intf->seq_table[i].inuse)
956                         break;
957         }
958
959         if (!intf->seq_table[i].inuse) {
960                 intf->seq_table[i].recv_msg = recv_msg;
961
962                 /*
963                  * Start with the maximum timeout, when the send response
964                  * comes in we will start the real timer.
965                  */
966                 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
967                 intf->seq_table[i].orig_timeout = timeout;
968                 intf->seq_table[i].retries_left = retries;
969                 intf->seq_table[i].broadcast = broadcast;
970                 intf->seq_table[i].inuse = 1;
971                 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
972                 *seq = i;
973                 *seqid = intf->seq_table[i].seqid;
974                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
975                 need_waiter(intf);
976         } else {
977                 rv = -EAGAIN;
978         }
979
980         return rv;
981 }
982
983 /*
984  * Return the receive message for the given sequence number and
985  * release the sequence number so it can be reused.  Some other data
986  * is passed in to be sure the message matches up correctly (to help
987  * guard against messages coming in after their timeout and the
988  * sequence number being reused).
989  */
990 static int intf_find_seq(struct ipmi_smi      *intf,
991                          unsigned char        seq,
992                          short                channel,
993                          unsigned char        cmd,
994                          unsigned char        netfn,
995                          struct ipmi_addr     *addr,
996                          struct ipmi_recv_msg **recv_msg)
997 {
998         int           rv = -ENODEV;
999         unsigned long flags;
1000
1001         if (seq >= IPMI_IPMB_NUM_SEQ)
1002                 return -EINVAL;
1003
1004         spin_lock_irqsave(&intf->seq_lock, flags);
1005         if (intf->seq_table[seq].inuse) {
1006                 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1007
1008                 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1009                                 && (msg->msg.netfn == netfn)
1010                                 && (ipmi_addr_equal(addr, &msg->addr))) {
1011                         *recv_msg = msg;
1012                         intf->seq_table[seq].inuse = 0;
1013                         rv = 0;
1014                 }
1015         }
1016         spin_unlock_irqrestore(&intf->seq_lock, flags);
1017
1018         return rv;
1019 }
1020
1021
1022 /* Start the timer for a specific sequence table entry. */
1023 static int intf_start_seq_timer(struct ipmi_smi *intf,
1024                                 long       msgid)
1025 {
1026         int           rv = -ENODEV;
1027         unsigned long flags;
1028         unsigned char seq;
1029         unsigned long seqid;
1030
1031
1032         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1033
1034         spin_lock_irqsave(&intf->seq_lock, flags);
1035         /*
1036          * We do this verification because the user can be deleted
1037          * while a message is outstanding.
1038          */
1039         if ((intf->seq_table[seq].inuse)
1040                                 && (intf->seq_table[seq].seqid == seqid)) {
1041                 struct seq_table *ent = &intf->seq_table[seq];
1042                 ent->timeout = ent->orig_timeout;
1043                 rv = 0;
1044         }
1045         spin_unlock_irqrestore(&intf->seq_lock, flags);
1046
1047         return rv;
1048 }
1049
1050 /* Got an error for the send message for a specific sequence number. */
1051 static int intf_err_seq(struct ipmi_smi *intf,
1052                         long         msgid,
1053                         unsigned int err)
1054 {
1055         int                  rv = -ENODEV;
1056         unsigned long        flags;
1057         unsigned char        seq;
1058         unsigned long        seqid;
1059         struct ipmi_recv_msg *msg = NULL;
1060
1061
1062         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1063
1064         spin_lock_irqsave(&intf->seq_lock, flags);
1065         /*
1066          * We do this verification because the user can be deleted
1067          * while a message is outstanding.
1068          */
1069         if ((intf->seq_table[seq].inuse)
1070                                 && (intf->seq_table[seq].seqid == seqid)) {
1071                 struct seq_table *ent = &intf->seq_table[seq];
1072
1073                 ent->inuse = 0;
1074                 msg = ent->recv_msg;
1075                 rv = 0;
1076         }
1077         spin_unlock_irqrestore(&intf->seq_lock, flags);
1078
1079         if (msg)
1080                 deliver_err_response(intf, msg, err);
1081
1082         return rv;
1083 }
1084
1085
1086 static void free_user_work(struct work_struct *work)
1087 {
1088         struct ipmi_user *user = container_of(work, struct ipmi_user,
1089                                               remove_work);
1090
1091         cleanup_srcu_struct(&user->release_barrier);
1092         kfree(user);
1093 }
1094
1095 int ipmi_create_user(unsigned int          if_num,
1096                      const struct ipmi_user_hndl *handler,
1097                      void                  *handler_data,
1098                      struct ipmi_user      **user)
1099 {
1100         unsigned long flags;
1101         struct ipmi_user *new_user;
1102         int           rv, index;
1103         struct ipmi_smi *intf;
1104
1105         /*
1106          * There is no module usecount here, because it's not
1107          * required.  Since this can only be used by and called from
1108          * other modules, they will implicitly use this module, and
1109          * thus this can't be removed unless the other modules are
1110          * removed.
1111          */
1112
1113         if (handler == NULL)
1114                 return -EINVAL;
1115
1116         /*
1117          * Make sure the driver is actually initialized, this handles
1118          * problems with initialization order.
1119          */
1120         rv = ipmi_init_msghandler();
1121         if (rv)
1122                 return rv;
1123
1124         new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1125         if (!new_user)
1126                 return -ENOMEM;
1127
1128         index = srcu_read_lock(&ipmi_interfaces_srcu);
1129         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1130                 if (intf->intf_num == if_num)
1131                         goto found;
1132         }
1133         /* Not found, return an error */
1134         rv = -EINVAL;
1135         goto out_kfree;
1136
1137  found:
1138         INIT_WORK(&new_user->remove_work, free_user_work);
1139
1140         rv = init_srcu_struct(&new_user->release_barrier);
1141         if (rv)
1142                 goto out_kfree;
1143
1144         if (!try_module_get(intf->owner)) {
1145                 rv = -ENODEV;
1146                 goto out_kfree;
1147         }
1148
1149         /* Note that each existing user holds a refcount to the interface. */
1150         kref_get(&intf->refcount);
1151
1152         kref_init(&new_user->refcount);
1153         new_user->handler = handler;
1154         new_user->handler_data = handler_data;
1155         new_user->intf = intf;
1156         new_user->gets_events = false;
1157
1158         rcu_assign_pointer(new_user->self, new_user);
1159         spin_lock_irqsave(&intf->seq_lock, flags);
1160         list_add_rcu(&new_user->link, &intf->users);
1161         spin_unlock_irqrestore(&intf->seq_lock, flags);
1162         if (handler->ipmi_watchdog_pretimeout) {
1163                 /* User wants pretimeouts, so make sure to watch for them. */
1164                 if (atomic_inc_return(&intf->event_waiters) == 1)
1165                         need_waiter(intf);
1166         }
1167         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1168         *user = new_user;
1169         return 0;
1170
1171 out_kfree:
1172         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1173         kfree(new_user);
1174         return rv;
1175 }
1176 EXPORT_SYMBOL(ipmi_create_user);
1177
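/*
 * Illustrative sketch of a caller of ipmi_create_user() (not part of
 * this file; the names are hypothetical):
 *
 *   static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *   {
 *           ...examine msg->recv_type, msg->msg, msg->msg_data...
 *           ipmi_free_recv_msg(msg);
 *   }
 *
 *   static const struct ipmi_user_hndl my_hndl = {
 *           .ipmi_recv_hndl = my_recv,
 *   };
 *
 *   struct ipmi_user *user;
 *   int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *   if (rv)
 *           return rv;
 *   ...send and receive messages...
 *   ipmi_destroy_user(user);
 */
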
1178 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1179 {
1180         int rv, index;
1181         struct ipmi_smi *intf;
1182
1183         index = srcu_read_lock(&ipmi_interfaces_srcu);
1184         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1185                 if (intf->intf_num == if_num)
1186                         goto found;
1187         }
1188         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1189
1190         /* Not found, return an error */
1191         return -EINVAL;
1192
1193 found:
1194         if (!intf->handlers->get_smi_info)
1195                 rv = -ENOTTY;
1196         else
1197                 rv = intf->handlers->get_smi_info(intf->send_info, data);
1198         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1199
1200         return rv;
1201 }
1202 EXPORT_SYMBOL(ipmi_get_smi_info);
1203
1204 static void free_user(struct kref *ref)
1205 {
1206         struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1207
1208         /* SRCU cleanup must happen in task context. */
1209         schedule_work(&user->remove_work);
1210 }
1211
1212 static void _ipmi_destroy_user(struct ipmi_user *user)
1213 {
1214         struct ipmi_smi  *intf = user->intf;
1215         int              i;
1216         unsigned long    flags;
1217         struct cmd_rcvr  *rcvr;
1218         struct cmd_rcvr  *rcvrs = NULL;
1219
1220         if (!acquire_ipmi_user(user, &i)) {
1221                 /*
1222                  * The user has already been cleaned up, just make sure
1223                  * nothing is using it and return.
1224                  */
1225                 synchronize_srcu(&user->release_barrier);
1226                 return;
1227         }
1228
1229         rcu_assign_pointer(user->self, NULL);
1230         release_ipmi_user(user, i);
1231
1232         synchronize_srcu(&user->release_barrier);
1233
1234         if (user->handler->shutdown)
1235                 user->handler->shutdown(user->handler_data);
1236
1237         if (user->handler->ipmi_watchdog_pretimeout)
1238                 atomic_dec(&intf->event_waiters);
1239
1240         if (user->gets_events)
1241                 atomic_dec(&intf->event_waiters);
1242
1243         /* Remove the user from the interface's sequence table. */
1244         spin_lock_irqsave(&intf->seq_lock, flags);
1245         list_del_rcu(&user->link);
1246
1247         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1248                 if (intf->seq_table[i].inuse
1249                     && (intf->seq_table[i].recv_msg->user == user)) {
1250                         intf->seq_table[i].inuse = 0;
1251                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1252                 }
1253         }
1254         spin_unlock_irqrestore(&intf->seq_lock, flags);
1255
1256         /*
1257          * Remove the user from the command receiver's table.  First
1258          * we build a list of everything (not using the standard link,
1259          * since other things may be using it till we do
1260          * synchronize_rcu()), then free everything in that list.
1261          */
1262         mutex_lock(&intf->cmd_rcvrs_mutex);
1263         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1264                 if (rcvr->user == user) {
1265                         list_del_rcu(&rcvr->link);
1266                         rcvr->next = rcvrs;
1267                         rcvrs = rcvr;
1268                 }
1269         }
1270         mutex_unlock(&intf->cmd_rcvrs_mutex);
1271         synchronize_rcu();
1272         while (rcvrs) {
1273                 rcvr = rcvrs;
1274                 rcvrs = rcvr->next;
1275                 kfree(rcvr);
1276         }
1277
1278         kref_put(&intf->refcount, intf_free);
1279         module_put(intf->owner);
1280 }
1281
1282 int ipmi_destroy_user(struct ipmi_user *user)
1283 {
1284         _ipmi_destroy_user(user);
1285
1286         kref_put(&user->refcount, free_user);
1287
1288         return 0;
1289 }
1290 EXPORT_SYMBOL(ipmi_destroy_user);
1291
1292 int ipmi_get_version(struct ipmi_user *user,
1293                      unsigned char *major,
1294                      unsigned char *minor)
1295 {
1296         struct ipmi_device_id id;
1297         int rv, index;
1298
1299         user = acquire_ipmi_user(user, &index);
1300         if (!user)
1301                 return -ENODEV;
1302
1303         rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1304         if (!rv) {
1305                 *major = ipmi_version_major(&id);
1306                 *minor = ipmi_version_minor(&id);
1307         }
1308         release_ipmi_user(user, index);
1309
1310         return rv;
1311 }
1312 EXPORT_SYMBOL(ipmi_get_version);
1313
1314 int ipmi_set_my_address(struct ipmi_user *user,
1315                         unsigned int  channel,
1316                         unsigned char address)
1317 {
1318         int index, rv = 0;
1319
1320         user = acquire_ipmi_user(user, &index);
1321         if (!user)
1322                 return -ENODEV;
1323
1324         if (channel >= IPMI_MAX_CHANNELS) {
1325                 rv = -EINVAL;
1326         } else {
1327                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1328                 user->intf->addrinfo[channel].address = address;
1329         }
1330         release_ipmi_user(user, index);
1331
1332         return rv;
1333 }
1334 EXPORT_SYMBOL(ipmi_set_my_address);
1335
1336 int ipmi_get_my_address(struct ipmi_user *user,
1337                         unsigned int  channel,
1338                         unsigned char *address)
1339 {
1340         int index, rv = 0;
1341
1342         user = acquire_ipmi_user(user, &index);
1343         if (!user)
1344                 return -ENODEV;
1345
1346         if (channel >= IPMI_MAX_CHANNELS) {
1347                 rv = -EINVAL;
1348         } else {
1349                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1350                 *address = user->intf->addrinfo[channel].address;
1351         }
1352         release_ipmi_user(user, index);
1353
1354         return rv;
1355 }
1356 EXPORT_SYMBOL(ipmi_get_my_address);
1357
1358 int ipmi_set_my_LUN(struct ipmi_user *user,
1359                     unsigned int  channel,
1360                     unsigned char LUN)
1361 {
1362         int index, rv = 0;
1363
1364         user = acquire_ipmi_user(user, &index);
1365         if (!user)
1366                 return -ENODEV;
1367
1368         if (channel >= IPMI_MAX_CHANNELS) {
1369                 rv = -EINVAL;
1370         } else {
1371                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1372                 user->intf->addrinfo[channel].lun = LUN & 0x3;
1373         }
1374         release_ipmi_user(user, index);
1375
1376         return rv;
1377 }
1378 EXPORT_SYMBOL(ipmi_set_my_LUN);
1379
1380 int ipmi_get_my_LUN(struct ipmi_user *user,
1381                     unsigned int  channel,
1382                     unsigned char *address)
1383 {
1384         int index, rv = 0;
1385
1386         user = acquire_ipmi_user(user, &index);
1387         if (!user)
1388                 return -ENODEV;
1389
1390         if (channel >= IPMI_MAX_CHANNELS) {
1391                 rv = -EINVAL;
1392         } else {
1393                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1394                 *address = user->intf->addrinfo[channel].lun;
1395         }
1396         release_ipmi_user(user, index);
1397
1398         return rv;
1399 }
1400 EXPORT_SYMBOL(ipmi_get_my_LUN);
1401
1402 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1403 {
1404         int mode, index;
1405         unsigned long flags;
1406
1407         user = acquire_ipmi_user(user, &index);
1408         if (!user)
1409                 return -ENODEV;
1410
1411         spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1412         mode = user->intf->maintenance_mode;
1413         spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1414         release_ipmi_user(user, index);
1415
1416         return mode;
1417 }
1418 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1419
1420 static void maintenance_mode_update(struct ipmi_smi *intf)
1421 {
1422         if (intf->handlers->set_maintenance_mode)
1423                 intf->handlers->set_maintenance_mode(
1424                         intf->send_info, intf->maintenance_mode_enable);
1425 }
1426
1427 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1428 {
1429         int rv = 0, index;
1430         unsigned long flags;
1431         struct ipmi_smi *intf = user->intf;
1432
1433         user = acquire_ipmi_user(user, &index);
1434         if (!user)
1435                 return -ENODEV;
1436
1437         spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1438         if (intf->maintenance_mode != mode) {
1439                 switch (mode) {
1440                 case IPMI_MAINTENANCE_MODE_AUTO:
1441                         intf->maintenance_mode_enable
1442                                 = (intf->auto_maintenance_timeout > 0);
1443                         break;
1444
1445                 case IPMI_MAINTENANCE_MODE_OFF:
1446                         intf->maintenance_mode_enable = false;
1447                         break;
1448
1449                 case IPMI_MAINTENANCE_MODE_ON:
1450                         intf->maintenance_mode_enable = true;
1451                         break;
1452
1453                 default:
1454                         rv = -EINVAL;
1455                         goto out_unlock;
1456                 }
1457                 intf->maintenance_mode = mode;
1458
1459                 maintenance_mode_update(intf);
1460         }
1461  out_unlock:
1462         spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1463         release_ipmi_user(user, index);
1464
1465         return rv;
1466 }
1467 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1468
1469 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1470 {
1471         unsigned long        flags;
1472         struct ipmi_smi      *intf = user->intf;
1473         struct ipmi_recv_msg *msg, *msg2;
1474         struct list_head     msgs;
1475         int index;
1476
1477         user = acquire_ipmi_user(user, &index);
1478         if (!user)
1479                 return -ENODEV;
1480
1481         INIT_LIST_HEAD(&msgs);
1482
1483         spin_lock_irqsave(&intf->events_lock, flags);
1484         if (user->gets_events == val)
1485                 goto out;
1486
1487         user->gets_events = val;
1488
1489         if (val) {
1490                 if (atomic_inc_return(&intf->event_waiters) == 1)
1491                         need_waiter(intf);
1492         } else {
1493                 atomic_dec(&intf->event_waiters);
1494         }
1495
1496         if (intf->delivering_events)
1497                 /*
1498                  * Another thread is delivering events for this, so
1499                  * let it handle any new events.
1500                  */
1501                 goto out;
1502
1503         /* Deliver any queued events. */
1504         while (user->gets_events && !list_empty(&intf->waiting_events)) {
1505                 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1506                         list_move_tail(&msg->link, &msgs);
1507                 intf->waiting_events_count = 0;
1508                 if (intf->event_msg_printed) {
1509                         dev_warn(intf->si_dev,
1510                                  PFX "Event queue no longer full\n");
1511                         intf->event_msg_printed = 0;
1512                 }
1513
1514                 intf->delivering_events = 1;
1515                 spin_unlock_irqrestore(&intf->events_lock, flags);
1516
1517                 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1518                         msg->user = user;
1519                         kref_get(&user->refcount);
1520                         deliver_local_response(intf, msg);
1521                 }
1522
1523                 spin_lock_irqsave(&intf->events_lock, flags);
1524                 intf->delivering_events = 0;
1525         }
1526
1527  out:
1528         spin_unlock_irqrestore(&intf->events_lock, flags);
1529         release_ipmi_user(user, index);
1530
1531         return 0;
1532 }
1533 EXPORT_SYMBOL(ipmi_set_gets_events);
1534
1535 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1536                                       unsigned char netfn,
1537                                       unsigned char cmd,
1538                                       unsigned char chan)
1539 {
1540         struct cmd_rcvr *rcvr;
1541
1542         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1543                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1544                                         && (rcvr->chans & (1 << chan)))
1545                         return rcvr;
1546         }
1547         return NULL;
1548 }
1549
1550 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1551                                  unsigned char netfn,
1552                                  unsigned char cmd,
1553                                  unsigned int  chans)
1554 {
1555         struct cmd_rcvr *rcvr;
1556
1557         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1558                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1559                                         && (rcvr->chans & chans))
1560                         return 0;
1561         }
1562         return 1;
1563 }
1564
1565 int ipmi_register_for_cmd(struct ipmi_user *user,
1566                           unsigned char netfn,
1567                           unsigned char cmd,
1568                           unsigned int  chans)
1569 {
1570         struct ipmi_smi *intf = user->intf;
1571         struct cmd_rcvr *rcvr;
1572         int rv = 0, index;
1573
1574         user = acquire_ipmi_user(user, &index);
1575         if (!user)
1576                 return -ENODEV;
1577
1578         rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1579         if (!rcvr) {
1580                 rv = -ENOMEM;
1581                 goto out_release;
1582         }
1583         rcvr->cmd = cmd;
1584         rcvr->netfn = netfn;
1585         rcvr->chans = chans;
1586         rcvr->user = user;
1587
1588         mutex_lock(&intf->cmd_rcvrs_mutex);
1589         /* Make sure the command/netfn is not already registered. */
1590         if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1591                 rv = -EBUSY;
1592                 goto out_unlock;
1593         }
1594
1595         if (atomic_inc_return(&intf->event_waiters) == 1)
1596                 need_waiter(intf);
1597
1598         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1599
1600 out_unlock:
1601         mutex_unlock(&intf->cmd_rcvrs_mutex);
1602         if (rv)
1603                 kfree(rcvr);
1604 out_release:
1605         release_ipmi_user(user, index);
1606
1607         return rv;
1608 }
1609 EXPORT_SYMBOL(ipmi_register_for_cmd);
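/*
 * Illustrative usage sketch (not part of the driver): the netfn/cmd pair
 * below is only an example, and the last argument is a bitmask of the
 * channels to listen on (bit 0 selects channel 0):
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
 *				   IPMI_GET_DEVICE_ID_CMD, 1 << 0);
 *
 * A return value of -EBUSY means another user already owns that command
 * on one of the requested channels.  The registration can later be
 * dropped with ipmi_unregister_for_cmd() using the same netfn/cmd and a
 * mask of the channels to release.
 */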
1610
1611 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1612                             unsigned char netfn,
1613                             unsigned char cmd,
1614                             unsigned int  chans)
1615 {
1616         struct ipmi_smi *intf = user->intf;
1617         struct cmd_rcvr *rcvr;
1618         struct cmd_rcvr *rcvrs = NULL;
1619         int i, rv = -ENOENT, index;
1620
1621         user = acquire_ipmi_user(user, &index);
1622         if (!user)
1623                 return -ENODEV;
1624
1625         mutex_lock(&intf->cmd_rcvrs_mutex);
1626         for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1627                 if (((1 << i) & chans) == 0)
1628                         continue;
1629                 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1630                 if (rcvr == NULL)
1631                         continue;
1632                 if (rcvr->user == user) {
1633                         rv = 0;
1634                         rcvr->chans &= ~chans;
1635                         if (rcvr->chans == 0) {
1636                                 list_del_rcu(&rcvr->link);
1637                                 rcvr->next = rcvrs;
1638                                 rcvrs = rcvr;
1639                         }
1640                 }
1641         }
1642         mutex_unlock(&intf->cmd_rcvrs_mutex);
1643         synchronize_rcu();
1644         release_ipmi_user(user, index);
1645         while (rcvrs) {
1646                 atomic_dec(&intf->event_waiters);
1647                 rcvr = rcvrs;
1648                 rcvrs = rcvr->next;
1649                 kfree(rcvr);
1650         }
1651
1652         return rv;
1653 }
1654 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1655
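/*
 * IPMB checksums are the two's complement of the 8-bit sum of the bytes
 * they cover, so that adding the covered bytes and the checksum gives 0
 * modulo 256.  Worked example (arbitrary values): for bytes 0x20 and
 * 0x18 the sum is 0x38, the checksum is -0x38 = 0xc8, and
 * 0x20 + 0x18 + 0xc8 = 0x100 = 0 (mod 256).
 */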
1656 static unsigned char
1657 ipmb_checksum(unsigned char *data, int size)
1658 {
1659         unsigned char csum = 0;
1660
1661         for (; size > 0; size--, data++)
1662                 csum += *data;
1663
1664         return -csum;
1665 }
1666
1667 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1668                                    struct kernel_ipmi_msg *msg,
1669                                    struct ipmi_ipmb_addr *ipmb_addr,
1670                                    long                  msgid,
1671                                    unsigned char         ipmb_seq,
1672                                    int                   broadcast,
1673                                    unsigned char         source_address,
1674                                    unsigned char         source_lun)
1675 {
1676         int i = broadcast;
1677
1678         /* Format the IPMB header data. */
1679         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1680         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1681         smi_msg->data[2] = ipmb_addr->channel;
1682         if (broadcast)
1683                 smi_msg->data[3] = 0;
1684         smi_msg->data[i+3] = ipmb_addr->slave_addr;
1685         smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1686         smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1687         smi_msg->data[i+6] = source_address;
1688         smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1689         smi_msg->data[i+8] = msg->cmd;
1690
1691         /* Now tack on the data to the message. */
1692         if (msg->data_len > 0)
1693                 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1694         smi_msg->data_size = msg->data_len + 9;
1695
1696         /* Now calculate the checksum and tack it on. */
1697         smi_msg->data[i+smi_msg->data_size]
1698                 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1699
1700         /*
1701          * Add on the checksum size and the offset from the
1702          * broadcast.
1703          */
1704         smi_msg->data_size += 1 + i;
1705
1706         smi_msg->msgid = msgid;
1707 }
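/*
 * For reference, the Send Message buffer built above is laid out as
 * follows ("i" is 1 for a broadcast, 0 otherwise):
 *
 *	data[0]      NetFn (App request) << 2
 *	data[1]      IPMI_SEND_MSG_CMD
 *	data[2]      channel
 *	data[3]      zero, present only for a broadcast
 *	data[i+3]    responder slave address
 *	data[i+4]    NetFn << 2 | responder LUN
 *	data[i+5]    checksum over data[i+3]..data[i+4]
 *	data[i+6]    requester (source) address
 *	data[i+7]    sequence << 2 | source LUN
 *	data[i+8]    cmd
 *	data[i+9]..  message data, followed by a checksum covering
 *	             everything from data[i+6] on
 */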
1708
1709 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1710                                   struct kernel_ipmi_msg *msg,
1711                                   struct ipmi_lan_addr  *lan_addr,
1712                                   long                  msgid,
1713                                   unsigned char         ipmb_seq,
1714                                   unsigned char         source_lun)
1715 {
1716         /* Format the LAN header data. */
1717         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1718         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1719         smi_msg->data[2] = lan_addr->channel;
1720         smi_msg->data[3] = lan_addr->session_handle;
1721         smi_msg->data[4] = lan_addr->remote_SWID;
1722         smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1723         smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1724         smi_msg->data[7] = lan_addr->local_SWID;
1725         smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1726         smi_msg->data[9] = msg->cmd;
1727
1728         /* Now tack on the data to the message. */
1729         if (msg->data_len > 0)
1730                 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1731         smi_msg->data_size = msg->data_len + 10;
1732
1733         /* Now calculate the checksum and tack it on. */
1734         smi_msg->data[smi_msg->data_size]
1735                 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1736
1737         /*
1738          * Add on the checksum size (there is no broadcast offset
1739          * for LAN messages).
1740          */
1741         smi_msg->data_size += 1;
1742
1743         smi_msg->msgid = msgid;
1744 }
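/*
 * The LAN variant above is analogous: data[0..1] carry the Send Message
 * wrapper, data[2] the channel, data[3] the session handle, data[4..6]
 * the remote SWID, netfn+LUN and header checksum, data[7..9] the local
 * SWID, sequence+LUN and command, then the payload and a final checksum
 * covering data[7] onward.
 */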
1745
1746 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1747                                              struct ipmi_smi_msg *smi_msg,
1748                                              int priority)
1749 {
1750         if (intf->curr_msg) {
1751                 if (priority > 0)
1752                         list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1753                 else
1754                         list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1755                 smi_msg = NULL;
1756         } else {
1757                 intf->curr_msg = smi_msg;
1758         }
1759
1760         return smi_msg;
1761 }
1762
1763
1764 static void smi_send(struct ipmi_smi *intf,
1765                      const struct ipmi_smi_handlers *handlers,
1766                      struct ipmi_smi_msg *smi_msg, int priority)
1767 {
1768         int run_to_completion = intf->run_to_completion;
1769
1770         if (run_to_completion) {
1771                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1772         } else {
1773                 unsigned long flags;
1774
1775                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1776                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1777                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1778         }
1779
1780         if (smi_msg)
1781                 handlers->sender(intf->send_info, smi_msg);
1782 }
1783
1784 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1785 {
1786         return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1787                  && ((msg->cmd == IPMI_COLD_RESET_CMD)
1788                      || (msg->cmd == IPMI_WARM_RESET_CMD)))
1789                 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1790 }
1791
1792 static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1793                               struct ipmi_addr       *addr,
1794                               long                   msgid,
1795                               struct kernel_ipmi_msg *msg,
1796                               struct ipmi_smi_msg    *smi_msg,
1797                               struct ipmi_recv_msg   *recv_msg,
1798                               int                    retries,
1799                               unsigned int           retry_time_ms)
1800 {
1801         struct ipmi_system_interface_addr *smi_addr;
1802
1803         if (msg->netfn & 1)
1804                 /* Responses are not allowed to the SMI. */
1805                 return -EINVAL;
1806
1807         smi_addr = (struct ipmi_system_interface_addr *) addr;
1808         if (smi_addr->lun > 3) {
1809                 ipmi_inc_stat(intf, sent_invalid_commands);
1810                 return -EINVAL;
1811         }
1812
1813         memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1814
1815         if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1816             && ((msg->cmd == IPMI_SEND_MSG_CMD)
1817                 || (msg->cmd == IPMI_GET_MSG_CMD)
1818                 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1819                 /*
1820                  * We don't let the user do these, since we manage
1821                  * the sequence numbers.
1822                  */
1823                 ipmi_inc_stat(intf, sent_invalid_commands);
1824                 return -EINVAL;
1825         }
1826
1827         if (is_maintenance_mode_cmd(msg)) {
1828                 unsigned long flags;
1829
1830                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1831                 intf->auto_maintenance_timeout
1832                         = maintenance_mode_timeout_ms;
1833                 if (!intf->maintenance_mode
1834                     && !intf->maintenance_mode_enable) {
1835                         intf->maintenance_mode_enable = true;
1836                         maintenance_mode_update(intf);
1837                 }
1838                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1839                                        flags);
1840         }
1841
1842         if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1843                 ipmi_inc_stat(intf, sent_invalid_commands);
1844                 return -EMSGSIZE;
1845         }
1846
1847         smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1848         smi_msg->data[1] = msg->cmd;
1849         smi_msg->msgid = msgid;
1850         smi_msg->user_data = recv_msg;
1851         if (msg->data_len > 0)
1852                 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1853         smi_msg->data_size = msg->data_len + 2;
1854         ipmi_inc_stat(intf, sent_local_commands);
1855
1856         return 0;
1857 }
1858
1859 static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1860                            struct ipmi_addr       *addr,
1861                            long                   msgid,
1862                            struct kernel_ipmi_msg *msg,
1863                            struct ipmi_smi_msg    *smi_msg,
1864                            struct ipmi_recv_msg   *recv_msg,
1865                            unsigned char          source_address,
1866                            unsigned char          source_lun,
1867                            int                    retries,
1868                            unsigned int           retry_time_ms)
1869 {
1870         struct ipmi_ipmb_addr *ipmb_addr;
1871         unsigned char ipmb_seq;
1872         long seqid;
1873         int broadcast = 0;
1874         struct ipmi_channel *chans;
1875         int rv = 0;
1876
1877         if (addr->channel >= IPMI_MAX_CHANNELS) {
1878                 ipmi_inc_stat(intf, sent_invalid_commands);
1879                 return -EINVAL;
1880         }
1881
1882         chans = READ_ONCE(intf->channel_list)->c;
1883
1884         if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1885                 ipmi_inc_stat(intf, sent_invalid_commands);
1886                 return -EINVAL;
1887         }
1888
1889         if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1890                 /*
1891                  * A broadcast adds a zero at the beginning of the
1892                  * message, but is otherwise the same as an IPMB
1893                  * address.
1894                  */
1895                 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1896                 broadcast = 1;
1897                 retries = 0; /* Don't retry broadcasts. */
1898         }
1899
1900         /*
1901          * 9 for the header and 1 for the checksum, plus
1902          * possibly one for the broadcast.
1903          */
1904         if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1905                 ipmi_inc_stat(intf, sent_invalid_commands);
1906                 return -EMSGSIZE;
1907         }
1908
1909         ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1910         if (ipmb_addr->lun > 3) {
1911                 ipmi_inc_stat(intf, sent_invalid_commands);
1912                 return -EINVAL;
1913         }
1914
1915         memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1916
1917         if (recv_msg->msg.netfn & 0x1) {
1918                 /*
1919                  * It's a response, so use the user's sequence
1920                  * from msgid.
1921                  */
1922                 ipmi_inc_stat(intf, sent_ipmb_responses);
1923                 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1924                                 msgid, broadcast,
1925                                 source_address, source_lun);
1926
1927                 /*
1928                  * Save the receive message so we can use it
1929                  * to deliver the response.
1930                  */
1931                 smi_msg->user_data = recv_msg;
1932         } else {
1933                 /* It's a command, so get a sequence for it. */
1934                 unsigned long flags;
1935
1936                 spin_lock_irqsave(&intf->seq_lock, flags);
1937
1938                 if (is_maintenance_mode_cmd(msg))
1939                         intf->ipmb_maintenance_mode_timeout =
1940                                 maintenance_mode_timeout_ms;
1941
1942                 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1943                         /* Different default in maintenance mode */
1944                         retry_time_ms = default_maintenance_retry_ms;
1945
1946                 /*
1947                  * Get a sequence number for the command, using the
1948                  * supplied retry time and retry count (or the defaults).
1949                  */
1950                 rv = intf_next_seq(intf,
1951                                    recv_msg,
1952                                    retry_time_ms,
1953                                    retries,
1954                                    broadcast,
1955                                    &ipmb_seq,
1956                                    &seqid);
1957                 if (rv)
1958                         /*
1959                          * We have probably used up all the sequence
1960                          * numbers, so abort.
1961                          */
1962                         goto out_err;
1963
1964                 ipmi_inc_stat(intf, sent_ipmb_commands);
1965
1966                 /*
1967                  * Store the sequence number in the message,
1968                  * so that when the send message response
1969                  * comes back we can start the timer.
1970                  */
1971                 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1972                                 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1973                                 ipmb_seq, broadcast,
1974                                 source_address, source_lun);
1975
1976                 /*
1977                  * Copy the message into the recv message data, so we
1978                  * can retransmit it later if necessary.
1979                  */
1980                 memcpy(recv_msg->msg_data, smi_msg->data,
1981                        smi_msg->data_size);
1982                 recv_msg->msg.data = recv_msg->msg_data;
1983                 recv_msg->msg.data_len = smi_msg->data_size;
1984
1985                 /*
1986                  * We don't unlock until here, because we need
1987                  * to copy the completed message into the
1988                  * recv_msg before we release the lock.
1989                  * Otherwise, race conditions may bite us.  I
1990                  * know that's pretty paranoid, but I prefer
1991                  * to be correct.
1992                  */
1993 out_err:
1994                 spin_unlock_irqrestore(&intf->seq_lock, flags);
1995         }
1996
1997         return rv;
1998 }
1999
2000 static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2001                           struct ipmi_addr       *addr,
2002                           long                   msgid,
2003                           struct kernel_ipmi_msg *msg,
2004                           struct ipmi_smi_msg    *smi_msg,
2005                           struct ipmi_recv_msg   *recv_msg,
2006                           unsigned char          source_lun,
2007                           int                    retries,
2008                           unsigned int           retry_time_ms)
2009 {
2010         struct ipmi_lan_addr  *lan_addr;
2011         unsigned char ipmb_seq;
2012         long seqid;
2013         struct ipmi_channel *chans;
2014         int rv = 0;
2015
2016         if (addr->channel >= IPMI_MAX_CHANNELS) {
2017                 ipmi_inc_stat(intf, sent_invalid_commands);
2018                 return -EINVAL;
2019         }
2020
2021         chans = READ_ONCE(intf->channel_list)->c;
2022
2023         if ((chans[addr->channel].medium
2024                                 != IPMI_CHANNEL_MEDIUM_8023LAN)
2025                         && (chans[addr->channel].medium
2026                             != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2027                 ipmi_inc_stat(intf, sent_invalid_commands);
2028                 return -EINVAL;
2029         }
2030
2031         /* 11 for the header and 1 for the checksum. */
2032         if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2033                 ipmi_inc_stat(intf, sent_invalid_commands);
2034                 return -EMSGSIZE;
2035         }
2036
2037         lan_addr = (struct ipmi_lan_addr *) addr;
2038         if (lan_addr->lun > 3) {
2039                 ipmi_inc_stat(intf, sent_invalid_commands);
2040                 return -EINVAL;
2041         }
2042
2043         memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2044
2045         if (recv_msg->msg.netfn & 0x1) {
2046                 /*
2047                  * It's a response, so use the user's sequence
2048                  * from msgid.
2049                  */
2050                 ipmi_inc_stat(intf, sent_lan_responses);
2051                 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2052                                msgid, source_lun);
2053
2054                 /*
2055                  * Save the receive message so we can use it
2056                  * to deliver the response.
2057                  */
2058                 smi_msg->user_data = recv_msg;
2059         } else {
2060                 /* It's a command, so get a sequence for it. */
2061                 unsigned long flags;
2062
2063                 spin_lock_irqsave(&intf->seq_lock, flags);
2064
2065                 /*
2066                  * Get a sequence number for the command, using the
2067                  * supplied retry time and retry count (or the defaults).
2068                  */
2069                 rv = intf_next_seq(intf,
2070                                    recv_msg,
2071                                    retry_time_ms,
2072                                    retries,
2073                                    0,
2074                                    &ipmb_seq,
2075                                    &seqid);
2076                 if (rv)
2077                         /*
2078                          * We have probably used up all the sequence
2079                          * numbers, so abort.
2080                          */
2081                         goto out_err;
2082
2083                 ipmi_inc_stat(intf, sent_lan_commands);
2084
2085                 /*
2086                  * Store the sequence number in the message,
2087                  * so that when the send message response
2088                  * comes back we can start the timer.
2089                  */
2090                 format_lan_msg(smi_msg, msg, lan_addr,
2091                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2092                                ipmb_seq, source_lun);
2093
2094                 /*
2095                  * Copy the message into the recv message data, so we
2096                  * can retransmit it later if necessary.
2097                  */
2098                 memcpy(recv_msg->msg_data, smi_msg->data,
2099                        smi_msg->data_size);
2100                 recv_msg->msg.data = recv_msg->msg_data;
2101                 recv_msg->msg.data_len = smi_msg->data_size;
2102
2103                 /*
2104                  * We don't unlock until here, because we need
2105                  * to copy the completed message into the
2106                  * recv_msg before we release the lock.
2107                  * Otherwise, race conditions may bite us.  I
2108                  * know that's pretty paranoid, but I prefer
2109                  * to be correct.
2110                  */
2111 out_err:
2112                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2113         }
2114
2115         return rv;
2116 }
2117
2118 /*
2119  * Separate from ipmi_request so that the user does not have to be
2120  * supplied in certain circumstances (mainly at panic time).  If
2121  * messages are supplied, they will be freed, even if an error
2122  * occurs.
2123  */
2124 static int i_ipmi_request(struct ipmi_user     *user,
2125                           struct ipmi_smi      *intf,
2126                           struct ipmi_addr     *addr,
2127                           long                 msgid,
2128                           struct kernel_ipmi_msg *msg,
2129                           void                 *user_msg_data,
2130                           void                 *supplied_smi,
2131                           struct ipmi_recv_msg *supplied_recv,
2132                           int                  priority,
2133                           unsigned char        source_address,
2134                           unsigned char        source_lun,
2135                           int                  retries,
2136                           unsigned int         retry_time_ms)
2137 {
2138         struct ipmi_smi_msg *smi_msg;
2139         struct ipmi_recv_msg *recv_msg;
2140         int rv = 0;
2141
2142         if (supplied_recv)
2143                 recv_msg = supplied_recv;
2144         else {
2145                 recv_msg = ipmi_alloc_recv_msg();
2146                 if (recv_msg == NULL) {
2147                         rv = -ENOMEM;
2148                         goto out;
2149                 }
2150         }
2151         recv_msg->user_msg_data = user_msg_data;
2152
2153         if (supplied_smi)
2154                 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2155         else {
2156                 smi_msg = ipmi_alloc_smi_msg();
2157                 if (smi_msg == NULL) {
2158                         ipmi_free_recv_msg(recv_msg);
2159                         rv = -ENOMEM;
2160                         goto out;
2161                 }
2162         }
2163
2164         rcu_read_lock();
2165         if (intf->in_shutdown) {
2166                 rv = -ENODEV;
2167                 goto out_err;
2168         }
2169
2170         recv_msg->user = user;
2171         if (user)
2172                 /* The put happens when the message is freed. */
2173                 kref_get(&user->refcount);
2174         recv_msg->msgid = msgid;
2175         /*
2176          * Store the message to send in the receive message so timeout
2177          * responses can get the proper response data.
2178          */
2179         recv_msg->msg = *msg;
2180
2181         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2182                 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2183                                         recv_msg, retries, retry_time_ms);
2184         } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2185                 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2186                                      source_address, source_lun,
2187                                      retries, retry_time_ms);
2188         } else if (is_lan_addr(addr)) {
2189                 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2190                                     source_lun, retries, retry_time_ms);
2191         } else {
2192                 /* Unknown address type. */
2193                 ipmi_inc_stat(intf, sent_invalid_commands);
2194                 rv = -EINVAL;
2195         }
2196
2197         if (rv) {
2198 out_err:
2199                 ipmi_free_smi_msg(smi_msg);
2200                 ipmi_free_recv_msg(recv_msg);
2201         } else {
2202                 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2203
2204                 smi_send(intf, intf->handlers, smi_msg, priority);
2205         }
2206         rcu_read_unlock();
2207
2208 out:
2209         return rv;
2210 }
2211
2212 static int check_addr(struct ipmi_smi  *intf,
2213                       struct ipmi_addr *addr,
2214                       unsigned char    *saddr,
2215                       unsigned char    *lun)
2216 {
2217         if (addr->channel >= IPMI_MAX_CHANNELS)
2218                 return -EINVAL;
2219         addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2220         *lun = intf->addrinfo[addr->channel].lun;
2221         *saddr = intf->addrinfo[addr->channel].address;
2222         return 0;
2223 }
2224
2225 int ipmi_request_settime(struct ipmi_user *user,
2226                          struct ipmi_addr *addr,
2227                          long             msgid,
2228                          struct kernel_ipmi_msg  *msg,
2229                          void             *user_msg_data,
2230                          int              priority,
2231                          int              retries,
2232                          unsigned int     retry_time_ms)
2233 {
2234         unsigned char saddr = 0, lun = 0;
2235         int rv, index;
2236
2237         if (!user)
2238                 return -EINVAL;
2239
2240         user = acquire_ipmi_user(user, &index);
2241         if (!user)
2242                 return -ENODEV;
2243
2244         rv = check_addr(user->intf, addr, &saddr, &lun);
2245         if (!rv)
2246                 rv = i_ipmi_request(user,
2247                                     user->intf,
2248                                     addr,
2249                                     msgid,
2250                                     msg,
2251                                     user_msg_data,
2252                                     NULL, NULL,
2253                                     priority,
2254                                     saddr,
2255                                     lun,
2256                                     retries,
2257                                     retry_time_ms);
2258
2259         release_ipmi_user(user, index);
2260         return rv;
2261 }
2262 EXPORT_SYMBOL(ipmi_request_settime);
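/*
 * Illustrative usage sketch (not part of the driver): sending a Get
 * Device ID request to the local BMC could look like the following.
 * The msgid (0 here) is an opaque cookie echoed back in the response,
 * "cookie" stands for any caller-private pointer, and retries = -1 /
 * retry_time_ms = 0 select the defaults (they only matter for IPMB and
 * LAN addresses):
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn    = IPMI_NETFN_APP_REQUEST,
 *		.cmd      = IPMI_GET_DEVICE_ID_CMD,
 *		.data     = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0, &msg,
 *				  cookie, 0, -1, 0);
 *
 * The response is delivered asynchronously through the user's receive
 * handler; see send_get_device_id_cmd() below for the in-driver
 * equivalent built on i_ipmi_request().
 */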
2263
2264 int ipmi_request_supply_msgs(struct ipmi_user     *user,
2265                              struct ipmi_addr     *addr,
2266                              long                 msgid,
2267                              struct kernel_ipmi_msg *msg,
2268                              void                 *user_msg_data,
2269                              void                 *supplied_smi,
2270                              struct ipmi_recv_msg *supplied_recv,
2271                              int                  priority)
2272 {
2273         unsigned char saddr = 0, lun = 0;
2274         int rv, index;
2275
2276         if (!user)
2277                 return -EINVAL;
2278
2279         user = acquire_ipmi_user(user, &index);
2280         if (!user)
2281                 return -ENODEV;
2282
2283         rv = check_addr(user->intf, addr, &saddr, &lun);
2284         if (!rv)
2285                 rv = i_ipmi_request(user,
2286                                     user->intf,
2287                                     addr,
2288                                     msgid,
2289                                     msg,
2290                                     user_msg_data,
2291                                     supplied_smi,
2292                                     supplied_recv,
2293                                     priority,
2294                                     saddr,
2295                                     lun,
2296                                     -1, 0);
2297
2298         release_ipmi_user(user, index);
2299         return rv;
2300 }
2301 EXPORT_SYMBOL(ipmi_request_supply_msgs);
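/*
 * Note: this variant exists for callers that cannot allocate at send
 * time, such as the panic-event code in this file; they hand in
 * preallocated smi/recv message structures instead.  The retry
 * parameters are fixed to the defaults (-1, 0) here.
 */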
2302
2303 static void bmc_device_id_handler(struct ipmi_smi *intf,
2304                                   struct ipmi_recv_msg *msg)
2305 {
2306         int rv;
2307
2308         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2309                         || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2310                         || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2311                 dev_warn(intf->si_dev,
2312                          PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2313                         msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2314                 return;
2315         }
2316
2317         rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2318                         msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2319         if (rv) {
2320                 dev_warn(intf->si_dev,
2321                          PFX "device id demangle failed: %d\n", rv);
2322                 intf->bmc->dyn_id_set = 0;
2323         } else {
2324                 /*
2325                  * Make sure the id data is available before setting
2326                  * dyn_id_set.
2327                  */
2328                 smp_wmb();
2329                 intf->bmc->dyn_id_set = 1;
2330         }
2331
2332         wake_up(&intf->waitq);
2333 }
2334
2335 static int
2336 send_get_device_id_cmd(struct ipmi_smi *intf)
2337 {
2338         struct ipmi_system_interface_addr si;
2339         struct kernel_ipmi_msg msg;
2340
2341         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2342         si.channel = IPMI_BMC_CHANNEL;
2343         si.lun = 0;
2344
2345         msg.netfn = IPMI_NETFN_APP_REQUEST;
2346         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2347         msg.data = NULL;
2348         msg.data_len = 0;
2349
2350         return i_ipmi_request(NULL,
2351                               intf,
2352                               (struct ipmi_addr *) &si,
2353                               0,
2354                               &msg,
2355                               intf,
2356                               NULL,
2357                               NULL,
2358                               0,
2359                               intf->addrinfo[0].address,
2360                               intf->addrinfo[0].lun,
2361                               -1, 0);
2362 }
2363
2364 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2365 {
2366         int rv;
2367
2368         bmc->dyn_id_set = 2;
2369
2370         intf->null_user_handler = bmc_device_id_handler;
2371
2372         rv = send_get_device_id_cmd(intf);
2373         if (rv)
2374                 return rv;
2375
2376         wait_event(intf->waitq, bmc->dyn_id_set != 2);
2377
2378         if (!bmc->dyn_id_set)
2379                 rv = -EIO; /* Something went wrong in the fetch. */
2380
2381         /* dyn_id_set makes the id data available. */
2382         smp_rmb();
2383
2384         intf->null_user_handler = NULL;
2385
2386         return rv;
2387 }
2388
2389 /*
2390  * Fetch the device id for the bmc/interface.  You must pass in either
2391  * bmc or intf; this code will get the other one.  If the data has
2392  * been recently fetched, this will just use the cached data.  Otherwise
2393  * it will run a new fetch.
2394  *
2395  * Except for the first time this is called (in ipmi_add_smi()),
2396  * this will always return good data.
2397  */
2398 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2399                                struct ipmi_device_id *id,
2400                                bool *guid_set, guid_t *guid, int intf_num)
2401 {
2402         int rv = 0;
2403         int prev_dyn_id_set, prev_guid_set;
2404         bool intf_set = intf != NULL;
2405
2406         if (!intf) {
2407                 mutex_lock(&bmc->dyn_mutex);
2408 retry_bmc_lock:
2409                 if (list_empty(&bmc->intfs)) {
2410                         mutex_unlock(&bmc->dyn_mutex);
2411                         return -ENOENT;
2412                 }
2413                 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2414                                         bmc_link);
2415                 kref_get(&intf->refcount);
2416                 mutex_unlock(&bmc->dyn_mutex);
2417                 mutex_lock(&intf->bmc_reg_mutex);
2418                 mutex_lock(&bmc->dyn_mutex);
2419                 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2420                                              bmc_link)) {
2421                         mutex_unlock(&intf->bmc_reg_mutex);
2422                         kref_put(&intf->refcount, intf_free);
2423                         goto retry_bmc_lock;
2424                 }
2425         } else {
2426                 mutex_lock(&intf->bmc_reg_mutex);
2427                 bmc = intf->bmc;
2428                 mutex_lock(&bmc->dyn_mutex);
2429                 kref_get(&intf->refcount);
2430         }
2431
2432         /* If we have a valid and current ID, just return that. */
2433         if (intf->in_bmc_register ||
2434             (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2435                 goto out_noprocessing;
2436
2437         prev_guid_set = bmc->dyn_guid_set;
2438         __get_guid(intf);
2439
2440         prev_dyn_id_set = bmc->dyn_id_set;
2441         rv = __get_device_id(intf, bmc);
2442         if (rv)
2443                 goto out;
2444
2445         /*
2446          * The GUID, device id, manufacturer id, and product id should
2447          * not change on a BMC.  If any do, we have to do some dancing.
2448          */
2449         if (!intf->bmc_registered
2450             || (!prev_guid_set && bmc->dyn_guid_set)
2451             || (!prev_dyn_id_set && bmc->dyn_id_set)
2452             || (prev_guid_set && bmc->dyn_guid_set
2453                 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2454             || bmc->id.device_id != bmc->fetch_id.device_id
2455             || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2456             || bmc->id.product_id != bmc->fetch_id.product_id) {
2457                 struct ipmi_device_id id = bmc->fetch_id;
2458                 int guid_set = bmc->dyn_guid_set;
2459                 guid_t guid;
2460
2461                 guid = bmc->fetch_guid;
2462                 mutex_unlock(&bmc->dyn_mutex);
2463
2464                 __ipmi_bmc_unregister(intf);
2465                 /* Fill in the temporary BMC for good measure. */
2466                 intf->bmc->id = id;
2467                 intf->bmc->dyn_guid_set = guid_set;
2468                 intf->bmc->guid = guid;
2469                 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2470                         need_waiter(intf); /* Retry later on an error. */
2471                 else
2472                         __scan_channels(intf, &id);
2473
2474
2475                 if (!intf_set) {
2476                         /*
2477                          * We weren't given the interface on the
2478                          * command line, so restart the operation on
2479                          * the next interface for the BMC.
2480                          */
2481                         mutex_unlock(&intf->bmc_reg_mutex);
2482                         mutex_lock(&bmc->dyn_mutex);
2483                         goto retry_bmc_lock;
2484                 }
2485
2486                 /* We have a new BMC, set it up. */
2487                 bmc = intf->bmc;
2488                 mutex_lock(&bmc->dyn_mutex);
2489                 goto out_noprocessing;
2490         } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2491                 /* Version info changed, so scan the channels again. */
2492                 __scan_channels(intf, &bmc->fetch_id);
2493
2494         bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2495
2496 out:
2497         if (rv && prev_dyn_id_set) {
2498                 rv = 0; /* Ignore failures if we have previous data. */
2499                 bmc->dyn_id_set = prev_dyn_id_set;
2500         }
2501         if (!rv) {
2502                 bmc->id = bmc->fetch_id;
2503                 if (bmc->dyn_guid_set)
2504                         bmc->guid = bmc->fetch_guid;
2505                 else if (prev_guid_set)
2506                         /*
2507                          * The GUID used to be valid but the fetch failed,
2508                          * so just use the cached value.
2509                          */
2510                         bmc->dyn_guid_set = prev_guid_set;
2511         }
2512 out_noprocessing:
2513         if (!rv) {
2514                 if (id)
2515                         *id = bmc->id;
2516
2517                 if (guid_set)
2518                         *guid_set = bmc->dyn_guid_set;
2519
2520                 if (guid && bmc->dyn_guid_set)
2521                         *guid =  bmc->guid;
2522         }
2523
2524         mutex_unlock(&bmc->dyn_mutex);
2525         mutex_unlock(&intf->bmc_reg_mutex);
2526
2527         kref_put(&intf->refcount, intf_free);
2528         return rv;
2529 }
2530
2531 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2532                              struct ipmi_device_id *id,
2533                              bool *guid_set, guid_t *guid)
2534 {
2535         return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2536 }
2537
2538 static ssize_t device_id_show(struct device *dev,
2539                               struct device_attribute *attr,
2540                               char *buf)
2541 {
2542         struct bmc_device *bmc = to_bmc_device(dev);
2543         struct ipmi_device_id id;
2544         int rv;
2545
2546         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2547         if (rv)
2548                 return rv;
2549
2550         return snprintf(buf, 10, "%u\n", id.device_id);
2551 }
2552 static DEVICE_ATTR_RO(device_id);
2553
2554 static ssize_t provides_device_sdrs_show(struct device *dev,
2555                                          struct device_attribute *attr,
2556                                          char *buf)
2557 {
2558         struct bmc_device *bmc = to_bmc_device(dev);
2559         struct ipmi_device_id id;
2560         int rv;
2561
2562         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2563         if (rv)
2564                 return rv;
2565
2566         return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2567 }
2568 static DEVICE_ATTR_RO(provides_device_sdrs);
2569
2570 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2571                              char *buf)
2572 {
2573         struct bmc_device *bmc = to_bmc_device(dev);
2574         struct ipmi_device_id id;
2575         int rv;
2576
2577         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2578         if (rv)
2579                 return rv;
2580
2581         return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2582 }
2583 static DEVICE_ATTR_RO(revision);
2584
2585 static ssize_t firmware_revision_show(struct device *dev,
2586                                       struct device_attribute *attr,
2587                                       char *buf)
2588 {
2589         struct bmc_device *bmc = to_bmc_device(dev);
2590         struct ipmi_device_id id;
2591         int rv;
2592
2593         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2594         if (rv)
2595                 return rv;
2596
2597         return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2598                         id.firmware_revision_2);
2599 }
2600 static DEVICE_ATTR_RO(firmware_revision);
2601
2602 static ssize_t ipmi_version_show(struct device *dev,
2603                                  struct device_attribute *attr,
2604                                  char *buf)
2605 {
2606         struct bmc_device *bmc = to_bmc_device(dev);
2607         struct ipmi_device_id id;
2608         int rv;
2609
2610         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2611         if (rv)
2612                 return rv;
2613
2614         return snprintf(buf, 20, "%u.%u\n",
2615                         ipmi_version_major(&id),
2616                         ipmi_version_minor(&id));
2617 }
2618 static DEVICE_ATTR_RO(ipmi_version);
2619
2620 static ssize_t add_dev_support_show(struct device *dev,
2621                                     struct device_attribute *attr,
2622                                     char *buf)
2623 {
2624         struct bmc_device *bmc = to_bmc_device(dev);
2625         struct ipmi_device_id id;
2626         int rv;
2627
2628         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2629         if (rv)
2630                 return rv;
2631
2632         return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2633 }
2634 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2635                    NULL);
2636
2637 static ssize_t manufacturer_id_show(struct device *dev,
2638                                     struct device_attribute *attr,
2639                                     char *buf)
2640 {
2641         struct bmc_device *bmc = to_bmc_device(dev);
2642         struct ipmi_device_id id;
2643         int rv;
2644
2645         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2646         if (rv)
2647                 return rv;
2648
2649         return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2650 }
2651 static DEVICE_ATTR_RO(manufacturer_id);
2652
2653 static ssize_t product_id_show(struct device *dev,
2654                                struct device_attribute *attr,
2655                                char *buf)
2656 {
2657         struct bmc_device *bmc = to_bmc_device(dev);
2658         struct ipmi_device_id id;
2659         int rv;
2660
2661         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2662         if (rv)
2663                 return rv;
2664
2665         return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2666 }
2667 static DEVICE_ATTR_RO(product_id);
2668
2669 static ssize_t aux_firmware_rev_show(struct device *dev,
2670                                      struct device_attribute *attr,
2671                                      char *buf)
2672 {
2673         struct bmc_device *bmc = to_bmc_device(dev);
2674         struct ipmi_device_id id;
2675         int rv;
2676
2677         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2678         if (rv)
2679                 return rv;
2680
2681         return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2682                         id.aux_firmware_revision[3],
2683                         id.aux_firmware_revision[2],
2684                         id.aux_firmware_revision[1],
2685                         id.aux_firmware_revision[0]);
2686 }
2687 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2688
2689 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2690                          char *buf)
2691 {
2692         struct bmc_device *bmc = to_bmc_device(dev);
2693         bool guid_set;
2694         guid_t guid;
2695         int rv;
2696
2697         rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2698         if (rv)
2699                 return rv;
2700         if (!guid_set)
2701                 return -ENOENT;
2702
2703         return snprintf(buf, 38, "%pUl\n", guid.b);
2704 }
2705 static DEVICE_ATTR_RO(guid);
2706
2707 static struct attribute *bmc_dev_attrs[] = {
2708         &dev_attr_device_id.attr,
2709         &dev_attr_provides_device_sdrs.attr,
2710         &dev_attr_revision.attr,
2711         &dev_attr_firmware_revision.attr,
2712         &dev_attr_ipmi_version.attr,
2713         &dev_attr_additional_device_support.attr,
2714         &dev_attr_manufacturer_id.attr,
2715         &dev_attr_product_id.attr,
2716         &dev_attr_aux_firmware_revision.attr,
2717         &dev_attr_guid.attr,
2718         NULL
2719 };
2720
2721 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2722                                        struct attribute *attr, int idx)
2723 {
2724         struct device *dev = kobj_to_dev(kobj);
2725         struct bmc_device *bmc = to_bmc_device(dev);
2726         umode_t mode = attr->mode;
2727         int rv;
2728
2729         if (attr == &dev_attr_aux_firmware_revision.attr) {
2730                 struct ipmi_device_id id;
2731
2732                 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2733                 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2734         }
2735         if (attr == &dev_attr_guid.attr) {
2736                 bool guid_set;
2737
2738                 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2739                 return (!rv && guid_set) ? mode : 0;
2740         }
2741         return mode;
2742 }
2743
2744 static const struct attribute_group bmc_dev_attr_group = {
2745         .attrs          = bmc_dev_attrs,
2746         .is_visible     = bmc_dev_attr_is_visible,
2747 };
2748
2749 static const struct attribute_group *bmc_dev_attr_groups[] = {
2750         &bmc_dev_attr_group,
2751         NULL
2752 };
2753
2754 static const struct device_type bmc_device_type = {
2755         .groups         = bmc_dev_attr_groups,
2756 };
2757
2758 static int __find_bmc_guid(struct device *dev, void *data)
2759 {
2760         guid_t *guid = data;
2761         struct bmc_device *bmc;
2762         int rv;
2763
2764         if (dev->type != &bmc_device_type)
2765                 return 0;
2766
2767         bmc = to_bmc_device(dev);
2768         rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2769         if (rv)
2770                 rv = kref_get_unless_zero(&bmc->usecount);
2771         return rv;
2772 }
2773
2774 /*
2775  * Returns with the bmc's usecount incremented, if it is non-NULL.
2776  */
2777 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2778                                              guid_t *guid)
2779 {
2780         struct device *dev;
2781         struct bmc_device *bmc = NULL;
2782
2783         dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2784         if (dev) {
2785                 bmc = to_bmc_device(dev);
2786                 put_device(dev);
2787         }
2788         return bmc;
2789 }
2790
2791 struct prod_dev_id {
2792         unsigned int  product_id;
2793         unsigned char device_id;
2794 };
2795
2796 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2797 {
2798         struct prod_dev_id *cid = data;
2799         struct bmc_device *bmc;
2800         int rv;
2801
2802         if (dev->type != &bmc_device_type)
2803                 return 0;
2804
2805         bmc = to_bmc_device(dev);
2806         rv = (bmc->id.product_id == cid->product_id
2807               && bmc->id.device_id == cid->device_id);
2808         if (rv)
2809                 rv = kref_get_unless_zero(&bmc->usecount);
2810         return rv;
2811 }
2812
2813 /*
2814  * Returns with the bmc's usecount incremented, if it is non-NULL.
2815  */
2816 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2817         struct device_driver *drv,
2818         unsigned int product_id, unsigned char device_id)
2819 {
2820         struct prod_dev_id id = {
2821                 .product_id = product_id,
2822                 .device_id = device_id,
2823         };
2824         struct device *dev;
2825         struct bmc_device *bmc = NULL;
2826
2827         dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2828         if (dev) {
2829                 bmc = to_bmc_device(dev);
2830                 put_device(dev);
2831         }
2832         return bmc;
2833 }
2834
2835 static DEFINE_IDA(ipmi_bmc_ida);
2836
2837 static void
2838 release_bmc_device(struct device *dev)
2839 {
2840         kfree(to_bmc_device(dev));
2841 }
2842
2843 static void cleanup_bmc_work(struct work_struct *work)
2844 {
2845         struct bmc_device *bmc = container_of(work, struct bmc_device,
2846                                               remove_work);
2847         int id = bmc->pdev.id; /* Unregister overwrites id */
2848
2849         platform_device_unregister(&bmc->pdev);
2850         ida_simple_remove(&ipmi_bmc_ida, id);
2851 }
2852
2853 static void
2854 cleanup_bmc_device(struct kref *ref)
2855 {
2856         struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2857
2858         /*
2859          * Remove the platform device in a work queue to avoid issues
2860          * with removing the device attributes while reading a device
2861          * attribute.
2862          */
2863         schedule_work(&bmc->remove_work);
2864 }
2865
2866 /*
2867  * Must be called with intf->bmc_reg_mutex held.
2868  */
2869 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2870 {
2871         struct bmc_device *bmc = intf->bmc;
2872
2873         if (!intf->bmc_registered)
2874                 return;
2875
2876         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2877         sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2878         kfree(intf->my_dev_name);
2879         intf->my_dev_name = NULL;
2880
2881         mutex_lock(&bmc->dyn_mutex);
2882         list_del(&intf->bmc_link);
2883         mutex_unlock(&bmc->dyn_mutex);
2884         intf->bmc = &intf->tmp_bmc;
2885         kref_put(&bmc->usecount, cleanup_bmc_device);
2886         intf->bmc_registered = false;
2887 }
2888
2889 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2890 {
2891         mutex_lock(&intf->bmc_reg_mutex);
2892         __ipmi_bmc_unregister(intf);
2893         mutex_unlock(&intf->bmc_reg_mutex);
2894 }
2895
2896 /*
2897  * Must be called with intf->bmc_reg_mutex held.
2898  */
2899 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2900                                struct ipmi_device_id *id,
2901                                bool guid_set, guid_t *guid, int intf_num)
2902 {
2903         int               rv;
2904         struct bmc_device *bmc;
2905         struct bmc_device *old_bmc;
2906
2907         /*
2908          * platform_device_register() can cause bmc_reg_mutex to
2909          * be claimed because of the is_visible functions of
2910          * the attributes.  Eliminate possible recursion and
2911          * release the lock.
2912          */
2913         intf->in_bmc_register = true;
2914         mutex_unlock(&intf->bmc_reg_mutex);
2915
2916         /*
2917          * Try to find if there is already a bmc_device struct
2918          * representing the interfaced BMC.
2919          */
2920         mutex_lock(&ipmidriver_mutex);
2921         if (guid_set)
2922                 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2923         else
2924                 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2925                                                     id->product_id,
2926                                                     id->device_id);
2927
2928         /*
2929          * If there is already a bmc_device for this BMC, use it;
2930          * otherwise allocate and register a new BMC device.
2931          */
2932         if (old_bmc) {
2933                 bmc = old_bmc;
2934                 /*
2935                  * Note: old_bmc already has usecount incremented by
2936                  * the BMC find functions.
2937                  */
2938                 intf->bmc = old_bmc;
2939                 mutex_lock(&bmc->dyn_mutex);
2940                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2941                 mutex_unlock(&bmc->dyn_mutex);
2942
2943                 dev_info(intf->si_dev,
2944                          "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2945                          " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2946                          bmc->id.manufacturer_id,
2947                          bmc->id.product_id,
2948                          bmc->id.device_id);
2949         } else {
2950                 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2951                 if (!bmc) {
2952                         rv = -ENOMEM;
2953                         goto out;
2954                 }
2955                 INIT_LIST_HEAD(&bmc->intfs);
2956                 mutex_init(&bmc->dyn_mutex);
2957                 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
2958
2959                 bmc->id = *id;
2960                 bmc->dyn_id_set = 1;
2961                 bmc->dyn_guid_set = guid_set;
2962                 bmc->guid = *guid;
2963                 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2964
2965                 bmc->pdev.name = "ipmi_bmc";
2966
2967                 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2968                 if (rv < 0)
2969                         goto out;
2970                 bmc->pdev.dev.driver = &ipmidriver.driver;
2971                 bmc->pdev.id = rv;
2972                 bmc->pdev.dev.release = release_bmc_device;
2973                 bmc->pdev.dev.type = &bmc_device_type;
2974                 kref_init(&bmc->usecount);
2975
2976                 intf->bmc = bmc;
2977                 mutex_lock(&bmc->dyn_mutex);
2978                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2979                 mutex_unlock(&bmc->dyn_mutex);
2980
2981                 rv = platform_device_register(&bmc->pdev);
2982                 if (rv) {
2983                         dev_err(intf->si_dev,
2984                                 PFX "Unable to register bmc device: %d\n",
2985                                 rv);
2986                         goto out_list_del;
2987                 }
2988
2989                 dev_info(intf->si_dev,
2990                          "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2991                          bmc->id.manufacturer_id,
2992                          bmc->id.product_id,
2993                          bmc->id.device_id);
2994         }
2995
2996         /*
2997          * Create symlinks from the system interface device to the bmc
2998          * device and back.
2999          */
3000         rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3001         if (rv) {
3002                 dev_err(intf->si_dev,
3003                         PFX "Unable to create bmc symlink: %d\n", rv);
3004                 goto out_put_bmc;
3005         }
3006
3007         if (intf_num == -1)
3008                 intf_num = intf->intf_num;
3009         intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3010         if (!intf->my_dev_name) {
3011                 rv = -ENOMEM;
3012                 dev_err(intf->si_dev,
3013                         PFX "Unable to allocate link from BMC: %d\n", rv);
3014                 goto out_unlink1;
3015         }
3016
3017         rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3018                                intf->my_dev_name);
3019         if (rv) {
3020                 kfree(intf->my_dev_name);
3021                 intf->my_dev_name = NULL;
3022                 dev_err(intf->si_dev,
3023                         PFX "Unable to create symlink to bmc: %d\n", rv);
3024                 goto out_free_my_dev_name;
3025         }
3026
3027         intf->bmc_registered = true;
3028
3029 out:
3030         mutex_unlock(&ipmidriver_mutex);
3031         mutex_lock(&intf->bmc_reg_mutex);
3032         intf->in_bmc_register = false;
3033         return rv;
3034
3035
3036 out_free_my_dev_name:
3037         kfree(intf->my_dev_name);
3038         intf->my_dev_name = NULL;
3039
3040 out_unlink1:
3041         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3042
3043 out_put_bmc:
3044         mutex_lock(&bmc->dyn_mutex);
3045         list_del(&intf->bmc_link);
3046         mutex_unlock(&bmc->dyn_mutex);
3047         intf->bmc = &intf->tmp_bmc;
3048         kref_put(&bmc->usecount, cleanup_bmc_device);
3049         goto out;
3050
3051 out_list_del:
3052         mutex_lock(&bmc->dyn_mutex);
3053         list_del(&intf->bmc_link);
3054         mutex_unlock(&bmc->dyn_mutex);
3055         intf->bmc = &intf->tmp_bmc;
3056         put_device(&bmc->pdev.dev);
3057         goto out;
3058 }
3059
3060 static int
3061 send_guid_cmd(struct ipmi_smi *intf, int chan)
3062 {
3063         struct kernel_ipmi_msg            msg;
3064         struct ipmi_system_interface_addr si;
3065
3066         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3067         si.channel = IPMI_BMC_CHANNEL;
3068         si.lun = 0;
3069
3070         msg.netfn = IPMI_NETFN_APP_REQUEST;
3071         msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3072         msg.data = NULL;
3073         msg.data_len = 0;
3074         return i_ipmi_request(NULL,
3075                               intf,
3076                               (struct ipmi_addr *) &si,
3077                               0,
3078                               &msg,
3079                               intf,
3080                               NULL,
3081                               NULL,
3082                               0,
3083                               intf->addrinfo[0].address,
3084                               intf->addrinfo[0].lun,
3085                               -1, 0);
3086 }
3087
3088 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3089 {
3090         struct bmc_device *bmc = intf->bmc;
3091
3092         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3093             || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3094             || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3095                 /* Not for me */
3096                 return;
3097
3098         if (msg->msg.data[0] != 0) {
3099                 /* Error getting the GUID; the BMC doesn't have one. */
3100                 bmc->dyn_guid_set = 0;
3101                 goto out;
3102         }
3103
3104         if (msg->msg.data_len < 17) {
3105                 bmc->dyn_guid_set = 0;
3106                 dev_warn(intf->si_dev,
3107                          PFX "The GUID response from the BMC was too short, it was %d but should have been 17.  Assuming GUID is not available.\n",
3108                          msg->msg.data_len);
3109                 goto out;
3110         }
3111
3112         memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
3113         /*
3114          * Make sure the guid data is available before setting
3115          * dyn_guid_set.
3116          */
3117         smp_wmb();
3118         bmc->dyn_guid_set = 1;
3119  out:
3120         wake_up(&intf->waitq);
3121 }
3122
3123 static void __get_guid(struct ipmi_smi *intf)
3124 {
3125         int rv;
3126         struct bmc_device *bmc = intf->bmc;
3127
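             /*
              * dyn_guid_set == 2 marks the fetch as in progress;
              * guid_handler() sets it to 0 or 1 and wakes us up.
              */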
3128         bmc->dyn_guid_set = 2;
3129         intf->null_user_handler = guid_handler;
3130         rv = send_guid_cmd(intf, 0);
3131         if (rv)
3132                 /* Send failed, no GUID available. */
3133                 bmc->dyn_guid_set = 0;
3134
3135         wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3136
3137         /* dyn_guid_set makes the guid data available. */
3138         smp_rmb();
3139
3140         intf->null_user_handler = NULL;
3141 }
3142
3143 static int
3144 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3145 {
3146         struct kernel_ipmi_msg            msg;
3147         unsigned char                     data[1];
3148         struct ipmi_system_interface_addr si;
3149
3150         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3151         si.channel = IPMI_BMC_CHANNEL;
3152         si.lun = 0;
3153
3154         msg.netfn = IPMI_NETFN_APP_REQUEST;
3155         msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3156         msg.data = data;
3157         msg.data_len = 1;
3158         data[0] = chan;
3159         return i_ipmi_request(NULL,
3160                               intf,
3161                               (struct ipmi_addr *) &si,
3162                               0,
3163                               &msg,
3164                               intf,
3165                               NULL,
3166                               NULL,
3167                               0,
3168                               intf->addrinfo[0].address,
3169                               intf->addrinfo[0].lun,
3170                               -1, 0);
3171 }
3172
3173 static void
3174 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3175 {
3176         int rv = 0;
3177         int ch;
3178         unsigned int set = intf->curr_working_cset;
3179         struct ipmi_channel *chans;
3180
3181         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3182             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3183             && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3184                 /* It's the one we want */
3185                 if (msg->msg.data[0] != 0) {
3186                         /* Got an error from the channel, just go on. */
3187
3188                         if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3189                                 /*
3190                                  * If the MC does not support this
3191                                  * command, that is legal.  We just
3192                                  * assume it has one IPMB at channel
3193                                  * zero.
3194                                  */
3195                                 intf->wchannels[set].c[0].medium
3196                                         = IPMI_CHANNEL_MEDIUM_IPMB;
3197                                 intf->wchannels[set].c[0].protocol
3198                                         = IPMI_CHANNEL_PROTOCOL_IPMB;
3199
3200                                 intf->channel_list = intf->wchannels + set;
3201                                 intf->channels_ready = true;
3202                                 wake_up(&intf->waitq);
3203                                 goto out;
3204                         }
3205                         goto next_channel;
3206                 }
3207                 if (msg->msg.data_len < 4) {
3208                         /* Message not big enough, just go on. */
3209                         goto next_channel;
3210                 }
3211                 ch = intf->curr_channel;
3212                 chans = intf->wchannels[set].c;
3213                 chans[ch].medium = msg->msg.data[2] & 0x7f;
3214                 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3215
3216  next_channel:
3217                 intf->curr_channel++;
3218                 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3219                         intf->channel_list = intf->wchannels + set;
3220                         intf->channels_ready = true;
3221                         wake_up(&intf->waitq);
3222                 } else {
3223                         intf->channel_list = intf->wchannels + set;
3224                         intf->channels_ready = true;
3225                         rv = send_channel_info_cmd(intf, intf->curr_channel);
3226                 }
3227
3228                 if (rv) {
3229                         /* Got an error somehow, just give up. */
3230                         dev_warn(intf->si_dev,
3231                                  PFX "Error sending channel information for channel %d: %d\n",
3232                                  intf->curr_channel, rv);
3233
3234                         intf->channel_list = intf->wchannels + set;
3235                         intf->channels_ready = true;
3236                         wake_up(&intf->waitq);
3237                 }
3238         }
3239  out:
3240         return;
3241 }
3242
3243 /*
3244  * Must be holding intf->bmc_reg_mutex to call this.
3245  */
3246 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3247 {
3248         int rv;
3249
3250         if (ipmi_version_major(id) > 1
3251                         || (ipmi_version_major(id) == 1
3252                             && ipmi_version_minor(id) >= 5)) {
3253                 unsigned int set;
3254
3255                 /*
3256                  * Start scanning the channels to see what is
3257                  * available.
3258                  */
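                     /*
                      * The channel data is double buffered: scan into the
                      * unused set and only publish it through channel_list
                      * once it is complete, so readers never see a
                      * partially filled set.
                      */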
3259                 set = !intf->curr_working_cset;
3260                 intf->curr_working_cset = set;
3261                 memset(&intf->wchannels[set], 0,
3262                        sizeof(struct ipmi_channel_set));
3263
3264                 intf->null_user_handler = channel_handler;
3265                 intf->curr_channel = 0;
3266                 rv = send_channel_info_cmd(intf, 0);
3267                 if (rv) {
3268                         dev_warn(intf->si_dev,
3269                                  "Error sending channel information for channel 0, %d\n",
3270                                  rv);
3271                         return -EIO;
3272                 }
3273
3274                 /* Wait for the channel info to be read. */
3275                 wait_event(intf->waitq, intf->channels_ready);
3276                 intf->null_user_handler = NULL;
3277         } else {
3278                 unsigned int set = intf->curr_working_cset;
3279
3280                 /* Assume a single IPMB channel at zero. */
3281                 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3282                 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3283                 intf->channel_list = intf->wchannels + set;
3284                 intf->channels_ready = true;
3285         }
3286
3287         return 0;
3288 }
3289
3290 static void ipmi_poll(struct ipmi_smi *intf)
3291 {
3292         if (intf->handlers->poll)
3293                 intf->handlers->poll(intf->send_info);
3294         /* In case something came in */
3295         handle_new_recv_msgs(intf);
3296 }
3297
3298 void ipmi_poll_interface(struct ipmi_user *user)
3299 {
3300         ipmi_poll(user->intf);
3301 }
3302 EXPORT_SYMBOL(ipmi_poll_interface);
3303
3304 static void redo_bmc_reg(struct work_struct *work)
3305 {
3306         struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3307                                              bmc_reg_work);
3308
3309         if (!intf->in_shutdown)
3310                 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3311
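             /* Drop the reference taken when this work was queued. */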
3312         kref_put(&intf->refcount, intf_free);
3313 }
3314
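     /*
      * A minimal usage sketch (the "my_*" names are illustrative, not from
      * this file): a low-level interface driver fills in a struct
      * ipmi_smi_handlers (at least .start_processing and .sender are called
      * unconditionally here) and then does something like
      *
      *     rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info, my_dev, 0x20);
      *
      * The intf passed to its .start_processing() callback is what it later
      * hands to ipmi_unregister_smi() at teardown.
      */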
3315 int ipmi_add_smi(struct module         *owner,
3316                  const struct ipmi_smi_handlers *handlers,
3317                  void                  *send_info,
3318                  struct device         *si_dev,
3319                  unsigned char         slave_addr)
3320 {
3321         int              i, j;
3322         int              rv;
3323         struct ipmi_smi *intf, *tintf;
3324         struct list_head *link;
3325         struct ipmi_device_id id;
3326
3327         /*
3328          * Make sure the driver is actually initialized, this handles
3329          * problems with initialization order.
3330          */
3331         rv = ipmi_init_msghandler();
3332         if (rv)
3333                 return rv;
3334
3335         intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3336         if (!intf)
3337                 return -ENOMEM;
3338
3339         rv = init_srcu_struct(&intf->users_srcu);
3340         if (rv) {
3341                 kfree(intf);
3342                 return rv;
3343         }
3344
3345         intf->owner = owner;
3346         intf->bmc = &intf->tmp_bmc;
3347         INIT_LIST_HEAD(&intf->bmc->intfs);
3348         mutex_init(&intf->bmc->dyn_mutex);
3349         INIT_LIST_HEAD(&intf->bmc_link);
3350         mutex_init(&intf->bmc_reg_mutex);
3351         intf->intf_num = -1; /* Mark it invalid for now. */
3352         kref_init(&intf->refcount);
3353         INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3354         intf->si_dev = si_dev;
3355         for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3356                 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3357                 intf->addrinfo[j].lun = 2;
3358         }
3359         if (slave_addr != 0)
3360                 intf->addrinfo[0].address = slave_addr;
3361         INIT_LIST_HEAD(&intf->users);
3362         intf->handlers = handlers;
3363         intf->send_info = send_info;
3364         spin_lock_init(&intf->seq_lock);
3365         for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3366                 intf->seq_table[j].inuse = 0;
3367                 intf->seq_table[j].seqid = 0;
3368         }
3369         intf->curr_seq = 0;
3370         spin_lock_init(&intf->waiting_rcv_msgs_lock);
3371         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3372         tasklet_init(&intf->recv_tasklet,
3373                      smi_recv_tasklet,
3374                      (unsigned long) intf);
3375         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3376         spin_lock_init(&intf->xmit_msgs_lock);
3377         INIT_LIST_HEAD(&intf->xmit_msgs);
3378         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3379         spin_lock_init(&intf->events_lock);
3380         atomic_set(&intf->event_waiters, 0);
3381         intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3382         INIT_LIST_HEAD(&intf->waiting_events);
3383         intf->waiting_events_count = 0;
3384         mutex_init(&intf->cmd_rcvrs_mutex);
3385         spin_lock_init(&intf->maintenance_mode_lock);
3386         INIT_LIST_HEAD(&intf->cmd_rcvrs);
3387         init_waitqueue_head(&intf->waitq);
3388         for (i = 0; i < IPMI_NUM_STATS; i++)
3389                 atomic_set(&intf->stats[i], 0);
3390
3391         mutex_lock(&ipmi_interfaces_mutex);
3392         /* Look for a hole in the numbers. */
3393         i = 0;
3394         link = &ipmi_interfaces;
3395         list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3396                 if (tintf->intf_num != i) {
3397                         link = &tintf->link;
3398                         break;
3399                 }
3400                 i++;
3401         }
3402         /* Add the new interface in numeric order. */
3403         if (i == 0)
3404                 list_add_rcu(&intf->link, &ipmi_interfaces);
3405         else
3406                 list_add_tail_rcu(&intf->link, link);
3407
3408         rv = handlers->start_processing(send_info, intf);
3409         if (rv)
3410                 goto out_err;
3411
3412         rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3413         if (rv) {
3414                 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3415                 goto out_err_started;
3416         }
3417
3418         mutex_lock(&intf->bmc_reg_mutex);
3419         rv = __scan_channels(intf, &id);
3420         mutex_unlock(&intf->bmc_reg_mutex);
3421         if (rv)
3422                 goto out_err_bmc_reg;
3423
3424         /*
3425          * Keep memory order straight for RCU readers.  Make
3426          * sure everything else is committed to memory before
3427          * setting intf_num to mark the interface valid.
3428          */
3429         smp_wmb();
3430         intf->intf_num = i;
3431         mutex_unlock(&ipmi_interfaces_mutex);
3432
3433         /* After this point the interface is legal to use. */
3434         call_smi_watchers(i, intf->si_dev);
3435
3436         return 0;
3437
3438  out_err_bmc_reg:
3439         ipmi_bmc_unregister(intf);
3440  out_err_started:
3441         if (intf->handlers->shutdown)
3442                 intf->handlers->shutdown(intf->send_info);
3443  out_err:
3444         list_del_rcu(&intf->link);
3445         mutex_unlock(&ipmi_interfaces_mutex);
3446         synchronize_srcu(&ipmi_interfaces_srcu);
3447         cleanup_srcu_struct(&intf->users_srcu);
3448         kref_put(&intf->refcount, intf_free);
3449
3450         return rv;
3451 }
3452 EXPORT_SYMBOL(ipmi_add_smi);
3453
3454 static void deliver_smi_err_response(struct ipmi_smi *intf,
3455                                      struct ipmi_smi_msg *msg,
3456                                      unsigned char err)
3457 {
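             /*
              * Convert the request netfn into the response netfn: bit 2 of
              * the netfn/LUN byte is the low bit of the netfn field.
              */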
3458         msg->rsp[0] = msg->data[0] | 4;
3459         msg->rsp[1] = msg->data[1];
3460         msg->rsp[2] = err;
3461         msg->rsp_size = 3;
3462         /* It's an error, so it will never requeue, no need to check return. */
3463         handle_one_recv_msg(intf, msg);
3464 }
3465
3466 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3467 {
3468         int              i;
3469         struct seq_table *ent;
3470         struct ipmi_smi_msg *msg;
3471         struct list_head *entry;
3472         struct list_head tmplist;
3473
3474         /* Clear out our transmit queues and hold the messages. */
3475         INIT_LIST_HEAD(&tmplist);
3476         list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3477         list_splice_tail(&intf->xmit_msgs, &tmplist);
3478
3479         /* Current message first, to preserve order */
3480         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3481                 /* Wait for the message to clear out. */
3482                 schedule_timeout(1);
3483         }
3484
3485         /* No need for locks, the interface is down. */
3486
3487         /*
3488          * Return errors for all pending messages in queue and in the
3489          * tables waiting for remote responses.
3490          */
3491         while (!list_empty(&tmplist)) {
3492                 entry = tmplist.next;
3493                 list_del(entry);
3494                 msg = list_entry(entry, struct ipmi_smi_msg, link);
3495                 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3496         }
3497
3498         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3499                 ent = &intf->seq_table[i];
3500                 if (!ent->inuse)
3501                         continue;
3502                 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3503         }
3504 }
3505
3506 void ipmi_unregister_smi(struct ipmi_smi *intf)
3507 {
3508         struct ipmi_smi_watcher *w;
3509         int intf_num = intf->intf_num, index;
3510
3511         mutex_lock(&ipmi_interfaces_mutex);
3512         intf->intf_num = -1;
3513         intf->in_shutdown = true;
3514         list_del_rcu(&intf->link);
3515         mutex_unlock(&ipmi_interfaces_mutex);
3516         synchronize_srcu(&ipmi_interfaces_srcu);
3517
3518         /* At this point no users can be added to the interface. */
3519
3520         /*
3521          * Call all the watcher interfaces to tell them that
3522          * an interface is going away.
3523          */
3524         mutex_lock(&smi_watchers_mutex);
3525         list_for_each_entry(w, &smi_watchers, link)
3526                 w->smi_gone(intf_num);
3527         mutex_unlock(&smi_watchers_mutex);
3528
3529         index = srcu_read_lock(&intf->users_srcu);
3530         while (!list_empty(&intf->users)) {
3531                 struct ipmi_user *user =
3532                         container_of(list_next_rcu(&intf->users),
3533                                      struct ipmi_user, link);
3534
3535                 _ipmi_destroy_user(user);
3536         }
3537         srcu_read_unlock(&intf->users_srcu, index);
3538
3539         if (intf->handlers->shutdown)
3540                 intf->handlers->shutdown(intf->send_info);
3541
3542         cleanup_smi_msgs(intf);
3543
3544         ipmi_bmc_unregister(intf);
3545
3546         cleanup_srcu_struct(&intf->users_srcu);
3547         kref_put(&intf->refcount, intf_free);
3548 }
3549 EXPORT_SYMBOL(ipmi_unregister_smi);
3550
3551 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3552                                    struct ipmi_smi_msg *msg)
3553 {
3554         struct ipmi_ipmb_addr ipmb_addr;
3555         struct ipmi_recv_msg  *recv_msg;
3556
3557         /*
3558          * This is 11, not 10, because the response must contain a
3559          * completion code.
3560          */
3561         if (msg->rsp_size < 11) {
3562                 /* Message not big enough, just ignore it. */
3563                 ipmi_inc_stat(intf, invalid_ipmb_responses);
3564                 return 0;
3565         }
3566
3567         if (msg->rsp[2] != 0) {
3568                 /* An error getting the response, just ignore it. */
3569                 return 0;
3570         }
3571
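             /*
              * Fields of the Get Message response as decoded below:
              * rsp[3] is the channel, rsp[4] carries the responder's
              * netfn, rsp[6] is the responder's slave address, rsp[7]
              * holds the sequence number and responder LUN, rsp[8] is
              * the command, the completion code and data start at
              * rsp[9], and the final byte is the IPMB checksum.
              */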
3572         ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3573         ipmb_addr.slave_addr = msg->rsp[6];
3574         ipmb_addr.channel = msg->rsp[3] & 0x0f;
3575         ipmb_addr.lun = msg->rsp[7] & 3;
3576
3577         /*
3578          * It's a response from a remote entity.  Look up the sequence
3579          * number and handle the response.
3580          */
3581         if (intf_find_seq(intf,
3582                           msg->rsp[7] >> 2,
3583                           msg->rsp[3] & 0x0f,
3584                           msg->rsp[8],
3585                           (msg->rsp[4] >> 2) & (~1),
3586                           (struct ipmi_addr *) &ipmb_addr,
3587                           &recv_msg)) {
3588                 /*
3589                  * We were unable to find the sequence number,
3590                  * so just nuke the message.
3591                  */
3592                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3593                 return 0;
3594         }
3595
3596         memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3597         /*
3598          * The other fields matched, so no need to set them, except
3599          * for netfn, which needs to be the response that was
3600          * returned, not the request value.
3601          */
3602         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3603         recv_msg->msg.data = recv_msg->msg_data;
3604         recv_msg->msg.data_len = msg->rsp_size - 10;
3605         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3606         if (deliver_response(intf, recv_msg))
3607                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3608         else
3609                 ipmi_inc_stat(intf, handled_ipmb_responses);
3610
3611         return 0;
3612 }
3613
3614 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3615                                    struct ipmi_smi_msg *msg)
3616 {
3617         struct cmd_rcvr          *rcvr;
3618         int                      rv = 0;
3619         unsigned char            netfn;
3620         unsigned char            cmd;
3621         unsigned char            chan;
3622         struct ipmi_user         *user = NULL;
3623         struct ipmi_ipmb_addr    *ipmb_addr;
3624         struct ipmi_recv_msg     *recv_msg;
3625
3626         if (msg->rsp_size < 10) {
3627                 /* Message not big enough, just ignore it. */
3628                 ipmi_inc_stat(intf, invalid_commands);
3629                 return 0;
3630         }
3631
3632         if (msg->rsp[2] != 0) {
3633                 /* An error getting the response, just ignore it. */
3634                 return 0;
3635         }
3636
3637         netfn = msg->rsp[4] >> 2;
3638         cmd = msg->rsp[8];
3639         chan = msg->rsp[3] & 0xf;
3640
3641         rcu_read_lock();
3642         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3643         if (rcvr) {
3644                 user = rcvr->user;
3645                 kref_get(&user->refcount);
3646         } else
3647                 user = NULL;
3648         rcu_read_unlock();
3649
3650         if (user == NULL) {
3651                 /* We didn't find a user, deliver an error response. */
3652                 ipmi_inc_stat(intf, unhandled_commands);
3653
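                     /*
                      * Build a Send Message command carrying an IPMB
                      * response frame back to the requester with an
                      * "invalid command" completion code.
                      */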
3654                 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3655                 msg->data[1] = IPMI_SEND_MSG_CMD;
3656                 msg->data[2] = msg->rsp[3];
3657                 msg->data[3] = msg->rsp[6];
3658                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3659                 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3660                 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3661                 /* rqseq/lun */
3662                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3663                 msg->data[8] = msg->rsp[8]; /* cmd */
3664                 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3665                 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3666                 msg->data_size = 11;
3667
3668                 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3669
3670                 rcu_read_lock();
3671                 if (!intf->in_shutdown) {
3672                         smi_send(intf, intf->handlers, msg, 0);
3673                         /*
3674                          * We used the message, so return the value
3675                          * that causes it to not be freed or
3676                          * queued.
3677                          */
3678                         rv = -1;
3679                 }
3680                 rcu_read_unlock();
3681         } else {
3682                 recv_msg = ipmi_alloc_recv_msg();
3683                 if (!recv_msg) {
3684                         /*
3685                          * We couldn't allocate memory for the
3686                          * message, so requeue it for handling
3687                          * later.
3688                          */
3689                         rv = 1;
3690                         kref_put(&user->refcount, free_user);
3691                 } else {
3692                         /* Extract the source address from the data. */
3693                         ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3694                         ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3695                         ipmb_addr->slave_addr = msg->rsp[6];
3696                         ipmb_addr->lun = msg->rsp[7] & 3;
3697                         ipmb_addr->channel = msg->rsp[3] & 0xf;
3698
3699                         /*
3700                          * Extract the rest of the message information
3701                          * from the IPMB header.
3702                          */
3703                         recv_msg->user = user;
3704                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3705                         recv_msg->msgid = msg->rsp[7] >> 2;
3706                         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3707                         recv_msg->msg.cmd = msg->rsp[8];
3708                         recv_msg->msg.data = recv_msg->msg_data;
3709
3710                         /*
3711                          * We chop off 10, not 9 bytes because the checksum
3712                          * at the end also needs to be removed.
3713                          */
3714                         recv_msg->msg.data_len = msg->rsp_size - 10;
3715                         memcpy(recv_msg->msg_data, &msg->rsp[9],
3716                                msg->rsp_size - 10);
3717                         if (deliver_response(intf, recv_msg))
3718                                 ipmi_inc_stat(intf, unhandled_commands);
3719                         else
3720                                 ipmi_inc_stat(intf, handled_commands);
3721                 }
3722         }
3723
3724         return rv;
3725 }
3726
3727 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3728                                   struct ipmi_smi_msg *msg)
3729 {
3730         struct ipmi_lan_addr  lan_addr;
3731         struct ipmi_recv_msg  *recv_msg;
3732
3733
3734         /*
3735          * This is 13, not 12, because the response must contain a
3736          * completion code.
3737          */
3738         if (msg->rsp_size < 13) {
3739                 /* Message not big enough, just ignore it. */
3740                 ipmi_inc_stat(intf, invalid_lan_responses);
3741                 return 0;
3742         }
3743
3744         if (msg->rsp[2] != 0) {
3745                 /* An error getting the response, just ignore it. */
3746                 return 0;
3747         }
3748
3749         lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3750         lan_addr.session_handle = msg->rsp[4];
3751         lan_addr.remote_SWID = msg->rsp[8];
3752         lan_addr.local_SWID = msg->rsp[5];
3753         lan_addr.channel = msg->rsp[3] & 0x0f;
3754         lan_addr.privilege = msg->rsp[3] >> 4;
3755         lan_addr.lun = msg->rsp[9] & 3;
3756
3757         /*
3758          * It's a response from a remote entity.  Look up the sequence
3759          * number and handle the response.
3760          */
3761         if (intf_find_seq(intf,
3762                           msg->rsp[9] >> 2,
3763                           msg->rsp[3] & 0x0f,
3764                           msg->rsp[10],
3765                           (msg->rsp[6] >> 2) & (~1),
3766                           (struct ipmi_addr *) &lan_addr,
3767                           &recv_msg)) {
3768                 /*
3769                  * We were unable to find the sequence number,
3770                  * so just nuke the message.
3771                  */
3772                 ipmi_inc_stat(intf, unhandled_lan_responses);
3773                 return 0;
3774         }
3775
3776         memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3777         /*
3778          * The other fields matched, so no need to set them, except
3779          * for netfn, which needs to be the response that was
3780          * returned, not the request value.
3781          */
3782         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3783         recv_msg->msg.data = recv_msg->msg_data;
3784         recv_msg->msg.data_len = msg->rsp_size - 12;
3785         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3786         if (deliver_response(intf, recv_msg))
3787                 ipmi_inc_stat(intf, unhandled_lan_responses);
3788         else
3789                 ipmi_inc_stat(intf, handled_lan_responses);
3790
3791         return 0;
3792 }
3793
3794 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3795                                   struct ipmi_smi_msg *msg)
3796 {
3797         struct cmd_rcvr          *rcvr;
3798         int                      rv = 0;
3799         unsigned char            netfn;
3800         unsigned char            cmd;
3801         unsigned char            chan;
3802         struct ipmi_user         *user = NULL;
3803         struct ipmi_lan_addr     *lan_addr;
3804         struct ipmi_recv_msg     *recv_msg;
3805
3806         if (msg->rsp_size < 12) {
3807                 /* Message not big enough, just ignore it. */
3808                 ipmi_inc_stat(intf, invalid_commands);
3809                 return 0;
3810         }
3811
3812         if (msg->rsp[2] != 0) {
3813                 /* An error getting the response, just ignore it. */
3814                 return 0;
3815         }
3816
3817         netfn = msg->rsp[6] >> 2;
3818         cmd = msg->rsp[10];
3819         chan = msg->rsp[3] & 0xf;
3820
3821         rcu_read_lock();
3822         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3823         if (rcvr) {
3824                 user = rcvr->user;
3825                 kref_get(&user->refcount);
3826         } else
3827                 user = NULL;
3828         rcu_read_unlock();
3829
3830         if (user == NULL) {
3831                 /* We didn't find a user, just give up. */
3832                 ipmi_inc_stat(intf, unhandled_commands);
3833
3834                 /*
3835                  * Don't do anything with these messages, just allow
3836                  * them to be freed.
3837                  */
3838                 rv = 0;
3839         } else {
3840                 recv_msg = ipmi_alloc_recv_msg();
3841                 if (!recv_msg) {
3842                         /*
3843                          * We couldn't allocate memory for the
3844                          * message, so requeue it for handling later.
3845                          */
3846                         rv = 1;
3847                         kref_put(&user->refcount, free_user);
3848                 } else {
3849                         /* Extract the source address from the data. */
3850                         lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3851                         lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3852                         lan_addr->session_handle = msg->rsp[4];
3853                         lan_addr->remote_SWID = msg->rsp[8];
3854                         lan_addr->local_SWID = msg->rsp[5];
3855                         lan_addr->lun = msg->rsp[9] & 3;
3856                         lan_addr->channel = msg->rsp[3] & 0xf;
3857                         lan_addr->privilege = msg->rsp[3] >> 4;
3858
3859                         /*
3860                          * Extract the rest of the message information
3861                          * from the IPMB header.
3862                          */
3863                         recv_msg->user = user;
3864                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3865                         recv_msg->msgid = msg->rsp[9] >> 2;
3866                         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3867                         recv_msg->msg.cmd = msg->rsp[10];
3868                         recv_msg->msg.data = recv_msg->msg_data;
3869
3870                         /*
3871                          * We chop off 12, not 11 bytes because the checksum
3872                          * at the end also needs to be removed.
3873                          */
3874                         recv_msg->msg.data_len = msg->rsp_size - 12;
3875                         memcpy(recv_msg->msg_data, &msg->rsp[11],
3876                                msg->rsp_size - 12);
3877                         if (deliver_response(intf, recv_msg))
3878                                 ipmi_inc_stat(intf, unhandled_commands);
3879                         else
3880                                 ipmi_inc_stat(intf, handled_commands);
3881                 }
3882         }
3883
3884         return rv;
3885 }
3886
3887 /*
3888  * This routine will handle "Get Message" command responses with
3889  * channels that use an OEM Medium. The message format belongs to
3890  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3891  * Chapter 22, sections 22.6 and 22.24 for more details.
3892  */
3893 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3894                                   struct ipmi_smi_msg *msg)
3895 {
3896         struct cmd_rcvr       *rcvr;
3897         int                   rv = 0;
3898         unsigned char         netfn;
3899         unsigned char         cmd;
3900         unsigned char         chan;
3901         struct ipmi_user *user = NULL;
3902         struct ipmi_system_interface_addr *smi_addr;
3903         struct ipmi_recv_msg  *recv_msg;
3904
3905         /*
3906          * We expect the OEM SW to perform error checking
3907          * We expect the OEM SW to perform error checking,
3908          * so we just do some basic sanity checks here.
3909         if (msg->rsp_size < 4) {
3910                 /* Message not big enough, just ignore it. */
3911                 ipmi_inc_stat(intf, invalid_commands);
3912                 return 0;
3913         }
3914
3915         if (msg->rsp[2] != 0) {
3916                 /* An error getting the response, just ignore it. */
3917                 return 0;
3918         }
3919
3920         /*
3921          * This is an OEM Message, so the OEM needs to know how
3922          * to handle the message.  We do no interpretation.
3923          */
3924         netfn = msg->rsp[0] >> 2;
3925         cmd = msg->rsp[1];
3926         chan = msg->rsp[3] & 0xf;
3927
3928         rcu_read_lock();
3929         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3930         if (rcvr) {
3931                 user = rcvr->user;
3932                 kref_get(&user->refcount);
3933         } else
3934                 user = NULL;
3935         rcu_read_unlock();
3936
3937         if (user == NULL) {
3938                 /* We didn't find a user, just give up. */
3939                 ipmi_inc_stat(intf, unhandled_commands);
3940
3941                 /*
3942                  * Don't do anything with these messages, just allow
3943                  * them to be freed.
3944                  */
3945
3946                 rv = 0;
3947         } else {
3948                 recv_msg = ipmi_alloc_recv_msg();
3949                 if (!recv_msg) {
3950                         /*
3951                          * We couldn't allocate memory for the
3952                          * message, so requeue it for handling
3953                          * later.
3954                          */
3955                         rv = 1;
3956                         kref_put(&user->refcount, free_user);
3957                 } else {
3958                         /*
3959                          * OEM Messages are expected to be delivered via
3960                          * the system interface to SMS software.  We might
3961                          * need to visit this again depending on OEM
3962                          * requirements
3963                          */
3964                         smi_addr = ((struct ipmi_system_interface_addr *)
3965                                     &recv_msg->addr);
3966                         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3967                         smi_addr->channel = IPMI_BMC_CHANNEL;
3968                         smi_addr->lun = msg->rsp[0] & 3;
3969
3970                         recv_msg->user = user;
3971                         recv_msg->user_msg_data = NULL;
3972                         recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3973                         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3974                         recv_msg->msg.cmd = msg->rsp[1];
3975                         recv_msg->msg.data = recv_msg->msg_data;
3976
3977                         /*
3978                          * The message starts at byte 4, which follows
3979                          * the Channel Byte in the "GET MESSAGE" command.
3980                          */
3981                         recv_msg->msg.data_len = msg->rsp_size - 4;
3982                         memcpy(recv_msg->msg_data, &msg->rsp[4],
3983                                msg->rsp_size - 4);
3984                         if (deliver_response(intf, recv_msg))
3985                                 ipmi_inc_stat(intf, unhandled_commands);
3986                         else
3987                                 ipmi_inc_stat(intf, handled_commands);
3988                 }
3989         }
3990
3991         return rv;
3992 }
3993
3994 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3995                                      struct ipmi_smi_msg  *msg)
3996 {
3997         struct ipmi_system_interface_addr *smi_addr;
3998
3999         recv_msg->msgid = 0;
4000         smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4001         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4002         smi_addr->channel = IPMI_BMC_CHANNEL;
4003         smi_addr->lun = msg->rsp[0] & 3;
4004         recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4005         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4006         recv_msg->msg.cmd = msg->rsp[1];
4007         memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4008         recv_msg->msg.data = recv_msg->msg_data;
4009         recv_msg->msg.data_len = msg->rsp_size - 3;
4010 }
4011
4012 static int handle_read_event_rsp(struct ipmi_smi *intf,
4013                                  struct ipmi_smi_msg *msg)
4014 {
4015         struct ipmi_recv_msg *recv_msg, *recv_msg2;
4016         struct list_head     msgs;
4017         struct ipmi_user     *user;
4018         int rv = 0, deliver_count = 0, index;
4019         unsigned long        flags;
4020
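             /*
              * An event is a 16-byte message that follows the netfn/LUN,
              * command, and completion code bytes, so a valid response is
              * at least 19 bytes long.
              */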
4021         if (msg->rsp_size < 19) {
4022                 /* Message is too small to be an IPMB event. */
4023                 ipmi_inc_stat(intf, invalid_events);
4024                 return 0;
4025         }
4026
4027         if (msg->rsp[2] != 0) {
4028                 /* An error getting the event, just ignore it. */
4029                 return 0;
4030         }
4031
4032         INIT_LIST_HEAD(&msgs);
4033
4034         spin_lock_irqsave(&intf->events_lock, flags);
4035
4036         ipmi_inc_stat(intf, events);
4037
4038         /*
4039          * Allocate and fill in one message for every user that is
4040          * getting events.
4041          */
4042         index = srcu_read_lock(&intf->users_srcu);
4043         list_for_each_entry_rcu(user, &intf->users, link) {
4044                 if (!user->gets_events)
4045                         continue;
4046
4047                 recv_msg = ipmi_alloc_recv_msg();
4048                 if (!recv_msg) {
4049                         rcu_read_unlock();
4050                         list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4051                                                  link) {
4052                                 list_del(&recv_msg->link);
4053                                 ipmi_free_recv_msg(recv_msg);
4054                         }
4055                         /*
4056                          * We couldn't allocate memory for the
4057                          * message, so requeue it for handling
4058                          * later.
4059                          */
4060                         rv = 1;
4061                         goto out;
4062                 }
4063
4064                 deliver_count++;
4065
4066                 copy_event_into_recv_msg(recv_msg, msg);
4067                 recv_msg->user = user;
4068                 kref_get(&user->refcount);
4069                 list_add_tail(&recv_msg->link, &msgs);
4070         }
4071         srcu_read_unlock(&intf->users_srcu, index);
4072
4073         if (deliver_count) {
4074                 /* Now deliver all the messages. */
4075                 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4076                         list_del(&recv_msg->link);
4077                         deliver_local_response(intf, recv_msg);
4078                 }
4079         } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4080                 /*
4081                  * No one is waiting to receive the message; put it in the
4082                  * queue unless there are already too many things in it.
4083                  */
4084                 recv_msg = ipmi_alloc_recv_msg();
4085                 if (!recv_msg) {
4086                         /*
4087                          * We couldn't allocate memory for the
4088                          * message, so requeue it for handling
4089                          * later.
4090                          */
4091                         rv = 1;
4092                         goto out;
4093                 }
4094
4095                 copy_event_into_recv_msg(recv_msg, msg);
4096                 list_add_tail(&recv_msg->link, &intf->waiting_events);
4097                 intf->waiting_events_count++;
4098         } else if (!intf->event_msg_printed) {
4099                 /*
4100                  * There are too many things in the queue; discard this
4101                  * message.
4102                  */
4103                 dev_warn(intf->si_dev,
4104                          PFX "Event queue full, discarding incoming events\n");
4105                 intf->event_msg_printed = 1;
4106         }
4107
4108  out:
4109         spin_unlock_irqrestore(&intf->events_lock, flags);
4110
4111         return rv;
4112 }
4113
4114 static int handle_bmc_rsp(struct ipmi_smi *intf,
4115                           struct ipmi_smi_msg *msg)
4116 {
4117         struct ipmi_recv_msg *recv_msg;
4118         struct ipmi_system_interface_addr *smi_addr;
4119
4120         recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4121         if (recv_msg == NULL) {
4122                 dev_warn(intf->si_dev,
4123                          "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance\n");
4124                 return 0;
4125         }
4126
4127         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4128         recv_msg->msgid = msg->msgid;
4129         smi_addr = ((struct ipmi_system_interface_addr *)
4130                     &recv_msg->addr);
4131         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4132         smi_addr->channel = IPMI_BMC_CHANNEL;
4133         smi_addr->lun = msg->rsp[0] & 3;
4134         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4135         recv_msg->msg.cmd = msg->rsp[1];
4136         memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4137         recv_msg->msg.data = recv_msg->msg_data;
4138         recv_msg->msg.data_len = msg->rsp_size - 2;
4139         deliver_local_response(intf, recv_msg);
4140
4141         return 0;
4142 }
4143
4144 /*
4145  * Handle a received message.  Return 1 if the message should be requeued,
4146  * 0 if the message should be freed, or -1 if the message should not
4147  * be freed or requeued.
4148  */
4149 static int handle_one_recv_msg(struct ipmi_smi *intf,
4150                                struct ipmi_smi_msg *msg)
4151 {
4152         int requeue;
4153         int chan;
4154
4155         ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4156         if (msg->rsp_size < 2) {
4157                 /* Message is too small to be correct. */
4158                 dev_warn(intf->si_dev,
4159                          PFX "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4160                          (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4161
4162                 /* Generate an error response for the message. */
4163                 msg->rsp[0] = msg->data[0] | (1 << 2);
4164                 msg->rsp[1] = msg->data[1];
4165                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4166                 msg->rsp_size = 3;
4167         } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4168                    || (msg->rsp[1] != msg->data[1])) {
4169                 /*
4170                  * The NetFN and Command in the response is not even
4171                  * marginally correct.
4172                  */
4173                 dev_warn(intf->si_dev,
4174                          PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4175                          (msg->data[0] >> 2) | 1, msg->data[1],
4176                          msg->rsp[0] >> 2, msg->rsp[1]);
4177
4178                 /* Generate an error response for the message. */
4179                 msg->rsp[0] = msg->data[0] | (1 << 2);
4180                 msg->rsp[1] = msg->data[1];
4181                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4182                 msg->rsp_size = 3;
4183         }
4184
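             /*
              * Dispatch on the netfn/cmd of the response: Send Message
              * responses (with user_data set) complete a response we
              * forwarded for a user, Get Message responses carry traffic
              * from the receive queue, Read Event Message Buffer responses
              * are asynchronous events, and anything else is a direct
              * response from the local BMC.
              */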
4185         if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4186             && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4187             && (msg->user_data != NULL)) {
4188                 /*
4189                  * It's a response to a response we sent.  For this we
4190                  * deliver a send message response to the user.
4191                  */
4192                 struct ipmi_recv_msg *recv_msg = msg->user_data;
4193
4194                 requeue = 0;
4195                 if (msg->rsp_size < 2)
4196                         /* Message is too small to be correct. */
4197                         goto out;
4198
4199                 chan = msg->data[2] & 0x0f;
4200                 if (chan >= IPMI_MAX_CHANNELS)
4201                         /* Invalid channel number */
4202                         goto out;
4203
4204                 if (!recv_msg)
4205                         goto out;
4206
4207                 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4208                 recv_msg->msg.data = recv_msg->msg_data;
4209                 recv_msg->msg.data_len = 1;
4210                 recv_msg->msg_data[0] = msg->rsp[2];
4211                 deliver_local_response(intf, recv_msg);
4212         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4213                    && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4214                 struct ipmi_channel   *chans;
4215
4216                 /* It's from the receive queue. */
4217                 chan = msg->rsp[3] & 0xf;
4218                 if (chan >= IPMI_MAX_CHANNELS) {
4219                         /* Invalid channel number */
4220                         requeue = 0;
4221                         goto out;
4222                 }
4223
4224                 /*
4225                  * We need to make sure the channels have been initialized.
4226                  * The channel_handler routine (or __scan_channels for
4227                  * pre-IPMI-1.5 BMCs) sets "channels_ready" once the
4228                  * channel list for this interface has been filled in.
4229                  */
4230                 if (!intf->channels_ready) {
4231                         requeue = 0; /* Throw the message away */
4232                         goto out;
4233                 }
4234
4235                 chans = READ_ONCE(intf->channel_list)->c;
4236
4237                 switch (chans[chan].medium) {
4238                 case IPMI_CHANNEL_MEDIUM_IPMB:
4239                         if (msg->rsp[4] & 0x04) {
4240                                 /*
4241                                  * It's a response, so find the
4242                                  * requesting message and send it up.
4243                                  */
4244                                 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4245                         } else {
4246                                 /*
4247                                  * It's a command to the SMS from some other
4248                                  * entity.  Handle that.
4249                                  */
4250                                 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4251                         }
4252                         break;
4253
4254                 case IPMI_CHANNEL_MEDIUM_8023LAN:
4255                 case IPMI_CHANNEL_MEDIUM_ASYNC:
4256                         if (msg->rsp[6] & 0x04) {
4257                                 /*
4258                                  * It's a response, so find the
4259                                  * requesting message and send it up.
4260                                  */
4261                                 requeue = handle_lan_get_msg_rsp(intf, msg);
4262                         } else {
4263                                 /*
4264                                  * It's a command to the SMS from some other
4265                                  * entity.  Handle that.
4266                                  */
4267                                 requeue = handle_lan_get_msg_cmd(intf, msg);
4268                         }
4269                         break;
4270
4271                 default:
4272                         /* Check for OEM Channels.  Clients had better
4273                            register for these commands. */
4274                         if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4275                             && (chans[chan].medium
4276                                 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4277                                 requeue = handle_oem_get_msg_cmd(intf, msg);
4278                         } else {
4279                                 /*
4280                                  * We don't handle the channel type, so just
4281                                  * free the message.
4282                                  */
4283                                 requeue = 0;
4284                         }
4285                 }
4286
4287         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4288                    && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4289                 /* It's an asynchronous event. */
4290                 requeue = handle_read_event_rsp(intf, msg);
4291         } else {
4292                 /* It's a response from the local BMC. */
4293                 requeue = handle_bmc_rsp(intf, msg);
4294         }
4295
4296  out:
4297         return requeue;
4298 }
4299
4300 /*
4301  * If there are messages in the queue or pretimeouts, handle them.
4302  */
4303 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4304 {
4305         struct ipmi_smi_msg  *smi_msg;
4306         unsigned long        flags = 0;
4307         int                  rv;
4308         int                  run_to_completion = intf->run_to_completion;
4309
4310         /* See if any waiting messages need to be processed. */
4311         if (!run_to_completion)
4312                 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4313         while (!list_empty(&intf->waiting_rcv_msgs)) {
4314                 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4315                                      struct ipmi_smi_msg, link);
4316                 list_del(&smi_msg->link);
4317                 if (!run_to_completion)
4318                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4319                                                flags);
4320                 rv = handle_one_recv_msg(intf, smi_msg);
4321                 if (!run_to_completion)
4322                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4323                 if (rv > 0) {
4324                         /*
4325                          * To preserve message order, quit if we
4326                          * can't handle a message.  Add the message
4327                          * back at the head, this is safe because this
4328                          * tasklet is the only thing that pulls the
4329                          * messages.
4330                          */
4331                         list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4332                         break;
4333                 } else {
4334                         if (rv == 0)
4335                                 /* Message handled */
4336                                 ipmi_free_smi_msg(smi_msg);
4337                         /* If rv < 0, fatal error, del but don't free. */
4338                 }
4339         }
4340         if (!run_to_completion)
4341                 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4342
4343         /*
4344          * If the pretimout count is non-zero, decrement one from it and
4345          * deliver pretimeouts to all the users.
4346          */
4347         if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4348                 struct ipmi_user *user;
4349                 int index;
4350
4351                 index = srcu_read_lock(&intf->users_srcu);
4352                 list_for_each_entry_rcu(user, &intf->users, link) {
4353                         if (user->handler->ipmi_watchdog_pretimeout)
4354                                 user->handler->ipmi_watchdog_pretimeout(
4355                                         user->handler_data);
4356                 }
4357                 srcu_read_unlock(&intf->users_srcu, index);
4358         }
4359 }
4360
4361 static void smi_recv_tasklet(unsigned long val)
4362 {
4363         unsigned long flags = 0; /* keep us warning-free. */
4364         struct ipmi_smi *intf = (struct ipmi_smi *) val;
4365         int run_to_completion = intf->run_to_completion;
4366         struct ipmi_smi_msg *newmsg = NULL;
4367
4368         /*
4369          * Start the next message if available.
4370          *
4371          * Do this here, not in the actual receiver, because we may deadlock
4372          * because the lower layer is allowed to hold locks while calling
4373          * message delivery.
4374          */
4375
4376         rcu_read_lock();
4377
4378         if (!run_to_completion)
4379                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4380         if (intf->curr_msg == NULL && !intf->in_shutdown) {
4381                 struct list_head *entry = NULL;
4382
4383                 /* Pick the high priority queue first. */
4384                 if (!list_empty(&intf->hp_xmit_msgs))
4385                         entry = intf->hp_xmit_msgs.next;
4386                 else if (!list_empty(&intf->xmit_msgs))
4387                         entry = intf->xmit_msgs.next;
4388
4389                 if (entry) {
4390                         list_del(entry);
4391                         newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4392                         intf->curr_msg = newmsg;
4393                 }
4394         }
4395         if (!run_to_completion)
4396                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4397         if (newmsg)
4398                 intf->handlers->sender(intf->send_info, newmsg);
4399
4400         rcu_read_unlock();
4401
4402         handle_new_recv_msgs(intf);
4403 }
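
     /*
      * Each run of the tasklet above does two things: it starts the next
      * queued transmit (high-priority queue first) if the interface is
      * idle, and then drains waiting_rcv_msgs via handle_new_recv_msgs().
      * When run_to_completion is set (as it is during a panic), the tasklet
      * body is called directly rather than scheduled, and the spinlocks are
      * skipped on the assumption that nothing else is running.
      */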
4404
4405 /* Handle a new message from the lower layer. */
4406 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4407                            struct ipmi_smi_msg *msg)
4408 {
4409         unsigned long flags = 0; /* keep us warning-free. */
4410         int run_to_completion = intf->run_to_completion;
4411
4412         if ((msg->data_size >= 2)
4413             && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4414             && (msg->data[1] == IPMI_SEND_MSG_CMD)
4415             && (msg->user_data == NULL)) {
4416
4417                 if (intf->in_shutdown)
4418                         goto free_msg;
4419
4420                 /*
4421                  * This is the local response to a command send; start
4422                  * the timer for it.  The user_data will not be
4423                  * NULL if this is a response send, and we let
4424                  * response sends just go through.
4425                  */
4426
4427                 /*
4428                  * Check for errors.  If we get certain errors (ones
4429                  * that basically mean we can try again later), we
4430                  * ignore them and start the timer.  Otherwise we
4431                  * report the error immediately.
4432                  */
4433                 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4434                     && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4435                     && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4436                     && (msg->rsp[2] != IPMI_BUS_ERR)
4437                     && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4438                         int ch = msg->rsp[3] & 0xf;
4439                         struct ipmi_channel *chans;
4440
4441                         /* Got an error sending the message, handle it. */
4442
4443                         chans = READ_ONCE(intf->channel_list)->c;
4444                         if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4445                             || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4446                                 ipmi_inc_stat(intf, sent_lan_command_errs);
4447                         else
4448                                 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4449                         intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4450                 } else
4451                         /* The message was sent, start the timer. */
4452                         intf_start_seq_timer(intf, msg->msgid);
4453
4454 free_msg:
4455                 ipmi_free_smi_msg(msg);
4456         } else {
4457                 /*
4458                  * To preserve message order, we keep a queue and deliver from
4459                  * a tasklet.
4460                  */
4461                 if (!run_to_completion)
4462                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4463                 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4464                 if (!run_to_completion)
4465                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4466                                                flags);
4467         }
4468
4469         if (!run_to_completion)
4470                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4471         /*
4472          * We can get an asynchronous event or receive message in addition
4473          * to commands we send.
4474          */
4475         if (msg == intf->curr_msg)
4476                 intf->curr_msg = NULL;
4477         if (!run_to_completion)
4478                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4479
4480         if (run_to_completion)
4481                 smi_recv_tasklet((unsigned long) intf);
4482         else
4483                 tasklet_schedule(&intf->recv_tasklet);
4484 }
4485 EXPORT_SYMBOL(ipmi_smi_msg_received);
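
     /*
      * Rough usage sketch for lower-layer (SMI) drivers, illustrative only
      * and not lifted from a real driver: when the transaction for a
      * message handed to ->sender() completes, the driver fills in the
      * response and passes the same message back up, for example from its
      * interrupt handler:
      *
      *	msg->rsp_size = hw_copy_response(msg->rsp);
      *	ipmi_smi_msg_received(intf, msg);
      *
      * hw_copy_response() is a made-up placeholder for the driver's own
      * hardware state machine.  Ownership of the message returns to this
      * layer, which either frees it (local SEND_MSG responses) or queues
      * it on waiting_rcv_msgs for the tasklet, as implemented above.
      * Drivers such as ipmi_si also allocate fresh messages with
      * ipmi_alloc_smi_msg() for asynchronous traffic.
      */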
4486
4487 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4488 {
4489         if (intf->in_shutdown)
4490                 return;
4491
4492         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4493         tasklet_schedule(&intf->recv_tasklet);
4494 }
4495 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
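
     /*
      * Lower layers call ipmi_smi_watchdog_pretimeout() when the hardware
      * reports a watchdog pre-timeout.  It only sets a one-shot flag and
      * schedules the tasklet; the actual user callbacks
      * (handler->ipmi_watchdog_pretimeout) are made later from
      * handle_new_recv_msgs() above.
      */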
4496
4497 static struct ipmi_smi_msg *
4498 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4499                   unsigned char seq, long seqid)
4500 {
4501         struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4502         if (!smi_msg)
4503                 /*
4504                  * If we can't allocate the message, then just return; we
4505                  * get 4 retries, so this should be ok.
4506                  */
4507                 return NULL;
4508
4509         memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4510         smi_msg->data_size = recv_msg->msg.data_len;
4511         smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4512
4513         ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4514
4515         return smi_msg;
4516 }
4517
4518 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4519                               struct list_head *timeouts,
4520                               unsigned long timeout_period,
4521                               int slot, unsigned long *flags,
4522                               unsigned int *waiting_msgs)
4523 {
4524         struct ipmi_recv_msg *msg;
4525
4526         if (intf->in_shutdown)
4527                 return;
4528
4529         if (!ent->inuse)
4530                 return;
4531
4532         if (timeout_period < ent->timeout) {
4533                 ent->timeout -= timeout_period;
4534                 (*waiting_msgs)++;
4535                 return;
4536         }
4537
4538         if (ent->retries_left == 0) {
4539                 /* The message has used all its retries. */
4540                 ent->inuse = 0;
4541                 msg = ent->recv_msg;
4542                 list_add_tail(&msg->link, timeouts);
4543                 if (ent->broadcast)
4544                         ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4545                 else if (is_lan_addr(&ent->recv_msg->addr))
4546                         ipmi_inc_stat(intf, timed_out_lan_commands);
4547                 else
4548                         ipmi_inc_stat(intf, timed_out_ipmb_commands);
4549         } else {
4550                 struct ipmi_smi_msg *smi_msg;
4551                 /* More retries, send again. */
4552
4553                 (*waiting_msgs)++;
4554
4555                 /*
4556                  * Start with the max timer, set to normal timer after
4557                  * the message is sent.
4558                  */
4559                 ent->timeout = MAX_MSG_TIMEOUT;
4560                 ent->retries_left--;
4561                 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4562                                             ent->seqid);
4563                 if (!smi_msg) {
4564                         if (is_lan_addr(&ent->recv_msg->addr))
4565                                 ipmi_inc_stat(intf,
4566                                               dropped_rexmit_lan_commands);
4567                         else
4568                                 ipmi_inc_stat(intf,
4569                                               dropped_rexmit_ipmb_commands);
4570                         return;
4571                 }
4572
4573                 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4574
4575                 /*
4576                  * Send the new message.  We send with a zero
4577                  * priority.  It already timed out once, so time is
4578                  * probably not that critical now, and high priority
4579                  * messages are really only for messages to the local
4580                  * MC, which don't get resent.
4581                  */
4582                 if (intf->handlers) {
4583                         if (is_lan_addr(&ent->recv_msg->addr))
4584                                 ipmi_inc_stat(intf,
4585                                               retransmitted_lan_commands);
4586                         else
4587                                 ipmi_inc_stat(intf,
4588                                               retransmitted_ipmb_commands);
4589
4590                         smi_send(intf, intf->handlers, smi_msg, 0);
4591                 } else
4592                         ipmi_free_smi_msg(smi_msg);
4593
4594                 spin_lock_irqsave(&intf->seq_lock, *flags);
4595         }
4596 }
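
     /*
      * Summary of the retransmit policy above: every tick the remaining
      * timeout of each in-use sequence-table entry is reduced by
      * timeout_period.  Once it reaches zero the entry either gives up (no
      * retries left, so the caller completes the request with a timeout
      * completion code) or is resent with retries_left decremented and the
      * timeout temporarily set to MAX_MSG_TIMEOUT until the resend goes
      * out.  Retransmits always use priority zero; the high-priority queue
      * is only for local-MC messages, which are never resent.
      */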
4597
4598 static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
4599                                          unsigned long timeout_period)
4600 {
4601         struct list_head     timeouts;
4602         struct ipmi_recv_msg *msg, *msg2;
4603         unsigned long        flags;
4604         int                  i;
4605         unsigned int         waiting_msgs = 0;
4606
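             /*
              * If the BMC is not registered yet, kick off the registration
              * work.  The extra reference keeps the interface alive while
              * the work is queued; if the work was already pending,
              * schedule_work() returns false, so the reference is dropped
              * again and a waiting message is counted to keep the timer
              * running until registration finishes.
              */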
4607         if (!intf->bmc_registered) {
4608                 kref_get(&intf->refcount);
4609                 if (!schedule_work(&intf->bmc_reg_work)) {
4610                         kref_put(&intf->refcount, intf_free);
4611                         waiting_msgs++;
4612                 }
4613         }
4614
4615         /*
4616          * Go through the seq table and find any messages that
4617          * have timed out, putting them in the timeouts
4618          * list.
4619          */
4620         INIT_LIST_HEAD(&timeouts);
4621         spin_lock_irqsave(&intf->seq_lock, flags);
4622         if (intf->ipmb_maintenance_mode_timeout) {
4623                 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4624                         intf->ipmb_maintenance_mode_timeout = 0;
4625                 else
4626                         intf->ipmb_maintenance_mode_timeout -= timeout_period;
4627         }
4628         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4629                 check_msg_timeout(intf, &intf->seq_table[i],
4630                                   &timeouts, timeout_period, i,
4631                                   &flags, &waiting_msgs);
4632         spin_unlock_irqrestore(&intf->seq_lock, flags);
4633
4634         list_for_each_entry_safe(msg, msg2, &timeouts, link)
4635                 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4636
4637         /*
4638          * Maintenance mode handling.  Check the timeout
4639          * optimistically before we claim the lock.  It may
4640          * mean a timeout gets missed occasionally, but that
4641          * only means the timeout gets extended by one period
4642          * in that case.  No big deal, and it avoids the lock
4643          * most of the time.
4644          */
4645         if (intf->auto_maintenance_timeout > 0) {
4646                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4647                 if (intf->auto_maintenance_timeout > 0) {
4648                         intf->auto_maintenance_timeout
4649                                 -= timeout_period;
4650                         if (!intf->maintenance_mode
4651                             && (intf->auto_maintenance_timeout <= 0)) {
4652                                 intf->maintenance_mode_enable = false;
4653                                 maintenance_mode_update(intf);
4654                         }
4655                 }
4656                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4657                                        flags);
4658         }
4659
4660         tasklet_schedule(&intf->recv_tasklet);
4661
4662         return waiting_msgs;
4663 }
4664
4665 static void ipmi_request_event(struct ipmi_smi *intf)
4666 {
4667         /* No event requests when in maintenance mode. */
4668         if (intf->maintenance_mode_enable)
4669                 return;
4670
4671         if (!intf->in_shutdown)
4672                 intf->handlers->request_events(intf->send_info);
4673 }
4674
4675 static struct timer_list ipmi_timer;
4676
4677 static atomic_t stop_operation;
4678
4679 static void ipmi_timeout(struct timer_list *unused)
4680 {
4681         struct ipmi_smi *intf;
4682         int nt = 0, index;
4683
4684         if (atomic_read(&stop_operation))
4685                 return;
4686
4687         index = srcu_read_lock(&ipmi_interfaces_srcu);
4688         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4689                 int lnt = 0;
4690
4691                 if (atomic_read(&intf->event_waiters)) {
4692                         intf->ticks_to_req_ev--;
4693                         if (intf->ticks_to_req_ev == 0) {
4694                                 ipmi_request_event(intf);
4695                                 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4696                         }
4697                         lnt++;
4698                 }
4699
4700                 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4701
4702                 lnt = !!lnt;
4703                 if (lnt != intf->last_needs_timer &&
4704                                         intf->handlers->set_need_watch)
4705                         intf->handlers->set_need_watch(intf->send_info, lnt);
4706                 intf->last_needs_timer = lnt;
4707
4708                 nt += lnt;
4709         }
4710         srcu_read_unlock(&ipmi_interfaces_srcu, index);
4711
4712         if (nt)
4713                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4714 }
4715
4716 static void need_waiter(struct ipmi_smi *intf)
4717 {
4718         /* Racy, but worst case we start the timer twice. */
4719         if (!timer_pending(&ipmi_timer))
4720                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4721 }
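
     /*
      * The periodic timer is self-disarming: ipmi_timeout() only re-arms
      * it when at least one interface reported outstanding work (waiting
      * messages or event waiters).  Code that creates new work while the
      * timer is idle calls need_waiter() to start it again; the race noted
      * above is harmless because arming an already-armed timer is safe.
      */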
4722
4723 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4724 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4725
4726 static void free_smi_msg(struct ipmi_smi_msg *msg)
4727 {
4728         atomic_dec(&smi_msg_inuse_count);
4729         kfree(msg);
4730 }
4731
4732 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4733 {
4734         struct ipmi_smi_msg *rv;
4735         rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4736         if (rv) {
4737                 rv->done = free_smi_msg;
4738                 rv->user_data = NULL;
4739                 atomic_inc(&smi_msg_inuse_count);
4740         }
4741         return rv;
4742 }
4743 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4744
4745 static void free_recv_msg(struct ipmi_recv_msg *msg)
4746 {
4747         atomic_dec(&recv_msg_inuse_count);
4748         kfree(msg);
4749 }
4750
4751 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4752 {
4753         struct ipmi_recv_msg *rv;
4754
4755         rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4756         if (rv) {
4757                 rv->user = NULL;
4758                 rv->done = free_recv_msg;
4759                 atomic_inc(&recv_msg_inuse_count);
4760         }
4761         return rv;
4762 }
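
     /*
      * Both allocators above use GFP_ATOMIC because they can be called
      * from atomic context (for example under seq_lock in the retransmit
      * path), and both bump an in-use counter that cleanup_ipmi() checks
      * at module unload to warn about leaked messages.  Minimal sketch of
      * the expected pairing (illustrative only):
      *
      *	struct ipmi_smi_msg *msg = ipmi_alloc_smi_msg();
      *
      *	if (!msg)
      *		return;
      *	... fill in and use the message ...
      *	ipmi_free_smi_msg(msg);
      *
      * ipmi_free_smi_msg() ends up calling msg->done(), which for messages
      * from this allocator is free_smi_msg() and drops the counter.
      */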
4763
4764 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4765 {
4766         if (msg->user)
4767                 kref_put(&msg->user->refcount, free_user);
4768         msg->done(msg);
4769 }
4770 EXPORT_SYMBOL(ipmi_free_recv_msg);
4771
4772 static atomic_t panic_done_count = ATOMIC_INIT(0);
4773
4774 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4775 {
4776         atomic_dec(&panic_done_count);
4777 }
4778
4779 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4780 {
4781         atomic_dec(&panic_done_count);
4782 }
4783
4784 /*
4785  * Inside a panic, send a message and wait for a response.
4786  */
4787 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4788                                         struct ipmi_addr *addr,
4789                                         struct kernel_ipmi_msg *msg)
4790 {
4791         struct ipmi_smi_msg  smi_msg;
4792         struct ipmi_recv_msg recv_msg;
4793         int rv;
4794
4795         smi_msg.done = dummy_smi_done_handler;
4796         recv_msg.done = dummy_recv_done_handler;
4797         atomic_add(2, &panic_done_count);
4798         rv = i_ipmi_request(NULL,
4799                             intf,
4800                             addr,
4801                             0,
4802                             msg,
4803                             intf,
4804                             &smi_msg,
4805                             &recv_msg,
4806                             0,
4807                             intf->addrinfo[0].address,
4808                             intf->addrinfo[0].lun,
4809                             0, 1); /* Don't retry, and don't wait. */
4810         if (rv)
4811                 atomic_sub(2, &panic_done_count);
4812         else if (intf->handlers->flush_messages)
4813                 intf->handlers->flush_messages(intf->send_info);
4814
4815         while (atomic_read(&panic_done_count) != 0)
4816                 ipmi_poll(intf);
4817 }
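
     /*
      * The panic path above cannot sleep or depend on interrupts, so it
      * works by counting: the two dummy done handlers each decrement
      * panic_done_count, the request is issued with retries and waiting
      * disabled, and the caller then spins on ipmi_poll() until both the
      * SMI message and the receive message have completed (or the request
      * failed outright and the count was subtracted back).
      */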
4818
4819 static void event_receiver_fetcher(struct ipmi_smi *intf,
4820                                    struct ipmi_recv_msg *msg)
4821 {
4822         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4823             && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4824             && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4825             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4826                 /* A get event receiver command, save it. */
4827                 intf->event_receiver = msg->msg.data[1];
4828                 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4829         }
4830 }
4831
4832 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4833 {
4834         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4835             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4836             && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4837             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4838                 /*
4839                  * A get device id command, save if we are an event
4840                  * receiver or generator.
4841                  */
4842                 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4843                 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4844         }
4845 }
4846
4847 static void send_panic_events(struct ipmi_smi *intf, char *str)
4848 {
4849         struct kernel_ipmi_msg msg;
4850         unsigned char data[16];
4851         struct ipmi_system_interface_addr *si;
4852         struct ipmi_addr addr;
4853         char *p = str;
4854         struct ipmi_ipmb_addr *ipmb;
4855         int j;
4856
4857         if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4858                 return;
4859
4860         si = (struct ipmi_system_interface_addr *) &addr;
4861         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4862         si->channel = IPMI_BMC_CHANNEL;
4863         si->lun = 0;
4864
4865         /* Fill in an event telling that we have failed. */
4866         msg.netfn = 0x04; /* Sensor or Event. */
4867         msg.cmd = 2; /* Platform event command. */
4868         msg.data = data;
4869         msg.data_len = 8;
4870         data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4871         data[1] = 0x03; /* This is for IPMI 1.0. */
4872         data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4873         data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4874         data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4875
4876         /*
4877          * Put a few breadcrumbs in.  Hopefully later we can add more things
4878          * to make the panic events more useful.
4879          */
4880         if (str) {
4881                 data[3] = str[0];
4882                 data[6] = str[1];
4883                 data[7] = str[2];
4884         }
4885
4886         /* Send the event announcing the panic. */
4887         ipmi_panic_request_and_wait(intf, &addr, &msg);
4888
4889         /*
4890          * On every interface, dump a bunch of OEM events holding the
4891          * string.
4892          */
4893         if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4894                 return;
4895
4896         /*
4897          * intf_num is used as a marker to tell if the
4898          * interface is valid.  Thus we need a read barrier to
4899          * make sure data fetched before checking intf_num
4900          * won't be used.
4901          */
4902         smp_rmb();
4903
4904         /*
4905          * First job here is to figure out where to send the
4906          * OEM events.  There's no way in IPMI to send OEM
4907          * events using an event send command, so we have to
4908          * find the SEL to put them in and stick them in
4909          * there.
4910          */
4911
4912         /* Get capabilities from the get device id. */
4913         intf->local_sel_device = 0;
4914         intf->local_event_generator = 0;
4915         intf->event_receiver = 0;
4916
4917         /* Request the device info from the local MC. */
4918         msg.netfn = IPMI_NETFN_APP_REQUEST;
4919         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4920         msg.data = NULL;
4921         msg.data_len = 0;
4922         intf->null_user_handler = device_id_fetcher;
4923         ipmi_panic_request_and_wait(intf, &addr, &msg);
4924
4925         if (intf->local_event_generator) {
4926                 /* Request the event receiver from the local MC. */
4927                 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4928                 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4929                 msg.data = NULL;
4930                 msg.data_len = 0;
4931                 intf->null_user_handler = event_receiver_fetcher;
4932                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4933         }
4934         intf->null_user_handler = NULL;
4935
4936         /*
4937          * Validate the event receiver.  The low bit must not
4938          * be 1 (it must be a valid IPMB address), it cannot
4939          * be zero, and it must not be my address.
4940          */
4941         if (((intf->event_receiver & 1) == 0)
4942             && (intf->event_receiver != 0)
4943             && (intf->event_receiver != intf->addrinfo[0].address)) {
4944                 /*
4945                  * The event receiver is valid; send an IPMB
4946                  * message.
4947                  */
4948                 ipmb = (struct ipmi_ipmb_addr *) &addr;
4949                 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4950                 ipmb->channel = 0; /* FIXME - is this right? */
4951                 ipmb->lun = intf->event_receiver_lun;
4952                 ipmb->slave_addr = intf->event_receiver;
4953         } else if (intf->local_sel_device) {
4954                 /*
4955                  * The event receiver was not valid (or was
4956                  * me), but I am an SEL device, so just dump it
4957                  * in my SEL.
4958                  */
4959                 si = (struct ipmi_system_interface_addr *) &addr;
4960                 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4961                 si->channel = IPMI_BMC_CHANNEL;
4962                 si->lun = 0;
4963         } else
4964                 return; /* Nowhere to send the event. */
4965
4966         msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4967         msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4968         msg.data = data;
4969         msg.data_len = 16;
4970
4971         j = 0;
4972         while (*p) {
4973                 int size = strlen(p);
4974
4975                 if (size > 11)
4976                         size = 11;
4977                 data[0] = 0;
4978                 data[1] = 0;
4979                 data[2] = 0xf0; /* OEM event without timestamp. */
4980                 data[3] = intf->addrinfo[0].address;
4981                 data[4] = j++; /* sequence # */
4982                 /*
4983                  * Always give 11 bytes, so strncpy will pad the
4984                  * rest with zeroes for us.
4985                  */
4986                 strncpy(data+5, p, 11);
4987                 p += size;
4988
4989                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4990         }
4991 }
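
     /*
      * Worked example for the string path above (illustrative): for a
      * panic string such as "Out of memory and no killable processes",
      * the loop emits OEM SEL records (record type 0xf0, no timestamp)
      * whose data bytes carry the reporting address, an incrementing
      * sequence number, and successive 11-character chunks of the string:
      * "Out of memo", "ry and no k", "illable pro", "cesses".  A consumer
      * of the SEL can, in principle, reassemble the text by ordering on
      * the sequence byte.
      */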
4992
4993 static int has_panicked;
4994
4995 static int panic_event(struct notifier_block *this,
4996                        unsigned long         event,
4997                        void                  *ptr)
4998 {
4999         struct ipmi_smi *intf;
5000         struct ipmi_user *user;
5001
5002         if (has_panicked)
5003                 return NOTIFY_DONE;
5004         has_panicked = 1;
5005
5006         /* For every registered interface, set it to run to completion. */
5007         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5008                 if (!intf->handlers || intf->intf_num == -1)
5009                         /* Interface is not ready. */
5010                         continue;
5011
5012                 if (!intf->handlers->poll)
5013                         continue;
5014
5015                 /*
5016                  * If we were interrupted while locking xmit_msgs_lock or
5017                  * waiting_rcv_msgs_lock, the corresponding list may be
5018                  * corrupted.  In this case, drop the items on the list
5019                  * for safety.
5020                  */
5021                 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5022                         INIT_LIST_HEAD(&intf->xmit_msgs);
5023                         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5024                 } else
5025                         spin_unlock(&intf->xmit_msgs_lock);
5026
5027                 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5028                         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5029                 else
5030                         spin_unlock(&intf->waiting_rcv_msgs_lock);
5031
5032                 intf->run_to_completion = 1;
5033                 if (intf->handlers->set_run_to_completion)
5034                         intf->handlers->set_run_to_completion(intf->send_info,
5035                                                               1);
5036
5037                 list_for_each_entry_rcu(user, &intf->users, link) {
5038                         if (user->handler->ipmi_panic_handler)
5039                                 user->handler->ipmi_panic_handler(
5040                                         user->handler_data);
5041                 }
5042
5043                 send_panic_events(intf, ptr);
5044         }
5045
5046         return NOTIFY_DONE;
5047 }
5048
5049 /* Must be called with ipmi_interfaces_mutex held. */
5050 static int ipmi_register_driver(void)
5051 {
5052         int rv;
5053
5054         if (drvregistered)
5055                 return 0;
5056
5057         rv = driver_register(&ipmidriver.driver);
5058         if (rv)
5059                 pr_err("Could not register IPMI driver\n");
5060         else
5061                 drvregistered = true;
5062         return rv;
5063 }
5064
5065 static struct notifier_block panic_block = {
5066         .notifier_call  = panic_event,
5067         .next           = NULL,
5068         .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5069 };
5070
5071 static int ipmi_init_msghandler(void)
5072 {
5073         int rv;
5074
5075         mutex_lock(&ipmi_interfaces_mutex);
5076         rv = ipmi_register_driver();
5077         if (rv)
5078                 goto out;
5079         if (initialized)
5080                 goto out;
5081
5082         init_srcu_struct(&ipmi_interfaces_srcu);
5083
5084         timer_setup(&ipmi_timer, ipmi_timeout, 0);
5085         mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5086
5087         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5088
5089         initialized = true;
5090
5091 out:
5092         mutex_unlock(&ipmi_interfaces_mutex);
5093         return rv;
5094 }
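
     /*
      * Note the split above: module load (ipmi_init_msghandler_mod below)
      * only registers the bus driver, while the timer, panic notifier and
      * SRCU state are set up lazily the first time ipmi_init_msghandler()
      * runs (typically when the first interface is registered), guarded by
      * the 'initialized' flag under ipmi_interfaces_mutex.
      */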
5095
5096 static int __init ipmi_init_msghandler_mod(void)
5097 {
5098         int rv;
5099
5100         pr_info("version " IPMI_DRIVER_VERSION "\n");
5101
5102         mutex_lock(&ipmi_interfaces_mutex);
5103         rv = ipmi_register_driver();
5104         mutex_unlock(&ipmi_interfaces_mutex);
5105
5106         return rv;
5107 }
5108
5109 static void __exit cleanup_ipmi(void)
5110 {
5111         int count;
5112
5113         if (initialized) {
5114                 atomic_notifier_chain_unregister(&panic_notifier_list,
5115                                                  &panic_block);
5116
5117                 /*
5118                  * This can't be called if any interfaces exist, so no worry
5119                  * about shutting down the interfaces.
5120                  */
5121
5122                 /*
5123                  * Tell the timer to stop, then wait for it to stop.  This
5124                  * avoids problems with race conditions removing the timer
5125                  * here.
5126                  */
5127                 atomic_inc(&stop_operation);
5128                 del_timer_sync(&ipmi_timer);
5129
5130                 initialized = false;
5131
5132                 /* Check for buffer leaks. */
5133                 count = atomic_read(&smi_msg_inuse_count);
5134                 if (count != 0)
5135                         pr_warn(PFX "SMI message count %d at exit\n", count);
5136                 count = atomic_read(&recv_msg_inuse_count);
5137                 if (count != 0)
5138                         pr_warn(PFX "recv message count %d at exit\n", count);
5139                 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5140         }
5141         if (drvregistered)
5142                 driver_unregister(&ipmidriver.driver);
5143 }
5144 module_exit(cleanup_ipmi);
5145
5146 module_init(ipmi_init_msghandler_mod);
5147 MODULE_LICENSE("GPL");
5148 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5149 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5150                    " interface.");
5151 MODULE_VERSION(IPMI_DRIVER_VERSION);
5152 MODULE_SOFTDEP("post: ipmi_devintf");