// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <[email protected]>
 *         [email protected]
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
				   struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);
static void intf_free(struct kref *ref);

static bool initialized;
static bool drvregistered;

static struct timer_list ipmi_timer;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets whether the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
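/*
 * Because panic_op is registered with mode 0600, it can also be
 * changed at runtime, e.g.:
 *
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 */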


#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer. This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer. So setting the value to 1000 would mean anything
 * between 0 and 1000ms. So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The maximum number of times to retry sending a message");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes. So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do { \
		seq = (((msgid) >> 26) & 0x3f); \
		seqid = ((msgid) & 0x3ffffff); \
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
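/*
 * For example, STORE_SEQ_IN_MSGID(5, 0x123456) == 0x14123456:
 * the seq lives in bits 31:26 and the seqid in bits 25:0, so
 * GET_SEQ_FROM_MSGID() recovers seq == 5 and seqid == 0x123456.
 * NEXT_SEQID() simply wraps the seqid at 2^26.
 */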

#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN. This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those. If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts. Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent over the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;
	struct device_attribute maintenance_mode_devattr;


	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations. Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface. We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses. A routine
	 * is called periodically to time the items in this list.
	 */
	struct mutex seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing. If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt. The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex events_mutex; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs. Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;

#define IPMI_MAINTENANCE_MODE_STATE_OFF		0
#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE	1
#define IPMI_MAINTENANCE_MODE_STATE_RESET	2
	int maintenance_mode_state;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it. Note that the message will still be freed by the
	 * caller. This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease the number
	 * of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf,
			   struct ipmi_device_id *id, bool rescan);

static void free_ipmi_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
	struct module *owner;

	owner = user->intf->owner;
	kref_put(&user->intf->refcount, intf_free);
	module_put(owner);
	vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
	kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}
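/*
 * Most exported entry points below that take a struct ipmi_user
 * follow the same pattern around these helpers:
 *
 *	user = acquire_ipmi_user(user);
 *	if (!user)
 *		return -ENODEV;
 *	...
 *	release_ipmi_user(user);
 */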

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface. No need for locks, this is single-threaded.
	 */
	list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}

	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	unsigned int count = 0, i;
	int *interfaces = NULL;
	struct device **devices = NULL;
	int rv = 0;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	/*
	 * Build an array of ipmi interfaces and fill it in, and
	 * another array of the devices. We can't call the callback
	 * with ipmi_interfaces_mutex held. smi_watchers_mutex will
	 * keep things in order for the user.
	 */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link)
		count++;
	if (count > 0) {
		interfaces = kmalloc_array(count, sizeof(*interfaces),
					   GFP_KERNEL);
		if (!interfaces) {
			rv = -ENOMEM;
		} else {
			devices = kmalloc_array(count, sizeof(*devices),
						GFP_KERNEL);
			if (!devices) {
				kfree(interfaces);
				interfaces = NULL;
				rv = -ENOMEM;
			}
		}
		count = 0;
	}
	if (interfaces) {
		list_for_each_entry(intf, &ipmi_interfaces, link) {
			int intf_num = READ_ONCE(intf->intf_num);

			if (intf_num == -1)
				continue;
			devices[count] = intf->si_dev;
			interfaces[count++] = intf_num;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (interfaces) {
		for (i = 0; i < count; i++)
			watcher->new_smi(interfaces[i], devices[i]);
		kfree(interfaces);
		kfree(devices);
	}

	mutex_unlock(&smi_watchers_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk. For now, simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		/*
		 * Deliver it in smi_work. The message will hold a
		 * refcount to the user.
		 */
		mutex_lock(&intf->user_msgs_mutex);
		list_add_tail(&msg->link, &intf->user_msgs);
		mutex_unlock(&intf->user_msgs_mutex);
		queue_work(system_wq, &intf->smi_work);
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}
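/*
 * In IPMI an odd netfn denotes a response (a response netfn is always
 * the request netfn plus one), which is why setting bit 0 above turns
 * the request into the matching response.
 */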

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
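/*
 * The two functions above reference-count the watchers per type;
 * ->set_need_watch() is only called when the union of outstanding
 * waiters actually changes, so the lower layer sees one transition
 * rather than one call per waiter.
 */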

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table. This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused. Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	mutex_lock(&intf->seq_lock);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	struct ipmi_user *new_user = NULL;
	int rv = 0;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required. Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_unlock;

found:
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_unlock;
	}

	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user) {
		rv = -ENOMEM;
		goto out_kfree;
	}

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	refcount_set(&new_user->destroyed, 1);
	kref_get(&new_user->refcount); /* Destroy owns a refcount. */
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	mutex_lock(&intf->users_mutex);
	mutex_lock(&intf->seq_lock);
	list_add(&new_user->link, &intf->users);
	mutex_unlock(&intf->seq_lock);
	mutex_unlock(&intf->users_mutex);

	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

out_kfree:
	if (rv) {
		atomic_dec(&intf->nr_users);
		vfree(new_user);
	} else {
		*user = new_user;
	}
out_unlock:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
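/*
 * A minimal sketch of in-kernel usage (hypothetical caller, error
 * handling elided). The receive handler owns the message and must
 * free it:
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... examine msg->msg and msg->msg_data ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 */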

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = -EINVAL;
	struct ipmi_smi *intf;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num) {
			if (!intf->handlers->get_smi_info)
				rv = -ENOTTY;
			else
				rv = intf->handlers->get_smi_info(intf->send_info, data);
			break;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	struct ipmi_recv_msg *msg, *msg2;

	if (!refcount_dec_if_one(&user->destroyed))
		return;

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's list and sequence table. */
	list_del(&user->link);
	atomic_dec(&intf->nr_users);

	mutex_lock(&intf->seq_lock);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	mutex_unlock(&intf->seq_lock);

	/*
	 * Remove the user from the command receiver's table. First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		if (msg->user != user)
			continue;
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	release_ipmi_user(user);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;

	mutex_lock(&intf->users_mutex);
	_ipmi_destroy_user(user);
	mutex_unlock(&intf->users_mutex);

	kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode;
	unsigned long flags;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		/*
		 * Lower level drivers only care about firmware mode
		 * as it affects their timing. They don't care about
		 * reset, which disables all commands for a while.
		 */
		intf->handlers->set_maintenance_mode(
			intf->send_info,
			(intf->maintenance_mode_state ==
			 IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			/* Just leave it alone. */
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_OFF;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			ipmi_set_recv_msg_user(msg, user);
			deliver_local_response(intf, msg);
		}
	}

out:
	mutex_unlock(&intf->events_mutex);
	release_ipmi_user(user);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
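/*
 * chans is a bitmask of channel numbers, so a user watching for a
 * command on, say, channels 0 and 2 passes
 * chans == ((1 << 0) | (1 << 2)).
 */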

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
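/*
 * This is the IPMI "2's complement checksum": the 8-bit sum of the
 * covered bytes plus the checksum itself is zero.  For example, for
 * bytes { 0x20, 0x18 } the sum is 0x38, the checksum is
 * (unsigned char)-0x38 == 0xc8, and 0x20 + 0x18 + 0xc8 truncates
 * to 0 in 8 bits.
 */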

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
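/*
 * For a non-broadcast request (i == 0) the buffer built above is:
 *
 *	data[0]    netfn/LUN addressing the BMC (APP request)
 *	data[1]    IPMI_SEND_MSG_CMD
 *	data[2]    channel
 *	data[3]    rsAddr (target slave address)
 *	data[4]    msg->netfn << 2 | rsLUN
 *	data[5]    checksum over data[3]..data[4]
 *	data[6]    rqAddr (source_address)
 *	data[7]    ipmb_seq << 2 | source_lun
 *	data[8]    cmd
 *	data[9]..  payload
 *	data[last] checksum over data[6]..data[last - 1]
 *
 * For a broadcast (i == 1), data[3] is 0 and everything from rsAddr
 * on shifts up by one byte.
 */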

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}
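/*
 * When run_to_completion is set the stack runs single-threaded (e.g.
 * on the panic path), so the transmit lists can be touched above
 * without taking xmit_msgs_lock.
 */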

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;
		int newst;

		if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
			newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
		else
			newst = IPMI_MAINTENANCE_MODE_STATE_RESET;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && intf->maintenance_mode_state < newst) {
			intf->maintenance_mode_state = newst;
			maintenance_mode_update(intf);
			mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->recv_msg = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
/*
2004
* Broadcasts add a zero at the beginning of the
2005
* message, but otherwise is the same as an IPMB
2006
* address.
2007
*/
2008
addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2009
broadcast = 1;
2010
retries = 0; /* Don't retry broadcasts. */
2011
}
2012
2013
/*
2014
* 9 for the header and 1 for the checksum, plus
2015
* possibly one for the broadcast.
2016
*/
2017
if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
2018
ipmi_inc_stat(intf, sent_invalid_commands);
2019
return -EMSGSIZE;
2020
}
2021
2022
ipmb_addr = (struct ipmi_ipmb_addr *) addr;
2023
if (ipmb_addr->lun > 3) {
2024
ipmi_inc_stat(intf, sent_invalid_commands);
2025
return -EINVAL;
2026
}
2027
2028
memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
2029
2030
if (recv_msg->msg.netfn & 0x1) {
2031
/*
2032
* It's a response, so use the user's sequence
2033
* from msgid.
2034
*/
2035
ipmi_inc_stat(intf, sent_ipmb_responses);
2036
format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
2037
msgid, broadcast,
2038
source_address, source_lun);
2039
2040
/*
2041
* Save the receive message so we can use it
2042
* to deliver the response.
2043
*/
2044
smi_msg->recv_msg = recv_msg;
2045
} else {
2046
mutex_lock(&intf->seq_lock);
2047
2048
if (is_maintenance_mode_cmd(msg))
2049
intf->ipmb_maintenance_mode_timeout =
2050
maintenance_mode_timeout_ms;
2051
2052
if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2053
/* Different default in maintenance mode */
2054
retry_time_ms = default_maintenance_retry_ms;
2055
2056
/*
2057
* Create a sequence number with a 1 second
2058
* timeout and 4 retries.
2059
*/
2060
rv = intf_next_seq(intf,
2061
recv_msg,
2062
retry_time_ms,
2063
retries,
2064
broadcast,
2065
&ipmb_seq,
2066
&seqid);
2067
if (rv)
2068
/*
2069
* We have used up all the sequence numbers,
2070
* probably, so abort.
2071
*/
2072
goto out_err;
2073
2074
ipmi_inc_stat(intf, sent_ipmb_commands);
2075
2076
/*
2077
* Store the sequence number in the message,
2078
* so that when the send message response
2079
* comes back we can start the timer.
2080
*/
2081
format_ipmb_msg(smi_msg, msg, ipmb_addr,
2082
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2083
ipmb_seq, broadcast,
2084
source_address, source_lun);
2085
2086
/*
2087
* Copy the message into the recv message data, so we
2088
* can retransmit it later if necessary.
2089
*/
2090
memcpy(recv_msg->msg_data, smi_msg->data,
2091
smi_msg->data_size);
2092
recv_msg->msg.data = recv_msg->msg_data;
2093
recv_msg->msg.data_len = smi_msg->data_size;
2094
2095
/*
2096
* We don't unlock until here, because we need
2097
* to copy the completed message into the
2098
* recv_msg before we release the lock.
2099
* Otherwise, race conditions may bite us. I
2100
* know that's pretty paranoid, but I prefer
2101
* to be correct.
2102
*/
2103
out_err:
2104
mutex_unlock(&intf->seq_lock);
2105
}
2106
2107
return rv;
2108
}
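
/*
 * The ipmb_seq/seqid pair packed into msgid by STORE_SEQ_IN_MSGID is how
 * the driver later ties the remote reply back to this outstanding
 * recv_msg in the sequence table and to its retransmit timer.
 */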

static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
				  struct ipmi_addr *addr,
				  long msgid,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_smi_msg *smi_msg,
				  struct ipmi_recv_msg *recv_msg,
				  unsigned char source_lun)
{
	struct ipmi_ipmb_direct_addr *daddr;
	bool is_cmd = !(recv_msg->msg.netfn & 0x1);

	if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
		return -EAFNOSUPPORT;

	/* Responses must have a completion code. */
	if (!is_cmd && msg->data_len < 1) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	daddr = (struct ipmi_ipmb_direct_addr *) addr;
	if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
	smi_msg->msgid = msgid;

	if (is_cmd) {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
	} else {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
	}
	smi_msg->data[1] = daddr->slave_addr;
	smi_msg->data[3] = msg->cmd;

	memcpy(smi_msg->data + 4, msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 4;

	smi_msg->recv_msg = recv_msg;

	return 0;
}
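
/*
 * Direct IPMB framing, as built above: data[0] = netfn/LUN,
 * data[1] = target slave address, data[2] = sequence/LUN,
 * data[3] = command, and data[4..] = the payload.
 */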

static int i_ipmi_req_lan(struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg *smi_msg,
			  struct ipmi_recv_msg *recv_msg,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_lan_addr *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
	     != IPMI_CHANNEL_MEDIUM_8023LAN)
	    && (chans[addr->channel].medium
		!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->recv_msg = recv_msg;
	} else {
		mutex_lock(&intf->seq_lock);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		mutex_unlock(&intf->seq_lock);
	}

	return rv;
}
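
/*
 * The LAN path mirrors the IPMB path above; only the framing helper
 * (format_lan_msg) and the 12-byte header-plus-checksum budget differ,
 * and LAN commands are never broadcast (intf_next_seq is called with a
 * zero broadcast argument).
 */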

/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user *user,
			  struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  void *user_msg_data,
			  void *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int priority,
			  unsigned char source_address,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	int rv = 0;

	if (supplied_recv) {
		recv_msg = supplied_recv;
		recv_msg->user = user;
		if (user) {
			atomic_inc(&user->nr_msgs);
			/* The put happens when the message is freed. */
			kref_get(&user->refcount);
		}
	} else {
		recv_msg = ipmi_alloc_recv_msg(user);
		if (IS_ERR(recv_msg))
			return PTR_ERR(recv_msg);
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			return -ENOMEM;
		}
	}

	if (!run_to_completion)
		mutex_lock(&intf->users_mutex);
	if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
		/* No messages while the BMC is in reset. */
		rv = -EBUSY;
		goto out_err;
	}
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_ipmb_direct_addr(addr)) {
		rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
					    recv_msg, source_lun);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		if (!supplied_smi)
			ipmi_free_smi_msg(smi_msg);
		if (!supplied_recv)
			ipmi_free_recv_msg(recv_msg);
	} else {
		dev_dbg(intf->si_dev, "Send: %*ph\n",
			smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	if (!run_to_completion)
		mutex_unlock(&intf->users_mutex);

	return rv;
}

static int check_addr(struct ipmi_smi *intf,
		      struct ipmi_addr *addr,
		      unsigned char *saddr,
		      unsigned char *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}

int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long msgid,
			 struct kernel_ipmi_msg *msg,
			 void *user_msg_data,
			 int priority,
			 int retries,
			 unsigned int retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    NULL, NULL,
				    priority,
				    saddr,
				    lun,
				    retries,
				    retry_time_ms);

	release_ipmi_user(user);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);

int ipmi_request_supply_msgs(struct ipmi_user *user,
			     struct ipmi_addr *addr,
			     long msgid,
			     struct kernel_ipmi_msg *msg,
			     void *user_msg_data,
			     void *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    supplied_smi,
				    supplied_recv,
				    priority,
				    saddr,
				    lun,
				    -1, 0);

	release_ipmi_user(user);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
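
/*
 * Illustrative use of the exported request API (a sketch, not taken
 * from this file): an in-kernel user sends Get Device ID to the BMC
 * with the default retry policy, modeled on send_get_device_id_cmd()
 * below.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *		.lun = 0,
 *	};
 *	struct kernel_ipmi_msg kmsg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
 *		.data = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0,
 *				  &kmsg, NULL, 0, -1, 0);
 *
 * The response arrives later through the user's ipmi_recv_hndl.
 */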

static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	if (msg->msg.data[0]) {
		dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
			 msg->msg.data[0]);
		intf->bmc->dyn_id_set = 0;
		goto out;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		/* record completion code when error */
		intf->bmc->cc = msg->msg.data[0];
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}
out:
	wake_up(&intf->waitq);
}

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;
	unsigned int retry_count = 0;

	intf->null_user_handler = bmc_device_id_handler;

retry:
	bmc->cc = 0;
	bmc->dyn_id_set = 2;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		goto out_reset_handler;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set) {
		if (bmc->cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			msleep(500);
			dev_warn(intf->si_dev,
				 "BMC returned 0x%2.2x, retry get bmc device id\n",
				 bmc->cc);
			goto retry;
		}

		rv = -EIO; /* Something went wrong in the fetch. */
	}

	/* dyn_id_set makes the id data available. */
	smp_rmb();

out_reset_handler:
	intf->null_user_handler = NULL;

	return rv;
}
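
/*
 * dyn_id_set doubles as the handshake state for the fetch above:
 * 2 means a request is outstanding, 1 means fetch_id is valid (after
 * the smp_wmb()/smp_rmb() pairing), and 0 means the fetch failed.
 */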

/*
 * Fetch the device id for the bmc/interface.  You must pass in either
 * bmc or intf; this code will get the other one.  If the data has
 * been recently fetched, this will just use the cached data.  Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_add_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	/* Don't allow sysfs access when in maintenance mode. */
	if (intf->maintenance_mode_state) {
		rv = -EBUSY;
		goto out_noprocessing;
	}

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id, false);


		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changes, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id, true);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid = bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t provides_device_sdrs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);

static ssize_t firmware_revision_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
			  id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u.%u\n",
			  ipmi_version_major(&id),
			  ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
		   NULL);

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			  id.aux_firmware_revision[3],
			  id.aux_firmware_revision[2],
			  id.aux_firmware_revision[1],
			  id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	bool guid_set;
	guid_t guid;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
	if (rv)
		return rv;
	if (!guid_set)
		return -ENOENT;

	return sysfs_emit(buf, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};

static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;
	int rv;

	if (attr == &dev_attr_aux_firmware_revision.attr) {
		struct ipmi_device_id id;

		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
	}
	if (attr == &dev_attr_guid.attr) {
		bool guid_set;

		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
		return (!rv && guid_set) ? mode : 0;
	}
	return mode;
}

static const struct attribute_group bmc_dev_attr_group = {
	.attrs = bmc_dev_attrs,
	.is_visible = bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static const struct device_type bmc_device_type = {
	.groups = bmc_dev_attr_groups,
};

static int __find_bmc_guid(struct device *dev, const void *data)
{
	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     guid_t *guid)
{
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
{
	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = (bmc->id.product_id == cid->product_id
	      && bmc->id.device_id == cid->device_id);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id = device_id,
	};
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_free(&ipmi_bmc_ida, id);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	queue_work(bmc_remove_work_wq, &bmc->remove_work);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
		 */
		intf->bmc = old_bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		dev_info(intf->si_dev,
			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	} else {
		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
		if (!bmc) {
			rv = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&bmc->intfs);
		mutex_init(&bmc->dyn_mutex);
		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);

		bmc->id = *id;
		bmc->dyn_id_set = 1;
		bmc->dyn_guid_set = guid_set;
		bmc->guid = *guid;
		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

		bmc->pdev.name = "ipmi_bmc";

		rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
		if (rv < 0) {
			kfree(bmc);
			goto out;
		}

		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = rv;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		intf->bmc = bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		rv = platform_device_register(&bmc->pdev);
		if (rv) {
			dev_err(intf->si_dev,
				"Unable to register bmc device: %d\n",
				rv);
			goto out_list_del;
		}

		dev_info(intf->si_dev,
			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * Create symlinks from the system interface device to the
	 * bmc device and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}

	if (intf_num == -1)
		intf_num = intf->intf_num;
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
			rv);
		goto out_unlink1;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
			rv);
		goto out_free_my_dev_name;
	}

	intf->bmc_registered = true;

out:
	mutex_unlock(&ipmidriver_mutex);
	mutex_lock(&intf->bmc_reg_mutex);
	intf->in_bmc_register = false;
	return rv;


out_free_my_dev_name:
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

out_unlink1:
	sysfs_remove_link(&intf->si_dev->kobj, "bmc");

out_put_bmc:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	goto out;

out_list_del:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	put_device(&bmc->pdev.dev);
	goto out;
}

static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}

	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
out:
	wake_up(&intf->waitq);
}

static void __get_guid(struct ipmi_smi *intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;
	else
		wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}
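
/*
 * Like the device-id fetch, the GUID fetch runs before any user exists,
 * so the reply is routed through intf->null_user_handler, and
 * dyn_guid_set uses the same 2/1/0 in-progress/valid/failed handshake.
 */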

static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */
			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		}
	}
out:
	return;
}

/*
 * Must be holding intf->bmc_reg_mutex to call this.
 */
static int __scan_channels(struct ipmi_smi *intf,
			   struct ipmi_device_id *id,
			   bool rescan)
{
	int rv;

	if (rescan) {
		/* Clear channels_ready to force a channel rescan. */
		intf->channels_ready = false;
	}

	/* Skip the scan if the channels are already marked ready. */
	if (intf->channels_ready)
		return 0;

	if (ipmi_version_major(id) > 1
	    || (ipmi_version_major(id) == 1
		&& ipmi_version_minor(id) >= 5)) {
		unsigned int set;

		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		set = !intf->curr_working_cset;
		intf->curr_working_cset = set;
		memset(&intf->wchannels[set], 0,
		       sizeof(struct ipmi_channel_set));

		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv) {
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel 0: %d\n",
				 rv);
			intf->null_user_handler = NULL;
			return -EIO;
		}

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq, intf->channels_ready);
		intf->null_user_handler = NULL;
	} else {
		unsigned int set = intf->curr_working_cset;

		/* Assume a single IPMB channel at zero. */
		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->channel_list = intf->wchannels + set;
		intf->channels_ready = true;
	}

	return 0;
}
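
/*
 * Channel data is double buffered: the scan fills the inactive
 * wchannels set and only flips channel_list to it once every channel
 * has been probed, so readers never see a half-built list.
 */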

static void ipmi_poll(struct ipmi_smi *intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(struct ipmi_user *user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);

static ssize_t nr_users_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
			struct ipmi_smi, nr_users_devattr);

	return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
}
static DEVICE_ATTR_RO(nr_users);

static ssize_t nr_msgs_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
			struct ipmi_smi, nr_msgs_devattr);
	struct ipmi_user *user;
	unsigned int count = 0;

	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link)
		count += atomic_read(&user->nr_msgs);
	mutex_unlock(&intf->users_mutex);

	return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(nr_msgs);

static ssize_t maintenance_mode_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
					     struct ipmi_smi,
					     maintenance_mode_devattr);

	return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
			  intf->auto_maintenance_timeout);
}
static DEVICE_ATTR_RO(maintenance_mode);

static void redo_bmc_reg(struct work_struct *work)
{
	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
					     bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}

int ipmi_add_smi(struct module *owner,
		 const struct ipmi_smi_handlers *handlers,
		 void *send_info,
		 struct device *si_dev,
		 unsigned char slave_addr)
{
	int i, j;
	int rv;
	struct ipmi_smi *intf, *tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	intf->owner = owner;
	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->user_msgs);
	mutex_init(&intf->user_msgs_mutex);
	INIT_LIST_HEAD(&intf->users);
	mutex_init(&intf->users_mutex);
	atomic_set(&intf->nr_users, 0);
	intf->handlers = handlers;
	intf->send_info = send_info;
	mutex_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	INIT_WORK(&intf->smi_work, smi_work);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	mutex_init(&intf->events_mutex);
	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	/*
	 * Grab the watchers mutex so we can deliver the new interface
	 * without races.
	 */
	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add(&intf->link, &ipmi_interfaces);
	else
		list_add_tail(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out_err;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out_err_started;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id, false);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out_err_bmc_reg;

	intf->nr_users_devattr = dev_attr_nr_users;
	sysfs_attr_init(&intf->nr_users_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
	if (rv)
		goto out_err_bmc_reg;

	intf->nr_msgs_devattr = dev_attr_nr_msgs;
	sysfs_attr_init(&intf->nr_msgs_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
	if (rv) {
		device_remove_file(intf->si_dev, &intf->nr_users_devattr);
		goto out_err_bmc_reg;
	}

	intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
	sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
	if (rv) {
		device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
		device_remove_file(intf->si_dev, &intf->nr_users_devattr);
		goto out_err_bmc_reg;
	}

	intf->intf_num = i;
	mutex_unlock(&ipmi_interfaces_mutex);

	/* After this point the interface is legal to use. */
	call_smi_watchers(i, intf->si_dev);

	mutex_unlock(&smi_watchers_mutex);

	return 0;

out_err_bmc_reg:
	ipmi_bmc_unregister(intf);
out_err_started:
	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);
out_err:
	list_del(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	kref_put(&intf->refcount, intf_free);

	return rv;
}
EXPORT_SYMBOL(ipmi_add_smi);

static void deliver_smi_err_response(struct ipmi_smi *intf,
				     struct ipmi_smi_msg *msg,
				     unsigned char err)
{
	int rv;
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;

	/* This will never requeue, but it may ask us to free the message. */
	rv = handle_one_recv_msg(intf, msg);
	if (rv == 0)
		ipmi_free_smi_msg(msg);
}

static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
	int i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/* Wait for the message to clear out. */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &intf->seq_table[i];
		if (!ent->inuse)
			continue;
		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

void ipmi_unregister_smi(struct ipmi_smi *intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num;

	if (!intf)
		return;

	intf_num = intf->intf_num;
	mutex_lock(&ipmi_interfaces_mutex);
	cancel_work_sync(&intf->smi_work);
	/* smi_work() can no longer be in progress after this. */

	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);

	/*
	 * At this point no users can be added to the interface and no
	 * new messages can be sent.
	 */

	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);

	device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
	device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
	device_remove_file(intf->si_dev, &intf->nr_users_devattr);

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is going away.
	 */
	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	mutex_lock(&intf->users_mutex);
	while (!list_empty(&intf->users)) {
		struct ipmi_user *user = list_first_entry(&intf->users,
						struct ipmi_user, link);

		_ipmi_destroy_user(user);
	}
	mutex_unlock(&intf->users_mutex);

	cleanup_smi_msgs(intf);

	ipmi_bmc_unregister(intf);

	kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);

static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &ipmb_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
	else
		ipmi_inc_stat(intf, handled_ipmb_responses);

	return 0;
}
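
/*
 * Layout of a Get Message response as indexed above: rsp[2] is the
 * completion code, rsp[3] the channel, rsp[4] netfn/LUN, rsp[6] the
 * remote slave address, rsp[7] rqSeq/LUN, rsp[8] the command, and
 * rsp[9..] the payload with a trailing checksum byte.
 */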
3897
3898
static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3899
struct ipmi_smi_msg *msg)
3900
{
3901
struct cmd_rcvr *rcvr;
3902
int rv = 0;
3903
unsigned char netfn;
3904
unsigned char cmd;
3905
unsigned char chan;
3906
struct ipmi_user *user = NULL;
3907
struct ipmi_ipmb_addr *ipmb_addr;
3908
struct ipmi_recv_msg *recv_msg = NULL;
3909
3910
if (msg->rsp_size < 10) {
3911
/* Message not big enough, just ignore it. */
3912
ipmi_inc_stat(intf, invalid_commands);
3913
return 0;
3914
}
3915
3916
if (msg->rsp[2] != 0) {
3917
/* An error getting the response, just ignore it. */
3918
return 0;
3919
}
3920
3921
netfn = msg->rsp[4] >> 2;
3922
cmd = msg->rsp[8];
3923
chan = msg->rsp[3] & 0xf;
3924
3925
rcu_read_lock();
3926
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3927
if (rcvr) {
3928
user = rcvr->user;
3929
recv_msg = ipmi_alloc_recv_msg(user);
3930
}
3931
rcu_read_unlock();
3932
3933
if (user == NULL) {
3934
/* We didn't find a user, deliver an error response. */
3935
ipmi_inc_stat(intf, unhandled_commands);
3936
3937
msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3938
msg->data[1] = IPMI_SEND_MSG_CMD;
3939
msg->data[2] = msg->rsp[3];
3940
msg->data[3] = msg->rsp[6];
3941
msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3942
msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3943
msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3944
/* rqseq/lun */
3945
msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3946
msg->data[8] = msg->rsp[8]; /* cmd */
3947
msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3948
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3949
msg->data_size = 11;
3950
3951
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
3952
msg->data_size, msg->data);
3953
3954
smi_send(intf, intf->handlers, msg, 0);
3955
/*
3956
* We used the message, so return the value that
3957
* causes it to not be freed or queued.
3958
*/
3959
rv = -1;
3960
} else if (!IS_ERR(recv_msg)) {
3961
/* Extract the source address from the data. */
3962
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3963
ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3964
ipmb_addr->slave_addr = msg->rsp[6];
3965
ipmb_addr->lun = msg->rsp[7] & 3;
3966
ipmb_addr->channel = msg->rsp[3] & 0xf;
3967
3968
/*
3969
* Extract the rest of the message information
3970
* from the IPMB header.
3971
*/
3972
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3973
recv_msg->msgid = msg->rsp[7] >> 2;
3974
recv_msg->msg.netfn = msg->rsp[4] >> 2;
3975
recv_msg->msg.cmd = msg->rsp[8];
3976
recv_msg->msg.data = recv_msg->msg_data;
3977
3978
/*
3979
* We chop off 10, not 9 bytes because the checksum
3980
* at the end also needs to be removed.
3981
*/
3982
recv_msg->msg.data_len = msg->rsp_size - 10;
3983
memcpy(recv_msg->msg_data, &msg->rsp[9],
3984
msg->rsp_size - 10);
3985
if (deliver_response(intf, recv_msg))
3986
ipmi_inc_stat(intf, unhandled_commands);
3987
else
3988
ipmi_inc_stat(intf, handled_commands);
3989
} else {
3990
/*
3991
* We couldn't allocate memory for the message, so
3992
* requeue it for handling later.
3993
*/
3994
rv = 1;
3995
}
3996
3997
return rv;
3998
}
3999
static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
				      struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	struct ipmi_user *user = NULL;
	struct ipmi_ipmb_direct_addr *daddr;
	struct ipmi_recv_msg *recv_msg = NULL;
	unsigned char netfn = msg->rsp[0] >> 2;
	unsigned char cmd = msg->rsp[3];

	rcu_read_lock();
	/* We always use channel 0 for direct messages. */
	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (netfn + 1) << 2;
		msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
		msg->data[1] = msg->rsp[1]; /* Addr */
		msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
		msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
		msg->data[3] = cmd;
		msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data_size = 5;

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else if (!IS_ERR(recv_msg)) {
		/* Extract the source address from the data. */
		daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
		daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
		daddr->channel = 0;
		daddr->slave_addr = msg->rsp[1];
		daddr->rs_lun = msg->rsp[0] & 3;
		daddr->rq_lun = msg->rsp[2] & 3;

		/*
		 * Extract the rest of the message information
		 * from the IPMB header.
		 */
		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
		recv_msg->msgid = (msg->rsp[2] >> 2);
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[3];
		recv_msg->msg.data = recv_msg->msg_data;

		recv_msg->msg.data_len = msg->rsp_size - 4;
		memcpy(recv_msg->msg_data, msg->rsp + 4,
		       msg->rsp_size - 4);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

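/*
 * Handle a response arriving on an IPMB-direct channel.  The original
 * request is recovered through msg->recv_msg, which the send path
 * attached to the outgoing SMI message; without it there is no owner
 * to deliver the response to.
 */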
static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
				      struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_ipmb_direct_addr *daddr;

	recv_msg = msg->recv_msg;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
	daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
	daddr->channel = 0;
	daddr->slave_addr = msg->rsp[1];
	daddr->rq_lun = msg->rsp[0] & 3;
	daddr->rs_lun = msg->rsp[2] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[3];
	memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 4;
	deliver_local_response(intf, recv_msg);

	return 0;
}

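/*
 * Layout of a "Get Message" response carrying a LAN message, as
 * decoded here and in handle_lan_get_msg_cmd() below:
 *
 *	rsp[3] = channel (low nibble) / privilege (high nibble)
 *	rsp[4] = session handle
 *	rsp[5] = local SWID
 *	rsp[6] = netfn << 2 | LUN
 *	rsp[8] = remote SWID
 *	rsp[9] = rqSeq << 2 | LUN
 *	rsp[10] = command
 *	rsp[11]... = payload, followed by a trailing checksum byte
 */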
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg = NULL;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up and return an error. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = chan;
		msg->data[3] = msg->rsp[4]; /* handle */
		msg->data[4] = msg->rsp[8]; /* rsSWID */
		msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
		msg->data[6] = ipmb_checksum(&msg->data[3], 3);
		msg->data[7] = msg->rsp[5]; /* rqSWID */
		/* rqseq/lun */
		msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
		msg->data[9] = cmd;
		msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[11] = ipmb_checksum(&msg->data[7], 4);
		msg->data_size = 12;

		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
			msg->data_size, msg->data);

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else if (!IS_ERR(recv_msg)) {
		/* Extract the source address from the data. */
		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
		lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
		lan_addr->session_handle = msg->rsp[4];
		lan_addr->remote_SWID = msg->rsp[8];
		lan_addr->local_SWID = msg->rsp[5];
		lan_addr->lun = msg->rsp[9] & 3;
		lan_addr->channel = msg->rsp[3] & 0xf;
		lan_addr->privilege = msg->rsp[3] >> 4;

		/*
		 * Extract the rest of the message information
		 * from the IPMB header.
		 */
		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
		recv_msg->msgid = msg->rsp[9] >> 2;
		recv_msg->msg.netfn = msg->rsp[6] >> 2;
		recv_msg->msg.cmd = msg->rsp[10];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * We chop off 12, not 11 bytes because the checksum
		 * at the end also needs to be removed.
		 */
		recv_msg->msg.data_len = msg->rsp_size - 12;
		memcpy(recv_msg->msg_data, &msg->rsp[11],
		       msg->rsp_size - 12);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium.  The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg = NULL;

	/*
	 * We expect the OEM SW to perform error checking,
	 * so we just do some basic sanity checks.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM message, so the OEM needs to know how to
	 * handle it.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else if (!IS_ERR(recv_msg)) {
		/*
		 * OEM messages are expected to be delivered via
		 * the system interface to SMS software.  We might
		 * need to visit this again depending on OEM
		 * requirements.
		 */
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &recv_msg->addr);
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;

		recv_msg->user_msg_data = NULL;
		recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * The message starts at byte 4 which follows the
		 * Channel Byte in the "GET MESSAGE" command.
		 */
		recv_msg->msg.data_len = msg->rsp_size - 4;
		memcpy(recv_msg->msg_data, &msg->rsp[4],
		       msg->rsp_size - 4);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

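/*
 * An incoming event is fanned out to every user that has asked for
 * events, each getting its own private copy.  If no user is currently
 * listening, the event is parked on intf->waiting_events (bounded by
 * MAX_EVENTS_IN_QUEUE) so it can still be delivered when some user
 * later turns event reception on.
 */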
static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg(user);
		if (IS_ERR(recv_msg)) {
			mutex_unlock(&intf->users_mutex);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				user = recv_msg->user;
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
				kref_put(&user->refcount, free_ipmi_user);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &msgs);
	}
	mutex_unlock(&intf->users_mutex);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in the queue if
		 * there are not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg(NULL);
		if (IS_ERR(recv_msg)) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

out:
	mutex_unlock(&intf->events_mutex);

	return rv;
}

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = msg->recv_msg;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue = 0;
	int chan;
	unsigned char cc;
	bool is_cmd = !((msg->rsp[0] >> 2) & 1);

	dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn_ratelimited(intf->si_dev,
				     "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
				     (msg->data[0] >> 2) | 1,
				     msg->data[1], msg->rsp_size);

return_unspecified:
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		/* Commands must have at least 4 bytes, responses 5. */
		if (is_cmd && (msg->rsp_size < 4)) {
			ipmi_inc_stat(intf, invalid_commands);
			goto out;
		}
		if (!is_cmd && (msg->rsp_size < 5)) {
			ipmi_inc_stat(intf, invalid_ipmb_responses);
			/* Construct a valid error response. */
			msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
			msg->rsp[0] |= (1 << 2); /* Make it a response */
			msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
			msg->rsp[1] = msg->data[1]; /* Addr */
			msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
			msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
			msg->rsp[3] = msg->data[3]; /* Cmd */
			msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
			msg->rsp_size = 5;
		}
	} else if ((msg->data_size >= 2)
		   && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
		   && (msg->data[1] == IPMI_SEND_MSG_CMD)
		   && (msg->recv_msg == NULL)) {

		if (intf->in_shutdown || intf->run_to_completion)
			goto out;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The recv_msg will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		requeue = 0;
		goto out;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response is not even
		 * marginally correct.
		 */
		dev_warn_ratelimited(intf->si_dev,
				     "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
				     (msg->data[0] >> 2) | 1, msg->data[1],
				     msg->rsp[0] >> 2, msg->rsp[1]);

		goto return_unspecified;
	}

	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		if ((msg->data[0] >> 2) & 1) {
			/* It's a response to a sent response. */
			chan = 0;
			cc = msg->rsp[4];
			goto process_response_response;
		}
		if (is_cmd)
			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
		else
			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
		   && (msg->recv_msg != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg;

		if (intf->run_to_completion)
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;
		cc = msg->rsp[2];

process_response_response:
		recv_msg = msg->recv_msg;

		requeue = 0;
		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg_data[0] = cc;
		recv_msg->msg.data_len = 1;
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		if (intf->run_to_completion)
			goto out;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		if (intf->run_to_completion)
			goto out;

		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head, this is safe because this
			 * workqueue is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
}

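/*
 * The interface's deferred work.  Each pass: start the next queued
 * transmit (high-priority queue first), process received messages,
 * deliver a pending watchdog pretimeout, then hand the messages on
 * intf->user_msgs to their users' receive handlers (or just free
 * them when a user's "destroyed" refcount has already fallen to
 * zero).  Everything past the receive processing is skipped in
 * run-to-completion (panic) mode.
 */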
static void smi_work(struct work_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_work(intf, t, smi_work);
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	struct ipmi_smi_msg *newmsg = NULL;
	struct ipmi_recv_msg *msg, *msg2;
	int cc;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */
restart:
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (newmsg) {
		cc = intf->handlers->sender(intf->send_info, newmsg);
		if (cc) {
			if (newmsg->recv_msg)
				deliver_err_response(intf,
						     newmsg->recv_msg, cc);
			else
				ipmi_free_smi_msg(newmsg);
			goto restart;
		}
	}

	handle_new_recv_msgs(intf);

	/* Nothing below applies during panic time. */
	if (run_to_completion)
		return;

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;

		mutex_lock(&intf->users_mutex);
		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		mutex_unlock(&intf->users_mutex);
	}

	/*
	 * Freeing the message can cause a user to be released, which
	 * can then cause the interface to be freed.  Make sure that
	 * doesn't happen until we are ready.
	 */
	kref_get(&intf->refcount);

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		struct ipmi_user *user = msg->user;

		list_del(&msg->link);

		if (refcount_read(&user->destroyed) == 0)
			ipmi_free_recv_msg(msg);
		else
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	kref_put(&intf->refcount, intf_free);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a workqueue.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_work(&intf->smi_work);
	else
		queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

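/*
 * Rebuild an SMI message for retransmission of recv_msg.  The
 * sequence-table slot and sequence id are re-encoded into the msgid
 * so the eventual response can still be matched to the original
 * request.
 */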
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	dev_dbg(intf->si_dev, "Resend: %*ph\n",
		smi_msg->data_size, smi_msg->data);

	return smi_msg;
}

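/*
 * Age one sequence-table entry by timeout_period.  An entry that has
 * not yet expired just keeps the timer running.  On expiry, the
 * message either moves to the timeouts list for an error response (no
 * retries left) or is retransmitted with retries_left decremented and
 * the timeout reset to MAX_MSG_TIMEOUT.
 */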
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		mutex_unlock(&intf->seq_lock);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		mutex_lock(&intf->seq_lock);
	}
}

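/*
 * Per-tick timeout processing for one interface: age the IPMB
 * maintenance-mode and sequence-table timeouts, deliver error
 * responses for whatever expired, and wind down automatic
 * maintenance mode.  Returns true if another timer tick is still
 * needed.
 */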
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	mutex_lock(&intf->seq_lock);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &need_timer);
	mutex_unlock(&intf->seq_lock);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_state =
					IPMI_MAINTENANCE_MODE_STATE_OFF;
				intf->auto_maintenance_timeout = 0;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	queue_work(system_wq, &intf->smi_work);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_state)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static atomic_t stop_operation;

static void ipmi_timeout_work(struct work_struct *work)
{
	struct ipmi_smi *intf;
	bool need_timer = false;

	if (atomic_read(&stop_operation))
		return;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}
		if (intf->maintenance_mode_state)
			need_timer = true;

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);

static void ipmi_timeout(struct timer_list *unused)
{
	if (atomic_read(&stop_operation))
		return;

	queue_work(system_wq, &ipmi_timer_work);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->recv_msg = NULL;
		rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

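/*
 * Allocate a receive message charged against the user's quota.
 * Returns ERR_PTR(-EBUSY) once the user already holds
 * max_msgs_per_user messages, or ERR_PTR(-ENOMEM) on allocation
 * failure, which is why callers check the result with IS_ERR()
 * rather than against NULL.
 */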
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
	struct ipmi_recv_msg *rv;

	if (user) {
		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
			atomic_dec(&user->nr_msgs);
			return ERR_PTR(-EBUSY);
		}
	}

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (!rv) {
		if (user)
			atomic_dec(&user->nr_msgs);
		return ERR_PTR(-ENOMEM);
	}

	rv->user = user;
	rv->done = free_recv_msg;
	if (user)
		kref_get(&user->refcount);
	atomic_inc(&recv_msg_inuse_count);
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user && !oops_in_progress) {
		atomic_dec(&msg->user->nr_msgs);
		kref_put(&msg->user->refcount, free_ipmi_user);
	}
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
				   struct ipmi_user *user)
{
	WARN_ON_ONCE(msg->user); /* User should not be set. */
	msg->user = user;
	atomic_inc(&user->nr_msgs);
	kref_get(&user->refcount);
}

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					 struct ipmi_addr *addr,
					 struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

void ipmi_panic_request_and_wait(struct ipmi_user *user,
				 struct ipmi_addr *addr,
				 struct kernel_ipmi_msg *msg)
{
	user->intf->run_to_completion = 1;
	_ipmi_panic_request_and_wait(user->intf, addr, msg);
}
EXPORT_SYMBOL(ipmi_panic_request_and_wait);

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

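/*
 * Panic reporting, run with the interface in run-to-completion mode:
 * first a Platform Event ("OS Critical Stop") goes to the local
 * management controller; then, if configured for it, the panic string
 * is chopped into 11-byte pieces and written as OEM SEL records,
 * either to a discovered event receiver or to the local SEL.
 */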
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strnlen(p, 11);

		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */

		memcpy_and_pad(data+5, 11, p, size, '\0');
		p += size;

		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

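/*
 * The panic notifier itself.  It runs at most once: every ready
 * interface is forced into run-to-completion mode, any message list
 * whose lock cannot be taken is emptied (the panic may have
 * interrupted a lock holder, leaving the list in an unusable state),
 * users with a panic handler are notified, and the panic events are
 * sent.
 */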
static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * the safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
	.next = NULL,
	.priority = 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
	if (!bmc_remove_work_wq) {
		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
		rv = -ENOMEM;
		goto out;
	}

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		destroy_workqueue(bmc_remove_work_wq);

		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		timer_delete_sync(&ipmi_timer);
		cancel_work_sync(&ipmi_timer_work);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");