// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);
static void intf_free(struct kref *ref);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
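
/*
 * Illustration (not part of the original source): since this is a
 * module parameter with a custom ops table, the panic behavior can be
 * chosen at load time or changed later through sysfs, e.g.:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */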

#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The number of times a message is retried before failing");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
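
/*
 * Illustration (not part of the original source): the 6-bit sequence
 * table index and the 26-bit sequence id share one long, so they
 * round-trip losslessly, e.g.:
 *
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x123456);	// 0x14123456
 *	unsigned char seq;
 *	long seqid;
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);	// seq == 5, seqid == 0x123456
 */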

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref            usecount;
	struct work_struct     remove_work;
	unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing.  If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt.  The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t          xmit_msgs_lock;
	struct list_head    xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head    hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex     events_mutex; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicates the flag from the smb_info,
	 * smi_info and ipmi_serial_info structures.  It is used to
	 * decrease the number of parameters passed by the "low" level
	 * IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);

static void free_ipmi_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
	struct module *owner;

	owner = user->intf->owner;
	kref_put(&user->intf->refcount, intf_free);
	module_put(owner);
	vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
	kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when SMIs are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
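
/*
 * Illustration (not part of the original source): the ## token paste
 * lets callers name stats without the IPMI_STAT_ prefix, so
 * ipmi_inc_stat(intf, sent_ipmb_commands) expands to
 * atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
 */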

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface.  No need for locks, this is single-threaded.
	 */
	list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}

	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	unsigned int count = 0, i;
	int *interfaces = NULL;
	struct device **devices = NULL;
	int rv = 0;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	/*
	 * Build an array of ipmi interfaces and fill it in, and
	 * another array of the devices.  We can't call the callback
	 * with ipmi_interfaces_mutex held.  smi_watchers_mutex will
	 * keep things in order for the user.
	 */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link)
		count++;
	if (count > 0) {
		interfaces = kmalloc_array(count, sizeof(*interfaces),
					   GFP_KERNEL);
		if (!interfaces) {
			rv = -ENOMEM;
		} else {
			devices = kmalloc_array(count, sizeof(*devices),
						GFP_KERNEL);
			if (!devices) {
				kfree(interfaces);
				interfaces = NULL;
				rv = -ENOMEM;
			}
		}
		count = 0;
	}
	if (interfaces) {
		list_for_each_entry(intf, &ipmi_interfaces, link) {
			int intf_num = READ_ONCE(intf->intf_num);

			if (intf_num == -1)
				continue;
			devices[count] = intf->si_dev;
			interfaces[count++] = intf_num;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (interfaces) {
		for (i = 0; i < count; i++)
			watcher->new_smi(interfaces[i], devices[i]);
		kfree(interfaces);
		kfree(devices);
	}

	mutex_unlock(&smi_watchers_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.  Drop
		 * the message count before freeing, since the free
		 * releases the message (and its user reference).
		 */
		atomic_dec(&msg->user->nr_msgs);
		ipmi_free_recv_msg(msg);
	} else {
		/*
		 * Deliver it in smi_work.  The message will hold a
		 * refcount to the user.
		 */
		mutex_lock(&intf->user_msgs_mutex);
		list_add_tail(&msg->link, &intf->user_msgs);
		mutex_unlock(&intf->user_msgs_mutex);
		queue_work(system_wq, &intf->smi_work);
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user = NULL;
	int rv = 0;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_unlock;

found:
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_unlock;
	}

	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user) {
		rv = -ENOMEM;
		goto out_kfree;
	}

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	refcount_set(&new_user->destroyed, 1);
	kref_get(&new_user->refcount); /* Destroy owns a refcount. */
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	mutex_lock(&intf->users_mutex);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	mutex_unlock(&intf->users_mutex);

	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

out_kfree:
	if (rv) {
		atomic_dec(&intf->nr_users);
		vfree(new_user);
	} else {
		*user = new_user;
	}
out_unlock:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
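
/*
 * Illustrative use from a client module (a sketch, not from this
 * file; names and error handling are hypothetical):
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
 *	{
 *		pr_info("IPMI response, cmd 0x%x\n", msg->msg.cmd);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (rv)
 *		return rv;
 *	...
 *	ipmi_destroy_user(user);
 */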

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = -EINVAL;
	struct ipmi_smi *intf;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num) {
			if (!intf->handlers->get_smi_info)
				rv = -ENOTTY;
			else
				rv = intf->handlers->get_smi_info(intf->send_info, data);
			break;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	struct ipmi_recv_msg *msg, *msg2;

	if (!refcount_dec_if_one(&user->destroyed))
		return;

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's list and sequence table. */
	list_del(&user->link);
	atomic_dec(&intf->nr_users);

	spin_lock_irqsave(&intf->seq_lock, flags);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		if (msg->user != user)
			continue;
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	release_ipmi_user(user);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;

	mutex_lock(&intf->users_mutex);
	_ipmi_destroy_user(user);
	mutex_unlock(&intf->users_mutex);

	kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode;
	unsigned long flags;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}
	}

out:
	mutex_unlock(&intf->events_mutex);
	release_ipmi_user(user);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
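
/*
 * Illustrative use (a sketch, not from this file): chans is a bitmask
 * of channel numbers, so registering for netfn 0x2e (OEM group), cmd
 * 0x01 on channels 0 and 1 only would look like:
 *
 *	rv = ipmi_register_for_cmd(user, 0x2e, 0x01, (1 << 0) | (1 << 1));
 */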

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
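
/*
 * Illustration (not part of the original source): the checksum is the
 * two's complement of the 8-bit sum of the data, so appending it makes
 * the total sum zero modulo 256:
 *
 *	unsigned char buf[3] = { 0x20, 0x06, 0x00 };
 *	buf[2] = ipmb_checksum(buf, 2);	// 0xda; 0x20 + 0x06 + 0xda == 0 (mod 256)
 */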

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
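
/*
 * Resulting SEND_MSG buffer layout (a summary derived from the code
 * above, not part of the original source; the optional broadcast zero
 * shifts everything after data[2] by one, i.e. i is 0 or 1):
 *
 *	data[0]    IPMI_NETFN_APP_REQUEST << 2
 *	data[1]    IPMI_SEND_MSG_CMD
 *	data[2]    channel
 *	[data[3]   0x00, only for broadcasts]
 *	data[i+3]  rsSA  (target slave address)
 *	data[i+4]  netFn << 2 | rsLUN
 *	data[i+5]  checksum over the previous two bytes
 *	data[i+6]  rqSA  (source address)
 *	data[i+7]  rqSeq << 2 | rqLUN
 *	data[i+8]  cmd, then the message data, then a final checksum
 */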

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
1954
1955
static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
1956
struct ipmi_addr *addr,
1957
long msgid,
1958
struct kernel_ipmi_msg *msg,
1959
struct ipmi_smi_msg *smi_msg,
1960
struct ipmi_recv_msg *recv_msg,
1961
unsigned char source_address,
1962
unsigned char source_lun,
1963
int retries,
1964
unsigned int retry_time_ms)
1965
{
1966
struct ipmi_ipmb_addr *ipmb_addr;
1967
unsigned char ipmb_seq;
1968
long seqid;
1969
int broadcast = 0;
1970
struct ipmi_channel *chans;
1971
int rv = 0;
1972
1973
if (addr->channel >= IPMI_MAX_CHANNELS) {
1974
ipmi_inc_stat(intf, sent_invalid_commands);
1975
return -EINVAL;
1976
}
1977
1978
chans = READ_ONCE(intf->channel_list)->c;
1979
1980
if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1981
ipmi_inc_stat(intf, sent_invalid_commands);
1982
return -EINVAL;
1983
}
1984
1985
if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1986
/*
1987
* Broadcasts add a zero at the beginning of the
1988
* message, but otherwise is the same as an IPMB
1989
* address.
1990
*/
1991
addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1992
broadcast = 1;
1993
retries = 0; /* Don't retry broadcasts. */
1994
}
1995
1996
/*
1997
* 9 for the header and 1 for the checksum, plus
1998
* possibly one for the broadcast.
1999
*/
2000
if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
2001
ipmi_inc_stat(intf, sent_invalid_commands);
2002
return -EMSGSIZE;
2003
}
2004
2005
ipmb_addr = (struct ipmi_ipmb_addr *) addr;
2006
if (ipmb_addr->lun > 3) {
2007
ipmi_inc_stat(intf, sent_invalid_commands);
2008
return -EINVAL;
2009
}
2010
2011
memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
2012
2013
if (recv_msg->msg.netfn & 0x1) {
2014
/*
2015
* It's a response, so use the user's sequence
2016
* from msgid.
2017
*/
2018
ipmi_inc_stat(intf, sent_ipmb_responses);
2019
format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
2020
msgid, broadcast,
2021
source_address, source_lun);
2022
2023
/*
2024
* Save the receive message so we can use it
2025
* to deliver the response.
2026
*/
2027
smi_msg->user_data = recv_msg;
2028
} else {
2029
/* It's a command, so get a sequence for it. */
2030
unsigned long flags;
2031
2032
spin_lock_irqsave(&intf->seq_lock, flags);
2033
2034
if (is_maintenance_mode_cmd(msg))
2035
intf->ipmb_maintenance_mode_timeout =
2036
maintenance_mode_timeout_ms;
2037
2038
if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2039
/* Different default in maintenance mode */
2040
retry_time_ms = default_maintenance_retry_ms;
2041
2042
/*
2043
* Get a sequence number for the message, using
2044
* the given timeout and retries (defaults if unset).
2045
*/
2046
rv = intf_next_seq(intf,
2047
recv_msg,
2048
retry_time_ms,
2049
retries,
2050
broadcast,
2051
&ipmb_seq,
2052
&seqid);
2053
if (rv)
2054
/*
2055
* We have probably used up all the sequence
2056
* numbers, so abort.
2057
*/
2058
goto out_err;
2059
2060
ipmi_inc_stat(intf, sent_ipmb_commands);
2061
2062
/*
2063
* Store the sequence number in the message,
2064
* so that when the send message response
2065
* comes back we can start the timer.
2066
*/
2067
format_ipmb_msg(smi_msg, msg, ipmb_addr,
2068
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2069
ipmb_seq, broadcast,
2070
source_address, source_lun);
2071
2072
/*
2073
* Copy the message into the recv message data, so we
2074
* can retransmit it later if necessary.
2075
*/
2076
memcpy(recv_msg->msg_data, smi_msg->data,
2077
smi_msg->data_size);
2078
recv_msg->msg.data = recv_msg->msg_data;
2079
recv_msg->msg.data_len = smi_msg->data_size;
2080
2081
/*
2082
* We don't unlock until here, because we need
2083
* to copy the completed message into the
2084
* recv_msg before we release the lock.
2085
* Otherwise, race conditions may bite us. I
2086
* know that's pretty paranoid, but I prefer
2087
* to be correct.
2088
*/
2089
out_err:
2090
spin_unlock_irqrestore(&intf->seq_lock, flags);
2091
}
2092
2093
return rv;
2094
}
2095
2096
static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
2097
struct ipmi_addr *addr,
2098
long msgid,
2099
struct kernel_ipmi_msg *msg,
2100
struct ipmi_smi_msg *smi_msg,
2101
struct ipmi_recv_msg *recv_msg,
2102
unsigned char source_lun)
2103
{
2104
struct ipmi_ipmb_direct_addr *daddr;
2105
bool is_cmd = !(recv_msg->msg.netfn & 0x1);
2106
2107
if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
2108
return -EAFNOSUPPORT;
2109
2110
/* Responses must have a completion code. */
2111
if (!is_cmd && msg->data_len < 1) {
2112
ipmi_inc_stat(intf, sent_invalid_commands);
2113
return -EINVAL;
2114
}
2115
2116
if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
2117
ipmi_inc_stat(intf, sent_invalid_commands);
2118
return -EMSGSIZE;
2119
}
2120
2121
daddr = (struct ipmi_ipmb_direct_addr *) addr;
2122
if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
2123
ipmi_inc_stat(intf, sent_invalid_commands);
2124
return -EINVAL;
2125
}
2126
2127
smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
2128
smi_msg->msgid = msgid;
2129
2130
if (is_cmd) {
2131
smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
2132
smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
2133
} else {
2134
smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
2135
smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
2136
}
2137
smi_msg->data[1] = daddr->slave_addr;
2138
smi_msg->data[3] = msg->cmd;
2139
2140
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
2141
smi_msg->data_size = msg->data_len + 4;
2142
2143
smi_msg->user_data = recv_msg;
2144
2145
return 0;
2146
}
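/*
 * Illustration: the 4-byte header built above for a direct IPMB
 * message is
 *
 *	data[0]  netfn << 2 | LUN (rs_lun for commands, rq_lun for
 *	         responses)
 *	data[1]  target slave address
 *	data[2]  msgid << 2 | the other LUN
 *	data[3]  command
 *
 * followed by the raw payload; unlike routed IPMB, no checksum bytes
 * are added here.
 */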
2147
2148
static int i_ipmi_req_lan(struct ipmi_smi *intf,
2149
struct ipmi_addr *addr,
2150
long msgid,
2151
struct kernel_ipmi_msg *msg,
2152
struct ipmi_smi_msg *smi_msg,
2153
struct ipmi_recv_msg *recv_msg,
2154
unsigned char source_lun,
2155
int retries,
2156
unsigned int retry_time_ms)
2157
{
2158
struct ipmi_lan_addr *lan_addr;
2159
unsigned char ipmb_seq;
2160
long seqid;
2161
struct ipmi_channel *chans;
2162
int rv = 0;
2163
2164
if (addr->channel >= IPMI_MAX_CHANNELS) {
2165
ipmi_inc_stat(intf, sent_invalid_commands);
2166
return -EINVAL;
2167
}
2168
2169
chans = READ_ONCE(intf->channel_list)->c;
2170
2171
if ((chans[addr->channel].medium
2172
!= IPMI_CHANNEL_MEDIUM_8023LAN)
2173
&& (chans[addr->channel].medium
2174
!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
2175
ipmi_inc_stat(intf, sent_invalid_commands);
2176
return -EINVAL;
2177
}
2178
2179
/* 11 for the header and 1 for the checksum. */
2180
if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2181
ipmi_inc_stat(intf, sent_invalid_commands);
2182
return -EMSGSIZE;
2183
}
2184
2185
lan_addr = (struct ipmi_lan_addr *) addr;
2186
if (lan_addr->lun > 3) {
2187
ipmi_inc_stat(intf, sent_invalid_commands);
2188
return -EINVAL;
2189
}
2190
2191
memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2192
2193
if (recv_msg->msg.netfn & 0x1) {
2194
/*
2195
* It's a response, so use the user's sequence
2196
* from msgid.
2197
*/
2198
ipmi_inc_stat(intf, sent_lan_responses);
2199
format_lan_msg(smi_msg, msg, lan_addr, msgid,
2200
msgid, source_lun);
2201
2202
/*
2203
* Save the receive message so we can use it
2204
* to deliver the response.
2205
*/
2206
smi_msg->user_data = recv_msg;
2207
} else {
2208
/* It's a command, so get a sequence for it. */
2209
unsigned long flags;
2210
2211
spin_lock_irqsave(&intf->seq_lock, flags);
2212
2213
/*
2214
* Get a sequence number for the message, using
2215
* the given timeout and retries (defaults if unset).
2216
*/
2217
rv = intf_next_seq(intf,
2218
recv_msg,
2219
retry_time_ms,
2220
retries,
2221
0,
2222
&ipmb_seq,
2223
&seqid);
2224
if (rv)
2225
/*
2226
* We have probably used up all the sequence
2227
* numbers, so abort.
2228
*/
2229
goto out_err;
2230
2231
ipmi_inc_stat(intf, sent_lan_commands);
2232
2233
/*
2234
* Store the sequence number in the message,
2235
* so that when the send message response
2236
* comes back we can start the timer.
2237
*/
2238
format_lan_msg(smi_msg, msg, lan_addr,
2239
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2240
ipmb_seq, source_lun);
2241
2242
/*
2243
* Copy the message into the recv message data, so we
2244
* can retransmit it later if necessary.
2245
*/
2246
memcpy(recv_msg->msg_data, smi_msg->data,
2247
smi_msg->data_size);
2248
recv_msg->msg.data = recv_msg->msg_data;
2249
recv_msg->msg.data_len = smi_msg->data_size;
2250
2251
/*
2252
* We don't unlock until here, because we need
2253
* to copy the completed message into the
2254
* recv_msg before we release the lock.
2255
* Otherwise, race conditions may bite us. I
2256
* know that's pretty paranoid, but I prefer
2257
* to be correct.
2258
*/
2259
out_err:
2260
spin_unlock_irqrestore(&intf->seq_lock, flags);
2261
}
2262
2263
return rv;
2264
}
2265
2266
/*
2267
* Separate from ipmi_request so that the user does not have to be
2268
* supplied in certain circumstances (mainly at panic time). If
2269
* messages are supplied, they will be freed, even if an error
2270
* occurs.
2271
*/
2272
static int i_ipmi_request(struct ipmi_user *user,
2273
struct ipmi_smi *intf,
2274
struct ipmi_addr *addr,
2275
long msgid,
2276
struct kernel_ipmi_msg *msg,
2277
void *user_msg_data,
2278
void *supplied_smi,
2279
struct ipmi_recv_msg *supplied_recv,
2280
int priority,
2281
unsigned char source_address,
2282
unsigned char source_lun,
2283
int retries,
2284
unsigned int retry_time_ms)
2285
{
2286
struct ipmi_smi_msg *smi_msg;
2287
struct ipmi_recv_msg *recv_msg;
2288
int run_to_completion = READ_ONCE(intf->run_to_completion);
2289
int rv = 0;
2290
2291
if (user) {
2292
if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
2293
/* Decrement will happen at the end of the routine. */
2294
rv = -EBUSY;
2295
goto out;
2296
}
2297
}
2298
2299
if (supplied_recv)
2300
recv_msg = supplied_recv;
2301
else {
2302
recv_msg = ipmi_alloc_recv_msg();
2303
if (recv_msg == NULL) {
2304
rv = -ENOMEM;
2305
goto out;
2306
}
2307
}
2308
recv_msg->user_msg_data = user_msg_data;
2309
2310
if (supplied_smi)
2311
smi_msg = supplied_smi;
2312
else {
2313
smi_msg = ipmi_alloc_smi_msg();
2314
if (smi_msg == NULL) {
2315
if (!supplied_recv)
2316
ipmi_free_recv_msg(recv_msg);
2317
rv = -ENOMEM;
2318
goto out;
2319
}
2320
}
2321
2322
if (!run_to_completion)
2323
mutex_lock(&intf->users_mutex);
2324
if (intf->in_shutdown) {
2325
rv = -ENODEV;
2326
goto out_err;
2327
}
2328
2329
recv_msg->user = user;
2330
if (user)
2331
/* The put happens when the message is freed. */
2332
kref_get(&user->refcount);
2333
recv_msg->msgid = msgid;
2334
/*
2335
* Store the message to send in the receive message so timeout
2336
* responses can get the proper response data.
2337
*/
2338
recv_msg->msg = *msg;
2339
2340
if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2341
rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2342
recv_msg, retries, retry_time_ms);
2343
} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2344
rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2345
source_address, source_lun,
2346
retries, retry_time_ms);
2347
} else if (is_ipmb_direct_addr(addr)) {
2348
rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
2349
recv_msg, source_lun);
2350
} else if (is_lan_addr(addr)) {
2351
rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2352
source_lun, retries, retry_time_ms);
2353
} else {
2354
/* Unknown address type. */
2355
ipmi_inc_stat(intf, sent_invalid_commands);
2356
rv = -EINVAL;
2357
}
2358
2359
if (rv) {
2360
out_err:
2361
ipmi_free_smi_msg(smi_msg);
2362
ipmi_free_recv_msg(recv_msg);
2363
} else {
2364
dev_dbg(intf->si_dev, "Send: %*ph\n",
2365
smi_msg->data_size, smi_msg->data);
2366
2367
smi_send(intf, intf->handlers, smi_msg, priority);
2368
}
2369
if (!run_to_completion)
2370
mutex_unlock(&intf->users_mutex);
2371
2372
out:
2373
if (rv && user)
2374
atomic_dec(&user->nr_msgs);
2375
return rv;
2376
}
2377
2378
static int check_addr(struct ipmi_smi *intf,
2379
struct ipmi_addr *addr,
2380
unsigned char *saddr,
2381
unsigned char *lun)
2382
{
2383
if (addr->channel >= IPMI_MAX_CHANNELS)
2384
return -EINVAL;
2385
addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2386
*lun = intf->addrinfo[addr->channel].lun;
2387
*saddr = intf->addrinfo[addr->channel].address;
2388
return 0;
2389
}
2390
2391
int ipmi_request_settime(struct ipmi_user *user,
2392
struct ipmi_addr *addr,
2393
long msgid,
2394
struct kernel_ipmi_msg *msg,
2395
void *user_msg_data,
2396
int priority,
2397
int retries,
2398
unsigned int retry_time_ms)
2399
{
2400
unsigned char saddr = 0, lun = 0;
2401
int rv;
2402
2403
if (!user)
2404
return -EINVAL;
2405
2406
user = acquire_ipmi_user(user);
2407
if (!user)
2408
return -ENODEV;
2409
2410
rv = check_addr(user->intf, addr, &saddr, &lun);
2411
if (!rv)
2412
rv = i_ipmi_request(user,
2413
user->intf,
2414
addr,
2415
msgid,
2416
msg,
2417
user_msg_data,
2418
NULL, NULL,
2419
priority,
2420
saddr,
2421
lun,
2422
retries,
2423
retry_time_ms);
2424
2425
release_ipmi_user(user);
2426
return rv;
2427
}
2428
EXPORT_SYMBOL(ipmi_request_settime);
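/*
 * Usage sketch (illustrative, not compiled; "user" is assumed to come
 * from ipmi_create_user() and "my_msgid" is a hypothetical cookie):
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *		.lun = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
 *		.data = NULL,
 *		.data_len = 0,
 *	};
 *	int rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
 *				      my_msgid, &msg, NULL, 0, -1, 0);
 *
 * Passing -1 retries and 0 retry_time_ms selects the driver defaults;
 * the response is delivered later through the receive handler that was
 * registered with the user.
 */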
2429
2430
int ipmi_request_supply_msgs(struct ipmi_user *user,
2431
struct ipmi_addr *addr,
2432
long msgid,
2433
struct kernel_ipmi_msg *msg,
2434
void *user_msg_data,
2435
void *supplied_smi,
2436
struct ipmi_recv_msg *supplied_recv,
2437
int priority)
2438
{
2439
unsigned char saddr = 0, lun = 0;
2440
int rv;
2441
2442
if (!user)
2443
return -EINVAL;
2444
2445
user = acquire_ipmi_user(user);
2446
if (!user)
2447
return -ENODEV;
2448
2449
rv = check_addr(user->intf, addr, &saddr, &lun);
2450
if (!rv)
2451
rv = i_ipmi_request(user,
2452
user->intf,
2453
addr,
2454
msgid,
2455
msg,
2456
user_msg_data,
2457
supplied_smi,
2458
supplied_recv,
2459
priority,
2460
saddr,
2461
lun,
2462
-1, 0);
2463
2464
release_ipmi_user(user);
2465
return rv;
2466
}
2467
EXPORT_SYMBOL(ipmi_request_supply_msgs);
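/*
 * This variant exists for callers that must not allocate, e.g. the
 * panic-time event code.  Sketch, assuming statically preallocated
 * messages whose initialization is not shown:
 *
 *	static struct ipmi_smi_msg smi_msg;
 *	static struct ipmi_recv_msg recv_msg;
 *	...
 *	rv = ipmi_request_supply_msgs(user, addr, msgid, &msg, NULL,
 *				      &smi_msg, &recv_msg, 0);
 *
 * Note that this entry point always uses the default retries and retry
 * time (the -1 and 0 passed above).
 */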
2468
2469
static void bmc_device_id_handler(struct ipmi_smi *intf,
2470
struct ipmi_recv_msg *msg)
2471
{
2472
int rv;
2473
2474
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2475
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2476
|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2477
dev_warn(intf->si_dev,
2478
"invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2479
msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2480
return;
2481
}
2482
2483
if (msg->msg.data[0]) {
2484
dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
2485
msg->msg.data[0]);
2486
intf->bmc->dyn_id_set = 0;
2487
goto out;
2488
}
2489
2490
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2491
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2492
if (rv) {
2493
dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2494
/* record completion code when error */
2495
intf->bmc->cc = msg->msg.data[0];
2496
intf->bmc->dyn_id_set = 0;
2497
} else {
2498
/*
2499
* Make sure the id data is available before setting
2500
* dyn_id_set.
2501
*/
2502
smp_wmb();
2503
intf->bmc->dyn_id_set = 1;
2504
}
2505
out:
2506
wake_up(&intf->waitq);
2507
}
2508
2509
static int
2510
send_get_device_id_cmd(struct ipmi_smi *intf)
2511
{
2512
struct ipmi_system_interface_addr si;
2513
struct kernel_ipmi_msg msg;
2514
2515
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2516
si.channel = IPMI_BMC_CHANNEL;
2517
si.lun = 0;
2518
2519
msg.netfn = IPMI_NETFN_APP_REQUEST;
2520
msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2521
msg.data = NULL;
2522
msg.data_len = 0;
2523
2524
return i_ipmi_request(NULL,
2525
intf,
2526
(struct ipmi_addr *) &si,
2527
0,
2528
&msg,
2529
intf,
2530
NULL,
2531
NULL,
2532
0,
2533
intf->addrinfo[0].address,
2534
intf->addrinfo[0].lun,
2535
-1, 0);
2536
}
2537
2538
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2539
{
2540
int rv;
2541
unsigned int retry_count = 0;
2542
2543
intf->null_user_handler = bmc_device_id_handler;
2544
2545
retry:
2546
bmc->cc = 0;
2547
bmc->dyn_id_set = 2;
2548
2549
rv = send_get_device_id_cmd(intf);
2550
if (rv)
2551
goto out_reset_handler;
2552
2553
wait_event(intf->waitq, bmc->dyn_id_set != 2);
2554
2555
if (!bmc->dyn_id_set) {
2556
if (bmc->cc != IPMI_CC_NO_ERROR &&
2557
++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2558
msleep(500);
2559
dev_warn(intf->si_dev,
2560
"BMC returned 0x%2.2x, retry get bmc device id\n",
2561
bmc->cc);
2562
goto retry;
2563
}
2564
2565
rv = -EIO; /* Something went wrong in the fetch. */
2566
}
2567
2568
/* dyn_id_set makes the id data available. */
2569
smp_rmb();
2570
2571
out_reset_handler:
2572
intf->null_user_handler = NULL;
2573
2574
return rv;
2575
}
2576
2577
/*
2578
* Fetch the device id for the bmc/interface. You must pass in either
2579
* bmc or intf; this code will get the other one. If the data has
2580
* been recently fetched, this will just use the cached data. Otherwise
2581
* it will run a new fetch.
2582
*
2583
* Except for the first time this is called (in ipmi_add_smi()),
2584
* this will always return good data.
2585
*/
2586
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2587
struct ipmi_device_id *id,
2588
bool *guid_set, guid_t *guid, int intf_num)
2589
{
2590
int rv = 0;
2591
int prev_dyn_id_set, prev_guid_set;
2592
bool intf_set = intf != NULL;
2593
2594
if (!intf) {
2595
mutex_lock(&bmc->dyn_mutex);
2596
retry_bmc_lock:
2597
if (list_empty(&bmc->intfs)) {
2598
mutex_unlock(&bmc->dyn_mutex);
2599
return -ENOENT;
2600
}
2601
intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2602
bmc_link);
2603
kref_get(&intf->refcount);
2604
mutex_unlock(&bmc->dyn_mutex);
2605
mutex_lock(&intf->bmc_reg_mutex);
2606
mutex_lock(&bmc->dyn_mutex);
2607
if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2608
bmc_link)) {
2609
mutex_unlock(&intf->bmc_reg_mutex);
2610
kref_put(&intf->refcount, intf_free);
2611
goto retry_bmc_lock;
2612
}
2613
} else {
2614
mutex_lock(&intf->bmc_reg_mutex);
2615
bmc = intf->bmc;
2616
mutex_lock(&bmc->dyn_mutex);
2617
kref_get(&intf->refcount);
2618
}
2619
2620
/* If we have a valid and current ID, just return that. */
2621
if (intf->in_bmc_register ||
2622
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2623
goto out_noprocessing;
2624
2625
prev_guid_set = bmc->dyn_guid_set;
2626
__get_guid(intf);
2627
2628
prev_dyn_id_set = bmc->dyn_id_set;
2629
rv = __get_device_id(intf, bmc);
2630
if (rv)
2631
goto out;
2632
2633
/*
2634
* The guid, device id, manufacturer id, and product id should
2635
* not change on a BMC. If any of them does, re-register the BMC.
2636
*/
2637
if (!intf->bmc_registered
2638
|| (!prev_guid_set && bmc->dyn_guid_set)
2639
|| (!prev_dyn_id_set && bmc->dyn_id_set)
2640
|| (prev_guid_set && bmc->dyn_guid_set
2641
&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
2642
|| bmc->id.device_id != bmc->fetch_id.device_id
2643
|| bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2644
|| bmc->id.product_id != bmc->fetch_id.product_id) {
2645
struct ipmi_device_id id = bmc->fetch_id;
2646
int guid_set = bmc->dyn_guid_set;
2647
guid_t guid;
2648
2649
guid = bmc->fetch_guid;
2650
mutex_unlock(&bmc->dyn_mutex);
2651
2652
__ipmi_bmc_unregister(intf);
2653
/* Fill in the temporary BMC for good measure. */
2654
intf->bmc->id = id;
2655
intf->bmc->dyn_guid_set = guid_set;
2656
intf->bmc->guid = guid;
2657
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2658
need_waiter(intf); /* Retry later on an error. */
2659
else
2660
__scan_channels(intf, &id);
2661
2662
2663
if (!intf_set) {
2664
/*
2665
* We weren't given a specific interface in the
2666
* call, so restart the operation on
2667
* the next interface for the BMC.
2668
*/
2669
mutex_unlock(&intf->bmc_reg_mutex);
2670
mutex_lock(&bmc->dyn_mutex);
2671
goto retry_bmc_lock;
2672
}
2673
2674
/* We have a new BMC, set it up. */
2675
bmc = intf->bmc;
2676
mutex_lock(&bmc->dyn_mutex);
2677
goto out_noprocessing;
2678
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2679
/* Version info changes, scan the channels again. */
2680
__scan_channels(intf, &bmc->fetch_id);
2681
2682
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2683
2684
out:
2685
if (rv && prev_dyn_id_set) {
2686
rv = 0; /* Ignore failures if we have previous data. */
2687
bmc->dyn_id_set = prev_dyn_id_set;
2688
}
2689
if (!rv) {
2690
bmc->id = bmc->fetch_id;
2691
if (bmc->dyn_guid_set)
2692
bmc->guid = bmc->fetch_guid;
2693
else if (prev_guid_set)
2694
/*
2695
* The guid used to be valid but the fetch failed;
2696
* just use the cached value.
2697
*/
2698
bmc->dyn_guid_set = prev_guid_set;
2699
}
2700
out_noprocessing:
2701
if (!rv) {
2702
if (id)
2703
*id = bmc->id;
2704
2705
if (guid_set)
2706
*guid_set = bmc->dyn_guid_set;
2707
2708
if (guid && bmc->dyn_guid_set)
2709
*guid = bmc->guid;
2710
}
2711
2712
mutex_unlock(&bmc->dyn_mutex);
2713
mutex_unlock(&intf->bmc_reg_mutex);
2714
2715
kref_put(&intf->refcount, intf_free);
2716
return rv;
2717
}
2718
2719
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2720
struct ipmi_device_id *id,
2721
bool *guid_set, guid_t *guid)
2722
{
2723
return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2724
}
2725
2726
static ssize_t device_id_show(struct device *dev,
2727
struct device_attribute *attr,
2728
char *buf)
2729
{
2730
struct bmc_device *bmc = to_bmc_device(dev);
2731
struct ipmi_device_id id;
2732
int rv;
2733
2734
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2735
if (rv)
2736
return rv;
2737
2738
return sysfs_emit(buf, "%u\n", id.device_id);
2739
}
2740
static DEVICE_ATTR_RO(device_id);
2741
2742
static ssize_t provides_device_sdrs_show(struct device *dev,
2743
struct device_attribute *attr,
2744
char *buf)
2745
{
2746
struct bmc_device *bmc = to_bmc_device(dev);
2747
struct ipmi_device_id id;
2748
int rv;
2749
2750
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2751
if (rv)
2752
return rv;
2753
2754
return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
2755
}
2756
static DEVICE_ATTR_RO(provides_device_sdrs);
2757
2758
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2759
char *buf)
2760
{
2761
struct bmc_device *bmc = to_bmc_device(dev);
2762
struct ipmi_device_id id;
2763
int rv;
2764
2765
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2766
if (rv)
2767
return rv;
2768
2769
return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
2770
}
2771
static DEVICE_ATTR_RO(revision);
2772
2773
static ssize_t firmware_revision_show(struct device *dev,
2774
struct device_attribute *attr,
2775
char *buf)
2776
{
2777
struct bmc_device *bmc = to_bmc_device(dev);
2778
struct ipmi_device_id id;
2779
int rv;
2780
2781
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2782
if (rv)
2783
return rv;
2784
2785
return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
2786
id.firmware_revision_2);
2787
}
2788
static DEVICE_ATTR_RO(firmware_revision);
2789
2790
static ssize_t ipmi_version_show(struct device *dev,
2791
struct device_attribute *attr,
2792
char *buf)
2793
{
2794
struct bmc_device *bmc = to_bmc_device(dev);
2795
struct ipmi_device_id id;
2796
int rv;
2797
2798
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2799
if (rv)
2800
return rv;
2801
2802
return sysfs_emit(buf, "%u.%u\n",
2803
ipmi_version_major(&id),
2804
ipmi_version_minor(&id));
2805
}
2806
static DEVICE_ATTR_RO(ipmi_version);
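/*
 * Illustration, assuming the ipmi_version_major()/_minor() helpers
 * unpack the IPMI-spec BCD encoding (major in the low nibble, minor in
 * the high nibble): an ipmi_version byte of 0x51 prints as "1.5", and
 * 0x02 prints as "2.0".
 */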
2807
2808
static ssize_t add_dev_support_show(struct device *dev,
2809
struct device_attribute *attr,
2810
char *buf)
2811
{
2812
struct bmc_device *bmc = to_bmc_device(dev);
2813
struct ipmi_device_id id;
2814
int rv;
2815
2816
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2817
if (rv)
2818
return rv;
2819
2820
return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
2821
}
2822
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2823
NULL);
2824
2825
static ssize_t manufacturer_id_show(struct device *dev,
2826
struct device_attribute *attr,
2827
char *buf)
2828
{
2829
struct bmc_device *bmc = to_bmc_device(dev);
2830
struct ipmi_device_id id;
2831
int rv;
2832
2833
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2834
if (rv)
2835
return rv;
2836
2837
return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
2838
}
2839
static DEVICE_ATTR_RO(manufacturer_id);
2840
2841
static ssize_t product_id_show(struct device *dev,
2842
struct device_attribute *attr,
2843
char *buf)
2844
{
2845
struct bmc_device *bmc = to_bmc_device(dev);
2846
struct ipmi_device_id id;
2847
int rv;
2848
2849
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2850
if (rv)
2851
return rv;
2852
2853
return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
2854
}
2855
static DEVICE_ATTR_RO(product_id);
2856
2857
static ssize_t aux_firmware_rev_show(struct device *dev,
2858
struct device_attribute *attr,
2859
char *buf)
2860
{
2861
struct bmc_device *bmc = to_bmc_device(dev);
2862
struct ipmi_device_id id;
2863
int rv;
2864
2865
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2866
if (rv)
2867
return rv;
2868
2869
return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2870
id.aux_firmware_revision[3],
2871
id.aux_firmware_revision[2],
2872
id.aux_firmware_revision[1],
2873
id.aux_firmware_revision[0]);
2874
}
2875
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2876
2877
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2878
char *buf)
2879
{
2880
struct bmc_device *bmc = to_bmc_device(dev);
2881
bool guid_set;
2882
guid_t guid;
2883
int rv;
2884
2885
rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2886
if (rv)
2887
return rv;
2888
if (!guid_set)
2889
return -ENOENT;
2890
2891
return sysfs_emit(buf, "%pUl\n", &guid);
2892
}
2893
static DEVICE_ATTR_RO(guid);
2894
2895
static struct attribute *bmc_dev_attrs[] = {
2896
&dev_attr_device_id.attr,
2897
&dev_attr_provides_device_sdrs.attr,
2898
&dev_attr_revision.attr,
2899
&dev_attr_firmware_revision.attr,
2900
&dev_attr_ipmi_version.attr,
2901
&dev_attr_additional_device_support.attr,
2902
&dev_attr_manufacturer_id.attr,
2903
&dev_attr_product_id.attr,
2904
&dev_attr_aux_firmware_revision.attr,
2905
&dev_attr_guid.attr,
2906
NULL
2907
};
2908
2909
static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2910
struct attribute *attr, int idx)
2911
{
2912
struct device *dev = kobj_to_dev(kobj);
2913
struct bmc_device *bmc = to_bmc_device(dev);
2914
umode_t mode = attr->mode;
2915
int rv;
2916
2917
if (attr == &dev_attr_aux_firmware_revision.attr) {
2918
struct ipmi_device_id id;
2919
2920
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2921
return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2922
}
2923
if (attr == &dev_attr_guid.attr) {
2924
bool guid_set;
2925
2926
rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2927
return (!rv && guid_set) ? mode : 0;
2928
}
2929
return mode;
2930
}
2931
2932
static const struct attribute_group bmc_dev_attr_group = {
2933
.attrs = bmc_dev_attrs,
2934
.is_visible = bmc_dev_attr_is_visible,
2935
};
2936
2937
static const struct attribute_group *bmc_dev_attr_groups[] = {
2938
&bmc_dev_attr_group,
2939
NULL
2940
};
2941
2942
static const struct device_type bmc_device_type = {
2943
.groups = bmc_dev_attr_groups,
2944
};
2945
2946
static int __find_bmc_guid(struct device *dev, const void *data)
2947
{
2948
const guid_t *guid = data;
2949
struct bmc_device *bmc;
2950
int rv;
2951
2952
if (dev->type != &bmc_device_type)
2953
return 0;
2954
2955
bmc = to_bmc_device(dev);
2956
rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2957
if (rv)
2958
rv = kref_get_unless_zero(&bmc->usecount);
2959
return rv;
2960
}
2961
2962
/*
2963
* Returns the bmc_device with its usecount incremented, or NULL.
2964
*/
2965
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2966
guid_t *guid)
2967
{
2968
struct device *dev;
2969
struct bmc_device *bmc = NULL;
2970
2971
dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2972
if (dev) {
2973
bmc = to_bmc_device(dev);
2974
put_device(dev);
2975
}
2976
return bmc;
2977
}
2978
2979
struct prod_dev_id {
2980
unsigned int product_id;
2981
unsigned char device_id;
2982
};
2983
2984
static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2985
{
2986
const struct prod_dev_id *cid = data;
2987
struct bmc_device *bmc;
2988
int rv;
2989
2990
if (dev->type != &bmc_device_type)
2991
return 0;
2992
2993
bmc = to_bmc_device(dev);
2994
rv = (bmc->id.product_id == cid->product_id
2995
&& bmc->id.device_id == cid->device_id);
2996
if (rv)
2997
rv = kref_get_unless_zero(&bmc->usecount);
2998
return rv;
2999
}
3000
3001
/*
3002
* Returns the bmc_device with its usecount incremented, or NULL.
3003
*/
3004
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
3005
struct device_driver *drv,
3006
unsigned int product_id, unsigned char device_id)
3007
{
3008
struct prod_dev_id id = {
3009
.product_id = product_id,
3010
.device_id = device_id,
3011
};
3012
struct device *dev;
3013
struct bmc_device *bmc = NULL;
3014
3015
dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
3016
if (dev) {
3017
bmc = to_bmc_device(dev);
3018
put_device(dev);
3019
}
3020
return bmc;
3021
}
3022
3023
static DEFINE_IDA(ipmi_bmc_ida);
3024
3025
static void
3026
release_bmc_device(struct device *dev)
3027
{
3028
kfree(to_bmc_device(dev));
3029
}
3030
3031
static void cleanup_bmc_work(struct work_struct *work)
3032
{
3033
struct bmc_device *bmc = container_of(work, struct bmc_device,
3034
remove_work);
3035
int id = bmc->pdev.id; /* Unregister overwrites id */
3036
3037
platform_device_unregister(&bmc->pdev);
3038
ida_free(&ipmi_bmc_ida, id);
3039
}
3040
3041
static void
3042
cleanup_bmc_device(struct kref *ref)
3043
{
3044
struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
3045
3046
/*
3047
* Remove the platform device from a workqueue to avoid removing
3048
* the device attributes while a device attribute is being read,
3049
* which could deadlock.
3050
*/
3051
queue_work(bmc_remove_work_wq, &bmc->remove_work);
3052
}
3053
3054
/*
3055
* Must be called with intf->bmc_reg_mutex held.
3056
*/
3057
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3058
{
3059
struct bmc_device *bmc = intf->bmc;
3060
3061
if (!intf->bmc_registered)
3062
return;
3063
3064
sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3065
sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3066
kfree(intf->my_dev_name);
3067
intf->my_dev_name = NULL;
3068
3069
mutex_lock(&bmc->dyn_mutex);
3070
list_del(&intf->bmc_link);
3071
mutex_unlock(&bmc->dyn_mutex);
3072
intf->bmc = &intf->tmp_bmc;
3073
kref_put(&bmc->usecount, cleanup_bmc_device);
3074
intf->bmc_registered = false;
3075
}
3076
3077
static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3078
{
3079
mutex_lock(&intf->bmc_reg_mutex);
3080
__ipmi_bmc_unregister(intf);
3081
mutex_unlock(&intf->bmc_reg_mutex);
3082
}
3083
3084
/*
3085
* Must be called with intf->bmc_reg_mutex held.
3086
*/
3087
static int __ipmi_bmc_register(struct ipmi_smi *intf,
3088
struct ipmi_device_id *id,
3089
bool guid_set, guid_t *guid, int intf_num)
3090
{
3091
int rv;
3092
struct bmc_device *bmc;
3093
struct bmc_device *old_bmc;
3094
3095
/*
3096
* platform_device_register() can cause bmc_reg_mutex to
3097
* be claimed because of the is_visible functions of
3098
* the attributes. Eliminate possible recursion and
3099
* release the lock.
3100
*/
3101
intf->in_bmc_register = true;
3102
mutex_unlock(&intf->bmc_reg_mutex);
3103
3104
/*
3105
* Try to find a bmc_device struct already
3106
* representing the interfaced BMC.
3107
*/
3108
mutex_lock(&ipmidriver_mutex);
3109
if (guid_set)
3110
old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3111
else
3112
old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3113
id->product_id,
3114
id->device_id);
3115
3116
/*
3117
* If there is already a bmc_device, use it; otherwise
3118
* allocate and register a new BMC device.
3119
*/
3120
if (old_bmc) {
3121
bmc = old_bmc;
3122
/*
3123
* Note: old_bmc already has usecount incremented by
3124
* the BMC find functions.
3125
*/
3126
intf->bmc = old_bmc;
3127
mutex_lock(&bmc->dyn_mutex);
3128
list_add_tail(&intf->bmc_link, &bmc->intfs);
3129
mutex_unlock(&bmc->dyn_mutex);
3130
3131
dev_info(intf->si_dev,
3132
"interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3133
bmc->id.manufacturer_id,
3134
bmc->id.product_id,
3135
bmc->id.device_id);
3136
} else {
3137
bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3138
if (!bmc) {
3139
rv = -ENOMEM;
3140
goto out;
3141
}
3142
INIT_LIST_HEAD(&bmc->intfs);
3143
mutex_init(&bmc->dyn_mutex);
3144
INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3145
3146
bmc->id = *id;
3147
bmc->dyn_id_set = 1;
3148
bmc->dyn_guid_set = guid_set;
3149
bmc->guid = *guid;
3150
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3151
3152
bmc->pdev.name = "ipmi_bmc";
3153
3154
rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
3155
if (rv < 0) {
3156
kfree(bmc);
3157
goto out;
3158
}
3159
3160
bmc->pdev.dev.driver = &ipmidriver.driver;
3161
bmc->pdev.id = rv;
3162
bmc->pdev.dev.release = release_bmc_device;
3163
bmc->pdev.dev.type = &bmc_device_type;
3164
kref_init(&bmc->usecount);
3165
3166
intf->bmc = bmc;
3167
mutex_lock(&bmc->dyn_mutex);
3168
list_add_tail(&intf->bmc_link, &bmc->intfs);
3169
mutex_unlock(&bmc->dyn_mutex);
3170
3171
rv = platform_device_register(&bmc->pdev);
3172
if (rv) {
3173
dev_err(intf->si_dev,
3174
"Unable to register bmc device: %d\n",
3175
rv);
3176
goto out_list_del;
3177
}
3178
3179
dev_info(intf->si_dev,
3180
"Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3181
bmc->id.manufacturer_id,
3182
bmc->id.product_id,
3183
bmc->id.device_id);
3184
}
3185
3186
/*
3187
* Create symlinks from the system interface device to the bmc
3188
* device and back.
3189
*/
3190
rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3191
if (rv) {
3192
dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3193
goto out_put_bmc;
3194
}
3195
3196
if (intf_num == -1)
3197
intf_num = intf->intf_num;
3198
intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3199
if (!intf->my_dev_name) {
3200
rv = -ENOMEM;
3201
dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3202
rv);
3203
goto out_unlink1;
3204
}
3205
3206
rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3207
intf->my_dev_name);
3208
if (rv) {
3209
dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3210
rv);
3211
goto out_free_my_dev_name;
3212
}
3213
3214
intf->bmc_registered = true;
3215
3216
out:
3217
mutex_unlock(&ipmidriver_mutex);
3218
mutex_lock(&intf->bmc_reg_mutex);
3219
intf->in_bmc_register = false;
3220
return rv;
3221
3222
3223
out_free_my_dev_name:
3224
kfree(intf->my_dev_name);
3225
intf->my_dev_name = NULL;
3226
3227
out_unlink1:
3228
sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3229
3230
out_put_bmc:
3231
mutex_lock(&bmc->dyn_mutex);
3232
list_del(&intf->bmc_link);
3233
mutex_unlock(&bmc->dyn_mutex);
3234
intf->bmc = &intf->tmp_bmc;
3235
kref_put(&bmc->usecount, cleanup_bmc_device);
3236
goto out;
3237
3238
out_list_del:
3239
mutex_lock(&bmc->dyn_mutex);
3240
list_del(&intf->bmc_link);
3241
mutex_unlock(&bmc->dyn_mutex);
3242
intf->bmc = &intf->tmp_bmc;
3243
put_device(&bmc->pdev.dev);
3244
goto out;
3245
}
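/*
 * The links created above give a sysfs layout roughly like this
 * (device paths are illustrative):
 *
 *	.../ipmi_si.0/bmc    -> .../ipmi_bmc.0
 *	.../ipmi_bmc.0/ipmi0 -> .../ipmi_si.0
 *
 * Each interface points at its BMC via "bmc", and the BMC points back
 * at every interface via a per-interface "ipmi<n>" link.
 */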
3246
3247
static int
3248
send_guid_cmd(struct ipmi_smi *intf, int chan)
3249
{
3250
struct kernel_ipmi_msg msg;
3251
struct ipmi_system_interface_addr si;
3252
3253
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3254
si.channel = IPMI_BMC_CHANNEL;
3255
si.lun = 0;
3256
3257
msg.netfn = IPMI_NETFN_APP_REQUEST;
3258
msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3259
msg.data = NULL;
3260
msg.data_len = 0;
3261
return i_ipmi_request(NULL,
3262
intf,
3263
(struct ipmi_addr *) &si,
3264
0,
3265
&msg,
3266
intf,
3267
NULL,
3268
NULL,
3269
0,
3270
intf->addrinfo[0].address,
3271
intf->addrinfo[0].lun,
3272
-1, 0);
3273
}
3274
3275
static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3276
{
3277
struct bmc_device *bmc = intf->bmc;
3278
3279
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3280
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3281
|| (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3282
/* Not for me */
3283
return;
3284
3285
if (msg->msg.data[0] != 0) {
3286
/* Error getting the GUID; assume the BMC doesn't have one. */
3287
bmc->dyn_guid_set = 0;
3288
goto out;
3289
}
3290
3291
if (msg->msg.data_len < UUID_SIZE + 1) {
3292
bmc->dyn_guid_set = 0;
3293
dev_warn(intf->si_dev,
3294
"The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
3295
msg->msg.data_len, UUID_SIZE + 1);
3296
goto out;
3297
}
3298
3299
import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3300
/*
3301
* Make sure the guid data is available before setting
3302
* dyn_guid_set.
3303
*/
3304
smp_wmb();
3305
bmc->dyn_guid_set = 1;
3306
out:
3307
wake_up(&intf->waitq);
3308
}
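/*
 * Layout of the Get Device GUID response handled above:
 *
 *	data[0]      completion code (0 on success)
 *	data[1..16]  the 16-byte GUID
 *
 * hence the UUID_SIZE + 1 minimum-length check and the import from
 * data + 1.
 */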
3309
3310
static void __get_guid(struct ipmi_smi *intf)
3311
{
3312
int rv;
3313
struct bmc_device *bmc = intf->bmc;
3314
3315
bmc->dyn_guid_set = 2;
3316
intf->null_user_handler = guid_handler;
3317
rv = send_guid_cmd(intf, 0);
3318
if (rv)
3319
/* Send failed, no GUID available. */
3320
bmc->dyn_guid_set = 0;
3321
else
3322
wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3323
3324
/* dyn_guid_set makes the guid data available. */
3325
smp_rmb();
3326
3327
intf->null_user_handler = NULL;
3328
}
3329
3330
static int
3331
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3332
{
3333
struct kernel_ipmi_msg msg;
3334
unsigned char data[1];
3335
struct ipmi_system_interface_addr si;
3336
3337
si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3338
si.channel = IPMI_BMC_CHANNEL;
3339
si.lun = 0;
3340
3341
msg.netfn = IPMI_NETFN_APP_REQUEST;
3342
msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3343
msg.data = data;
3344
msg.data_len = 1;
3345
data[0] = chan;
3346
return i_ipmi_request(NULL,
3347
intf,
3348
(struct ipmi_addr *) &si,
3349
0,
3350
&msg,
3351
intf,
3352
NULL,
3353
NULL,
3354
0,
3355
intf->addrinfo[0].address,
3356
intf->addrinfo[0].lun,
3357
-1, 0);
3358
}
3359
3360
static void
3361
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3362
{
3363
int rv = 0;
3364
int ch;
3365
unsigned int set = intf->curr_working_cset;
3366
struct ipmi_channel *chans;
3367
3368
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3369
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3370
&& (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3371
/* It's the one we want */
3372
if (msg->msg.data[0] != 0) {
3373
/* Got an error from the channel, just go on. */
3374
if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3375
/*
3376
* If the MC does not support this
3377
* command, that is legal. We just
3378
* assume it has one IPMB at channel
3379
* zero.
3380
*/
3381
intf->wchannels[set].c[0].medium
3382
= IPMI_CHANNEL_MEDIUM_IPMB;
3383
intf->wchannels[set].c[0].protocol
3384
= IPMI_CHANNEL_PROTOCOL_IPMB;
3385
3386
intf->channel_list = intf->wchannels + set;
3387
intf->channels_ready = true;
3388
wake_up(&intf->waitq);
3389
goto out;
3390
}
3391
goto next_channel;
3392
}
3393
if (msg->msg.data_len < 4) {
3394
/* Message not big enough, just go on. */
3395
goto next_channel;
3396
}
3397
ch = intf->curr_channel;
3398
chans = intf->wchannels[set].c;
3399
chans[ch].medium = msg->msg.data[2] & 0x7f;
3400
chans[ch].protocol = msg->msg.data[3] & 0x1f;
3401
3402
next_channel:
3403
intf->curr_channel++;
3404
if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3405
intf->channel_list = intf->wchannels + set;
3406
intf->channels_ready = true;
3407
wake_up(&intf->waitq);
3408
} else {
3409
intf->channel_list = intf->wchannels + set;
3410
intf->channels_ready = true;
3411
rv = send_channel_info_cmd(intf, intf->curr_channel);
3412
}
3413
3414
if (rv) {
3415
/* Got an error somehow, just give up. */
3416
dev_warn(intf->si_dev,
3417
"Error sending channel information for channel %d: %d\n",
3418
intf->curr_channel, rv);
3419
3420
intf->channel_list = intf->wchannels + set;
3421
intf->channels_ready = true;
3422
wake_up(&intf->waitq);
3423
}
3424
}
3425
out:
3426
return;
3427
}
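/*
 * The Get Channel Info response bytes used above:
 *
 *	data[0]  completion code
 *	data[2]  channel medium type (low 7 bits)
 *	data[3]  channel protocol type (low 5 bits)
 *
 * which is why the handler masks with 0x7f and 0x1f.
 */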
3428
3429
/*
3430
* Must be holding intf->bmc_reg_mutex to call this.
3431
*/
3432
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3433
{
3434
int rv;
3435
3436
if (ipmi_version_major(id) > 1
3437
|| (ipmi_version_major(id) == 1
3438
&& ipmi_version_minor(id) >= 5)) {
3439
unsigned int set;
3440
3441
/*
3442
* Start scanning the channels to see what is
3443
* available.
3444
*/
3445
set = !intf->curr_working_cset;
3446
intf->curr_working_cset = set;
3447
memset(&intf->wchannels[set], 0,
3448
sizeof(struct ipmi_channel_set));
3449
3450
intf->null_user_handler = channel_handler;
3451
intf->curr_channel = 0;
3452
rv = send_channel_info_cmd(intf, 0);
3453
if (rv) {
3454
dev_warn(intf->si_dev,
3455
"Error sending channel information for channel 0, %d\n",
3456
rv);
3457
intf->null_user_handler = NULL;
3458
return -EIO;
3459
}
3460
3461
/* Wait for the channel info to be read. */
3462
wait_event(intf->waitq, intf->channels_ready);
3463
intf->null_user_handler = NULL;
3464
} else {
3465
unsigned int set = intf->curr_working_cset;
3466
3467
/* Assume a single IPMB channel at zero. */
3468
intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3469
intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3470
intf->channel_list = intf->wchannels + set;
3471
intf->channels_ready = true;
3472
}
3473
3474
return 0;
3475
}
3476
3477
static void ipmi_poll(struct ipmi_smi *intf)
3478
{
3479
if (intf->handlers->poll)
3480
intf->handlers->poll(intf->send_info);
3481
/* In case something came in */
3482
handle_new_recv_msgs(intf);
3483
}
3484
3485
void ipmi_poll_interface(struct ipmi_user *user)
3486
{
3487
ipmi_poll(user->intf);
3488
}
3489
EXPORT_SYMBOL(ipmi_poll_interface);
3490
3491
static ssize_t nr_users_show(struct device *dev,
3492
struct device_attribute *attr,
3493
char *buf)
3494
{
3495
struct ipmi_smi *intf = container_of(attr,
3496
struct ipmi_smi, nr_users_devattr);
3497
3498
return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
3499
}
3500
static DEVICE_ATTR_RO(nr_users);
3501
3502
static ssize_t nr_msgs_show(struct device *dev,
3503
struct device_attribute *attr,
3504
char *buf)
3505
{
3506
struct ipmi_smi *intf = container_of(attr,
3507
struct ipmi_smi, nr_msgs_devattr);
3508
struct ipmi_user *user;
3509
unsigned int count = 0;
3510
3511
mutex_lock(&intf->users_mutex);
3512
list_for_each_entry(user, &intf->users, link)
3513
count += atomic_read(&user->nr_msgs);
3514
mutex_unlock(&intf->users_mutex);
3515
3516
return sysfs_emit(buf, "%u\n", count);
3517
}
3518
static DEVICE_ATTR_RO(nr_msgs);
3519
3520
static void redo_bmc_reg(struct work_struct *work)
3521
{
3522
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3523
bmc_reg_work);
3524
3525
if (!intf->in_shutdown)
3526
bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3527
3528
kref_put(&intf->refcount, intf_free);
3529
}
3530
3531
int ipmi_add_smi(struct module *owner,
3532
const struct ipmi_smi_handlers *handlers,
3533
void *send_info,
3534
struct device *si_dev,
3535
unsigned char slave_addr)
3536
{
3537
int i, j;
3538
int rv;
3539
struct ipmi_smi *intf, *tintf;
3540
struct list_head *link;
3541
struct ipmi_device_id id;
3542
3543
/*
3544
* Make sure the driver is actually initialized; this handles
3545
* problems with initialization order.
3546
*/
3547
rv = ipmi_init_msghandler();
3548
if (rv)
3549
return rv;
3550
3551
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3552
if (!intf)
3553
return -ENOMEM;
3554
3555
intf->owner = owner;
3556
intf->bmc = &intf->tmp_bmc;
3557
INIT_LIST_HEAD(&intf->bmc->intfs);
3558
mutex_init(&intf->bmc->dyn_mutex);
3559
INIT_LIST_HEAD(&intf->bmc_link);
3560
mutex_init(&intf->bmc_reg_mutex);
3561
intf->intf_num = -1; /* Mark it invalid for now. */
3562
kref_init(&intf->refcount);
3563
INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3564
intf->si_dev = si_dev;
3565
for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3566
intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3567
intf->addrinfo[j].lun = 2;
3568
}
3569
if (slave_addr != 0)
3570
intf->addrinfo[0].address = slave_addr;
3571
INIT_LIST_HEAD(&intf->user_msgs);
3572
mutex_init(&intf->user_msgs_mutex);
3573
INIT_LIST_HEAD(&intf->users);
3574
mutex_init(&intf->users_mutex);
3575
atomic_set(&intf->nr_users, 0);
3576
intf->handlers = handlers;
3577
intf->send_info = send_info;
3578
spin_lock_init(&intf->seq_lock);
3579
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3580
intf->seq_table[j].inuse = 0;
3581
intf->seq_table[j].seqid = 0;
3582
}
3583
intf->curr_seq = 0;
3584
spin_lock_init(&intf->waiting_rcv_msgs_lock);
3585
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3586
INIT_WORK(&intf->smi_work, smi_work);
3587
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3588
spin_lock_init(&intf->xmit_msgs_lock);
3589
INIT_LIST_HEAD(&intf->xmit_msgs);
3590
INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3591
mutex_init(&intf->events_mutex);
3592
spin_lock_init(&intf->watch_lock);
3593
atomic_set(&intf->event_waiters, 0);
3594
intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3595
INIT_LIST_HEAD(&intf->waiting_events);
3596
intf->waiting_events_count = 0;
3597
mutex_init(&intf->cmd_rcvrs_mutex);
3598
spin_lock_init(&intf->maintenance_mode_lock);
3599
INIT_LIST_HEAD(&intf->cmd_rcvrs);
3600
init_waitqueue_head(&intf->waitq);
3601
for (i = 0; i < IPMI_NUM_STATS; i++)
3602
atomic_set(&intf->stats[i], 0);
3603
3604
/*
3605
* Grab the watchers mutex so we can deliver the new interface
3606
* without races.
3607
*/
3608
mutex_lock(&smi_watchers_mutex);
3609
mutex_lock(&ipmi_interfaces_mutex);
3610
/* Look for a hole in the numbers. */
3611
i = 0;
3612
link = &ipmi_interfaces;
3613
list_for_each_entry(tintf, &ipmi_interfaces, link) {
3614
if (tintf->intf_num != i) {
3615
link = &tintf->link;
3616
break;
3617
}
3618
i++;
3619
}
3620
/* Add the new interface in numeric order. */
3621
if (i == 0)
3622
list_add(&intf->link, &ipmi_interfaces);
3623
else
3624
list_add_tail(&intf->link, link);
3625
3626
rv = handlers->start_processing(send_info, intf);
3627
if (rv)
3628
goto out_err;
3629
3630
rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3631
if (rv) {
3632
dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3633
goto out_err_started;
3634
}
3635
3636
mutex_lock(&intf->bmc_reg_mutex);
3637
rv = __scan_channels(intf, &id);
3638
mutex_unlock(&intf->bmc_reg_mutex);
3639
if (rv)
3640
goto out_err_bmc_reg;
3641
3642
intf->nr_users_devattr = dev_attr_nr_users;
3643
sysfs_attr_init(&intf->nr_users_devattr.attr);
3644
rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
3645
if (rv)
3646
goto out_err_bmc_reg;
3647
3648
intf->nr_msgs_devattr = dev_attr_nr_msgs;
3649
sysfs_attr_init(&intf->nr_msgs_devattr.attr);
3650
rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
3651
if (rv) {
3652
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3653
goto out_err_bmc_reg;
3654
}
3655
3656
intf->intf_num = i;
3657
mutex_unlock(&ipmi_interfaces_mutex);
3658
3659
/* After this point the interface is legal to use. */
3660
call_smi_watchers(i, intf->si_dev);
3661
3662
mutex_unlock(&smi_watchers_mutex);
3663
3664
return 0;
3665
3666
out_err_bmc_reg:
3667
ipmi_bmc_unregister(intf);
3668
out_err_started:
3669
if (intf->handlers->shutdown)
3670
intf->handlers->shutdown(intf->send_info);
3671
out_err:
3672
list_del(&intf->link);
3673
mutex_unlock(&ipmi_interfaces_mutex);
3674
mutex_unlock(&smi_watchers_mutex);
3675
kref_put(&intf->refcount, intf_free);
3676
3677
return rv;
3678
}
3679
EXPORT_SYMBOL(ipmi_add_smi);
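/*
 * Registration sketch for a low-level interface driver (illustrative;
 * the "my_*" names are hypothetical and only the handlers referenced
 * in this file are shown):
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = my_start_processing,
 *		.sender           = my_sender,
 *		.poll             = my_poll,
 *		.shutdown         = my_shutdown,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info, my_dev, 0);
 *
 * A slave_addr of 0 keeps the default IPMI_BMC_SLAVE_ADDR, as the
 * setup code above shows.
 */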
3680
3681
static void deliver_smi_err_response(struct ipmi_smi *intf,
3682
struct ipmi_smi_msg *msg,
3683
unsigned char err)
3684
{
3685
int rv;
3686
msg->rsp[0] = msg->data[0] | 4;
3687
msg->rsp[1] = msg->data[1];
3688
msg->rsp[2] = err;
3689
msg->rsp_size = 3;
3690
3691
/* This will never requeue, but it may ask us to free the message. */
3692
rv = handle_one_recv_msg(intf, msg);
3693
if (rv == 0)
3694
ipmi_free_smi_msg(msg);
3695
}
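/*
 * ORing 4 into data[0] above turns a request netfn into the
 * corresponding response netfn, since the netfn occupies bits 7..2 of
 * the byte: e.g. a request byte of 0x18 (netfn 0x06, LUN 0) becomes
 * 0x1c (netfn 0x07, LUN 0).
 */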
3696
3697
static void cleanup_smi_msgs(struct ipmi_smi *intf)
3698
{
3699
int i;
3700
struct seq_table *ent;
3701
struct ipmi_smi_msg *msg;
3702
struct list_head *entry;
3703
struct list_head tmplist;
3704
3705
/* Clear out our transmit queues and hold the messages. */
3706
INIT_LIST_HEAD(&tmplist);
3707
list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3708
list_splice_tail(&intf->xmit_msgs, &tmplist);
3709
3710
/* Current message first, to preserve order */
3711
while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3712
/* Wait for the message to clear out. */
3713
schedule_timeout(1);
3714
}
3715
3716
/* No need for locks, the interface is down. */
3717
3718
/*
3719
* Return errors for all pending messages in queue and in the
3720
* tables waiting for remote responses.
3721
*/
3722
while (!list_empty(&tmplist)) {
3723
entry = tmplist.next;
3724
list_del(entry);
3725
msg = list_entry(entry, struct ipmi_smi_msg, link);
3726
deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3727
}
3728
3729
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3730
ent = &intf->seq_table[i];
3731
if (!ent->inuse)
3732
continue;
3733
deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3734
}
3735
}
3736
3737
void ipmi_unregister_smi(struct ipmi_smi *intf)
3738
{
3739
struct ipmi_smi_watcher *w;
3740
int intf_num;
3741
3742
if (!intf)
3743
return;
3744
3745
intf_num = intf->intf_num;
3746
mutex_lock(&ipmi_interfaces_mutex);
3747
cancel_work_sync(&intf->smi_work);
3748
/* smi_work() can no longer be in progress after this. */
3749
3750
intf->intf_num = -1;
3751
intf->in_shutdown = true;
3752
list_del(&intf->link);
3753
mutex_unlock(&ipmi_interfaces_mutex);
3754
3755
/*
3756
* At this point no users can be added to the interface and no
3757
* new messages can be sent.
3758
*/
3759
3760
if (intf->handlers->shutdown)
3761
intf->handlers->shutdown(intf->send_info);
3762
3763
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
3764
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3765
3766
/*
3767
* Call all the watcher interfaces to tell them that
3768
* an interface is going away.
3769
*/
3770
mutex_lock(&smi_watchers_mutex);
3771
list_for_each_entry(w, &smi_watchers, link)
3772
w->smi_gone(intf_num);
3773
mutex_unlock(&smi_watchers_mutex);
3774
3775
mutex_lock(&intf->users_mutex);
3776
while (!list_empty(&intf->users)) {
3777
struct ipmi_user *user = list_first_entry(&intf->users,
3778
struct ipmi_user, link);
3779
3780
_ipmi_destroy_user(user);
3781
}
3782
mutex_unlock(&intf->users_mutex);
3783
3784
cleanup_smi_msgs(intf);
3785
3786
ipmi_bmc_unregister(intf);
3787
3788
kref_put(&intf->refcount, intf_free);
3789
}
3790
EXPORT_SYMBOL(ipmi_unregister_smi);
3791
3792
static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3793
struct ipmi_smi_msg *msg)
3794
{
3795
struct ipmi_ipmb_addr ipmb_addr;
3796
struct ipmi_recv_msg *recv_msg;
3797
3798
/*
3799
* This is 11, not 10, because the response must contain a
3800
* completion code.
3801
*/
3802
if (msg->rsp_size < 11) {
3803
/* Message not big enough, just ignore it. */
3804
ipmi_inc_stat(intf, invalid_ipmb_responses);
3805
return 0;
3806
}
3807
3808
if (msg->rsp[2] != 0) {
3809
/* An error getting the response, just ignore it. */
3810
return 0;
3811
}
3812
3813
ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3814
ipmb_addr.slave_addr = msg->rsp[6];
3815
ipmb_addr.channel = msg->rsp[3] & 0x0f;
3816
ipmb_addr.lun = msg->rsp[7] & 3;
3817
3818
/*
3819
* It's a response from a remote entity. Look up the sequence
3820
* number and handle the response.
3821
*/
3822
if (intf_find_seq(intf,
3823
msg->rsp[7] >> 2,
3824
msg->rsp[3] & 0x0f,
3825
msg->rsp[8],
3826
(msg->rsp[4] >> 2) & (~1),
3827
(struct ipmi_addr *) &ipmb_addr,
3828
&recv_msg)) {
3829
/*
3830
* We were unable to find the sequence number,
3831
* so just nuke the message.
3832
*/
3833
ipmi_inc_stat(intf, unhandled_ipmb_responses);
3834
return 0;
3835
}
3836
3837
memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3838
/*
3839
* The other fields matched, so no need to set them, except
3840
* for netfn, which needs to be the response that was
3841
* returned, not the request value.
3842
*/
3843
recv_msg->msg.netfn = msg->rsp[4] >> 2;
3844
recv_msg->msg.data = recv_msg->msg_data;
3845
recv_msg->msg.data_len = msg->rsp_size - 10;
3846
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3847
if (deliver_response(intf, recv_msg))
3848
ipmi_inc_stat(intf, unhandled_ipmb_responses);
3849
else
3850
ipmi_inc_stat(intf, handled_ipmb_responses);
3851
3852
return 0;
3853
}
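/*
 * The rsp[] offsets used above decode a Get Message response that
 * carries an IPMB response:
 *
 *	rsp[2]       completion code of the Get Message itself
 *	rsp[3]       channel (low nibble)
 *	rsp[4] >> 2  netfn (low bit masked off when matching, since
 *	             request and response netfns differ only there)
 *	rsp[6]       responder's slave address
 *	rsp[7]       sequence number (high 6 bits), LUN (low 2 bits)
 *	rsp[8]       command
 *	rsp[9]..     response data, ending with a checksum byte
 */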
3854
3855
static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3856
struct ipmi_smi_msg *msg)
3857
{
3858
struct cmd_rcvr *rcvr;
3859
int rv = 0;
3860
unsigned char netfn;
3861
unsigned char cmd;
3862
unsigned char chan;
3863
struct ipmi_user *user = NULL;
3864
struct ipmi_ipmb_addr *ipmb_addr;
3865
struct ipmi_recv_msg *recv_msg;
3866
3867
if (msg->rsp_size < 10) {
3868
/* Message not big enough, just ignore it. */
3869
ipmi_inc_stat(intf, invalid_commands);
3870
return 0;
3871
}
3872
3873
if (msg->rsp[2] != 0) {
3874
/* An error getting the response, just ignore it. */
3875
return 0;
3876
}
3877
3878
netfn = msg->rsp[4] >> 2;
3879
cmd = msg->rsp[8];
3880
chan = msg->rsp[3] & 0xf;
3881
3882
rcu_read_lock();
3883
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3884
if (rcvr) {
3885
user = rcvr->user;
3886
kref_get(&user->refcount);
3887
} else
3888
user = NULL;
3889
rcu_read_unlock();
3890
3891
if (user == NULL) {
3892
/* We didn't find a user, deliver an error response. */
3893
ipmi_inc_stat(intf, unhandled_commands);
3894
3895
msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3896
msg->data[1] = IPMI_SEND_MSG_CMD;
3897
msg->data[2] = msg->rsp[3];
3898
msg->data[3] = msg->rsp[6];
3899
msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3900
msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3901
msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3902
/* rqseq/lun */
3903
msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3904
msg->data[8] = msg->rsp[8]; /* cmd */
3905
msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3906
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3907
msg->data_size = 11;
3908
3909
dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
3910
msg->data_size, msg->data);
3911
3912
smi_send(intf, intf->handlers, msg, 0);
3913
/*
3914
* We used the message, so return the value that
3915
* causes it to not be freed or queued.
3916
*/
3917
rv = -1;
3918
} else {
3919
recv_msg = ipmi_alloc_recv_msg();
3920
if (!recv_msg) {
3921
/*
3922
* We couldn't allocate memory for the
3923
* message, so requeue it for handling
3924
* later.
3925
*/
3926
rv = 1;
3927
kref_put(&user->refcount, free_ipmi_user);
3928
} else {
3929
/* Extract the source address from the data. */
3930
ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3931
ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3932
ipmb_addr->slave_addr = msg->rsp[6];
3933
ipmb_addr->lun = msg->rsp[7] & 3;
3934
ipmb_addr->channel = msg->rsp[3] & 0xf;
3935
3936
/*
3937
* Extract the rest of the message information
3938
* from the IPMB header.
3939
*/
3940
recv_msg->user = user;
3941
recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3942
recv_msg->msgid = msg->rsp[7] >> 2;
3943
recv_msg->msg.netfn = msg->rsp[4] >> 2;
3944
recv_msg->msg.cmd = msg->rsp[8];
3945
recv_msg->msg.data = recv_msg->msg_data;
3946
3947
/*
3948
* We chop off 10 bytes, not 9, because the checksum
3950
* at the end also needs to be removed.
3950
*/
3951
recv_msg->msg.data_len = msg->rsp_size - 10;
3952
memcpy(recv_msg->msg_data, &msg->rsp[9],
3953
msg->rsp_size - 10);
3954
if (deliver_response(intf, recv_msg))
3955
ipmi_inc_stat(intf, unhandled_commands);
3956
else
3957
ipmi_inc_stat(intf, handled_commands);
3958
}
3959
}
3960
3961
return rv;
3962
}
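/*
 * Note on the checksums above, assuming the standard IPMB
 * two's-complement checksum implemented by ipmb_checksum() earlier in
 * this file: each checksum byte is chosen so that the covered bytes
 * plus the checksum sum to 0 modulo 256.  In the error response built
 * above, data[5] covers data[3..4] and data[10] covers data[6..9].
 */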

static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
				      struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	struct ipmi_user *user = NULL;
	struct ipmi_ipmb_direct_addr *daddr;
	struct ipmi_recv_msg *recv_msg;
	unsigned char netfn = msg->rsp[0] >> 2;
	unsigned char cmd = msg->rsp[3];

	rcu_read_lock();
	/* We always use channel 0 for direct messages. */
	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (netfn + 1) << 2;
		msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
		msg->data[1] = msg->rsp[1]; /* Addr */
		msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
		msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
		msg->data[3] = cmd;
		msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data_size = 5;

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_ipmi_user);
		} else {
			/* Extract the source address from the data. */
			daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
			daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
			daddr->channel = 0;
			daddr->slave_addr = msg->rsp[1];
			daddr->rs_lun = msg->rsp[0] & 3;
			daddr->rq_lun = msg->rsp[2] & 3;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = (msg->rsp[2] >> 2);
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[3];
			recv_msg->msg.data = recv_msg->msg_data;

			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, msg->rsp + 4,
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}
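
/*
 * For reference, the IPMB-direct layout as consumed by the two direct
 * handlers here: byte 0 carries the netfn in the upper 6 bits and a
 * LUN in the low 2 bits, byte 1 is the peer's slave address, byte 2
 * is rqSeq (upper 6 bits) plus the other LUN, byte 3 is the command,
 * and bytes 4+ are data.  Unlike IPMB carried over Get Message, there
 * is no trailing checksum byte, hence the "- 4" with no extra chop.
 */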

static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
				      struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_ipmb_direct_addr *daddr;

	recv_msg = msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
	daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
	daddr->channel = 0;
	daddr->slave_addr = msg->rsp[1];
	daddr->rq_lun = msg->rsp[0] & 3;
	daddr->rs_lun = msg->rsp[2] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[3];
	memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 4;
	deliver_local_response(intf, recv_msg);

	return 0;
}

static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * This is 13, not 12, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_lan_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}
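
/*
 * For reference, the LAN "Get Message" offsets consumed by the two
 * LAN handlers (again, only what this file reads; see IPMI 2.0,
 * section 22.6): rsp[3] is channel (low nibble) and privilege (high
 * nibble), rsp[4] the session handle, rsp[5] and rsp[8] the two
 * SWIDs, rsp[6] netfn/LUN, rsp[9] seq (upper 6 bits) / LUN (low 2
 * bits), rsp[10] the command, and rsp[11+] the data followed by a
 * trailing checksum (which is why data_len is rsp_size - 12).
 */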

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up and return an error. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = chan;
		msg->data[3] = msg->rsp[4]; /* handle */
		msg->data[4] = msg->rsp[8]; /* rsSWID */
		msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
		msg->data[6] = ipmb_checksum(&msg->data[3], 3);
		msg->data[7] = msg->rsp[5]; /* rqSWID */
		/* rqseq/lun */
		msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
		msg->data[9] = cmd;
		msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[11] = ipmb_checksum(&msg->data[7], 4);
		msg->data_size = 12;

		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
			msg->data_size, msg->data);

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_ipmi_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data, &msg->rsp[11],
			       msg->rsp_size - 12);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM message, so the OEM needs to know how
	 * to handle the message.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_ipmi_user);
		} else {
			/*
			 * OEM messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements.
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			mutex_unlock(&intf->users_mutex);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				user = recv_msg->user;
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
				kref_put(&user->refcount, free_ipmi_user);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	mutex_unlock(&intf->users_mutex);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

out:
	mutex_unlock(&intf->events_mutex);

	return rv;
}
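
/*
 * Illustrative sketch (not compiled): how an in-kernel user would
 * arrange to receive the events fanned out above.  The example_*
 * names are hypothetical; ipmi_create_user() and
 * ipmi_set_gets_events() are the real public entry points.
 */
#if 0
static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
{
	/* msg->recv_type is IPMI_ASYNC_EVENT_RECV_TYPE for events. */
	ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static int example_watch_events(void)
{
	struct ipmi_user *user;
	int rv;

	rv = ipmi_create_user(0, &example_hndl, NULL, &user);
	if (rv)
		return rv;
	/* Opt in; any events queued on waiting_events are delivered too. */
	return ipmi_set_gets_events(user, true);
}
#endif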

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue = 0;
	int chan;
	unsigned char cc;
	bool is_cmd = !((msg->rsp[0] >> 2) & 1);

	dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

return_unspecified:
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		/* Commands must have at least 4 bytes, responses 5. */
		if (is_cmd && (msg->rsp_size < 4)) {
			ipmi_inc_stat(intf, invalid_commands);
			goto out;
		}
		if (!is_cmd && (msg->rsp_size < 5)) {
			ipmi_inc_stat(intf, invalid_ipmb_responses);
			/* Construct a valid error response. */
			msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
			msg->rsp[0] |= (1 << 2); /* Make it a response */
			msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
			msg->rsp[1] = msg->data[1]; /* Addr */
			msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
			msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
			msg->rsp[3] = msg->data[3]; /* Cmd */
			msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
			msg->rsp_size = 5;
		}
	} else if ((msg->data_size >= 2)
		   && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
		   && (msg->data[1] == IPMI_SEND_MSG_CMD)
		   && (msg->user_data == NULL)) {

		if (intf->in_shutdown || intf->run_to_completion)
			goto out;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		requeue = 0;
		goto out;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn_ratelimited(intf->si_dev,
				     "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
				     (msg->data[0] >> 2) | 1, msg->data[1],
				     msg->rsp[0] >> 2, msg->rsp[1]);

		goto return_unspecified;
	}

	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		if ((msg->data[0] >> 2) & 1) {
			/* It's a response to a sent response. */
			chan = 0;
			cc = msg->rsp[4];
			goto process_response_response;
		}
		if (is_cmd)
			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
		else
			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
		   && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg;

		if (intf->run_to_completion)
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;
		cc = msg->rsp[2];

process_response_response:
		recv_msg = msg->user_data;

		requeue = 0;
		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg_data[0] = cc;
		recv_msg->msg.data_len = 1;
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		if (intf->run_to_completion)
			goto out;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		if (intf->run_to_completion)
			goto out;

		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}
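
/*
 * Worked example of the netfn sanity check above: a request goes out
 * with an even netfn (e.g. IPMI_NETFN_APP_REQUEST, 0x06), and the
 * matching response must carry the odd netfn of the pair (0x07),
 * i.e. (msg->data[0] >> 2) | 1.  Anything else falls into the
 * "incorrect response" branch and is turned into IPMI_ERR_UNSPECIFIED.
 */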

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head; this is safe because this
			 * workqueue is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
}

static void smi_work(struct work_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_work(intf, t, smi_work);
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	struct ipmi_smi_msg *newmsg = NULL;
	struct ipmi_recv_msg *msg, *msg2;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	handle_new_recv_msgs(intf);

	/* Nothing below applies during panic time. */
	if (run_to_completion)
		return;

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;

		mutex_lock(&intf->users_mutex);
		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		mutex_unlock(&intf->users_mutex);
	}

	/*
	 * Freeing the message can cause a user to be released, which
	 * can then cause the interface to be freed.  Make sure that
	 * doesn't happen until we are ready.
	 */
	kref_get(&intf->refcount);

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		struct ipmi_user *user = msg->user;

		list_del(&msg->link);

		if (refcount_read(&user->destroyed) == 0) {
			ipmi_free_recv_msg(msg);
		} else {
			atomic_dec(&user->nr_msgs);
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
		}
	}
	mutex_unlock(&intf->user_msgs_mutex);

	kref_put(&intf->refcount, intf_free);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a workqueue.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_work(&intf->smi_work);
	else
		queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
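
/*
 * Illustrative sketch (not compiled): the calling convention a lower
 * layer (system interface driver) follows when a response arrives.
 * The example_* names are hypothetical; the message must be one the
 * core previously handed to the ->sender() handler, with ->rsp and
 * ->rsp_size filled in before delivery.
 */
#if 0
static void example_got_response(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg,
				 const unsigned char *rsp, int len)
{
	/* len is assumed to be <= IPMI_MAX_MSG_LENGTH here. */
	memcpy(msg->rsp, rsp, len);
	msg->rsp_size = len;
	/* May be called with locks held; delivery happens via smi_work. */
	ipmi_smi_msg_received(intf, msg);
}
#endif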

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	dev_dbg(intf->si_dev, "Resend: %*ph\n",
		smi_msg->data_size, smi_msg->data);

	return smi_msg;
}

static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}

static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	queue_work(system_wq, &intf->smi_work);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout_work(struct work_struct *work)
{
	struct ipmi_smi *intf;
	bool need_timer = false;

	if (atomic_read(&stop_operation))
		return;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);

static void ipmi_timeout(struct timer_list *unused)
{
	if (atomic_read(&stop_operation))
		return;

	queue_work(system_wq, &ipmi_timer_work);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
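
/*
 * Note on the timer/work split above: the timer itself only queues
 * ipmi_timer_work, and the work re-arms the timer for as long as any
 * interface still needs it.  Shutdown (see cleanup_ipmi() below)
 * therefore sets stop_operation first, then flushes both the timer
 * and the work, so neither can re-arm the other during teardown.
 */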

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user && !oops_in_progress)
		kref_put(&msg->user->refcount, free_ipmi_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
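
/*
 * Illustrative sketch (not compiled): the ownership rule for these
 * message structures.  Whoever allocates a message eventually
 * releases it through ->done() (via the free helpers), which keeps
 * the inuse counters above balanced; the leak check in
 * cleanup_ipmi() relies on this.
 */
#if 0
static void example_roundtrip(void)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		return;
	/* ...fill in smi_msg->data/data_size and hand it off... */
	ipmi_free_smi_msg(smi_msg);	/* invokes smi_msg->done() */
}
#endif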

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					 struct ipmi_addr *addr,
					 struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

void ipmi_panic_request_and_wait(struct ipmi_user *user,
				 struct ipmi_addr *addr,
				 struct kernel_ipmi_msg *msg)
{
	user->intf->run_to_completion = 1;
	_ipmi_panic_request_and_wait(user->intf, addr, msg);
}
EXPORT_SYMBOL(ipmi_panic_request_and_wait);
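
/*
 * Illustrative sketch (not compiled): how a panic-context caller (the
 * IPMI watchdog, for instance) could use the export above.  The
 * address and command chosen here are examples only, not a
 * prescribed sequence.
 */
#if 0
static void example_panic_msg(struct ipmi_user *user)
{
	struct ipmi_system_interface_addr si = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
		.lun = 0,
	};
	struct kernel_ipmi_msg msg = {
		.netfn = IPMI_NETFN_APP_REQUEST,
		.cmd = IPMI_GET_DEVICE_ID_CMD,
		.data = NULL,
		.data_len = 0,
	};

	/* Forces run-to-completion, then polls until the reply lands. */
	ipmi_panic_request_and_wait(user, (struct ipmi_addr *)&si, &msg);
}
#endif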

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strnlen(p, 11);

		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */

		memcpy_and_pad(data+5, 11, p, size, '\0');
		p += size;

		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
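
/*
 * Worked example of the chunking loop above: a 30-character panic
 * string is stored as three SEL records carrying 11, 11, and 8
 * characters (the last chunk NUL-padded by memcpy_and_pad), with
 * data[4] running 0, 1, 2 so the records can be reassembled in order.
 */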

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
	if (!bmc_remove_work_wq) {
		pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
		rv = -ENOMEM;
		goto out;
	}

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		destroy_workqueue(bmc_remove_work_wq);

		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		timer_delete_sync(&ipmi_timer);
		cancel_work_sync(&ipmi_timer_work);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");