// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <[email protected]>
 *         [email protected]
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <[email protected]>
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#define pr_fmt(fmt) "ipmi_si: " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include "ipmi_si_sm.h"
#include <linux/string.h>
#include <linux/ctype.h>

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a
				       short timeout */
#define SI_TIMEOUT_HOSED	(HZ) /* 1 second when in hosed state. */

enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_GETTING_MESSAGES,
	SI_CHECKING_ENABLES,
	SI_SETTING_ENABLES,
	SI_HOSED
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };

const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };

static bool initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,


	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};

struct smi_info {
	int si_num;
	struct ipmi_smi *intf;
	struct si_sm_data *si_sm;
	const struct si_sm_handlers *handlers;
	spinlock_t si_lock;
	struct ipmi_smi_msg *waiting_msg;
	struct ipmi_smi_msg *curr_msg;
	enum si_intf_state si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
				 OEM1_DATA_AVAIL | \
				 OEM2_DATA_AVAIL)
	unsigned char msg_flags;

	/* Does the BMC have an event buffer? */
	bool has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool run_to_completion;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* This flag is set if the timer can be started. */
	bool timer_can_start;

	/* This flag is set if the timer is running (timer_pending() isn't enough). */
	bool timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Are we waiting for events, pretimeouts, or received msgs? */
	atomic_t need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we clear the receive irq bit in the global enables?
	 * There are currently two forms of brokenness: some systems
	 * cannot clear the bit (which is technically within the spec
	 * but a bad idea), and some systems have the bit forced to
	 * zero even though interrupts work (which is clearly outside
	 * the spec).  The next bool tells which form of brokenness is
	 * present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/* Is the driver in maintenance mode? */
	bool in_maintenance_mode;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Have we added the device group to the device? */
	bool dev_group_added;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;

	struct list_head link;
};

#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);

#ifdef DEBUG_TIMING
void debug_timestamp(struct smi_info *smi_info, char *msg)
{
	struct timespec64 t;

	ktime_get_ts64(&t);
	dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
}
#else
#define debug_timestamp(smi_info, x)
#endif

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp(smi_info, "Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
				0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}

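/* (Re)arm the interface timer, but only if starting the timer is allowed. */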
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	if (!smi_info->timer_can_start)
		return;
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}

static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_get_flags(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_MSG_FLAGS_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_GETTING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

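/*
 * Act on the current message flags, highest priority first: watchdog
 * pre-timeouts, then received messages, then events, then OEM data.
 */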
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

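/*
 * Compute the global-enable bits this driver wants set, given the
 * interface capabilities and the current IRQ state; *irq_on reports
 * whether either interrupt enable ends up set.
 */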
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}

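/* Make the BT interrupt-enable mask match the desired IRQ state. */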
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp(smi_info, "Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn_ratelimited(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn_ratelimited(smi_info->io.dev,
				"Couldn't get irq info: %x,\n"
				"Maybe ok, but ipmi might run very slowly.\n",
				msg[2]);
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_info->type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn_ratelimited(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	case SI_HOSED: /* Shouldn't happen. */
		break;
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	if (smi_info->si_state == SI_HOSED)
		/* Just in case, hosed state is only left from the timeout. */
		return SI_SM_HOSED;

	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.  We just disable operations for
		 * a while and retry in hosed state.
		 */
		smi_info->si_state = SI_HOSED;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_BUS_ERR);
		}
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
		goto out;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			start_get_flags(smi_info);
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* It's OK if this fails, the timer will just go off. */
		if (timer_delete(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

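/*
 * If the interface is idle, restart the timer, wake the kernel thread,
 * and try to start the next queued message.
 */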
static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp(smi_info, "Enqueue");

	if (smi_info->si_state == SI_HOSED)
		return IPMI_BUS_ERR;

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return IPMI_CC_NO_ERROR;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
	return IPMI_CC_NO_ERROR;
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 as a special constant to indicate that we are spinning in
 * kipmid looking for something and not delaying between checks.
 */
#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
					 const struct smi_info *smi_info,
					 ktime_t *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->si_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		*busy_until = IPMI_TIME_NOT_BUSY;
	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
	} else {
		if (unlikely(ktime_get() > *busy_until)) {
			*busy_until = IPMI_TIME_NOT_BUSY;
			return false;
		}
	}
	return true;
}


/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It keeps spinning until
 * an operation is complete or max_busy tells it to stop (if that is
 * enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/driver-api/ipmi.rst for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}


static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}

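/*
 * Record whether the upper layer is watching for anything and, if the
 * interface is idle, kick the timer and thread so watching can start.
 */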
static void set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;
	int enable;

	enable = !!watch_mask;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

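/*
 * Periodic timer handler: runs the state machine with the elapsed time
 * and picks a short or long timeout for the next tick, depending on
 * what the state machine asked for.
 */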
static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = timer_container_of(smi_info, t,
						       si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp(smi_info, "Timer");

	if (smi_info->si_state == SI_HOSED)
		/* Try something to see if the BMC is now operational. */
		start_get_flags(smi_info);

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
	} else if (smi_result == SI_SM_CALL_WITH_DELAY) {
		/*
		 * If the state machine asks for a short delay, then shorten
		 * the timer timeout.
		 */
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_info->type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp(smi_info, "Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}

static int smi_start_processing(void            *send_info,
				struct ipmi_smi *intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->si_num < num_force_kipmid)
		enable = force_kipmid[new_smi->si_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->si_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

	return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
	smi_info->in_maintenance_mode = enable;
}

static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
	.owner			= THIS_MODULE,
	.start_processing	= smi_start_processing,
	.shutdown		= shutdown_smi,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode	= set_maintenance_mode,
	.set_run_to_completion	= set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid,
		 "Force the kipmi daemon to be enabled (1) or disabled (0).  Normally the IPMI driver auto-detects this, but the value may be overridden by this parameter.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty,
		 "Unload the module if no interfaces are specified or found, default is 1.  Setting to 0 is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before sleeping.  0 (default) means to wait forever.  Set to 100-500 if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_info->type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_info->type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}

int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 SI_DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 SI_DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}

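/*
 * Run the state machine until the current transaction completes;
 * returns -ENODEV if the state machine reports the interface as hosed.
 */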
static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}

static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;
	unsigned int retry_count = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;

retry:
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2, &smi_info->device_id);
	if (rv) {
		/* record completion code */
		unsigned char cc = *(resp + 2);

		if (cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			dev_warn_ratelimited(smi_info->io.dev,
					     "BMC returned 0x%2.2x, retry get bmc device id\n",
					     cc);
			goto retry;
		}
	}

out:
	kfree(resp);
	return rv;
}

static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when clearing the receive irq bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}

static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}

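/* Generate a read-only sysfs attribute that reports one statistic counter. */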
#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR_RO(name)

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t interrupts_enabled_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);

IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);

static ssize_t params_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return sysfs_emit(buf,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_info->type],
			addr_space_to_str[smi_info->io.addr_space],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
}
static DEVICE_ATTR_RO(params);

static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,
	NULL
};

static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs		= ipmi_si_dev_attrs,
};

/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}

/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 *
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}

#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;
	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */

#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size = smi_info->curr_msg->data_size;
	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Registers the Dell PowerEdge BT transaction notifier
 * when we know the interface needs it.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_info->type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}

/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.oem_data_avail_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}

static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}

static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread != NULL) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;
	}

	smi_info->timer_can_start = false;
	timer_delete_sync(&smi_info->si_timer);
}

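/*
 * Look for an already-registered interface at the same address; if one
 * exists, return it (propagating a known slave address if needed).
 */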
static struct smi_info *find_dup_si(struct smi_info *info)
1868
{
1869
struct smi_info *e;
1870
1871
list_for_each_entry(e, &smi_infos, link) {
1872
if (e->io.addr_space != info->io.addr_space)
1873
continue;
1874
if (e->io.addr_data == info->io.addr_data) {
1875
/*
1876
* This is a cheap hack, ACPI doesn't have a defined
1877
* slave address but SMBIOS does. Pick it up from
1878
* any source that has it available.
1879
*/
1880
if (info->io.slave_addr && !e->io.slave_addr)
1881
e->io.slave_addr = info->io.slave_addr;
1882
return e;
1883
}
1884
}
1885
1886
return NULL;
1887
}
1888
1889
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	/*
	 * If the user gave us a hard-coded device at the same
	 * address, they presumably want us to use it and not what is
	 * in the firmware.
	 */
	if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
	    ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
		dev_info(io->dev,
			 "Hard-coded device at this address already exists");
		return -ENODEV;
	}

	if (!io->io_setup) {
		if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
		    io->addr_space == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_info->type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_info->type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info("Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_info->type]);

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized)
		rv = try_smi_init(new_smi);
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}

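/*
 * Sketch of how a discovery source hands an interface to
 * ipmi_si_add_smi(); the function and address below are hypothetical,
 * for illustration only:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct si_sm_io io;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.addr_source = SI_PLATFORM;
 *		io.si_info = &ipmi_kcs_si_info;
 *		io.addr_space = IPMI_IO_ADDR_SPACE;
 *		io.addr_data = 0xca2;           made-up KCS port
 *		io.dev = dev;
 *		return ipmi_si_add_smi(&io);
 *	}
 *
 * io_setup may be left NULL as above; ipmi_si_add_smi() fills in
 * ipmi_si_port_setup() or ipmi_si_mem_setup() based on addr_space.
 */
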
/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
	int rv = 0;
	int i;

	pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_info->type],
		addr_space_to_str[new_smi->io.addr_space],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_info->type) {
	case SI_KCS:
		new_smi->handlers = &kcs_smi_handlers;
		break;

	case SI_SMIC:
		new_smi->handlers = &smic_smi_handlers;
		break;

	case SI_BT:
		new_smi->handlers = &bt_smi_handlers;
		break;

	default:
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	new_smi->si_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		pr_err("IPMI interface added with no device\n");
		rv = -EIO;
		goto out_err;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
	if (rv) {
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
		goto out_err;
	}

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");
		rv = -ENODEV;
		goto out_err;
	}

	/*
	 * Attempt a get device id command.  If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
	if (rv) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"There appears to be no BMC at this location\n");
		goto out_err;
	}

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
	if (rv == 0)
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);

	/*
	 * IRQ is defined to be set when non-zero.  req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);
	}

	dev_set_drvdata(new_smi->io.dev, new_smi);
	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to add device attributes: error %d\n",
			rv);
		goto out_err;
	}
	new_smi->dev_group_added = true;

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->io.dev,
			       new_smi->io.slave_addr);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);
		goto out_err;
	}

	/* Don't increment till we know we have succeeded. */
	smi_num++;

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_info->type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);

out_err:
	if (rv && new_smi->io.io_cleanup) {
		new_smi->io.io_cleanup(&new_smi->io);
		new_smi->io.io_cleanup = NULL;
	}

	if (rv && new_smi->si_sm) {
		kfree(new_smi->si_sm);
		new_smi->si_sm = NULL;
	}

	return rv;
}

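/*
 * To summarize the probe sequence above: pick a state machine for the
 * interface type, allocate and map the I/O, run the handler's
 * low-level detect(), then confirm a BMC is actually present with a
 * Get Device ID before wiring up OEM quirks, sysfs attributes, and
 * registration with the IPMI core.  On any failure the out_err path
 * unwinds the I/O mapping and the state machine allocation.
 */
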
/*
 * Devices in the same address space at the same address are the same.
 */
static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
{
	return (e1->io.addr_space == e2->io.addr_space &&
		e1->io.addr_data == e2->io.addr_data);
}

static int __init init_ipmi_si(void)
{
	struct smi_info *e, *e2;

	if (initialized)
		return 0;

	ipmi_hardcode_init();

	pr_info("IPMI System Interface driver\n");

	ipmi_si_platform_init();

	ipmi_si_pci_init();

	ipmi_si_ls2k_init();

	ipmi_si_parisc_init();

	mutex_lock(&smi_infos_lock);

	/*
	 * Scan through all the devices.  We prefer devices with
	 * interrupts, so go through those first in case there are any
	 * duplicates that don't have the interrupt set.
	 */
	list_for_each_entry(e, &smi_infos, link) {
		bool dup = false;

		/* Register ones with interrupts first. */
		if (!e->io.irq)
			continue;

		/*
		 * Go through the ones we have already seen to see if this
		 * is a dup.
		 */
		list_for_each_entry(e2, &smi_infos, link) {
			if (e2 == e)
				break;
			if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		if (!dup)
			try_smi_init(e);
	}

	/*
	 * Now try devices without interrupts.
	 */
	list_for_each_entry(e, &smi_infos, link) {
		bool dup = false;

		if (e->io.irq)
			continue;

		/*
		 * Go through the ones we have already seen to see if
		 * this is a dup.  We have already looked at the ones
		 * with interrupts.
		 */
		list_for_each_entry(e2, &smi_infos, link) {
			if (!e2->io.irq)
				continue;
			if (ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		list_for_each_entry(e2, &smi_infos, link) {
			if (e2 == e)
				break;
			if (ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		if (!dup)
			try_smi_init(e);
	}

	initialized = true;
	mutex_unlock(&smi_infos_lock);

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn("Unable to find any System Interface(s)\n");
		return -ENODEV;
	} else {
		mutex_unlock(&smi_infos_lock);
		return 0;
	}
}
module_init(init_ipmi_si);

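/*
 * Illustration of the two-pass scan above (addresses made up): if ACPI
 * reports a KCS interface at I/O port 0xca2 with IRQ 10 and SMBIOS
 * reports the same address with no IRQ, the first pass registers the
 * ACPI entry and the second pass skips the SMBIOS entry as a duplicate
 * of an already-registered interrupt-driven interface.
 */
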
static void wait_msg_processed(struct smi_info *smi_info)
{
	unsigned long jiffies_now;
	long time_diff;

	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		jiffies_now = jiffies;
		time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
			     * SI_USEC_PER_JIFFY);
		smi_event_handler(smi_info, time_diff);
		schedule_timeout_uninterruptible(1);
	}
}

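/*
 * Worked example of the conversion above (HZ value illustrative): with
 * HZ == 250, SI_USEC_PER_JIFFY is 1000000 / 250 == 4000, so a gap of
 * three jiffies since last_timeout_jiffies is passed to
 * smi_event_handler() as 12000 microseconds of elapsed time.
 */
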
static void shutdown_smi(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt handlers
	 * that might have been running before we freed the interrupt.
	 */
	synchronize_rcu();

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		disable_si_irq(smi_info);

	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;

	smi_info->intf = NULL;
}

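/*
 * The teardown order above matters: IRQs, the timer, and the thread
 * are stopped before any message drain, the first wait_msg_processed()
 * lets an in-flight transaction finish, disable_si_irq() then starts
 * the transaction that turns interrupts off in the BMC, and the second
 * wait_msg_processed() waits for that to complete before the state
 * machine memory is freed.
 */
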
/*
 * Must be called with smi_infos_lock held, to serialize the
 * smi_info->intf check.
 */
static void cleanup_one_si(struct smi_info *smi_info)
{
	if (!smi_info)
		return;

	list_del(&smi_info->link);
	ipmi_unregister_smi(smi_info->intf);
	kfree(smi_info);
}

void ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {
			cleanup_one_si(e);
			break;
		}
	}
	mutex_unlock(&smi_infos_lock);
}

struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
				      unsigned long addr)
{
	struct smi_info *e, *tmp_e;
	struct device *dev = NULL;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_space != addr_space)
			continue;
		if (e->io.si_info->type != si_type)
			continue;
		if (e->io.addr_data == addr) {
			dev = get_device(e->io.dev);
			cleanup_one_si(e);
		}
	}
	mutex_unlock(&smi_infos_lock);

	return dev;
}

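/*
 * Note for callers of ipmi_si_remove_by_data(): the returned struct
 * device (if any) had its reference count raised by the get_device()
 * call above, so the caller is expected to drop that reference with
 * put_device() when done with it.
 */
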
static void cleanup_ipmi_si(void)
{
	struct smi_info *e, *tmp_e;

	if (!initialized)
		return;

	ipmi_si_pci_shutdown();

	ipmi_si_ls2k_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
		cleanup_one_si(e);
	mutex_unlock(&smi_infos_lock);

	ipmi_si_hardcode_exit();
	ipmi_si_hotmod_exit();
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");