GitHub Repository: awilliam/linux-vfio
Path: blob/master/kernel/irq/spurious.c

/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

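/*
 * irq_poll_active serializes the two pollers (misrouted_irq() and the
 * poll_spurious_irqs() timer) against each other, while irq_poll_cpu
 * records which CPU owns the running poll so that irq_wait_for_poll()
 * can warn when a poll recurses onto its own CPU.
 */
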
/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}

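/*
 * The unlock/cpu_relax()/lock loop above is deliberate: try_one_irq()
 * runs with desc->lock held, so the waiter has to drop the lock while
 * spinning or the poll in progress could never complete.
 */
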
/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /* PER_CPU and nested thread interrupts are never polled */
        if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first. Check for action->next as well.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER) || !action->next)
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

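/*
 * Polling is limited to lines with at least two IRQF_SHARED actions:
 * shared handlers must already tolerate being called for interrupts
 * that are not theirs, so invoking them speculatively is safe, and
 * timer interrupts (__IRQF_TIMER) are never polled.
 */
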
static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(i, desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

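/*
 * Periodic poller for lines that were disabled as spurious. It reruns
 * every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10, i.e. roughly every 100ms)
 * and force-polls each line marked IRQS_SPURIOUS_DISABLED, so devices
 * behind a killed line still get serviced, if slowly.
 */
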
static void poll_spurious_irqs(unsigned long dummy)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(i, desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

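/*
 * Sanity-check a handler return value: the valid irqreturn_t values
 * are IRQ_NONE (0), IRQ_HANDLED (1) and IRQ_WAKE_THREAD (2), so
 * anything above IRQ_HANDLED | IRQ_WAKE_THREAD cannot have come from
 * a well-behaved handler.
 */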
static inline int bad_action_ret(irqreturn_t action_ret)
{
        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 * functioning device sharing an IRQ with the failing one)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
                 irqreturn_t action_ret)
{
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                       irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                       "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but IRQ_PROGRESS set. We might race
         * with something else removing an action. It's ok to take
         * desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                               action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
                action = action->next;
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

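/*
 * Rate-limited wrapper: only the first 100 bad events are reported,
 * so a persistently broken handler cannot flood the log forever.
 */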
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(irq, desc, action_ret);
        }
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ, so see if it was misrouted. */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts
         * if they are marked as IRQF_IRQPOLL (or for irq zero, which is
         * the traditional, legacy PC timer interrupt).
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us. We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

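/*
 * note_interrupt() is called after each hard interrupt, without
 * desc->lock held, and maintains the per-descriptor unhandled/total
 * counters that decide when a line is declared stuck and disabled.
 */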
void note_interrupt(unsigned int irq, struct irq_desc *desc,
                    irqreturn_t action_ret)
{
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;

        /* we get here again via the threaded handler */
        if (action_ret == IRQ_WAKE_THREAD)
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(irq, desc, action_ret);
                return;
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
                 * otherwise the counter becomes a doomsday timer for otherwise
                 * working systems
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(irq, desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}

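/*
 * Boot parameters: "noirqdebug" turns the lockup detector off,
 * "irqfixup" enables misrouted-IRQ recovery for unhandled interrupts,
 * and "irqpoll" additionally polls handled interrupts (irqfixup == 2).
 */
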
int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
               "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
               "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);