GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_intr.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, Stefan Esser <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread. There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
	int	it_waiting;		/* Waiting in the runq. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
#ifdef HWPMC_HOOKS
static int intr_hwpmc_waiting_report_threshold = 1;
SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
    &intr_hwpmc_waiting_report_threshold, 1,
    "Threshold for reporting number of events in a workq");
#define	PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL)
#endif
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , intr, all);
PMC_SOFT_DEFINE( , , intr, ithread);
PMC_SOFT_DEFINE( , , intr, filter);
PMC_SOFT_DEFINE( , , intr, stray);
PMC_SOFT_DEFINE( , , intr, schedule);
PMC_SOFT_DEFINE( , , intr, waiting);

#define PMC_SOFT_CALL_INTR_HLPR(event, frame)			\
do {								\
	if (frame != NULL)					\
		PMC_SOFT_CALL_TF( , , intr, event, frame);	\
	else							\
		PMC_SOFT_CALL( , , intr, event);		\
} while (0)
#endif

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_ithread_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space, flags;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	flags = 0;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		flags |= ih->ih_flags;
	}
	ie->ie_hflags = flags;

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit. Otherwise, we have multiple handlers and not all of the names
	 * will fit. Add +'s to indicate missing names. If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, u_int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU. Note that not all
 * platforms support binding an interrupt to a CPU. For those
 * platforms this request will fail. Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU. For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, const void *m)
{
	struct intr_event *ie;
	const cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind. Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	if (ie == NULL)
		return (EINVAL);

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (ie->ie_thread != NULL)
		ithread_destroy(ie->ie_thread);
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct intr_event *ie;
	struct thread *td;

	td = ithread->it_thread;
	ie = ithread->it_event;

	mtx_assert(&ie->ie_lock, MA_OWNED);

	CTR2(KTR_INTR, "%s: killing %s", __func__, ie->ie_name);

	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_wakeup(td, SRQ_INTR);
	} else
		thread_unlock(td);
	while (ie->ie_thread != NULL)
		msleep(ithread, &ie->ie_lock, 0, "ithd_dth", 0);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	if ((flags & INTR_SLEEPABLE) != 0 && (flags & INTR_EXCL) == 0) {
		printf("%s: INTR_SLEEPABLE requires INTR_EXCL to be set\n",
		    __func__);
		return (EINVAL);
	}

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;
	if (flags & INTR_TYPE_NET)
		ih->ih_flags |= IH_NET;

	/* We can only have one exclusive or sleepable handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & (INTR_EXCL | INTR_SLEEPABLE)) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}
	if (flags & INTR_SLEEPABLE)
		ie->ie_flags |= IE_SLEEPABLE;

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
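
/*
 * Editorial note (illustrative sketch, not part of the upstream file):
 * device drivers normally do not call intr_event_add_handler() directly.
 * They call bus_setup_intr(9), and the machine-dependent interrupt code
 * ends up registering the handler here, e.g.:
 *
 *	error = bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    my_filter, my_handler, softc, &cookie);
 *
 * "irq_res", "my_filter", "my_handler", "softc" and "cookie" are
 * hypothetical names used only for this sketch.
 */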

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":". This assumes device names do not include
	 * colons. If one is found, prepare to insert the new
	 * description at that point. If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":". The "- 1" leaves room for the trailing
	 * '\0'. The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie, NULL);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers. This is intended for LinuxKPI drivers only.
 * Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	if (ie->ie_thread == NULL) {
		/*
		 * If there is no ithread, then directly remove the handler.
		 * Note that intr_event_handle() iterates ie_handlers in a
		 * lock-less fashion, so care needs to be taken to keep
		 * ie_handlers consistent and to free the removed handler only
		 * when ie_handlers is quiescent.
		 */
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
	} else {
		/*
		 * Let the interrupt thread do the job. The interrupt source is
		 * disabled when the interrupt thread is running, so it does not
		 * have to worry about interaction with intr_event_handle().
		 */
		KASSERT((handler->ih_flags & IH_DEAD) == 0,
		    ("duplicate handle remove"));
		handler->ih_flags |= IH_DEAD;
		intr_event_schedule_thread(ie, NULL);
		while (handler->ih_flags & IH_DEAD)
			msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	}
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier,
	 * it also allows us to check for any pending interrupts.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_hflags & IH_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running. Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
#ifdef HWPMC_HOOKS
		it->it_waiting = 0;
		if (PMC_HOOK_INSTALLED_ANY())
			PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
#endif
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_wakeup(td, SRQ_INTR);
	} else {
#ifdef HWPMC_HOOKS
		it->it_waiting++;
		if (PMC_HOOK_INSTALLED_ANY() &&
		    (it->it_waiting >= intr_hwpmc_waiting_report_threshold))
			PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
#endif
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event. If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error = 0;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	if (handler != NULL) {
		error = intr_event_add_handler(ie, name, NULL, handler, arg,
		    PI_SWI(pri), flags, cookiep);
	}
	return (error);
}
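
/*
 * Editorial note (illustrative sketch, not part of the upstream file):
 * a typical software-interrupt consumer registers a handler once with
 * swi_add() and later triggers it with swi_sched(), e.g.:
 *
 *	static void *my_swi_cookie;
 *	...
 *	swi_add(NULL, "myswi", my_handler, my_arg, SWI_TQ, INTR_MPSAFE,
 *	    &my_swi_cookie);
 *	...
 *	swi_sched(my_swi_cookie, 0);
 *
 * "my_handler", "my_arg" and "my_swi_cookie" are hypothetical names, and
 * SWI_TQ is used only as an example priority level.
 */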

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	if ((flags & SWI_FROMNMI) == 0) {
		entropy.event = (uintptr_t)ih;
		entropy.td = curthread;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
	}

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass. Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (flags & SWI_DELAY)
		return;

	if (flags & SWI_FROMNMI) {
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
		KASSERT(ie == clk_intr_event,
		    ("SWI_FROMNMI used not with clk_intr_event"));
		ipi_self_from_nmi(IPI_SWI);
#endif
	} else {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie, NULL);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler. Currently this code does not
 * remove the associated interrupt event if it becomes empty. Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set. Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1. Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Only specifically marked sleepable interrupt handlers can sleep. */
	if (!(ie->ie_flags & (IE_SOFT | IE_SLEEPABLE)))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & (IE_SOFT | IE_SLEEPABLE)))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (__predict_false(intr_storm_threshold != 0 &&
	    ie->ie_count >= intr_storm_threshold &&
	    (ie->ie_flags & IE_SOFT) == 0)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct epoch_tracker et;
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int epoch_count;
	bool needs_epoch;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (__predict_false((ithd->it_flags & IT_DEAD) != 0)) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			mtx_lock(&ie->ie_lock);
			ie->ie_thread = NULL;
			wakeup(ithd);
			mtx_unlock(&ie->ie_lock);

			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts. If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		needs_epoch =
		    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
		if (needs_epoch) {
			epoch_count = 0;
			NET_EPOCH_ENTER(et);
		}
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
			ithread_execute_handlers(p, ie);
			if (needs_epoch &&
			    ++epoch_count >= intr_epoch_batch) {
				NET_EPOCH_EXIT(et);
				epoch_count = 0;
				NET_EPOCH_ENTER(et);
			}
		}
		if (needs_epoch)
			NET_EPOCH_EXIT(et);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts. Now get the sched
		 * lock. This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT);
		} else if ((ithd->it_flags & IT_WAIT) != 0) {
			ithd->it_flags &= ~IT_WAIT;
			thread_unlock(td);
			wakeup(ithd);
		} else
			thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:      the event connected to this interrupt.
 * o frame:   the current trap frame. If the client interrupt
 *            handler needs this frame, they should get it
 *            via curthread->td_intr_frame.
 *
 * Return value:
 * o 0:       everything ok.
 * o EINVAL:  stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument, ih->ih_name);
		ret = ih->ih_filter(ih->ih_argument);
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL_TF( , , intr, all, frame);
#endif
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;
#ifdef HWPMC_HOOKS
		if (ret & FILTER_SCHEDULE_THREAD)
			PMC_SOFT_CALL_TF( , , intr, ithread, frame);
		else if (ret & FILTER_HANDLED)
			PMC_SOFT_CALL_TF( , , intr, filter, frame);
		else if (ret == FILTER_STRAY)
			PMC_SOFT_CALL_TF( , , intr, stray, frame);
#endif

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie, frame);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_INTR:
		db_printf("INTR");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND_FLAGS(intr, db_show_intr, DB_CMD_MEMSAFE)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK,
	    INTR_MPSAFE, NULL))
		panic("died while creating clk swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code. The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrnames, "",
    "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrcnt, "",
    "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND_FLAGS(intrcnt, db_show_intrcnt, DB_CMD_MEMSAFE)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif