GitHub Repository: torvalds/linux
Path: blob/master/arch/um/kernel/irq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>


/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */
struct irq_reg {
        void *id;
        int irq;
        /* it's cheaper to store this than to query it */
        int events;
        bool active;
        bool pending;
        bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
        bool pending_event;
        void (*timetravel_handler)(int, int, void *,
                                   struct time_travel_event *);
        struct time_travel_event event;
#endif
};

struct irq_entry {
        struct list_head list;
        int fd;
        struct irq_reg reg[NUM_IRQ_TYPES];
        bool suspended;
        bool sigio_workaround;
};

static DEFINE_RAW_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static bool irqs_pending;
#endif

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
        /*
         * irq->active guards against reentry
         * irq->pending accumulates pending requests
         * if pending is raised the irq_handler is re-run
         * until pending is cleared
         */
        if (irq->active) {
                irq->active = false;

                do {
                        irq->pending = false;
                        do_IRQ(irq->irq, regs);
                } while (irq->pending);

                irq->active = true;
        } else {
                irq->pending = true;
        }
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
        struct irq_reg *reg = container_of(ev, struct irq_reg, event);

        /* do nothing if suspended; just cause a wakeup and mark as pending */
        if (irqs_suspended) {
                irqs_pending = true;
                reg->pending_event = true;
                return;
        }

        generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
                                      enum um_irq_type t)
{
        struct irq_reg *reg = &entry->reg[t];

        if (!reg->timetravel_handler)
                return false;

        /*
         * Handle all messages - we might get multiple even while
         * interrupts are already suspended, due to suspend order
         * etc. Note that time_travel_add_irq_event() will not add
         * an event twice; if it's pending already, "first wins".
         */
        reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

        if (!reg->event.pending)
                return false;

        return true;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
        struct irq_entry *entry;

        if (!irqs_pending || timetravel_handlers_only)
                return;

        irqs_pending = false;

        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        struct irq_reg *reg = &entry->reg[t];

                        /*
                         * Any timetravel_handler was invoked already, just
                         * directly run the IRQ.
                         */
                        if (reg->pending_event) {
                                irq_enter();
                                generic_handle_irq(reg->irq);
                                irq_exit();
                                reg->pending_event = false;
                        }
                }
        }
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
                                      enum um_irq_type t)
{
        return false;
}

static void irq_do_pending_events(bool timetravel_handlers_only)
{
}
#endif

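/* Dispatch one IRQ type of an FD registration if epoll reported events for it. */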
static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
                              struct uml_pt_regs *regs,
                              bool timetravel_handlers_only)
{
        struct irq_reg *reg = &entry->reg[t];

        if (!reg->events)
                return;

        if (os_epoll_triggered(idx, reg->events) <= 0)
                return;

        if (irq_do_timetravel_handler(entry, t))
                return;

        /*
         * If we're called to only run time-travel handlers then don't
         * actually proceed but mark sigio as pending (if applicable).
         * For suspend/resume, timetravel_handlers_only may be true
         * despite time-travel not being configured and used.
         */
        if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
                reg->pending_event = true;
                irqs_pending = true;
                mark_sigio_pending();
#endif
                return;
        }

        irq_io_loop(reg, regs);
}

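/*
 * Core SIGIO processing: drain epoll and dispatch every triggered
 * FD/IRQ-type registration, optionally restricted to the time-travel
 * handlers only.
 */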
static void _sigio_handler(struct uml_pt_regs *regs,
                           bool timetravel_handlers_only)
{
        struct irq_entry *irq_entry;
        int n, i;

        if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
                return;

        /* Flush out pending events that were ignored due to time-travel. */
        if (!irqs_suspended)
                irq_do_pending_events(timetravel_handlers_only);

        while (1) {
                /* This is now lockless - epoll keeps back-references to the irqs
                 * which have triggered it, so there is no need to walk the irq
                 * list and lock it every time. We avoid locking by turning off
                 * IO for a specific fd by executing os_del_epoll_fd(fd) before
                 * we do any changes to the actual data structures.
                 */
                n = os_waiting_for_events_epoll();

                if (n <= 0) {
                        if (n == -EINTR)
                                continue;
                        else
                                break;
                }

                for (i = 0; i < n; i++) {
                        enum um_irq_type t;

                        irq_entry = os_epoll_get_data_pointer(i);

                        for (t = 0; t < NUM_IRQ_TYPES; t++)
                                sigio_reg_handler(i, irq_entry, t, regs,
                                                  timetravel_handlers_only);
                }
        }

        if (!timetravel_handlers_only)
                free_irqs();
}

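/*
 * SIGIO entry point from the signal frame; while the system is suspended
 * only the time-travel handlers are run.
 */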
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
                   void *mc)
{
        preempt_disable();
        _sigio_handler(regs, irqs_suspended);
        preempt_enable();
}

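/* Look up the registration for an FD; irq_lock must be held. */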
static struct irq_entry *get_irq_entry_by_fd(int fd)
{
        struct irq_entry *walk;

        lockdep_assert_held(&irq_lock);

        list_for_each_entry(walk, &active_fds, list) {
                if (walk->fd == fd)
                        return walk;
        }

        return NULL;
}

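/* Unlink an entry from active_fds, optionally dropping its FD from epoll first. */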
static void remove_irq_entry(struct irq_entry *to_free, bool remove)
{
        if (!to_free)
                return;

        if (remove)
                os_del_epoll_fd(to_free->fd);
        list_del(&to_free->list);
}

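/*
 * Re-arm the FD in epoll with the OR of all event masks still registered
 * on it; returns false (and removes the FD from epoll) when none are left.
 */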
static bool update_irq_entry(struct irq_entry *entry)
{
        enum um_irq_type i;
        int events = 0;

        for (i = 0; i < NUM_IRQ_TYPES; i++)
                events |= entry->reg[i].events;

        if (events) {
                /* will modify (instead of add) if needed */
                os_add_epoll_fd(events, entry->fd, entry);
                return true;
        }

        os_del_epoll_fd(entry->fd);
        return false;
}

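/*
 * As update_irq_entry(), but also unlinks the entry when no events remain;
 * the entry is returned so the caller can kfree() it outside irq_lock.
 */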
static struct irq_entry *update_or_remove_irq_entry(struct irq_entry *entry)
{
        if (update_irq_entry(entry))
                return NULL;
        remove_irq_entry(entry, false);
        return entry;
}

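/*
 * Attach an (irq, type) registration to an FD: make the FD asynchronous,
 * find or allocate its irq_entry and (re-)arm the FD in epoll.
 */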
static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
                       void (*timetravel_handler)(int, int, void *,
                                                  struct time_travel_event *))
{
        struct irq_entry *irq_entry, *to_free = NULL;
        int err, events = os_event_mask(type);
        unsigned long flags;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

        raw_spin_lock_irqsave(&irq_lock, flags);
        irq_entry = get_irq_entry_by_fd(fd);
        if (irq_entry) {
already:
                /* cannot register the same FD twice with the same type */
                if (WARN_ON(irq_entry->reg[type].events)) {
                        err = -EALREADY;
                        goto out_unlock;
                }

                /* temporarily disable to avoid IRQ-side locking */
                os_del_epoll_fd(fd);
        } else {
                struct irq_entry *new;

                /* don't restore interrupts */
                raw_spin_unlock(&irq_lock);
                new = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
                if (!new) {
                        local_irq_restore(flags);
                        return -ENOMEM;
                }
                raw_spin_lock(&irq_lock);
                irq_entry = get_irq_entry_by_fd(fd);
                if (irq_entry) {
                        to_free = new;
                        goto already;
                }
                irq_entry = new;
                irq_entry->fd = fd;
                list_add_tail(&irq_entry->list, &active_fds);
                maybe_sigio_broken(fd);
        }

        irq_entry->reg[type].id = dev_id;
        irq_entry->reg[type].irq = irq;
        irq_entry->reg[type].active = true;
        irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
        if (um_irq_timetravel_handler_used()) {
                irq_entry->reg[type].timetravel_handler = timetravel_handler;
                irq_entry->reg[type].event.fn = irq_event_handler;
        }
#endif

        WARN_ON(!update_irq_entry(irq_entry));
        err = 0;
out_unlock:
        raw_spin_unlock_irqrestore(&irq_lock, flags);
out:
        kfree(to_free);
        return err;
}

/*
 * Remove the entry or entries for a specific FD. If you
 * don't want to remove all the possible entries then use
 * um_free_irq() or deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
        struct irq_entry *to_free;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_lock, flags);
        to_free = get_irq_entry_by_fd(fd);
        remove_irq_entry(to_free, true);
        raw_spin_unlock_irqrestore(&irq_lock, flags);
        kfree(to_free);
}
EXPORT_SYMBOL(free_irq_by_fd);

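/* Remove the single registration matching both the IRQ number and dev_id. */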
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_entry *entry, *to_free = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type i;

                for (i = 0; i < NUM_IRQ_TYPES; i++) {
                        struct irq_reg *reg = &entry->reg[i];

                        if (!reg->events)
                                continue;
                        if (reg->irq != irq)
                                continue;
                        if (reg->id != dev)
                                continue;

                        os_del_epoll_fd(entry->fd);
                        reg->events = 0;
                        to_free = update_or_remove_irq_entry(entry);
                        goto out;
                }
        }
out:
        raw_spin_unlock_irqrestore(&irq_lock, flags);
        kfree(to_free);
}

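/*
 * Clear every registration of the given IRQ on this FD and drop the FD
 * entirely if nothing else is registered on it.
 */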
void deactivate_fd(int fd, int irqnum)
{
        struct irq_entry *entry;
        unsigned long flags;
        enum um_irq_type i;

        os_del_epoll_fd(fd);

        raw_spin_lock_irqsave(&irq_lock, flags);
        entry = get_irq_entry_by_fd(fd);
        if (!entry)
                goto out;

        for (i = 0; i < NUM_IRQ_TYPES; i++) {
                if (!entry->reg[i].events)
                        continue;
                if (entry->reg[i].irq == irqnum)
                        entry->reg[i].events = 0;
        }

        entry = update_or_remove_irq_entry(entry);
out:
        raw_spin_unlock_irqrestore(&irq_lock, flags);
        kfree(entry);

        ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting. No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
        struct irq_entry *entry;

        /* Stop IO. The IRQ loop has no lock so this is our
         * only way of making sure we are safe to dispose
         * of all IRQ handlers
         */
        os_set_ioignore();

        /* we can no longer call kfree() here so just deactivate */
        list_for_each_entry(entry, &active_fds, list)
                os_del_epoll_fd(entry->fd);
        os_close_epoll_fd();
        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
        generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

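/*
 * Release an IRQ obtained from um_request_irq(): detach any FD
 * registration, free the generic handler and return a dynamically
 * allocated IRQ number to the pool.
 */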
void um_free_irq(int irq, void *dev)
{
        if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
                 "freeing invalid irq %d", irq))
                return;

        free_irq_by_irq_and_dev(irq, dev);
        free_irq(irq, dev);
        clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

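/*
 * Common back end for um_request_irq*(): allocate a dynamic IRQ number
 * if asked to, attach the FD (if any) and install the handler.
 */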
static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
                irq_handler_t handler, unsigned long irqflags,
                const char *devname, void *dev_id,
                void (*timetravel_handler)(int, int, void *,
                                           struct time_travel_event *))
{
        int err;

        if (irq == UM_IRQ_ALLOC) {
                int i;

                for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
                        if (!test_and_set_bit(i, irqs_allocated)) {
                                irq = i;
                                break;
                        }
                }
        }

        if (irq < 0)
                return -ENOSPC;

        if (fd != -1) {
                err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
                if (err)
                        goto error;
        }

        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if (err < 0)
                goto error;

        return irq;
error:
        clear_bit(irq, irqs_allocated);
        return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
                   irq_handler_t handler, unsigned long irqflags,
                   const char *devname, void *dev_id)
{
        return _um_request_irq(irq, fd, type, handler, irqflags,
                               devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
                      irq_handler_t handler, unsigned long irqflags,
                      const char *devname, void *dev_id,
                      void (*timetravel_handler)(int, int, void *,
                                                 struct time_travel_event *))
{
        return _um_request_irq(irq, fd, type, handler, irqflags,
                               devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
        _sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
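/*
 * Before suspend: stop SIGIO on every FD that is neither a wakeup source
 * nor needed by the SIGIO workaround thread or time-travel handling.
 */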
void um_irqs_suspend(void)
{
        struct irq_entry *entry;
        unsigned long flags;

        irqs_suspended = true;

        raw_spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;
                bool clear = true;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        if (!entry->reg[t].events)
                                continue;

                        /*
                         * For the SIGIO_WRITE_IRQ, which is used to handle the
                         * SIGIO workaround thread, we need special handling:
                         * enable wake for it itself, but below we tell it about
                         * any FDs that should be suspended.
                         */
                        if (entry->reg[t].wakeup ||
                            entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
                            || entry->reg[t].timetravel_handler
#endif
                            ) {
                                clear = false;
                                break;
                        }
                }

                if (clear) {
                        entry->suspended = true;
                        os_clear_fd_async(entry->fd);
                        entry->sigio_workaround =
                                !__ignore_sigio_fd(entry->fd);
                }
        }
        raw_spin_unlock_irqrestore(&irq_lock, flags);
}

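/*
 * After resume: re-enable async IO on the FDs suspended above and send
 * ourselves a SIGIO so nothing that arrived in the meantime is lost.
 */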
void um_irqs_resume(void)
{
        struct irq_entry *entry;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                if (entry->suspended) {
                        int err = os_set_fd_async(entry->fd);

                        WARN(err < 0, "os_set_fd_async returned %d\n", err);
                        entry->suspended = false;

                        if (entry->sigio_workaround) {
                                err = __add_sigio_fd(entry->fd);
                                WARN(err < 0, "__add_sigio_fd returned %d\n", err);
                        }
                }
        }
        raw_spin_unlock_irqrestore(&irq_lock, flags);

        irqs_suspended = false;
        send_sigio_to_self();
}

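/*
 * irq_set_wake callback: record which registrations must stay armed
 * across suspend (consulted by um_irqs_suspend() above).
 */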
static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
        struct irq_entry *entry;
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_lock, flags);
        list_for_each_entry(entry, &active_fds, list) {
                enum um_irq_type t;

                for (t = 0; t < NUM_IRQ_TYPES; t++) {
                        if (!entry->reg[t].events)
                                continue;

                        if (entry->reg[t].irq != d->irq)
                                continue;
                        entry->reg[t].wakeup = on;
                        goto unlock;
                }
        }
unlock:
        raw_spin_unlock_irqrestore(&irq_lock, flags);
        return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
        .irq_set_wake = normal_irq_set_wake,
};

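/* This one handles only the timer (SIGALRM) interrupt. */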
static struct irq_chip alarm_irq_type = {
        .name = "SIGALRM",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
        .irq_mask = dummy,
        .irq_unmask = dummy,
};

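/*
 * Register the irq_chips for the timer and all other signal IRQs and
 * create the epoll loop used by the SIGIO handler.
 */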
void __init init_IRQ(void)
{
        int i;

        irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

        for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
                irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
        /* Initialize EPOLL Loop */
        os_setup_epoll();
}

void sigchld_handler(int sig, struct siginfo *unused_si,
                     struct uml_pt_regs *regs, void *mc)
{
        do_IRQ(SIGCHLD_IRQ, regs);
}