Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/sgi-ip30/ip30-irq.c
26424 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
4
*/
5
#include <linux/errno.h>
6
#include <linux/init.h>
7
#include <linux/interrupt.h>
8
#include <linux/irq.h>
9
#include <linux/irqdomain.h>
10
#include <linux/percpu.h>
11
#include <linux/spinlock.h>
12
#include <linux/tick.h>
13
#include <linux/types.h>
14
15
#include <asm/irq_cpu.h>
16
#include <asm/sgi/heart.h>
17
18
#include "ip30-common.h"
19
20
/*
 * Per-interrupt chip data: records which CPU a HEART interrupt is
 * currently routed to (used to index heart_regs->imr[] and the
 * per-CPU irq_enable_mask).
 *
 * NOTE(review): irq_mask is never referenced in this file — presumably
 * historical; confirm before removing.
 */
struct heart_irq_data {
	u64	*irq_mask;
	int	cpu;
};
24
/* Allocation bitmap of in-use HEART hardware interrupt numbers. */
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

/*
 * Per-CPU software shadow of the HEART interrupt mask register
 * (heart_regs->imr[cpu]); bits are updated here first, then the
 * whole word is written to the hardware register.
 */
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
29
static inline int heart_alloc_int(void)
30
{
31
int bit;
32
33
again:
34
bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
35
if (bit >= HEART_NUM_IRQS)
36
return -ENOSPC;
37
38
if (test_and_set_bit(bit, heart_irq_map))
39
goto again;
40
41
return bit;
42
}
43
44
/*
 * Chained handler for the HEART error interrupt line (L4).
 *
 * Reads the pending/mask/cause registers, masks and acks all error
 * sources, then either reports a fatal "heart attack" (when a cause
 * value is latched) or re-enables the error interrupts.  The register
 * write ordering below (mask first, then ack) is deliberate.
 */
static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	/* Only the L4 (error) interrupts that are unmasked on this CPU. */
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register. This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert("  HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert("  HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs. */
	heart_write(mask, &heart_regs->imr[cpu]);
}
96
97
/*
 * Chained handler for the normal (L0/L1/L2) HEART interrupt lines.
 *
 * On SMP, the reschedule/call IPI bits are serviced directly (acked in
 * clear_isr, then the generic IPI handler is invoked); one IPI per
 * invocation.  Everything else is dispatched into the HEART irq domain
 * via the lowest pending bit.
 */
static void ip30_normal_irq(struct irq_desc *desc)
{
	int cpu = smp_processor_id();
	struct irq_domain *domain;
	u64 pend, mask;
	int ret;

	pend = heart_read(&heart_regs->isr);
	/* Only consider the non-error levels unmasked on this CPU. */
	mask = (heart_read(&heart_regs->imr[cpu]) &
		(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));

	pend &= mask;
	if (unlikely(!pend))
		return;

#ifdef CONFIG_SMP
	if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		/* Not an IPI: hand the lowest pending hwirq to the domain. */
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend));
		if (ret)
			spurious_interrupt();
	}
}
138
139
/* Ack one HEART interrupt by writing its bit to the clear_isr register. */
static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
143
144
/*
 * Mask one HEART interrupt: clear its bit in the per-CPU shadow mask,
 * then push the whole shadow word to the target CPU's imr register.
 */
static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
152
153
/*
 * Mask and ack one HEART interrupt: update the shadow mask and imr
 * first, then clear the pending bit in clear_isr (mask before ack so
 * the source cannot immediately re-latch).
 */
static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
162
163
/*
 * Unmask one HEART interrupt: set its bit in the per-CPU shadow mask,
 * then push the whole shadow word to the target CPU's imr register.
 */
static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
171
172
/*
 * Retarget a HEART interrupt to the first online CPU in @mask.
 *
 * A started interrupt is masked+acked on the old CPU before hd->cpu is
 * switched, and unmasked on the new CPU afterwards, so the bit is never
 * enabled on two CPUs at once.  Returns 0 on success, -EINVAL if no
 * chip data is attached.
 */
static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}
192
193
/* irq_chip callbacks for interrupts routed through the HEART ASIC. */
static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};
201
202
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
203
unsigned int nr_irqs, void *arg)
204
{
205
struct irq_alloc_info *info = arg;
206
struct heart_irq_data *hd;
207
int hwirq;
208
209
if (nr_irqs > 1 || !info)
210
return -EINVAL;
211
212
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
213
if (!hd)
214
return -ENOMEM;
215
216
hwirq = heart_alloc_int();
217
if (hwirq < 0) {
218
kfree(hd);
219
return -EAGAIN;
220
}
221
irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
222
handle_level_irq, NULL, NULL);
223
224
return 0;
225
}
226
227
static void heart_domain_free(struct irq_domain *domain,
228
unsigned int virq, unsigned int nr_irqs)
229
{
230
struct irq_data *irqd;
231
232
if (nr_irqs > 1)
233
return;
234
235
irqd = irq_domain_get_irq_data(domain, virq);
236
if (irqd) {
237
clear_bit(irqd->hwirq, heart_irq_map);
238
kfree(irqd->chip_data);
239
}
240
}
241
242
/* Domain operations for the linear HEART irq domain. */
static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};
246
247
/*
 * Enable the reschedule and call-function IPI bits for the calling CPU:
 * set them in the per-CPU shadow mask, ack any stale pending state in
 * clear_isr, then write the shadow mask to this CPU's imr register.
 * The _CPU_0 bit constants are per-CPU consecutive, hence the "+ cpu".
 */
void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);

	heart_write(*mask, &heart_regs->imr[cpu]);
}
261
262
/*
 * Boot-time interrupt initialization for IP30.
 *
 * Sequence: init the MIPS CPU irqchip, quiesce HEART (mask all four
 * imr slots, ack everything), pre-enable the per-CPU error interrupts,
 * reserve the hardware/software-fixed bits in the allocation bitmap,
 * create the linear HEART irq domain, and finally attach the chained
 * handlers for the four HEART cascade lines.
 */
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs. */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_domain(domain);

	/* Chain each cascade line; normal lines share ip30_normal_irq(). */
	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}
331
332