Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mips/sibyte/sb1250/irq.c
15116 views
1
/*
2
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
3
*
4
* This program is free software; you can redistribute it and/or
5
* modify it under the terms of the GNU General Public License
6
* as published by the Free Software Foundation; either version 2
7
* of the License, or (at your option) any later version.
8
*
9
* This program is distributed in the hope that it will be useful,
10
* but WITHOUT ANY WARRANTY; without even the implied warranty of
11
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
* GNU General Public License for more details.
13
*
14
* You should have received a copy of the GNU General Public License
15
* along with this program; if not, write to the Free Software
16
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
*/
18
#include <linux/kernel.h>
19
#include <linux/init.h>
20
#include <linux/linkage.h>
21
#include <linux/interrupt.h>
22
#include <linux/spinlock.h>
23
#include <linux/smp.h>
24
#include <linux/mm.h>
25
#include <linux/kernel_stat.h>
26
27
#include <asm/errno.h>
28
#include <asm/signal.h>
29
#include <asm/system.h>
30
#include <asm/time.h>
31
#include <asm/io.h>
32
33
#include <asm/sibyte/sb1250_regs.h>
34
#include <asm/sibyte/sb1250_int.h>
35
#include <asm/sibyte/sb1250_uart.h>
36
#include <asm/sibyte/sb1250_scd.h>
37
#include <asm/sibyte/sb1250.h>
38
39
/*
40
* These are the routines that handle all the low level interrupt stuff.
41
* Actions handled here are: initialization of the interrupt map, requesting of
42
* interrupt lines by handlers, dispatching of interrupts to handlers, probing
43
* for interrupt lines
44
*/
45
46
#ifdef CONFIG_SIBYTE_HAS_LDT
47
extern unsigned long ldt_eoi_space;
48
#endif
49
50
/* Store the CPU id (not the logical number) */
51
int sb1250_irq_owner[SB1250_NR_IRQS];
52
53
static DEFINE_RAW_SPINLOCK(sb1250_imr_lock);
54
55
void sb1250_mask_irq(int cpu, int irq)
56
{
57
unsigned long flags;
58
u64 cur_ints;
59
60
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
61
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
62
R_IMR_INTERRUPT_MASK));
63
cur_ints |= (((u64) 1) << irq);
64
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
65
R_IMR_INTERRUPT_MASK));
66
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
67
}
68
69
void sb1250_unmask_irq(int cpu, int irq)
70
{
71
unsigned long flags;
72
u64 cur_ints;
73
74
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
75
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
76
R_IMR_INTERRUPT_MASK));
77
cur_ints &= ~(((u64) 1) << irq);
78
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
79
R_IMR_INTERRUPT_MASK));
80
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
81
}
82
83
#ifdef CONFIG_SMP
84
static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
85
bool force)
86
{
87
int i = 0, old_cpu, cpu, int_on;
88
unsigned int irq = d->irq;
89
u64 cur_ints;
90
unsigned long flags;
91
92
i = cpumask_first(mask);
93
94
/* Convert logical CPU to physical CPU */
95
cpu = cpu_logical_map(i);
96
97
/* Protect against other affinity changers and IMR manipulation */
98
raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
99
100
/* Swizzle each CPU's IMR (but leave the IP selection alone) */
101
old_cpu = sb1250_irq_owner[irq];
102
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
103
R_IMR_INTERRUPT_MASK));
104
int_on = !(cur_ints & (((u64) 1) << irq));
105
if (int_on) {
106
/* If it was on, mask it */
107
cur_ints |= (((u64) 1) << irq);
108
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
109
R_IMR_INTERRUPT_MASK));
110
}
111
sb1250_irq_owner[irq] = cpu;
112
if (int_on) {
113
/* unmask for the new CPU */
114
cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
115
R_IMR_INTERRUPT_MASK));
116
cur_ints &= ~(((u64) 1) << irq);
117
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
118
R_IMR_INTERRUPT_MASK));
119
}
120
raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
121
122
return 0;
123
}
124
#endif
125
126
static void enable_sb1250_irq(struct irq_data *d)
127
{
128
unsigned int irq = d->irq;
129
130
sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
131
}
132
133
134
/*
 * irq_chip .irq_mask_ack hook: acknowledge and mask @d->irq.  For
 * HyperTransport-delivered (LDT) interrupts this also clears the
 * pending LDT status on every CPU and issues an EOI before masking,
 * so that a later affinity change does not observe stale status.
 */
static void ack_sb1250_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SIBYTE_HAS_LDT
	u64 pending;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it. NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
						    R_IMR_LDT_INTERRUPT)));
	/* Isolate this irq's bit in the LDT pending word */
	pending &= ((u64)1 << (irq));
	if (pending) {
		int i;
		for (i=0; i<NR_CPUS; i++) {
			int cpu;
#ifdef CONFIG_SMP
			cpu = cpu_logical_map(i);
#else
			cpu = i;
#endif
			/*
			 * Clear for all CPUs so an affinity switch
			 * doesn't find an old status
			 */
			__raw_writeq(pending,
				     IOADDR(A_IMR_REGISTER(cpu,
						R_IMR_LDT_INTERRUPT_CLR)));
		}

		/*
		 * Generate EOI. For Pass 1 parts, EOI is a nop. For
		 * Pass 2, the LDT world may be edge-triggered, but
		 * this EOI shouldn't hurt. If they are
		 * level-sensitive, the EOI is required.
		 */
		*(uint32_t *)(ldt_eoi_space+(irq<<16)+(7<<2)) = 0;
	}
#endif
	/* Finally mask the source on its owner CPU until handling is done */
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
178
179
/*
 * irq_chip for interrupts routed through the SB1250 interrupt mapper.
 * ack_sb1250_irq both acknowledges (including LDT clearing, when
 * configured) and masks; handle_level_irq unmasks via .irq_unmask
 * once the handler has run.
 */
static struct irq_chip sb1250_irq_type = {
	.name = "SB1250-IMR",
	.irq_mask_ack = ack_sb1250_irq,
	.irq_unmask = enable_sb1250_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = sb1250_set_affinity
#endif
};
187
188
/*
 * Register the SB1250 irq_chip and level-triggered flow handler for
 * every mapper interrupt, and mark CPU 0 as the initial owner of each.
 */
void __init init_sb1250_irqs(void)
{
	int irq;

	for (irq = 0; irq < SB1250_NR_IRQS; irq++) {
		irq_set_chip_and_handler(irq, &sb1250_irq_type,
					 handle_level_irq);
		sb1250_irq_owner[irq] = 0;
	}
}
198
199
200
/*
201
* arch_init_irq is called early in the boot sequence from init/main.c via
202
* init_IRQ. It is responsible for setting up the interrupt mapper and
203
* installing the handler that will be responsible for dispatching interrupts
204
* to the "right" place.
205
*/
206
/*
207
* For now, map all interrupts to IP[2]. We could save
208
* some cycles by parceling out system interrupts to different
209
* IP lines, but keep it simple for bringup. We'll also direct
210
* all interrupts to a single CPU; we should probably route
211
* PCI and LDT to one cpu and everything else to the other
212
* to balance the load a bit.
213
*
214
* On the second cpu, everything is set to IP5, which is
215
* ignored, EXCEPT the mailbox interrupt. That one is
216
* set to IP[2] so it is handled. This is needed so we
217
* can do cross-cpu function calls, as required by SMP
218
*/
219
220
/*
 * Interrupt mapper values: routing a source with K_INT_MAP_In sends it
 * to CPU interrupt line IP[n+2] (I0 -> IP2, ..., I4 -> IP6).
 */
#define IMR_IP2_VAL	K_INT_MAP_I0
#define IMR_IP3_VAL	K_INT_MAP_I1
#define IMR_IP4_VAL	K_INT_MAP_I2
#define IMR_IP5_VAL	K_INT_MAP_I3
#define IMR_IP6_VAL	K_INT_MAP_I4
225
226
/*
 * Bring up the SB1250 interrupt mapper: route every source on both
 * CPUs to IP[2], route the mailbox (cross-CPU IPI) source to IP[3],
 * clear stale mailbox state, mask everything except the mailbox, and
 * finally enable the CPU interrupt lines we use.
 */
void __init arch_init_irq(void)
{

	unsigned int i;
	u64 tmp;
	unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
		STATUSF_IP1 | STATUSF_IP0;

	/* Default everything to IP2 */
	for (i = 0; i < SB1250_NR_IRQS; i++) {	/* was I0 */
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(0,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
		__raw_writeq(IMR_IP2_VAL,
			     IOADDR(A_IMR_REGISTER(1,
						   R_IMR_INTERRUPT_MAP_BASE) +
				    (i << 3)));
	}

	init_sb1250_irqs();

	/*
	 * Map the high 16 bits of the mailbox registers to IP[3], for
	 * inter-cpu messages
	 */
	/* Was I1 */
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));
	__raw_writeq(IMR_IP3_VAL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
			    (K_INT_MBOX_0 << 3)));

	/* Clear the mailboxes. The firmware may leave them dirty */
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
	__raw_writeq(0xffffffffffffffffULL,
		     IOADDR(A_IMR_REGISTER(1, R_IMR_MAILBOX_CLR_CPU)));

	/* Mask everything except the mailbox registers for both cpus */
	tmp = ~((u64) 0) ^ (((u64) 1) << K_INT_MBOX_0);
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MASK)));
	__raw_writeq(tmp, IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MASK)));

	/*
	 * Note that the timer interrupts are also mapped, but this is
	 * done in sb1250_time_init().  Also, the profiling driver
	 * does its own management of IP7.
	 */

	/* Enable necessary IPs, disable the rest */
	change_c0_status(ST0_IM, imask);
}
280
281
extern void sb1250_mailbox_interrupt(void);
282
283
static inline void dispatch_ip2(void)
284
{
285
unsigned int cpu = smp_processor_id();
286
unsigned long long mask;
287
288
/*
289
* Default...we've hit an IP[2] interrupt, which means we've got to
290
* check the 1250 interrupt registers to figure out what to do. Need
291
* to detect which CPU we're on, now that smp_affinity is supported.
292
*/
293
mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
294
R_IMR_INTERRUPT_STATUS_BASE)));
295
if (mask)
296
do_IRQ(fls64(mask) - 1);
297
}
298
299
asmlinkage void plat_irq_dispatch(void)
300
{
301
unsigned int cpu = smp_processor_id();
302
unsigned int pending;
303
304
/*
305
* What a pain. We have to be really careful saving the upper 32 bits
306
* of any * register across function calls if we don't want them
307
* trashed--since were running in -o32, the calling routing never saves
308
* the full 64 bits of a register across a function call. Being the
309
* interrupt handler, we're guaranteed that interrupts are disabled
310
* during this code so we don't have to worry about random interrupts
311
* blasting the high 32 bits.
312
*/
313
314
pending = read_c0_cause() & read_c0_status() & ST0_IM;
315
316
if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
317
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
318
else if (pending & CAUSEF_IP4)
319
do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
320
321
#ifdef CONFIG_SMP
322
else if (pending & CAUSEF_IP3)
323
sb1250_mailbox_interrupt();
324
#endif
325
326
else if (pending & CAUSEF_IP2)
327
dispatch_ip2();
328
else
329
spurious_interrupt();
330
}
331
332