Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/mach-common/ints-priority.c
10817 views
1
/*
2
* Set up the interrupt priorities
3
*
4
* Copyright 2004-2009 Analog Devices Inc.
5
* 2003 Bas Vermeulen <[email protected]>
6
* 2002 Arcturus Networks Inc. MaTed <[email protected]>
7
* 2000-2001 Lineo, Inc. D. Jeff Dionne <[email protected]>
8
* 1999 D. Jeff Dionne <[email protected]>
9
* 1996 Roman Zippel
10
*
11
* Licensed under the GPL-2
12
*/
13
14
#include <linux/module.h>
15
#include <linux/kernel_stat.h>
16
#include <linux/seq_file.h>
17
#include <linux/irq.h>
18
#include <linux/sched.h>
19
#ifdef CONFIG_IPIPE
20
#include <linux/ipipe.h>
21
#endif
22
#include <asm/traps.h>
23
#include <asm/blackfin.h>
24
#include <asm/gpio.h>
25
#include <asm/irq_handler.h>
26
#include <asm/dpmc.h>
27
28
/*
 * Map a system IRQ number onto its SIC (System Interrupt Controller)
 * bit index: peripheral IRQs start right after the core timer IRQ.
 * The argument is parenthesized so that compound expressions (e.g.
 * ternaries) expand correctly.
 */
#define SIC_SYSIRQ(irq)	((irq) - (IRQ_CORETMR + 1))
29
30
/*
31
* NOTES:
32
* - we have separated the physical Hardware interrupt from the
33
* levels that the LINUX kernel sees (see the description in irq.h)
34
* -
35
*/
36
37
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise). The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
/* Software copy of the core IMASK register; kept in sync by
 * bfin_core_mask_irq()/bfin_core_unmask_irq() below. */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif
47
48
#ifdef CONFIG_PM
49
unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
50
unsigned vr_wakeup;
51
#endif
52
53
/* One entry per peripheral interrupt, grouped by priority level
 * (filled in by search_IAR()). */
static struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Per-priority-level slice [ifirst, istop) into ivg_table, one slot
 * for each level IVG7..IVG13. */
static struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	/* one past the last ivg_table entry belonging to this ivg */
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
65
66
67
/*
68
* Search SIC_IAR and fill tables with the irqvalues
69
* and their positions in the SIC_ISR register.
70
*/
71
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	/* Walk priority levels IVG7..IVG13 in order so ivg_table ends
	 * up grouped by level; ivg7_13[] records each level's slice. */
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		/* Empty slice for this level until entries are appended. */
		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			/* Fetch the IAR word covering irqN; each 32-bit IAR
			 * packs eight 4-bit priority assignments.  Some parts
			 * have a non-contiguous IAR register layout, hence
			 * the extra address arithmetic. */
			u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
				(irqN >> 3)
#endif
				);

			/* Record every IRQ in this group of 4 whose 4-bit
			 * IAR nibble selects the current priority level. */
			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
102
103
/*
104
* This is for core internal IRQs
105
*/
106
107
/* No-op irq_ack callback for chips whose interrupts need no explicit ack. */
void bfin_ack_noop(struct irq_data *d)
{
	/* Dummy function. */
}
111
112
/* Mask a core interrupt: clear its bit in the software IMASK copy and,
 * if interrupts are currently live, push the new mask to hardware
 * (hard_local_irq_enable() performs "STI bfin_irq_flags"). */
static void bfin_core_mask_irq(struct irq_data *d)
{
	bfin_irq_flags &= ~(1 << d->irq);
	if (!hard_irqs_disabled())
		hard_local_irq_enable();
}
118
119
static void bfin_core_unmask_irq(struct irq_data *d)
120
{
121
bfin_irq_flags |= 1 << d->irq;
122
/*
123
* If interrupts are enabled, IMASK must contain the same value
124
* as bfin_irq_flags. Make sure that invariant holds. If interrupts
125
* are currently disabled we need not do anything; one of the
126
* callers will take care of setting IMASK to the proper value
127
* when reenabling interrupts.
128
* local_irq_enable just does "STI bfin_irq_flags", so it's exactly
129
* what we need.
130
*/
131
if (!hard_irqs_disabled())
132
hard_local_irq_enable();
133
return;
134
}
135
136
/* Mask a peripheral interrupt in the SIC.  On parts with multiple
 * 32-bit IMASK banks the bank/bit are derived from the system IRQ
 * number; under SMP the second core's SICB bank is updated too. */
void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
# ifdef CONFIG_SMP
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			     ~(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << SIC_SYSIRQ(irq)));
#endif

	hard_local_irq_restore(flags);
}
156
157
/* irq_chip adapter: unwrap struct irq_data and mask by IRQ number. */
static void bfin_internal_mask_irq_chip(struct irq_data *d)
{
	bfin_internal_mask_irq(d->irq);
}
161
162
/* Unmask a peripheral interrupt in the SIC.  Under SMP the function
 * takes an affinity mask and only unmasks on the core(s) selected
 * (SIC bank for CPU0, SICB bank for CPU1); on UP it is the plain
 * bfin_internal_unmask_irq() — note the shared body below serves
 * both prototypes via the #ifdef'd function head. */
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(0, affinity))
# endif
		bfin_write_SIC_IMASK(mask_bank,
			bfin_read_SIC_IMASK(mask_bank) |
			(1 << mask_bit));
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
			bfin_read_SICB_IMASK(mask_bank) |
			(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			(1 << SIC_SYSIRQ(irq)));
#endif

	hard_local_irq_restore(flags);
}
193
194
#ifdef CONFIG_SMP
/* irq_chip adapter: unmask honouring the IRQ's current affinity. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}

/* Re-target an IRQ: mask everywhere, then unmask only on the CPUs in
 * the new mask.  Always succeeds (returns 0). */
static int bfin_internal_set_affinity(struct irq_data *d,
				      const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
/* irq_chip adapter for the UP unmask path. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif
214
215
#ifdef CONFIG_PM
/* Configure whether @irq may wake the system from suspend: records the
 * SIC_IWRx bank/bit in bfin_sic_iwr[] and accumulates the matching
 * VR wakeup-enable bit (WAKE/CANWE/USBWE/ROTWE) in vr_wakeup.
 * Always returns 0. */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	/* Only a handful of sources have an associated VR wakeup bit;
	 * each case is compiled in only where the part defines the IRQ. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;

	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}

/* irq_chip adapter for bfin_internal_set_wake(). */
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
#else
# define bfin_internal_set_wake_chip NULL
#endif
276
277
/* irq_chip for the core event controller (IRQs <= IRQ_CORETMR). */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};

/* irq_chip for peripheral interrupts routed through the SIC. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_mask_ack = bfin_internal_mask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
	.irq_set_wake = bfin_internal_set_wake_chip,
};
297
298
/* Dispatch a demuxed IRQ: through the I-pipe when CONFIG_IPIPE is set,
 * otherwise straight into the generic IRQ layer. */
void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;    /* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif  /* !CONFIG_IPIPE */
}
309
310
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* Bitmask of enabled virtual MAC status IRQs, bit i for
 * IRQ_MAC_PHYINT + i; guards the shared IRQ_MAC_ERROR line. */
static int mac_stat_int_mask;

/* Acknowledge one virtual MAC status interrupt by clearing the
 * latched condition in the relevant EMAC register(s). */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		/* Clear only the MMC counter interrupts that are enabled. */
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
344
345
/* Mask one virtual MAC status IRQ.  On BF537 the PHY interrupt has its
 * own enable bit; on other parts the shared IRQ_MAC_ERROR line is
 * masked once no virtual source remains enabled. */
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}

/* Unmask one virtual MAC status IRQ; the first enabled source also
 * unmasks the shared IRQ_MAC_ERROR line (non-BF537 parts). */
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}

#ifdef CONFIG_PM
/* Wake control is delegated to the underlying shared error IRQ. */
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif

/* irq_chip for the demuxed MAC status interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_ack = bfin_ack_noop,
	.irq_mask_ack = bfin_mac_status_mask_irq,
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
	.irq_set_wake = bfin_mac_status_set_wake,
};
405
406
void bfin_demux_mac_status_irq(unsigned int int_err_irq,
407
struct irq_desc *inta_desc)
408
{
409
int i, irq = 0;
410
u32 status = bfin_read_EMAC_SYSTAT();
411
412
for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
413
if (status & (1L << i)) {
414
irq = IRQ_MAC_PHYINT + i;
415
break;
416
}
417
418
if (irq) {
419
if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
420
bfin_handle_irq(irq);
421
} else {
422
bfin_mac_status_ack_irq(irq);
423
pr_debug("IRQ %d:"
424
" MASKED MAC ERROR INTERRUPT ASSERTED\n",
425
irq);
426
}
427
} else
428
printk(KERN_ERR
429
"%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
430
" INTERRUPT ASSERTED BUT NO SOURCE FOUND"
431
"(EMAC_SYSTAT=0x%X)\n",
432
__func__, __FILE__, __LINE__, status);
433
}
434
#endif
435
436
/* Install a flow handler for @irq.  Under I-pipe every IRQ uses the
 * level handler regardless of what the caller asked for. */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	__irq_set_handler_locked(irq, handle);
}

/* One bit per GPIO: set once the pin has been claimed/prepared for
 * interrupt use (see the *_irq_startup/_irq_type handlers). */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
447
#if !defined(CONFIG_BF54x)

/* Ack an edge GPIO interrupt by clearing the latched data bit. */
static void bfin_gpio_ack_irq(struct irq_data *d)
{
	/* AFAIK ack_irq in case mask_ack is provided
	 * get's only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(d->irq), 0);
}

/* Ack (edge-triggered only) and mask a GPIO interrupt in one step. */
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 gpionr = irq_to_gpio(irq);

	if (!irqd_is_level_type(d))
		set_gpio_data(gpionr, 0);

	set_gpio_maska(gpionr, 0);
}

/* Mask a GPIO interrupt via its MASKA bit. */
static void bfin_gpio_mask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 0);
}

/* Unmask a GPIO interrupt via its MASKA bit. */
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 1);
}

/* Enable a GPIO IRQ: prepare the pin once, then unmask.  Always 0. */
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(d);

	return 0;
}

/* Disable a GPIO IRQ and release the underlying pin. */
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	bfin_gpio_mask_irq(d);
	__clear_bit(gpionr, gpio_enabled);
	bfin_gpio_irq_free(gpionr);
}
498
499
/* Program a GPIO pin's trigger type (edge/level, polarity, both-edge)
 * and pick the matching flow handler.  Returns 0 on success or the
 * error from claiming the pin. */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		/* Claim the pin for IRQ use the first time through. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		/* IRQ_TYPE_NONE: just mark the pin unused. */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Quiesce the pin (input disable, direction=input) before
	 * reprogramming polarity/edge registers. */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* Clear any stale latched edge before enabling. */
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
560
561
#ifdef CONFIG_PM
/* Forward wake configuration to the GPIO PM layer. */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}
#else
# define bfin_gpio_set_wake NULL
#endif

/* Dispatch every pending, unmasked GPIO interrupt in the 16-bit port
 * block that starts at @irq. */
static void bfin_demux_gpio_block(unsigned int irq)
{
	unsigned int gpio, mask;

	gpio = irq_to_gpio(irq);
	mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

	while (mask) {
		if (mask & 1)
			bfin_handle_irq(irq);
		irq++;
		mask >>= 1;
	}
}
584
585
/* Chained handler for the part-specific port INTA lines: map the
 * incoming system IRQ to the first GPIO IRQ of the affected port and
 * demux the whole block.  BF537's shared PF/PG line scans two ports. */
void bfin_demux_gpio_irq(unsigned int inta_irq,
			 struct irq_desc *desc)
{
	unsigned int irq;

	switch (inta_irq) {
#if defined(BF537_FAMILY)
	case IRQ_PF_INTA_PG_INTA:
		bfin_demux_gpio_block(IRQ_PF0);
		irq = IRQ_PG0;
		break;
	case IRQ_PH_INTA_MAC_RX:
		irq = IRQ_PH0;
		break;
#elif defined(BF533_FAMILY)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(BF538_FAMILY)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		/* Wired up to a line we do not demux — a wiring bug. */
		BUG();
		return;
	}

	bfin_demux_gpio_block(irq);
}
635
636
#else /* CONFIG_BF54x */
637
638
#define NR_PINT_SYS_IRQS	4
#define NR_PINT_BITS		32
#define NR_PINTS		160
#define IRQ_NOT_AVAIL		0xFF

/* A "pint value" packs bank (upper bits) and bit-in-bank (lower 5). */
#define PINT_2_BANK(x)		((x) >> 5)
#define PINT_2_BIT(x)		((x) & 0x1F)
#define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))

/* Translation tables between GPIO IRQ numbers (offset by SYS_IRQS)
 * and PINT bank/bit positions; rebuilt by init_pint_lut(). */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* Register layout of one BF54x PINT block (memory-mapped). */
struct pin_int_t {
	unsigned int mask_set;
	unsigned int mask_clear;
	unsigned int request;
	unsigned int assign;
	unsigned int edge_set;
	unsigned int edge_clear;
	unsigned int invert_set;
	unsigned int invert_clear;
	unsigned int pinstate;
	unsigned int latch;
};

/* MMIO pointers to the four PINT blocks. */
static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
	(struct pin_int_t *)PINT0_MASK_SET,
	(struct pin_int_t *)PINT1_MASK_SET,
	(struct pin_int_t *)PINT2_MASK_SET,
	(struct pin_int_t *)PINT3_MASK_SET,
};
669
670
/*
 * Translate a PINT bank plus byte-assignment value into the first GPIO
 * IRQ of the mapped 16-pin half-port: banks 0-1 address ports PA/PB,
 * banks 2-3 address ports PC and above.
 */
inline unsigned int get_irq_base(u32 bank, u8 bmap)
{
	unsigned int port_base = (bank < 2) ? IRQ_PA0 : IRQ_PC0;

	return port_base + bmap * 16;
}
682
683
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
/* Rebuild the irq<->pint translation tables from the current
 * PINTx_ASSIGN register contents. */
void init_pint_lut(void)
{
	u16 bank, bit, irq_base, bit_pos;
	u32 pint_assign;
	u8 bmap;

	/* Pins with no PINT routing stay marked IRQ_NOT_AVAIL. */
	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {

		pint_assign = pint[bank]->assign;

		for (bit = 0; bit < NR_PINT_BITS; bit++) {

			/* Each byte of ASSIGN selects the half-port for
			 * the corresponding group of 8 PINT bits. */
			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;

			irq_base = get_irq_base(bank, bmap);

			/* Odd byte groups map the upper 8 pins. */
			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
			bit_pos = bit + bank * NR_PINT_BITS;

			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
		}
	}
}
710
711
/* Ack a BF54x pin interrupt: for both-edge pins flip the invert bit so
 * the next opposite edge is caught, then clear the latched request. */
static void bfin_gpio_ack_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}
	pint[bank]->request = pintbit;

}

/* Same as ack above, plus masking the pin in the same operation. */
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}

	pint[bank]->request = pintbit;
	pint[bank]->mask_clear = pintbit;
}

/* Mask a BF54x pin interrupt via the PINT mask_clear register. */
static void bfin_gpio_mask_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];

	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
}

/* Unmask a BF54x pin interrupt via the PINT mask_set register. */
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	pint[bank]->mask_set = pintbit;
}
759
760
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
761
{
762
unsigned int irq = d->irq;
763
u32 gpionr = irq_to_gpio(irq);
764
u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
765
766
if (pint_val == IRQ_NOT_AVAIL) {
767
printk(KERN_ERR
768
"GPIO IRQ %d :Not in PINT Assign table "
769
"Reconfigure Interrupt to Port Assignemt\n", irq);
770
return -ENODEV;
771
}
772
773
if (__test_and_set_bit(gpionr, gpio_enabled))
774
bfin_gpio_irq_prepare(gpionr);
775
776
bfin_gpio_unmask_irq(d);
777
778
return 0;
779
}
780
781
/* Disable a BF54x GPIO IRQ and release the underlying pin. */
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	bfin_gpio_mask_irq(d);
	__clear_bit(gpionr, gpio_enabled);
	bfin_gpio_irq_free(gpionr);
}
789
790
/* Program a BF54x pin's trigger type through its PINT block and pick
 * the matching flow handler.  Returns 0 on success, -ENODEV when the
 * pin is not routed to any PINT, or the pin-claim error. */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (pint_val == IRQ_NOT_AVAIL)
		return -ENODEV;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		/* Claim the pin for IRQ use the first time through. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		/* IRQ_TYPE_NONE: just mark the pin unused. */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
	else
		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		/* Both-edge emulation: invert relative to the current
		 * pin level so the next transition always triggers. */
		if (gpio_get_value(gpionr))
			pint[bank]->invert_set = pintbit;
		else
			pint[bank]->invert_clear = pintbit;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		pint[bank]->edge_set = pintbit;
		bfin_set_irq_handler(irq, handle_edge_irq);
	} else {
		pint[bank]->edge_clear = pintbit;
		bfin_set_irq_handler(irq, handle_level_irq);
	}

	return 0;
}
849
850
#ifdef CONFIG_PM
/* Wake control for a BF54x pin IRQ is delegated to the system IRQ of
 * the PINT bank the pin belongs to. */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	u32 pint_irq;
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 bank = PINT_2_BANK(pint_val);

	switch (bank) {
	case 0:
		pint_irq = IRQ_PINT0;
		break;
	case 2:
		pint_irq = IRQ_PINT2;
		break;
	case 3:
		pint_irq = IRQ_PINT3;
		break;
	case 1:
		pint_irq = IRQ_PINT1;
		break;
	default:
		return -EINVAL;
	}

	bfin_internal_set_wake(pint_irq, state);

	return 0;
}
#else
# define bfin_gpio_set_wake NULL
#endif
881
882
/* Chained handler for the four PINT system IRQs: walk the bank's
 * latched request bits and dispatch each pending pin's virtual IRQ
 * via the pint->irq lookup table. */
void bfin_demux_gpio_irq(unsigned int inta_irq,
			 struct irq_desc *desc)
{
	u32 bank, pint_val;
	u32 request, irq;

	switch (inta_irq) {
	case IRQ_PINT0:
		bank = 0;
		break;
	case IRQ_PINT2:
		bank = 2;
		break;
	case IRQ_PINT3:
		bank = 3;
		break;
	case IRQ_PINT1:
		bank = 1;
		break;
	default:
		return;
	}

	pint_val = bank * NR_PINT_BITS;

	request = pint[bank]->request;

	while (request) {
		if (request & 1) {
			irq = pint2irq_lut[pint_val] + SYS_IRQS;
			bfin_handle_irq(irq);
		}
		pint_val++;
		request >>= 1;
	}

}
919
#endif
920
921
/* irq_chip for GPIO interrupts (both the PINT-based BF54x variant and
 * the MASKA-based variant share these callback names). */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.irq_ack = bfin_gpio_ack_irq,
	.irq_mask = bfin_gpio_mask_irq,
	.irq_mask_ack = bfin_gpio_mask_ack_irq,
	.irq_unmask = bfin_gpio_unmask_irq,
	.irq_disable = bfin_gpio_mask_irq,
	.irq_enable = bfin_gpio_unmask_irq,
	.irq_set_type = bfin_gpio_irq_type,
	.irq_startup = bfin_gpio_irq_startup,
	.irq_shutdown = bfin_gpio_irq_shutdown,
	.irq_set_wake = bfin_gpio_set_wake,
};
934
935
/* Point the core event vector table (EVT2..EVT15) at our handlers.
 * NOTE(review): EVT4 is also skipped here — presumably reserved on
 * this core; confirm against the hardware reference manual. */
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	/* Ensure the vector writes complete before returning. */
	CSYNC();
}
956
957
/*
958
* This function should be called during kernel startup to initialize
959
* the BFin IRQ handling routines.
960
*/
961
962
int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
	/* NOTE(review): the value written is SIC_UNMASK_ALL from the mach
	 * headers despite the "disable" wording — confirm its meaning there. */
#ifdef SIC_IMASK0
	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef SIC_IMASK2
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# ifdef CONFIG_SMP
	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

	local_irq_disable();

#ifdef CONFIG_BF54x
# ifdef CONFIG_PINTx_REASSIGN
	pint[0]->assign = CONFIG_PINT0_ASSIGN;
	pint[1]->assign = CONFIG_PINT1_ASSIGN;
	pint[2]->assign = CONFIG_PINT2_ASSIGN;
	pint[3]->assign = CONFIG_PINT3_ASSIGN;
# endif
	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
	init_pint_lut();
#endif

	/* Attach a chip and flow handler to every system IRQ; the GPIO
	 * port and MAC-error lines get chained demux handlers instead. */
	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			irq_set_chip(irq, &bfin_core_irqchip);
		else
			irq_set_chip(irq, &bfin_internal_irqchip);

		switch (irq) {
#if defined(BF537_FAMILY)
		case IRQ_PH_INTA_MAC_RX:
		case IRQ_PF_INTA_PG_INTA:
#elif defined(BF533_FAMILY)
		case IRQ_PROG_INTA:
#elif defined(CONFIG_BF54x)
		case IRQ_PINT0:
		case IRQ_PINT1:
		case IRQ_PINT2:
		case IRQ_PINT3:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
		case IRQ_PORTF_INTA:
		case IRQ_PORTG_INTA:
		case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
		case IRQ_PROG0_INTA:
		case IRQ_PROG1_INTA:
		case IRQ_PROG2_INTA:
#elif defined(BF538_FAMILY)
		case IRQ_PORTF_INTA:
#endif
			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
			break;
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
		case IRQ_MAC_ERROR:
			irq_set_chained_handler(irq,
						bfin_demux_mac_status_irq);
			break;
#endif
#ifdef CONFIG_SMP
		case IRQ_SUPPLE_0:
		case IRQ_SUPPLE_1:
			irq_set_handler(irq, handle_percpu_irq);
			break;
#endif

#ifdef CONFIG_TICKSOURCE_CORETMR
		case IRQ_CORETMR:
# ifdef CONFIG_SMP
			irq_set_handler(irq, handle_percpu_irq);
# else
			irq_set_handler(irq, handle_simple_irq);
# endif
			break;
#endif

#ifdef CONFIG_TICKSOURCE_GPTMR0
		case IRQ_TIMER0:
			irq_set_handler(irq, handle_simple_irq);
			break;
#endif

		default:
#ifdef CONFIG_IPIPE
			irq_set_handler(irq, handle_level_irq);
#else
			irq_set_handler(irq, handle_simple_irq);
#endif
			break;
		}
	}

	init_mach_irq();

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
					 handle_level_irq);
#endif
	/* if configured as edge, then will be changed to do_edge_IRQ */
	for (irq = GPIO_IRQ_BASE;
	     irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
					 handle_level_irq);

	/* Mask everything at the core, then clear any latched interrupts
	 * by writing ILAT back to itself (W1C), syncing between steps. */
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to setup IARs before interrupts enabled */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

	/* This implicitly covers ANOMALY_05000171
	 * Boot-ROM code modifies SICA_IWRx wakeup registers
	 */
#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions. See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif

	return 0;
}
1122
1123
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/* Resolve an IVG vector to the pending system IRQ: snapshot the
 * masked SIC status, then scan this priority level's slice of
 * ivg_table for the first asserted source.  Returns -1 if nothing in
 * this level is pending (spurious). */
static int vec_to_irq(int vec)
{
	struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
	struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
	unsigned long sic_status[3];

	/* Core timer is the common fast path and needs no SIC scan. */
	if (likely(vec == EVT_IVTMR_P))
		return IRQ_CORETMR;

#ifdef SIC_ISR
	sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
	if (smp_processor_id()) {
# ifdef SICB_ISR0
		/* This will be optimized out in UP mode. */
		sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
		sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
	} else {
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
	}
#endif
#ifdef SIC_ISR2
	sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif

	for (;; ivg++) {
		if (ivg >= ivg_stop)
			return -1;
#ifdef SIC_ISR
		if (sic_status[0] & ivg->isrflag)
#else
		if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
#endif
			return ivg->irqno;
	}
}
1164
1165
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/* Top-level C interrupt entry (non-IPIPE): translate the vector to a
 * system IRQ and hand it to the generic IRQ machinery; silently drop
 * spurious vectors. */
void do_irq(int vec, struct pt_regs *fp)
{
	int irq = vec_to_irq(vec);
	if (irq == -1)
		return;
	asm_do_IRQ(irq, fp);
}
1175
1176
#ifdef CONFIG_IPIPE
1177
1178
/* Return the IVG priority level for @irq: core IRQs map to themselves,
 * peripheral IRQs are located by searching the ivg_table slices built
 * by search_IAR().  Falls back to IVG15 if the IRQ is not found. */
int __ipipe_get_irq_priority(unsigned irq)
{
	int ient, prio;

	if (irq <= IRQ_CORETMR)
		return irq;

	for (ient = 0; ient < NR_PERI_INTS; ient++) {
		struct ivgx *ivg = ivg_table + ient;
		if (ivg->irqno == irq) {
			for (prio = 0; prio <= IVG13-IVG7; prio++) {
				if (ivg7_13[prio].ifirst <= ivg &&
				    ivg7_13[prio].istop > ivg)
					return IVG7 + prio;
			}
		}
	}

	return IVG15;
}
1198
1199
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1200
#ifdef CONFIG_DO_IRQ_L1
1201
__attribute__((l1_text))
1202
#endif
1203
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1204
{
1205
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1206
struct ipipe_domain *this_domain = __ipipe_current_domain;
1207
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1208
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1209
int irq, s = 0;
1210
1211
irq = vec_to_irq(vec);
1212
if (irq == -1)
1213
return 0;
1214
1215
if (irq == IRQ_SYSTMR) {
1216
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1217
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1218
#endif
1219
/* This is basically what we need from the register frame. */
1220
__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1221
__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1222
if (this_domain != ipipe_root_domain)
1223
__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1224
else
1225
__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1226
}
1227
1228
/*
1229
* We don't want Linux interrupt handlers to run at the
1230
* current core priority level (i.e. < EVT15), since this
1231
* might delay other interrupts handled by a high priority
1232
* domain. Here is what we do instead:
1233
*
1234
* - we raise the SYNCDEFER bit to prevent
1235
* __ipipe_handle_irq() to sync the pipeline for the root
1236
* stage for the incoming interrupt. Upon return, that IRQ is
1237
* pending in the interrupt log.
1238
*
1239
* - we raise the TIF_IRQ_SYNC bit for the current thread, so
1240
* that _schedule_and_signal_from_int will eventually sync the
1241
* pipeline from EVT15.
1242
*/
1243
if (this_domain == ipipe_root_domain) {
1244
s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1245
barrier();
1246
}
1247
1248
ipipe_trace_irq_entry(irq);
1249
__ipipe_handle_irq(irq, regs);
1250
ipipe_trace_irq_exit(irq);
1251
1252
if (user_mode(regs) &&
1253
!ipipe_test_foreign_stack() &&
1254
(current->ipipe_flags & PF_EVTRET) != 0) {
1255
/*
1256
* Testing for user_regs() does NOT fully eliminate
1257
* foreign stack contexts, because of the forged
1258
* interrupt returns we do through
1259
* __ipipe_call_irqtail. In that case, we might have
1260
* preempted a foreign stack context in a high
1261
* priority domain, with a single interrupt level now
1262
* pending after the irqtail unwinding is done. In
1263
* which case user_mode() is now true, and the event
1264
* gets dispatched spuriously.
1265
*/
1266
current->ipipe_flags &= ~PF_EVTRET;
1267
__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1268
}
1269
1270
if (this_domain == ipipe_root_domain) {
1271
set_thread_flag(TIF_IRQ_SYNC);
1272
if (!s) {
1273
__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1274
return !test_bit(IPIPE_STALL_FLAG, &p->status);
1275
}
1276
}
1277
1278
return 0;
1279
}
1280
1281
#endif /* CONFIG_IPIPE */
1282
1283