/* Path: arch/blackfin/mach-common/interrupt.S */
/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <[email protected]>
 *               Kenneth Albanowski <[email protected]>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */

/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
__common_int_entry:
	/* Save the remainder of the register file that SAVE_CONTEXT-style
	 * entry code expects on the stack (pt_regs layout, pushed in
	 * reverse field order). */
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to other method of keeping interrupts disabled.  */
#ifdef CONFIG_DEBUG_HWERR
	/* Keep the hardware-error event (and above) enabled while IRQs are
	 * masked, so HWERRs are not silently deferred during debug builds. */
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	[--sp] = r0;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	r0 = [sp++];
#endif
	[--sp] = RETI;  /* orig_pc */
	/* Clear all L registers.  */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	r1 = sp;	/* pt_regs pointer is the 2nd argument */
	SP += -12;
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	SP += 12;
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */

#ifdef CONFIG_PREEMPT
	/* Bump the preempt count in thread_info (found by masking the
	 * stack pointer down to the kernel-stack page boundary). */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT];	/* get preempt count */
	r7 += 1;		/* increment it */
	[p5 + TI_PREEMPT] = r7;
#endif
	pseudo_long_call _do_irq, p2;

#ifdef CONFIG_PREEMPT
	r7 += -1;
	[p5 + TI_PREEMPT] = r7;	/* restore preempt count */
#endif

	SP += 12;
#endif /* CONFIG_IPIPE */
	pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;

/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* In case a single action kicks off multiple memory transactions, (like
	 * a cache line fetch, - this can cause multiple hardware errors, let's
	 * catch them all. First - make sure all the actions are complete, and
	 * the core sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors
	 * To make sure we don't hang forever, only do it 10 times
	 */
	R0 = 0;		/* R0 = iteration count */
	R2 = 10;	/* R2 = iteration limit */
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK a hardware error is pending - clear it */
	R1 = EVT_IVHW_P;
	[P0] = R1;
	R0 += 1;
	/* NOTE(review): this compares R1 (still EVT_IVHW_P) against the
	 * limit in R2, not the iteration count in R0 — looks like it was
	 * meant to be "CC = R0 == R2"; confirm against upstream before
	 * changing, left as-is here. */
	CC = R1 == R2;
	if CC JUMP 2f;
	JUMP 1b;
2:
	# We are going to dump something out, so make sure we print IPEND properly
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p5;
	SP += 12;

#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	pseudo_long_call _ret_from_exception, p2;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)

/* Interrupt routine for evt2 (NMI).
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
#else
	/* Not take account of CPLBs, this handler will not return */
	SAVE_ALL_SYS
	r0 = sp;
	r1 = retn;
	[sp + PT_PC] = r1;
	trace_buffer_save(p4,r5);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	SP += -12;
	call _do_nmi;
	SP += 12;
1:
	jump 1b;	/* _do_nmi never returns; spin just in case */
#endif
	rtn;
ENDPROC(_evt_nmi)

/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)


/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	pseudo_long_call _system_call, p2;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)

#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this feature in two occasions:
 *
 * - before branching to __ipipe_irq_tail_hook as requested by a high
 *   priority domain after the pipeline delivered an interrupt,
 *   e.g. such as Xenomai, in order to start its rescheduling
 *   procedure, since we may not switch tasks when IRQ levels are
 *   nested on the Blackfin, so we have to fake an interrupt return
 *   so that we may reschedule immediately.
 *
 * - before branching to __ipipe_sync_root(), in order to play any interrupt
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough so that Linux IRQ handlers may
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   to using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	p0 = r0;
	/* Drop to EVT15 by returning (rti) to the local label 1f. */
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);	/* invoke the user-defined routine */
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;	/* Branches to _evt_evt14 */
2:
	jump 2b;	/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */