/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Dispatches through the exception_handlers[] table, indexed by the
 * Cause.ExcCode field.  k0/k1 are the kernel scratch registers and may
 * be clobbered freely here (hence .set noat is safe).
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		# isolate ExcCode (bits 6..2), already
					# scaled by 4 for a 32-bit table index
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# double it: table entries are 8 bytes
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Same table dispatch as above, but VCED (ExcCode 31) and VCEI
 * (ExcCode 14) are intercepted and handled inline below.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# VCED cause value for comparison
	andi	k1, k1, 0x7c		# isolate ExcCode<<2
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		# (delay slot) VCEI cause value
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1		# (delay slot) 8-byte table entries
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line pointed
	 * to by c0_badvaddr because after return from this exception handler
	 * the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)		# invalidate the D-cache line
	cache	Hit_Writeback_Inv_SD, (k0)	# flush matching S-cache line
#ifdef CONFIG_PROC_FS
	/* Bump the VCED statistics counter exported via /proc. */
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	/* Bump the VCEI statistics counter exported via /proc. */
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.section .cpuidle.text,"ax"
	/* Align to 32 bytes for the maximum idle interrupt region size. */
	.align	5
LEAF(r4k_wait)
	/* Keep the ISA bit clear for calculations on local labels here. */
0:	.fill	0
	/* Start of idle interrupt region. */
	local_irq_enable
	/*
	 * If an interrupt lands here, before going idle on the next
	 * instruction, we must *NOT* go idle since the interrupt could
	 * have set TIF_NEED_RESCHED or caused a timer to need resched.
	 * Fall through -- see skipover_handler below -- and have the
	 * idle loop take care of things.
	 */
1:	.fill	0
	/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up.  */
	.if	1b - 0b > 32
	.error	"overlong idle interrupt region"
	.elseif	1b - 0b > 8
	.align	4
	.endif
2:	.fill	0
	.equ	r4k_wait_idle_size, 2b - 0b
	/* End of idle interrupt region; size has to be a power of 2. */
	.set	MIPS_ISA_ARCH_LEVEL_RAW
r4k_wait_insn:
	wait
r4k_wait_exit:
	.set	mips0
	local_irq_disable
	jr	ra
	END(r4k_wait)
	.previous

/*
 * Build the skip-over prologue for an interrupt handler:  if EPC lies
 * inside the idle interrupt region above (between 0: and 2:), advance
 * EPC past the wait instruction to r4k_wait_exit so we do not go idle
 * with work pending, then fall into \handler.
 */
	.macro	BUILD_SKIPOVER_PROLOGUE handler
	FEXPORT(skipover_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	/* Subtract/add 2 to let the ISA bit propagate through the mask. */
	PTR_LA	k1, r4k_wait_insn - 2
	ori	k0, r4k_wait_idle_size - 2	# mask EPC down to region base
	.set	noreorder
	bne	k0, k1, \handler		# not in the idle region: plain entry
	 PTR_ADDIU	k0, r4k_wait_exit - r4k_wait_insn + 2	# (delay slot)
	.set	reorder
	MTC0	k0, CP0_EPC			# resume after the wait insn
	.set	pop
	.endm

	.align	5
	BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP		# R3000: previous IE lives in IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0			# return to interrupted code ...
	 rfe				# (delay slot) ... restoring KU/IE stack
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret				# interrupts were off: just return
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	/* Build the full 64-bit address of irq_stack piecewise. */
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
	BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
	/*
	 * The ori below sits in the jr delay slot; its immediate is
	 * patched per-vector via the exported except_vec_vi_ori label.
	 */
FEXPORT(except_vec_vi_ori)
	 ori	v0, zero, 0		/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
	EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF may clobber v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	/* Build the full 64-bit address of irq_stack piecewise. */
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the debug save register
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
	/*
	 * Hand-rolled ll/sc spinlock serialising use of the shared
	 * ejtag_debug_buffer staging slot across CPUs.
	 */
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b			# spin while the lock is held
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b			# sc failed: retry from the load
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)		# stage k1 in the shared buffer

	/* Compute this CPU's ejtag_debug_buffer_per_cpu slot. */
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	/* Move the staged k1 into the per-CPU slot. */
	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)		# release the lock
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

	/* Recover the saved k1 from the (per-CPU) buffer. */
#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE		# restore k0
	.set	mips32
	deret				# debug exception return
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb				# make the Status change visible
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	/*
	 * The __build_clear_* macros below provide the per-exception
	 * "clear" step plugged into __BUILD_HANDLER: they fetch any
	 * auxiliary fault state into a1 and set the IRQ state the C
	 * handler expects.
	 */
	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31		# a1 = FP control/status for do_fpe()
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR		# a1 = MSA control/status
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)	# record faulting address in pt_regs
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	/*
	 * NOTE(review): the .comm symbol below lacks the underscore used
	 * by the loads/stores above (exception_count\exception vs
	 * exception_count_\exception) — they would resolve to different
	 * symbols.  This macro appears unused here; confirm before fixing.
	 */
	.comm	exception_count\exception, 8, 8
	.endm

	/*
	 * Emit a complete exception handler: save registers, run the
	 * \clear step, optionally print (\verbose), then tail into the
	 * C-level do_\handler() and return via ret_from_exception.
	 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1	# round EPC down to its ...
	PTR_SLL	k0, _PAGE_SHIFT + 1	# ... even page boundary
	or	k1, k0			# VPN from EPC, ASID preserved
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri		/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		# ISA bit set -> microMIPS encoding
	beqz	k0, 1f
	 xor	k1, k0			# clear the ISA bit to address the insn
	lhu	k0, (k1)		# fetch the two microMIPS halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16		# reassemble the 32-bit opcode
	lui	k0, 0x007d		# expected microMIPS rdhwr v1,$29
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03		# expected MIPS32 rdhwr v1,$29
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri		# microMIPS not supported here
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK	# round k1 down to the ...
	xori	k1, _THREAD_MASK	# ... thread_info base
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = emulated TLS pointer
	LONG_ADDIU	k0, 4		# skip the rdhwr instruction
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
	/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif