Path: blob/master/arch/powerpc/kernel/exceptions-64s.S
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/exception-64s.h>
#include <asm/ptrace.h>

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	. = 0x100
	.globl __start_interrupts
__start_interrupts:

	.globl system_reset_pSeries;
system_reset_pSeries:
	HMT_MEDIUM;
	DO_KVM	0x100;
	SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
	/* Running native on arch 2.06 or later, check if we are
	 * waking up from nap. We only handle no state loss and
	 * supervisor state loss. We do -not- handle hypervisor
	 * state loss at this time.
	 */
	mfspr	r13,SPRN_SRR1
	rlwinm	r13,r13,47-31,30,31
	cmpwi	cr0,r13,1
	bne	1f
	b	.power7_wakeup_noloss
1:	cmpwi	cr0,r13,2
	bne	1f
	b	.power7_wakeup_loss
	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
	 * but for now, let's just stay stuck here
	 */
1:	cmpwi	cr0,r13,3
	beq	.
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
#endif /* CONFIG_PPC_P7_NAP */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)

	. = 0x200
_machine_check_pSeries:
	HMT_MEDIUM
	DO_KVM	0x200
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)

	. = 0x300
	.globl data_access_pSeries
data_access_pSeries:
	HMT_MEDIUM
	DO_KVM	0x300
	SET_SCRATCH0(r13)
BEGIN_FTR_SECTION
	GET_PACA(r13)
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	ld	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r12)
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
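
/*
 * 0x380 below is the data SLB miss vector: the faulting address is read
 * from DAR, r3 and r9-r12 (plus the original r13 via the scratch SPR) are
 * stashed in the PACA exslb save area, and control goes to
 * .slb_miss_realmode, either by a direct branch or via LOAD_HANDLER and
 * the CTR when the kernel is relocatable.
 */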
	. = 0x380
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x380
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)

	. = 0x480
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	HMT_MEDIUM
	DO_KVM	0x480
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif

	/* We open code these as we can't have a ". = x" (even with
	 * x = "." within a feature section
	 */
	. = 0x500;
	.globl hardware_interrupt_pSeries;
	.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
	ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)

	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)

	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
	MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)

	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)

	. = 0xc00
	.globl	system_call_pSeries
system_call_pSeries:
	HMT_MEDIUM
	DO_KVM	0xc00
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	GET_PACA(r13)
	mfspr	r11,SPRN_SRR0
	mfspr	r12,SPRN_SRR1
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, system_call_entry)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.

	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)

	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
	 * out of line to handle them
	 */
	. = 0xe00
	b	h_data_storage_hv
	. = 0xe20
	b	h_instr_storage_hv
	. = 0xe40
	b	emulation_assist_hv
	. = 0xe50
	b	hmi_exception_hv
	. = 0xe60
	b	hmi_exception_hv

	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
performance_monitor_pSeries_1:
	. = 0xf00
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	. = 0xf20
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	. = 0xf40
	b	vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

	. = 0x3000

/*** Out of line interrupts support ***/

	/* moved from 0xe00 */
	STD_EXCEPTION_HV(., 0xe00, h_data_storage)
	STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
	STD_EXCEPTION_HV(., 0xe40, emulation_assist)
	STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	rfid
	b	.

masked_Hinterrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_HSRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_HSRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	GET_SCRATCH0(r13)
	hrfid
	b	.

	.align	7
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	GET_SCRATCH0(r10)
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
	.align 7
system_reset_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)

	.globl machine_check_fwnmi
	.align 7
machine_check_fwnmi:
	HMT_MEDIUM
	SET_SCRATCH0(r13)		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	GET_SCRATCH0(r10)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	clrrdi	r12,r13,32
	mfmsr	r10
	mfspr	r11,SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SRR0,r12
	mfspr	r12,SRR1			/* and SRR1 */
	mtspr	SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */

/* KVM's trampoline code needs to be close to the interrupt handlers */

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

	.align	7
	.globl	__end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	.align	7
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	FINISH_NAP
	DISABLE_INTS
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except

	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

	.align	7
system_call_entry:
	b	system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
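	/* The emergency frame is now set up; pass its register area to
	 * kernel_bad_stack(), which panics, and loop in case it returns. */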
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.align	7
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */

	.align	7
	.globl	h_data_storage_common
h_data_storage_common:
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unknown_exception
	b	.ret_from_except

	.align	7
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */

	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
	.align	7
	.globl	slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	beq-	slb_miss_fault

	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10

.machine push
.machine "power4"
	mtcrf	0x80,r9
.machine pop

	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SRR0,r11
	mtspr	SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.

slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)
	li	r5,0
	std	r4,_DAR(r1)
	std	r5,_DSISR(r1)
	b	handle_page_fault

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode

	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10
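
	/*
	 * If MSR_RI was clear in the saved SRR1 the exception is not
	 * recoverable: branch to 2: below, which redirects to unrecov_slb.
	 * Otherwise restore CR and the saved registers and rfid back to
	 * the interrupted code.
	 */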
	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfspr	r11,SPRN_SRR0
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	DISABLE_INTS
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

	.align	7
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
	FINISH_NAP
hardware_interrupt_entry:
	DISABLE_INTS
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

	.align	7
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.alignment_exception
	b	.ret_from_except

	.align	7
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.program_check_exception
	b	.ret_from_except

	.align	7
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return

	.align	7
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.altivec_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ENABLE_INTS
	bl	.vsx_unavailable_exception
	b	.ret_from_except

	.align	7
	.globl	__end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f

	.globl	fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	ld	r11,_NIP(r1)
	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	andi.	r3,r12,MSR_PR
	beq	2f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtxer	r6
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)

	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1

	mtspr	SPRN_SRR1,r12
	mtspr	SPRN_SRR0,r11
	REST_4GPRS(10, r1)
	ld	r1,GPR1(r1)
	rfid
	b	.	/* prevent speculative execution */

unrecov_fer:
	bl	.save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b


/*
 * Hash table stuff
 */
	.align	7
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault

BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them. It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */

	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it now.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .arch_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.arch_local_irq_restore
	b	11f

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	.save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_dabr
	b	.ret_from_except_lite

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

13:	b	.ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	bl	.save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.low_hash_fault
	b	.ret_from_except

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	.save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	.bad_page_fault
	b	.ret_from_except

/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
	.align	7
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */

	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* Calculate VSID */
	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */

	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste	*/
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick for only searching the primary group for now.		*/
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number ;  OR in 1 to avoid entry 0	*/
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10

	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry			*/
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate	*/

	isync			/* mark the entry invalid		*/
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rotldi	r11,r11,8
	std	r11,0(r10)
	sync

	clrrdi	r11,r11,28	/* Get the esid part of the ste		*/
	slbie	r11

2:	std	r9,8(r10)	/* Store the vsid part of the ste	*/
	eieio

	mfspr	r11,SPRN_DAR	/* Get the new esid			*/
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID		*/
	ori	r11,r11,0x90	/* Turn on valid and kp			*/
	std	r11,0(r10)	/* Put new entry back into the stab	*/

	sync

	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */

	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb

	mtcrf	0x80,r9			/* restore CR */

	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */
	.zero	40			/* xRsvd */
	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */
	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap above), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x8000 */
	.globl initial_stab
initial_stab:
	.space	4096