/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the entry point for the 64-bit kernel along
 *  with some early initialization code common to all 64-bit powerpc
 *  variants.
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The primary CPU enters at __start.
 *   3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *      CPUs will enter as directed by "start-cpu" RTAS call, which is
 *      generic_secondary_smp_init, with PIR in r3.
 *   4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *      directed by the "start-cpu" RTAS call, with PIR in r3.
 *   -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode.
 *   2. The primary CPU enters at 0 with device-tree in r3, OPAL base
 *      in r8, and entry in r9 for debugging purposes.
 *   3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
 *      is at generic_secondary_smp_init, with PIR in r3.
 *
 *  For Book3E processors:
 *   1. The MMU is on running in AS0 in a state defined in ePAPR
 *   2. The kernel is entered at __start
 */

/*
 * boot_from_prom and prom_init run at the physical address. Everything
 * after prom and kexec entry run at the virtual address (PAGE_OFFSET).
 * Secondaries run at the virtual address from generic_secondary_common_init
 * onward.
 */

OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative from the start of fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b	__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign 8
	.globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
	.8byte	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.	  */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.8byte	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */

#ifdef CONFIG_RELOCATABLE_TEST
#define RUN_AT_LOAD_DEFAULT 1		/* Test relocation, do not copy to 0 */
#else
#define RUN_AT_LOAD_DEFAULT 0x72756e30  /* "run0" -- relocate to 0 by default */
#endif

	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
	.long	RUN_AT_LOAD_DEFAULT
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E_64
	/* Enable Recoverable Interrupt so an early machine check is survivable */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number */
	mr	r24,r3
	/* stash r4 for book3e */
	mr	r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0)
	cmpdi	0,r12,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E_64
	tovirt(r12,r12)
#endif
	mtctr	r12
	mr	r3,r24
	/*
	 * it may be the case that other platforms have r4 right to
	 * begin with, this gives us some safety in case it is not
	 */
#ifdef CONFIG_PPC_BOOK3E_64
	mr	r4,r25
#else
	li	r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
#endif
CLOSE_FIXED_SECTION(first_256B)

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

#include "interrupt_64.S"

#ifdef CONFIG_PPC_BOOK3E_64
/*
 * The booting_thread_hwid holds the thread id we want to boot in cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long  INVALID_THREAD_HWID
	.align 3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b	12f
11:
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6		/* thread-enable set: releases thread r3 */
13:
	blr

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4		/* thread-enable clear: halts thread r3 */
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr	r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis	r3,BUCSR_INIT@h
	ori	r3,r3,BUCSR_INIT@l
	mtspr	SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */

	mfspr	r3, SPRN_PIR
	cmpwi	r4,0
	bne	1f
	rlwimi	r3, r3, 30, 2, 30
	mtspr	SPRN_PIR, r3
1:
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* Book3E initialization */
	mr	r3,r24
	bl	book3e_secondary_thread_init
	bl	relative_toc

	b	generic_secondary_common_init

#endif /* CONFIG_PPC_BOOK3E_64 */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN

	li	r13,0

	/* Poison TOC */
	li	r2,-1

	mr	r24,r3
	mr	r25,r4

	/* turn on 64-bit mode */
	bl	enable_64b_mode

#ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	book3e_secondary_core_init
	/* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz     r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init))
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b
20:
#else
	/* Now the MMU is off, can branch to our PAGE_OFFSET address */
	bcl	20,31,$+4
1:	mflr	r11
	addi	r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr	r11
	bctr
2:
	bl	relative_toc
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
#ifndef CONFIG_SMP
	b	kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointer	 */
	ld	r8,0(r8)		/* Get base vaddr of array	 */
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	LOAD_REG_IMMEDIATE(r7, NR_CPUS)
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
	lwz	r7,0(r7)		/* also the max paca allocated 	 */
#endif
	li	r5,0			/* logical cpu id                */
1:
	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
	ldx	r13,r9,r8		/* r13 = paca_ptrs[cpu id]       */
	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
	cmpw	r6,r24			/* Compare to our id             */
	beq	2f
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist     */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	kexec_wait		/* next kernel might do better	 */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E_64
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another  */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* Create a temp kernel stack for use before relocation is on. */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_MIN_SIZE

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
	beq	3f
#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* ABIv1 function pointers are descriptors; load the entry address */
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	b	__secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
SYM_FUNC_START_LOCAL(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr				/* MMU already off, nothing to do */
	mflr	r4
	andc	r3,r3,r0		/* clear IR/DR in the MSR image */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid				/* return to caller with MMU off */
	b	.	/* prevent speculative execution */
SYM_FUNC_END(__mmu_off)

SYM_FUNC_START_LOCAL(start_initialization_book3s)
	mflr	r25

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	__mmu_off

	/* Now the MMU is off, can return to our PAGE_OFFSET address */
	tovirt(r25,r25)
	mtlr	r25
	blr
SYM_FUNC_END(start_initialization_book3s)
#endif

/*
 * Here is our main kernel entry point. We support currently 2 kind of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
	bl	enable_64b_mode

	/* Zero r13 (paca) so early program check / mce don't use it */
	li	r13,0

	/* Poison TOC */
	li	r2,-1

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

	/* Get TOC pointer (current runtime address) */
	bl	relative_toc

	/* These functions return to the virtual (PAGE_OFFSET) address */
#ifdef CONFIG_PPC_BOOK3E_64
	bl	start_initialization_book3e
#else
	bl	start_initialization_book3s
#endif /* CONFIG_PPC_BOOK3E_64 */

	/* Get TOC pointer, virtual */
	bl	relative_toc

	/* find out where we are now */

	/* OPAL doesn't pass base address in r4, have to derive it. */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	b	__after_prom_start

__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Get TOC pointer, non-virtual */
	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	CFUNC(prom_init)
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
	.previous

__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	mr	r25,r26		/* then use current kernel base */
	beq	1f
	LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */
1:	mr	r3,r25
	bl	relocate
#if defined(CONFIG_PPC_BOOK3E_64)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
#endif

	/*
	 * We need to run with _stext at physical address PHYSICAL_START.
	 * This will leave some code in the first 256B of
	 * real memory, which are reserved for software use.
	 *
	 * Note: This process overwrites the OF exception vectors.
	 */
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
	mr	r4,r26		/* Load the virtual source address into r4 */
	cmpld	r3,r4		/* Check if source == dest */
	beq	9f		/* If so skip the copy  */
	li	r6,0x100	/* Start offset, the first 0x100 */
				/* bytes were copied earlier.	 */

#ifdef CONFIG_RELOCATABLE
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1
	bne	3f

#ifdef CONFIG_PPC_BOOK3E_64
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b	5f
3:
#endif
	/* # bytes of memory to copy */
	lis	r5,(ABS_ADDR(copy_to_here, text))@ha
	addi	r5,r5,(ABS_ADDR(copy_to_here, text))@l

	bl	copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	/* Jump to the copy of this code that we just made */
	addis	r8,r3,(ABS_ADDR(4f, text))@ha
	addi	r12,r8,(ABS_ADDR(4f, text))@l
	mtctr	r12
	bctr

.balign 8
p_end: .8byte _end - copy_to_here

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
	addis	r8,r26,(ABS_ADDR(p_end, text))@ha
	ld	r8,(ABS_ADDR(p_end, text))@l(r8)
	add	r5,r5,r8
5:	bl	copy_and_flush		/* copy the rest */

9:	b	start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	isync
	blr

_ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* Branch to our PAGE_OFFSET address */
	bcl	20,31,$+4
1:	mflr	r11
	addi	r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr	r11
	bctr
2:
	bl	relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca_ptrs)	/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca_ptrs array */
	sldi	r5,r24,3		/* get paca_ptrs[] index from cpu id */
	ldx	r13,r5,r4		/* r13 = paca_ptrs[cpu id] */
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_MIN_SIZE

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	       = stack pointer (real addr of temp stack)
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/*
	 * Do early setup for this CPU, in particular initialising the MMU so we
	 * can turn it on below. This is a call to C, which is OK, we're still
	 * running on the emergency stack.
	 */
	bl	CFUNC(early_setup_secondary)

	/*
	 * The primary has initialized our kernel stack for us in the paca, grab
	 * it and put it in r1. We must *not* use it until we turn on the MMU
	 * below, because it may not be inside the RMO.
	 */
	ld	r1, PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r7,IRQS_DISABLED
	stb	r7,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
	LOAD_PACA_TOC()
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	CFUNC(start_secondary)
	b	.
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	CFUNC(start_secondary)
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
SYM_FUNC_START_LOCAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E_64
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E_64 */
	LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr
SYM_FUNC_END(enable_64b_mode)

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We branch to the virtual address
 * while still in real mode then call relative_toc again to handle
 * this.
 */
_GLOBAL(relative_toc)
#ifdef CONFIG_PPC_KERNEL_PCREL
	/* pc-relative kernels don't use a TOC; trap if r2 isn't the poison value */
	tdnei	r2,-1
	blr
#else
	mflr	r0
	bcl	20,31,$+4
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
	mtlr	r0
	blr

.balign 8
p_toc:	.8byte	.TOC. - 0b
#endif

/*
 * This is where the main kernel code starts.
 */
__REF
start_here_multiplatform:
	/* Adjust TOC for moved kernel. Could adjust when moving it instead. */
	bl	relative_toc

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E_64
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* set up a stack pointer */
	LOAD_REG_ADDR(r3,init_thread_union)
	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
	add	r1,r3,r1
	li	r0,0
	stdu	r0,-STACK_FRAME_MIN_SIZE(r1)

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

#ifdef CONFIG_KASAN
	bl	CFUNC(kasan_early_init)
#endif
	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
	mtctr	r12
	bctrl		/* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	LOAD_PACA_TOC()

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	CFUNC(start_kernel)

	/* Not reached */
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
	.previous