/*-
 * Copyright (C) 2010-2016 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * locore for 64-bit AIM PowerPC: early kernel entry points (direct boot,
 * kexec primary and AP entry), kernel self-relocation, and the restart
 * trampolines used to re-enter __start after taking over the MMU.
 * Assembled with GNU as; ABI (ELFv1/ELFv2) selected via _CALL_ELF below.
 */

#include "assym.inc"

#include <sys/syscall.h>

#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
#include <machine/vmparam.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

/* Glue for linker script */
.globl  kernbase
.set    kernbase, KERNBASE

/*
 * Globals
 */
        .data
        .align 3
GLOBAL(__startkernel)
        .llong  begin                   /* kernel image start, patched at link */
GLOBAL(__endkernel)
        .llong  end                     /* kernel image end */
GLOBAL(can_wakeup)
        .llong  0x0

        .align  4
#define TMPSTKSZ        16384           /* 16K temporary stack */
GLOBAL(tmpstk)
        .space  TMPSTKSZ

TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)

#ifdef KDB
#define TRAPSTKSZ       8192            /* 8k trap stack */
GLOBAL(trapstk)
        .space  TRAPSTKSZ
TOC_ENTRY(trapstk)
#endif

/*
 * Entry point for bootloaders that do not fully implement ELF and start
 * at the beginning of the image (kexec, notably). In its own section so
 * that it ends up before any linker-generated call stubs and actually at
 * the beginning of the image. kexec on some systems also enters at
 * (start of image) + 0x60, so put a spin loop there.
 */
        .section ".text.kboot", "x", @progbits
kbootentry:
#ifdef __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        b __start
. = kbootentry + 0x40   /* Magic address used in platform layer */
        /*
         * NOTE(review): the exported name below differs from the local
         * label — confirm smp_spin_sem is the symbol the platform layer
         * expects at image offset 0x40.
         */
        .global smp_spin_sem
ap_kexec_spin_sem:
        .long   -1
. = kbootentry + 0x60   /* Entry point for kexec APs */
ap_kexec_start:         /* At 0x60 past start, copied to 0x60 by kexec */
        /* r3 set to CPU ID by kexec */

        /* Invalidate icache for low-memory copy and jump there */
        li      %r0,0x80
        dcbst   0,%r0
        sync
        icbi    0,%r0
        isync
        ba      0x80            /* Absolute branch to next inst */

. = kbootentry + 0x80   /* Aligned to cache line */
1:      or      31,31,31        /* yield */
        sync
        lwz     %r1,0x40(0)     /* Spin on ap_kexec_spin_sem */
        cmpw    %r1,%r3         /* Until it equals our CPU ID */
        bne     1b

        /* Released */
        or      2,2,2           /* unyield */

        /* Make sure that it will be software reset. Clear SRR1 */
        li      %r1,0
        mtsrr1  %r1
        ba      EXC_RST         /* enter via the system-reset vector */

/*
 * Now start the real text section
 */

        .text
        .globl  btext
btext:                          /* traditional symbol at start of kernel text */

/*
 * Main kernel entry point.
 *
 * Calling convention:
 * r3: Flattened Device Tree pointer (or zero)
 * r4: ignored
 * r5: OF client interface pointer (or zero)
 * r6: Loader metadata pointer (or zero)
 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
 */
        .text
_NAKED_ENTRY(__start)

#ifdef __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        /* Set 64-bit mode if not yet set before branching to C */
        mfmsr   %r20
        li      %r21,1
        insrdi  %r20,%r21,1,0   /* insert 1 bit at MSR bit 0: set MSR[SF] */
        mtmsrd  %r20
        isync
        nop                     /* Make this block a multiple of 8 bytes */

        /*
         * Set up the TOC pointer.  The .llong after the bl holds
         * (TOC base + 0x8000) relative to the .llong itself; mflr
         * recovers the .llong's address so the sum is position-independent.
         */
        b       0f
        .align 3
0:      nop
        bl      1f
        .llong  __tocbase + 0x8000 - .
1:      mflr    %r2
        ld      %r1,0(%r2)
        add     %r2,%r1,%r2

        /* Get load offset */
        ld      %r31,-0x8000(%r2)       /* First TOC entry is TOC base */
        subf    %r31,%r31,%r2           /* Subtract from real TOC base to get base */

        /* Set up the stack pointer (same PC-relative .llong trick; 96 bytes reserved) */
        bl      1f
        .llong  tmpstk + TMPSTKSZ - 96 - .
1:      mflr    %r30
        ld      %r1,0(%r30)
        add     %r1,%r1,%r30
        nop

        /* Relocate kernel: preserve boot arguments r3-r7 across elf_reloc_self */
        std     %r3,48(%r1)
        std     %r4,56(%r1)
        std     %r5,64(%r1)
        std     %r6,72(%r1)
        std     %r7,80(%r1)

        bl      1f
        .llong  _DYNAMIC-.
1:      mflr    %r3
        ld      %r4,0(%r3)
        add     %r3,%r4,%r3     /* r3 = runtime address of _DYNAMIC */
        mr      %r4,%r31        /* r4 = load offset computed above */
        bl      elf_reloc_self
        nop
        ld      %r3,48(%r1)
        ld      %r4,56(%r1)
        ld      %r5,64(%r1)
        ld      %r6,72(%r1)
        ld      %r7,80(%r1)

        /* Begin CPU init */
        mr      %r4,%r2         /* Replace ignored r4 with tocbase for trap handlers */
        bl      powerpc_init
        nop

        /* Set stack pointer to new value and branch to mi_startup */
        mr      %r1, %r3        /* powerpc_init returned the new stack in r3 */
        li      %r3, 0
        std     %r3, 0(%r1)     /* null-terminate the stack back-chain */
        bl      mi_startup
        nop

        /* Unreachable */
        b       .
_END(__start)

ASENTRY_NOPROF(__restartkernel_virtual)
        /*
         * When coming in via this entry point, we need to alter the SLB to
         * shadow the segment register emulation entries in DMAP space.
         * We need to do this dance because we are running with virtual-mode
         * OpenFirmware and have not yet taken over the MMU.
         *
         * Assumptions:
         * 1) The kernel is currently identity-mapped.
         * 2) We are currently executing at an address compatible with
         *    real mode.
         * 3) The first 16 SLB entries are emulating SRs.
         * 4) The rest of the SLB is not in use.
         * 5) OpenFirmware is not manipulating the SLB at runtime.
         * 6) We are running on 64-bit AIM.
         *
         * Tested on a G5.
         */
        mfmsr   %r14
        /*
         * Switch to real mode because we are about to mess with the SLB.
         * NOTE(review): andi. zero-extends its immediate, so this also
         * clears the upper MSR bits — appears intentional here; confirm.
         */
        andi.   %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
        mtmsr   %r14
        isync
        /* Prepare variables for later use. */
        li      %r14, 0                 /* r14: SLB index, 0..15 */
        li      %r18, 0
        oris    %r18, %r18, 0xc000
        sldi    %r18, %r18, 32          /* r18: 0xc000000000000000 */
1:
        /*
         * Loop over the first 16 SLB entries.
         * Offset the SLBE into the DMAP, add 16 to the index, and write
         * it back to the SLB.
         */
        /* XXX add more safety checks */
        slbmfev %r15, %r14              /* read SLB entry r14: VSID half */
        slbmfee %r16, %r14              /* ... and ESID half */
        or      %r16, %r16, %r14        /* index is 0-15 */
        ori     %r16, %r16, 0x10        /* add 16 to index. */
        or      %r16, %r16, %r18        /* SLBE DMAP offset */
        rldicr  %r17, %r16, 0, 37       /* Invalidation SLBE */

        isync
        slbie   %r17
        /* isync */
        slbmte  %r15, %r16
        isync
        addi    %r14, %r14, 1
        cmpdi   %r14, 16
        blt     1b

        /*
         * Now that we are set up with a temporary direct map, we can
         * continue with __restartkernel. Translation will be switched
         * back on at the rfid, at which point we will be executing from
         * the temporary direct map we just installed, until the kernel
         * takes over responsibility for the MMU.
         */
        bl      __restartkernel
        nop
ASEND(__restartkernel_virtual)

ASENTRY_NOPROF(__restartkernel)
        /*
         * r3-r7: arguments to go to __start
         * r8: offset from current kernel address to apply
         * r9: MSR to set when (atomically) jumping to __start + r8
         */
        mtsrr1  %r9
        bl      1f
1:      mflr    %r25
        add     %r25,%r8,%r25           /* relocate our own address by r8 */
        addi    %r25,%r25,2f-1b         /* ... and point it at label 2 below */
        mtsrr0  %r25
        rfid                            /* atomically set MSR=r9, jump to 2f+r8 */
2:      bl      __start
        nop
ASEND(__restartkernel)

#include <powerpc/aim/trap_subr64.S>