/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ENTRY_ORDER	3	/* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ENTRY_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

/*
 * This needs to be assigned at runtime when the linker symbols are
 * resolved.
 * These are unsigned 64bit really, but in this assembly code
 * We store them as 32bit.
 */
	.pushsection .data
	.align	2
	.globl	kernel_sec_start
	.globl	kernel_sec_end
kernel_sec_start:
	.long	0
	.long	0
kernel_sec_end:
	.long	0
	.long	0
	.popsection

	/*
	 * Derive the physical page-table base from a physical RAM base:
	 * the tables sit PG_DIR_SIZE bytes below \phys + TEXT_OFFSET.
	 */
	.macro	pgtbl, rd, phys
	add	\rd, \phys, #TEXT_OFFSET
	sub	\rd, \rd, #PG_DIR_SIZE
	.endm

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0	@ get processor id
	bl	__lookup_processor_type	@ r5=procinfo r9=cpuid
	movs	r10, r5			@ invalid processor (r5=0)?
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p		@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4	@ read ID_MMFR0
	and	r3, r3, #0xf		@ extract VMSA support
	cmp	r3, #5			@ long-descriptor translation table format?
 THUMB(	it	lo )			@ force fixup-able long branch encoding
	blo	__error_lpae		@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text		@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET	@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET	@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched	@ address to jump to after
					@ mmu has been enabled
	badr	lr, 1f			@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0			@ high TTBR0
	mov	r8, r4, lsr #12		@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4			@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10		@ procinfo init funcs are offsets from r10
	ret	r12			@ proc init returns to 1: below via lr
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4			@ zero 16 bytes per iteration
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ENTRY_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * The main matter: map in the kernel using section mappings, and
	 * set two variables to indicate the physical start and end of the
	 * kernel.
	 */
	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	ldr	r6, =(_end - 1)

	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
#ifndef CONFIG_XIP_KERNEL
	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
#else
	str	r8, [r5]			@ Save physical start of kernel (LE)
#endif
#endif
	orr	r3, r8, r7			@ Add the MMU flags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
#ifndef CONFIG_XIP_KERNEL
	eor	r3, r3, r7			@ Remove the MMU flags
	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
#else
	str	r3, [r5]			@ Save physical end of kernel (LE)
#endif
#else
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ENTRY_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ENTRY_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ENTRY_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ENTRY_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg

#if defined(CONFIG_SMP)
	.text
	.arm
ENTRY(secondary_startup_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)			@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0	@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5			@ invalid processor?
	moveq	r0, #'p'		@ yes, error 'p'
 THUMB(	it	eq )			@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 */
#ifdef CONFIG_XIP_KERNEL
	ldr	r3, =(secondary_data + PLAT_PHYS_OFFSET - PAGE_OFFSET)
#else
	adr_l	r3, secondary_data
#endif
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]	@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)		@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)		@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)		@ without using a temp reg.
	ldr	r8, [r3, #8]		@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu	@ return address
	mov	r13, r12		@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10		@ initialise processor
					@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
	@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
	@ as the ID map does not cover the vmalloc region.
	mrc	p15, 0, ip, c2, c0, 1	@ read TTBR1
	mcr	p15, 0, ip, c2, c0, 0	@ set TTBR0
	instr_sync
#endif
	adr_l	r7, secondary_data + 12	@ get secondary_data.stack
	ldr	sp, [r7]
	ldr	r0, [r7, #4]		@ get secondary_data.task
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

#endif /* defined(CONFIG_SMP) */

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2	@ load TTBR0
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0	@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0	@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.
 * You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0			@ self-move: architectural no-op
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0	@ write control reg
	mrc	p15, 0, r3, c0, c0, 0	@ read id reg
	instr_sync
	mov	r3, r3			@ self-move: architectural no-op
	mov	r3, r13			@ r13 carries the *virtual* target
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection

#ifdef CONFIG_SMP_ON_UP
	__HEAD
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

__fixup_smp_on_up:
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif

	.text
	/*
	 * Walk the fixup records in [r4, r5).  Each 8-byte record holds
	 * {target offset relative to the record, replacement insn word}.
	 */
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr			@ done when r4 >= r5
	ldmia	r4, {r0, r6}		@ r0 = offset, r6 = insn
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

	/* C-callable: r0 = start of fixup region, r1 = size in bytes */
ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"