/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <[email protected]>
 *		Will Deacon <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

	__EFI_PE_HEADER
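	/*
	 * For reference, the fields emitted above make up the 64-byte Image
	 * header documented in Documentation/arch/arm64/booting.rst:
	 *
	 *   u32 code0;                  // Executable code
	 *   u32 code1;                  // Executable code
	 *   u64 text_offset;            // Image load offset, little endian
	 *   u64 image_size;             // Effective Image size, little endian
	 *   u64 flags;                  // kernel flags, little endian
	 *   u64 res2 = 0;               // reserved
	 *   u64 res3 = 0;               // reserved
	 *   u64 res4 = 0;               // reserved
	 *   u32 magic = 0x644d5241;     // Magic number, little endian, "ARM\x64"
	 *   u32 res5;                   // reserved (used for PE COFF offset)
	 */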
	.section ".idmap.text","a"

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                    Purpose
	 *  x19        primary_entry() .. start_kernel()        whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()    CPU boot mode
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, __pi_init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, __pi_init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it with
	 * the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)

	/*
	 * Preserve the arguments passed by the bootloader in x0 .. x3
	 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)
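	/*
	 * Note that the boot protocol (Documentation/arch/arm64/booting.rst)
	 * requires x1, x2 and x3 to be zero at kernel entry; the values
	 * recorded in boot_args above are checked once the kernel is up, and
	 * setup_arch() warns if a bootloader violated this requirement.
	 */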
/*
 * Initialize CPU registers with task-specific and cpu-specific context.
 *
 * Create a final frame record at task_pt_regs(current)->stackframe, so
 * that the unwinder can identify the final frame record of any task by
 * its location in the task stack. We reserve the entire pt_regs space
 * for consistency with user tasks and kthreads.
 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	mov	\tmp1, #FRAME_META_TYPE_FINAL
	str	\tmp1, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)
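/*
 * As a worked example of the kimage_voffset arithmetic in __primary_switched
 * (the addresses here are illustrative only): with the image loaded at
 * physical address 0x40210000 and _text linked at 0xffff800080210000,
 *
 *   kimage_voffset = 0xffff800080210000 - 0x40210000 = 0xffff800080000000
 *
 * and a kernel image virtual address then translates to a physical one as
 * pa = va - kimage_voffset.
 */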
/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:

	init_el2_hcr	HCR_HOST_NVHE_FLAGS
	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)

/*
 * This provides a "holding pen" where platforms hold all secondary
 * cores until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
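/*
 * The pen above is expected to be released by the spin-table boot method
 * (arch/arm64/kernel/smp_spin_table.c), which stores the target CPU's MPIDR
 * hwid into secondary_holding_pen_release, cleans that line to the PoC and
 * issues a SEV so that the WFE in the pen wakes up and re-reads the value.
 */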
/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, __pi_init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)
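/*
 * A note on __primary_switch: TTBR1 initially points at reserved_pg_dir, an
 * empty page directory, so only the TTBR0 ID mapping is live when the MMU
 * comes on. __pi_early_map_kernel() is then expected to build the initial
 * kernel mappings (relocating the image if needed) before __primary_switched
 * is entered through its virtual address in x8.
 */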