/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <[email protected]>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
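
/*
 * For reference: the sequence above stores 13 eight-byte values at offsets
 * #0..#96, so the buffer at x0 must match struct cpu_suspend_ctx in
 * <asm/suspend.h>. A rough sketch of that layout (illustrative only, the
 * header is authoritative):
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[13];	// the values saved above
 *		u64 sp;			// stack pointer, saved elsewhere
 *	} __aligned(16);
 */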

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type	// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm
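
	/*
	 * Note that kpti_map_pgtbl follows a break-before-make sequence for
	 * the live fixmap slot: the old entry is zapped (str xzr + DSB), the
	 * stale translation is removed from the TLB (TLBI VAAE1 operates on
	 * the VA shifted right by 12 bits), and only then is the new entry
	 * written and made visible with a final DSB.
	 */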

/*
 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection	".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b
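
	/*
	 * The flag word starts out as 1 and each secondary increments it
	 * atomically in __idmap_kpti_secondary below, so the EOR/CBNZ loop
	 * above releases the boot CPU only once the count reaches num_cpus,
	 * i.e. once every other CPU has installed the reserved TTBR1 and is
	 * executing purely from the idmap.
	 */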

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir	// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp	// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd		// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd
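
	/*
	 * Control flow of the .Lderef_* blocks above: kpti_mk_tbl_ng branches
	 * to .Lderef_<level> whenever it loads a table entry, the next level
	 * is walked with its own cursor registers, and the closing b .Lnext_*
	 * returns into the parent level's loop. When a level is folded away
	 * at build time, its block emits no code, so execution falls through
	 * to the next level, and the .req/.set aliases redirect its registers
	 * and labels to the parent's.
	 */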

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, MDSCR_EL1_TDCC		// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	tcr2	.req	x15
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
		     TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
	mov	tcr2, xzr

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	mov		x9, #64 - VA_BITS
alternative_if ARM64_HAS_VA52
	tcr_set_t1sz	tcr, x9
#ifdef CONFIG_ARM64_LPA2
	orr		tcr, tcr, #TCR_DS
#endif
alternative_else_nop_endif
#endif

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flag bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	ubfx	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
#ifdef CONFIG_ARM64_HAFT
	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
	b.lt	1f
	orr	tcr2, tcr2, TCR2_EL1_HAFT
#endif /* CONFIG_ARM64_HAFT */
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection

	mov_q	x0, PIE_E0_ASM
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1_ASM
	msr	REG_PIR_EL1, x0

	orr	tcr2, tcr2, TCR2_EL1_PIE

.Lskip_indirection:

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, 1f
	msr	REG_TCR2_EL1, tcr2
1:

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
	.unreq	tcr2
SYM_FUNC_END(__cpu_setup)
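
/*
 * Note: __cpu_setup runs from the idmap with the MMU still off and leaves
 * it off. It only programs MAIR_EL1/TCR_EL1 (plus TCR2_EL1 and the PIR
 * registers where the ID_AA64MMFR3_EL1 TCRX/S1PIE fields advertise support)
 * and returns the INIT_SCTLR_EL1_MMU_ON value in x0; the caller in head.S
 * is expected to write that value to SCTLR_EL1 to actually enable the MMU.
 */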