/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <[email protected]>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	((TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT) | \
			 (TCR_EL1_TG1_64K << TCR_EL1_TG1_SHIFT))
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	((TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT) | \
			 (TCR_EL1_TG1_16K << TCR_EL1_TG1_SHIFT))
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	((TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT) | \
			 (TCR_EL1_TG1_4K << TCR_EL1_TG1_SHIFT))
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_EL1_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_EL1_TBI1 | TCR_EL1_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_EL1_TCMA1 | TCR_EL1_TBI1 | TCR_EL1_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_EL1_TBI1 | TCR_EL1_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

#define TCR_IRGN_WBWA	((TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT) | \
			 (TCR_EL1_IRGN1_WBWA << TCR_EL1_IRGN1_SHIFT))
#define TCR_ORGN_WBWA	((TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT) | \
			 (TCR_EL1_ORGN1_WBWA << TCR_EL1_ORGN1_SHIFT))
#define TCR_SHARED	((TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT) | \
			 (TCR_EL1_SH1_INNER << TCR_EL1_SH1_SHIFT))

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
alternative_if ARM64_HAS_TCR2
	mrs	x2, REG_TCR2_EL1
	str	x2, [x0, #104]
alternative_else_nop_endif
	ret
SYM_FUNC_END(cpu_do_suspend)
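
/*
 * For reference, the layout of the context buffer written above (one
 * 64-bit slot per value; struct cpu_suspend_ctx in <asm/suspend.h> is
 * the authoritative definition and must stay in sync with this):
 *
 *	#0  tpidr_el0		#8   tpidrro_el0
 *	#16 contextidr_el1	#24  osdlr_el1
 *	#32 cpacr_el1		#40  tcr_el1
 *	#48 vbar_el1		#56  mdscr_el1
 *	#64 oslsr_el1		#72  sctlr_el1
 *	#80 per-CPU offset	#88  sp_el0
 *	#96 x18			#104 tcr2_el1 (ARM64_HAS_TCR2 only)
 */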

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_EL1_T0SZ_SHIFT, TCR_EL1_T0SZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
alternative_if ARM64_HAS_TCR2
	ldr	x2, [x0, #104]
	msr	REG_TCR2_EL1, x2
alternative_else_nop_endif

	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm
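
/*
 * Note on the sequence above: reserved_pg_dir maps nothing, so once it
 * is installed no TTBR1 translation can be live, and the local
 * "tlbi vmalle1; dsb nsh" is sufficient to discard any stale walks on
 * this CPU before the caller installs a replacement table.
 */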

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type	// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, idmap_kpti_bbml2_flag

	cbnz	cpu, __idmap_kpti_secondary

#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
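
	/*
	 * The walk below is depth-first: each kpti_mk_tbl_ng loop marks
	 * every valid entry at its level with PTE_NG, and table entries
	 * branch out to the matching .Lderef_* sequence, which uses
	 * kpti_map_pgtbl to map the next-level table through the
	 * temporary fixmap slot for that level before descending.
	 */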

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir	// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp	// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd		// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17
	b	secondary_cpu_wait

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif
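
/*
 * Both idmap_kpti_install_ng_mappings above and
 * wait_linear_map_split_to_ptes below rendezvous on
 * idmap_kpti_bbml2_flag: every waiting CPU parks its TTBR1 on
 * reserved_pg_dir and atomically increments the flag, the boot CPU
 * spins until the expected count is reached, performs the page-table
 * surgery, and finally stores zero to release the waiters.
 */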

	.pushsection ".idmap.text", "a"
SYM_TYPED_FUNC_START(wait_linear_map_split_to_ptes)
	/* Must use the same registers as idmap_kpti_install_ng_mappings */
	swapper_ttb	.req	x3
	flag_ptr	.req	x4

	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, idmap_kpti_bbml2_flag
	__idmap_cpu_set_reserved_ttbr1 x16, x17

secondary_cpu_wait:
	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(wait_linear_map_split_to_ptes)
	.popsection

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, MDSCR_EL1_TDCC		// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	tcr2	.req	x15
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_EL1_AS | \
		     TCR_EL1_TBI0 | TCR_EL1_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
	mov	tcr2, xzr

	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	mov	x9, #64 - VA_BITS
alternative_if ARM64_HAS_VA52
	tcr_set_t1sz	tcr, x9
#ifdef CONFIG_ARM64_LPA2
	orr	tcr, tcr, #TCR_EL1_DS
#endif
alternative_else_nop_endif
#endif

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_EL1_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	ubfx	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, #4
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_EL1_HA		// hardware Access flag update
#ifdef CONFIG_ARM64_HAFT
	cmp	x9, ID_AA64MMFR1_EL1_HAFDBS_HAFT
	b.lt	1f
	orr	tcr2, tcr2, TCR2_EL1_HAFT
#endif /* CONFIG_ARM64_HAFT */
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection

	mov_q	x0, PIE_E0_ASM
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1_ASM
	msr	REG_PIR_EL1, x0

	orr	tcr2, tcr2, TCR2_EL1_PIE

.Lskip_indirection:

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_TCRX_SHIFT, #4
	cbz	x1, 1f
	msr	REG_TCR2_EL1, tcr2
1:

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
	.unreq	tcr2
SYM_FUNC_END(__cpu_setup)
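
/*
 * A minimal sketch of the caller's side, for orientation only (the
 * real sequence lives in head.S and does considerably more around
 * these steps):
 *
 *	bl	__cpu_setup		// x0 = INIT_SCTLR_EL1_MMU_ON
 *	...				// program TTBR0_EL1/TTBR1_EL1
 *	msr	sctlr_el1, x0		// turn the MMU on
 *	isb
 */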