// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <[email protected]>
 *   Signed-off-by: Moritz Lipp <[email protected]>
 *   Signed-off-by: Daniel Gruss <[email protected]>
 *   Signed-off-by: Michael Schwarz <[email protected]>
 *
 * Major changes to the original code by: Dave Hansen <[email protected]>
 * Mostly rewritten by Thomas Gleixner <[email protected]> and
 * Andy Lutomirski <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/bugs.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}
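/*
 * With the pr_fmt prefix above, these two helpers produce dmesg lines
 * such as (illustrative):
 *
 *	Kernel/User page tables isolation: disabled on XEN PV.
 *	Kernel/User page tables isolation: force enabled on command line.
 */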
/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (pti_mode == PTI_AUTO &&
	    !cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);

	if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
		pr_debug("PTI enabled, disabling INVLPGB\n");
		setup_clear_cpu_cap(X86_FEATURE_INVLPGB);
	}
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);
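/*
 * Example boot-time usage (illustrative): "pti=on" forces isolation even
 * on CPUs without X86_BUG_CPU_MELTDOWN, "pti=off" disables it, "pti=auto"
 * leaves the decision to the checks above, and "nopti" is an alias for
 * "pti=off".
 */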
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd));

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d));
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}
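/*
 * Note: the walk helpers above and the PTE walker below each descend one
 * level at a time (pgd -> p4d -> pud -> pmd -> pte), allocating a zeroed
 * page for every missing level and linking it in with _KERNPG_TABLE, so
 * a successful walk leaves a complete chain of user page-table pages.
 */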
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* Large PMD mapping found */
	if (pmd_leaf(*pmd)) {
		/* Clear the PMD if we hit a large mapping from the first round */
		if (late_text) {
			set_pmd(pmd, __pmd(0));
		} else {
			WARN_ON_ONCE(1);
			return NULL;
		}
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};
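/*
 * PTI_CLONE_PMD copies whole PMD entries, so the kernel and user tables
 * share the last-level page tables underneath (2MB of address space per
 * entry on x86-64); PTI_CLONE_PTE copies individual 4k PTEs.
 * PTI_LEVEL_KERNEL_IMAGE above selects between them for the kernel image.
 */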
static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level, bool late_text)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();
		}
	}
}
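/*
 * Typical call (illustrative), matching pti_clone_entry_text() below:
 *
 *	pti_clone_pgtable((unsigned long)__entry_text_start,
 *			  (unsigned long)__entry_text_end,
 *			  PTI_LEVEL_KERNEL_IMAGE, false);
 */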
#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register.  It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va, false);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(bool late)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_LEVEL_KERNEL_IMAGE, late);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
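/*
 * Note on ordering: pti_init() below first clears _PAGE_GLOBAL from the
 * whole kernel image with pti_set_kernel_image_nonglobal(), and the clone
 * functions above then set it back only on mappings that are shared with
 * the user page tables.
 */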
static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();

	/* Replace some of the global bits just for shared entry text: */
	/*
	 * This is very early in boot. Device and late initcalls can do
	 * modprobe before free_initmem() and mark_readonly(). This
	 * pti_clone_entry_text() allows those user-mode-helpers to function,
	 * but notably the text is still RW.
	 */
	pti_clone_entry_text(false);
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * This is after free_initmem() (all initcalls are done) and we've done
	 * mark_readonly(). Text is now NX which might've split some PMDs
	 * relative to the early clone.
	 */
	pti_clone_entry_text(true);
	pti_clone_kernel_text();

	debug_checkwx_user();
}
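/*
 * Note: pti_finalize() is expected to run once, late in kernel_init()
 * (see init/main.c), right after free_initmem() and mark_readonly();
 * only then are the final RO/NX protections in place for the re-clone
 * above to pick up.
 */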