// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. With the best configuration, this implementation
 * provides on average 30,000 possible virtual addresses for each memory
 * region. An additional low memory page is used to ensure each CPU can
 * start with a PGD aligned virtual address (for realmode).
 *
 * The order of each memory region is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of the
 * physical memory mapping is the available physical memory.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/prandom.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * The end address could depend on more configuration options to make the
 * highest amount of space for randomization available, but that's too hard
 * to keep straight and caused issues already.
 */
static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE;

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long *end;
	unsigned long size_tb;
} kaslr_regions[] = {
	{
		.base = &page_offset_base,
		.end = &direct_map_physmem_end,
	},
	{
		.base = &vmalloc_base,
	},
	{
		.base = &vmemmap_base,
	},
};

/*
 * The end of the physical address space that can be mapped directly by the
 * kernel. This starts out at ((1 << MAX_PHYSMEM_BITS) - 1), but KASLR may
 * reduce that in order to increase the available entropy for mapping other
 * regions.
 */
unsigned long direct_map_physmem_end __ro_after_init;

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
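
/*
 * Illustrative sizing only (not used by the code below, and dependent on
 * the kernel configuration): in a 4-level configuration without
 * CONFIG_X86_5LEVEL, MAX_PHYSMEM_BITS is 46, so the physical mapping
 * region initially covers 1 << (46 - 40) = 64 TB and VMALLOC_SIZE_TB is
 * 32 TB. Assuming 4 KB pages and sizeof(struct page) == 64, the vmemmap
 * region then needs (64 TB / 4 KB) * 64 bytes = 1 TB.
 * kernel_randomize_memory() computes the real values at boot from the
 * configuration and the amount of installed memory.
 */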

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr_start, vaddr;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;
	unsigned long vmemmap_size;

	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
	vaddr = vaddr_start;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables. These checks are very
	 * limited.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	/* Preset the end of the possible address space for physical memory */
	direct_map_physmem_end = ((1ULL << MAX_PHYSMEM_BITS) - 1);
	if (!kaslr_memory_enabled())
		return;

	kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
	kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;

	/*
	 * Update the physical memory mapping size to the available memory
	 * and add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;

	/*
	 * Adapt the physical memory region size based on available memory,
	 * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
	 * device BAR space assuming the direct map space is large enough
	 * for creating a ZONE_DEVICE mapping in the direct map corresponding
	 * to the physical BAR address.
	 */
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
		kaslr_regions[0].size_tb = memory_tb;

	/*
	 * Calculate the vmemmap region size in TBs, aligned to a TB
	 * boundary.
	 */
	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
			sizeof(struct page);
	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/* Calculate the end of the region */
		vaddr += get_padding(&kaslr_regions[i]);
		/*
		 * KASLR trims the maximum possible size of the
		 * direct map. Update the direct_map_physmem_end boundary.
		 * No rounding required as the region starts
		 * PUD aligned and the size is in units of TB.
		 */
		if (kaslr_regions[i].end)
			*kaslr_regions[i].end = __pa_nodebug(vaddr - 1);

		/* Add a minimum padding based on randomization alignment. */
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}
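
/*
 * Illustrative sketch of the entropy distribution above (an example, not a
 * guarantee about any given boot): the slack left between vaddr_start and
 * vaddr_end after reserving all region sizes is spread across the regions.
 * Each iteration may move the next region's base up by at most
 * remain_entropy / (regions still to place) bytes, rounded down to a
 * PUD_SIZE (1 GB on x86_64) boundary by the "& PUD_MASK", so every region
 * base stays PUD aligned. Slack a region does not consume remains
 * available to the regions placed after it.
 */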

void __meminit init_trampoline_kaslr(void)
{
	pud_t *pud_page_tramp, *pud, *pud_tramp;
	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
	unsigned long paddr, vaddr;
	pgd_t *pgd;

	pud_page_tramp = alloc_low_page();

	/*
	 * There are two mappings for the low 1MB area, the direct mapping
	 * and the 1:1 mapping for the real mode trampoline:
	 *
	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
	 * 1:1 mapping:    virt_addr = phys_addr
	 */
	paddr = 0;
	vaddr = (unsigned long)__va(paddr);
	pgd = pgd_offset_k(vaddr);

	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	pud_tramp = pud_page_tramp + pud_index(paddr);
	*pud_tramp = *pud;

	if (pgtable_l5_enabled()) {
		p4d_page_tramp = alloc_low_page();

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);

		set_p4d(p4d_tramp,
			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

		trampoline_pgd_entry =
			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
	} else {
		trampoline_pgd_entry =
			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
	}
}
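
/*
 * Example of the aliasing established above (purely illustrative): for
 * paddr == 0 the direct map address is __va(0), and its PUD entry covers
 * the first 1 GB of the direct mapping, i.e. physical addresses 0..1 GB.
 * Copying that entry into the freshly allocated trampoline page table
 * gives the real mode trampoline a virt == phys view of that low memory
 * without handing it the rest of the kernel's randomized page tables.
 */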