arch/powerpc/mm/book3s64/hash_pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>

#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
#include <asm/firmware.h>

#include <mm/mmu_decl.h>

#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid. This achieves a constant
 * relationship between the address of a struct page and its PFN.
 *
 * During boot or memory hotplug, when a new memory section is added,
 * physical memory allocation (including hash table bolting) will be
 * performed for the set of struct pages which are part of the memory
 * section. This saves memory by not allocating struct pages for PFNs
 * which are not valid.
 *
 *              ----------------------------------------------
 *              | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
 *              ----------------------------------------------
 *
 *         f000000000000000                  c000000000000000
 * vmemmap +--------------+                  +--------------+
 *  +      |  page struct | +--------------> |  page struct |
 *  |      +--------------+                  +--------------+
 *  |      |  page struct | +--------------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | +       +------> |  page struct |
 *  |      +--------------+ |                +--------------+
 *  |      |  page struct | |       +-->     |  page struct |
 *  |      +--------------+ |       |        +--------------+
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | |       |
 *  |      +--------------+ |       |
 *  |      |  page struct | +-------+ |
 *  |      +--------------+           |
 *  |      |  page struct | +-----------+
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  |      +--------------+
 *  |      |  page struct | No mapping
 *  v      +--------------+
 *
 *              -----------------------------------------
 *              | RELATION BETWEEN STRUCT PAGES AND PFNS|
 *              -----------------------------------------
 *
 * vmemmap +--------------+                 +---------------+
 *  +      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |              |
 *  |      +--------------+
 *  |      |              |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  |      +--------------+                 +---------------+
 *  |      |  page struct | +-------------> |      PFN      |
 *  v      +--------------+                 +---------------+
 */
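/*
 * For illustration only (not code used by this file): because struct
 * pages are laid out linearly from the vmemmap base, translating
 * between a struct page and its PFN is plain pointer arithmetic,
 * which is what the generic pfn_to_page()/page_to_pfn() helpers
 * reduce to under CONFIG_SPARSEMEM_VMEMMAP:
 *
 *      struct page *page = vmemmap + pfn;
 *      unsigned long pfn = page - vmemmap;
 */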
/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                           unsigned long page_size,
                                           unsigned long phys)
{
        int rc;

        if ((start + page_size) >= H_VMEMMAP_END) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        rc = htab_bolt_mapping(start, start + page_size, phys,
                               pgprot_val(PAGE_KERNEL),
                               mmu_vmemmap_psize, mmu_kernel_ssize);
        if (rc < 0) {
                int rc2 = htab_remove_mapping(start, start + page_size,
                                              mmu_vmemmap_psize,
                                              mmu_kernel_ssize);
                BUG_ON(rc2 && (rc2 != -ENOENT));
        }
        return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
                                  unsigned long page_size)
{
        int rc = htab_remove_mapping(start, start + page_size,
                                     mmu_vmemmap_psize,
                                     mmu_kernel_ssize);
        BUG_ON((rc < 0) && (rc != -ENOENT));
        WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and adds an entry
 * to the HPT, possibly bolting it.
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                p4dp = p4d_offset(pgdp, ea);
                pudp = pud_alloc(&init_mm, p4dp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
        } else {
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping. Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to do bolted mapping of IO memory at %016lx!\n",
                               pa);
                        return -ENOMEM;
                }
        }

        smp_wmb();
        return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                        pmd_t *pmdp, unsigned long clr,
                                        unsigned long set)
{
        __be64 old_be, tmp;
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!hash__pmd_trans_huge(*pmdp));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                and.    %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
          "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );

        old = be64_to_cpu(old_be);

        trace_hugepage_update_pmd(addr, old, clr, set);
        if (old & H_PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
        return old;
}
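/*
 * For illustration only, a rough C sketch of the update loop above.
 * All values are big-endian PTE images; the real code must use
 * ldarx/stdcx. so that the busy check, the read-modify-write and the
 * store happen atomically:
 *
 *      do {
 *              old = *pmdp;                    // ldarx
 *      } while (old & H_PAGE_BUSY);            // and. / bne- 1b (retry while busy)
 *      new = (old & ~clr) | set;               // andc / or
 *      *pmdp = new;                            // stdcx. (bne- 1b restarts the
 *                                              //  loop if *pmdp changed)
 */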
pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));

        pmd = *pmdp;
        pmd_clear(pmdp);
        /*
         * Wait for all pending hash_page to finish. This is needed
         * in case of subpage collapse. When we collapse normal pages
         * to a hugepage, we first clear the pmd, then invalidate all
         * the PTE entries. The assumption here is that any low level
         * page fault will see a none pmd and take the slow path that
         * will wait on mmap_lock. But we could very well be in a
         * hash_page with a local ptep pointer value. Such a hash_page
         * can result in adding new HPTE entries for normal subpages.
         * That means we could be modifying the page content as we
         * copy them to a huge page. So wait for parallel hash_page
         * to finish before invalidating the HPTE entries. We can do
         * this by sending an IPI to all the cpus and executing a
         * dummy function there.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        /*
         * Now invalidate the hpte entries in the range
         * covered by pmd. This makes sure we take a
         * fault and will find the pmd as none, which will
         * result in a major fault which takes mmap_lock and
         * hence waits for the collapse to complete. Without
         * this, __collapse_huge_page_copy can result in
         * copying the old content.
         */
        flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
        return pmd;
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                      pgtable_t pgtable)
{
        pgtable_t *pgtable_slot;

        assert_spin_locked(pmd_lockptr(mm, pmdp));
        /*
         * We store the pgtable in the second half of the PMD page.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        *pgtable_slot = pgtable;
        /*
         * Expose the deposited pgtable to other cpus before we set
         * the hugepage PTE at the pmd level: the hash fault code
         * looks at the deposited pgtable to store hash index values.
         */
        smp_wmb();
}

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Once we withdraw, mark the entry NULL.
         */
        *pgtable_slot = NULL;
        /*
         * We store HPTE information in the deposited PTE fragment.
         * Zero out the contents on withdraw.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return pgtable;
}
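/*
 * For illustration only: the "second half of the PMD page" slot used
 * by the deposit/withdraw helpers above is found with plain pointer
 * arithmetic. The lower half of the PMD page holds the PTRS_PER_PMD
 * entries, and the upper half holds a parallel array of deposited
 * pgtable pointers, so the slot for a given pmdp is simply:
 *
 *      pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
 */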
/*
 * A linux hugepage PMD was changed and the corresponding hash table
 * entries need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                            pmd_t *pmdp, unsigned long old_pmd)
{
        int ssize;
        unsigned int psize;
        unsigned long vsid;
        unsigned long flags = 0;

        /* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
        psize = get_slice_psize(mm, addr);
        BUG_ON(psize == MMU_PAGE_16M);
#endif
        if (old_pmd & H_PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_user_vsid(&mm->context, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }

        if (mm_is_thread_local(mm))
                flags |= HPTE_LOCAL_UPDATE;

        return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        pgtable_t pgtable;
        unsigned long old;
        pgtable_t *pgtable_slot;

        old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * We have pmd == none and we are holding page_table_lock.
         * So we can safely go and clear the pgtable hash
         * index info.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Zero out the old valid bit and hash index details that the
         * hash fault code looks at.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return old_pmd;
}

int hash__has_transparent_hugepage(void)
{
        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
         * We support THP only if PMD_SIZE is 16MB.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
         * We need to make sure that we support 16MB hugepages in a segment
         * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
         * of 64K.
         */
        /*
         * If we have 64K HPTEs, we will be using those by default.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
                return 0;
        /*
         * OK, we only have 4K HPTEs.
         */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;

        return 1;
}
EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX

struct change_memory_parms {
        unsigned long start, end, newpp;
        unsigned int step, nr_cpus;
        atomic_t master_cpu;
        atomic_t cpu_counter;
};

// We'd rather this was on the stack but it has to be in the RMO
static struct change_memory_parms chmem_parms;

// And therefore we need a lock to protect it from concurrent use
static DEFINE_MUTEX(chmem_lock);

static void change_memory_range(unsigned long start, unsigned long end,
                                unsigned int step, unsigned long newpp)
{
        unsigned long idx;

        pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
                 start, end, newpp, step);

        for (idx = start; idx < end; idx += step)
                /* Not sure if we can do much with the return value */
                mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
                                                 mmu_kernel_ssize);
}
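/*
 * A sketch of the rendezvous implemented by change_memory_range_fn()
 * and chmem_secondary_loop() below (cpu_counter starts at the number
 * of online CPUs):
 *
 *      secondaries:                        master:
 *        switch off MSR_IR|MSR_DR            spin until cpu_counter == 1
 *        atomically cpu_counter--            update the bolted HPTEs
 *        spin until cpu_counter == 0         cpu_counter-- (now 0)
 *        switch MSR_IR|MSR_DR back on
 *
 * Parking the secondaries in real mode means they are not using the
 * hash table translations while the bolted HPTEs are being replaced.
 */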
static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
{
        unsigned long msr, tmp, flags;
        int *p;

        p = &parms->cpu_counter.counter;

        local_irq_save(flags);
        hard_irq_disable();

        asm volatile (
        // Switch to real mode and leave interrupts off
        "mfmsr  %[msr]                  ;"
        "li     %[tmp], %[MSR_IR_DR]    ;"
        "andc   %[tmp], %[msr], %[tmp]  ;"
        "mtmsrd %[tmp]                  ;"

        // Tell the master we are in real mode
        "1:                             "
        "lwarx  %[tmp], 0, %[p]         ;"
        "addic  %[tmp], %[tmp], -1      ;"
        "stwcx. %[tmp], 0, %[p]         ;"
        "bne-   1b                      ;"

        // Spin until the counter goes to zero
        "2:                             ;"
        "lwz    %[tmp], 0(%[p])         ;"
        "cmpwi  %[tmp], 0               ;"
        "bne-   2b                      ;"

        // Switch back to virtual mode
        "mtmsrd %[msr]                  ;"

        : // outputs
          [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
        : // inputs
          [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
        : // clobbers
          "cc", "xer"
        );

        local_irq_restore(flags);

        return 0;
}

static int change_memory_range_fn(void *data)
{
        struct change_memory_parms *parms = data;

        // First CPU goes through, all others wait.
        if (atomic_xchg(&parms->master_cpu, 1) == 1)
                return chmem_secondary_loop(parms);

        // Wait for all but one CPU (this one) to call in
        while (atomic_read(&parms->cpu_counter) > 1)
                barrier();

        change_memory_range(parms->start, parms->end, parms->step, parms->newpp);

        mb();

        // Signal the other CPUs that we're done
        atomic_dec(&parms->cpu_counter);

        return 0;
}

static bool hash__change_memory_range(unsigned long start, unsigned long end,
                                      unsigned long newpp)
{
        unsigned int step, shift;

        shift = mmu_psize_defs[mmu_linear_psize].shift;
        step = 1 << shift;

        start = ALIGN_DOWN(start, step);
        end = ALIGN(end, step); // aligns up

        if (start >= end)
                return false;

        if (firmware_has_feature(FW_FEATURE_LPAR)) {
                mutex_lock(&chmem_lock);

                chmem_parms.start = start;
                chmem_parms.end = end;
                chmem_parms.step = step;
                chmem_parms.newpp = newpp;
                atomic_set(&chmem_parms.master_cpu, 0);

                cpus_read_lock();

                atomic_set(&chmem_parms.cpu_counter, num_online_cpus());

                // Ensure state is consistent before we call the other CPUs
                mb();

                stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
                                        cpu_online_mask);

                cpus_read_unlock();
                mutex_unlock(&chmem_lock);
        } else
                change_memory_range(start, end, step, newpp);

        return true;
}

void hash__mark_rodata_ro(void)
{
        unsigned long start, end, pp;

        start = (unsigned long)_stext;
        end = (unsigned long)__end_rodata;

        pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);

        WARN_ON(!hash__change_memory_range(start, end, pp));
}

void hash__mark_initmem_nx(void)
{
        unsigned long start, end, pp;

        start = (unsigned long)__init_begin;
        end = (unsigned long)__init_end;

        pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);

        WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif /* CONFIG_STRICT_KERNEL_RWX */