Path: arch/powerpc/mm/book3s64/mmu_context.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "internal.h"

static DEFINE_IDA(mmu_context_ida);

static int alloc_context_id(int min_id, int max_id)
{
	return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
void __init hash__reserve_context_id(int id)
{
	int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

int hash__alloc_context_id(void)
{
	unsigned long max;

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		max = MAX_USER_CONTEXT;
	else
		max = MAX_USER_CONTEXT_65BIT_VA;

	return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
#endif

#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
	int i, id;

	/*
	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
	 * there wasn't one allocated previously (which happens in the exec
	 * case where ctx is newly allocated).
	 *
	 * We have to be a bit careful here. We must keep the existing ids in
	 * the array, so that we can test if they're non-zero to decide if we
	 * need to allocate a new one. However in case of error we must free the
	 * ids we've allocated but *not* any of the existing ones (or risk a
	 * UAF). That's why we decrement i at the start of the error handling
	 * loop, to skip the id that we just tested but couldn't reallocate.
	 */
	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
		if (i == 0 || ctx->extended_id[i]) {
			id = hash__alloc_context_id();
			if (id < 0)
				goto error;

			ctx->extended_id[i] = id;
		}
	}

	/* The caller expects us to return id */
	return ctx->id;

error:
	for (i--; i >= 0; i--) {
		if (ctx->extended_id[i])
			ida_free(&mmu_context_ida, ctx->extended_id[i]);
	}

	return id;
}

static int hash__init_new_context(struct mm_struct *mm)
{
	int index;

	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
					   GFP_KERNEL);
	if (!mm->context.hash_context)
		return -ENOMEM;

	/*
	 * The old code would re-promote on fork, we don't do that when using
	 * slices as it could cause problems promoting slices that have been
	 * forced down to 4K.
	 *
	 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
	 * explicitly against context.id == 0. This ensures that we properly
	 * initialize context slice details for newly allocated mm's (which will
	 * have id == 0) and don't alter context slice inherited via fork (which
	 * will have id != 0).
	 *
	 * We should not be calling init_new_context() on init_mm. Hence a
	 * check against 0 is OK.
	 */
	if (mm->context.id == 0) {
		memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
		slice_init_new_context_exec(mm);
	} else {
		/* This is fork. Copy hash_context details from current->mm */
		memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
		/* Inherit subpage prot details if we have one. */
		if (current->mm->context.hash_context->spt) {
			mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
								GFP_KERNEL);
			if (!mm->context.hash_context->spt) {
				kfree(mm->context.hash_context);
				return -ENOMEM;
			}
		}
#endif
	}

	index = realloc_context_ids(&mm->context);
	if (index < 0) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		kfree(mm->context.hash_context->spt);
#endif
		kfree(mm->context.hash_context);
		return index;
	}

	pkey_mm_init(mm);
	return index;
}

void hash__setup_new_exec(void)
{
	slice_setup_new_exec();

	slb_setup_new_exec();
}
#else
static inline int hash__init_new_context(struct mm_struct *mm)
{
	BUILD_BUG();
	return 0;
}
#endif

static int radix__init_new_context(struct mm_struct *mm)
{
	unsigned long rts_field;
	int index, max_id;

	max_id = (1 << mmu_pid_bits) - 1;
	index = alloc_context_id(mmu_base_pid, max_id);
	if (index < 0)
		return index;

	/*
	 * Set the process table entry.
	 */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * Order the above store with subsequent update of the PID
	 * register (at which point HW can start loading/caching
	 * the entry) and the corresponding load by the MMU from
	 * the L2 cache.
	 */
	asm volatile("ptesync;isync" : : : "memory");

#ifdef CONFIG_PPC_64S_HASH_MMU
	mm->context.hash_context = NULL;
#endif

	return index;
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

	mm->context.pte_frag = NULL;
	mm->context.pmd_frag = NULL;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}

void __destroy_context(int context_id)
{
	ida_free(&mmu_context_ida, context_id);
}
EXPORT_SYMBOL_GPL(__destroy_context);

static void destroy_contexts(mm_context_t *ctx)
{
	if (radix_enabled()) {
		ida_free(&mmu_context_ida, ctx->id);
	} else {
#ifdef CONFIG_PPC_64S_HASH_MMU
		int index, context_id;

		for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
			context_id = ctx->extended_id[index];
			if (context_id)
				ida_free(&mmu_context_ida, context_id);
		}
		kfree(ctx->hash_context);
#else
		BUILD_BUG(); // radix_enabled() should be constant true
#endif
	}
}

static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
	struct ptdesc *ptdesc;

	ptdesc = virt_to_ptdesc(pmd_frag);
	/* drop all the pending references */
	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
	/* We allow PMD_FRAG_NR fragments from a PMD page */
	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}
}

static void destroy_pagetable_cache(struct mm_struct *mm)
{
	void *frag;

	frag = mm->context.pte_frag;
	if (frag)
		pte_frag_destroy(frag);

	frag = mm->context.pmd_frag;
	if (frag)
		pmd_frag_destroy(frag);
	return;
}

void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
	/*
	 * For tasks which were successfully initialized we end up calling
	 * arch_exit_mmap() which clears the process table entry. And
	 * arch_exit_mmap() is called before the required fullmm TLB flush
	 * which does a RIC=2 flush. Hence for an initialized task, we do
	 * clear any cached process table entries.
	 *
	 * The condition below handles the error case during task init. We have
	 * set the process table entry early and if we fail a task
	 * initialization, we need to ensure the process table entry is zeroed.
	 * We need not worry about process table entry caches because the task
	 * never ran with the PID value.
	 */
	if (radix_enabled())
		process_tb[mm->context.id].prtb0 = 0;
	else
		subpage_prot_free(mm);
	destroy_contexts(&mm->context);
	mm->context.id = MMU_NO_CONTEXT;
}

void arch_exit_mmap(struct mm_struct *mm)
{
	destroy_pagetable_cache(mm);

	if (radix_enabled()) {
		/*
		 * Radix doesn't have a valid bit in the process table
		 * entries. However we know that at least P9 implementation
		 * will avoid caching an entry with an invalid RTS field,
		 * and 0 is invalid. So this will do.
		 *
		 * This runs before the "fullmm" tlb flush in exit_mmap,
		 * which does a RIC=2 tlbie to clear the process table
		 * entry. See the "fullmm" comments in tlb-radix.c.
		 *
		 * No barrier required here after the store because
		 * this process will do the invalidate, which starts with
		 * ptesync.
		 */
		process_tb[mm->context.id].prtb0 = 0;
	}
}

#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	isync();
}
#endif

/**
 * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
 *
 * This clears the CPU from mm_cpumask for all processes, and then flushes the
 * local TLB to ensure TLB coherency in case the CPU is onlined again.
 *
 * KVM guest translations are not necessarily flushed here. If KVM started
 * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
 */
#ifdef CONFIG_HOTPLUG_CPU
void cleanup_cpu_mmu_context(void)
{
	int cpu = smp_processor_id();

	clear_tasks_mm_cpumask(cpu);
	tlbiel_all();
}
#endif