/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context() to only handle the live mm;
 *   the retiring mm is handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of the core ASID allocation routine get_new_mmu_context()
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <linux/sched/mm.h>

#include <asm/tlb.h>
#include <asm-generic/mm_hooks.h>

/*
 * ARC ASID Management
 *
 * The MMU tags TLB entries with an 8-bit ASID, avoiding the need to flush
 * the TLB on every context-switch.
 *
 * ASIDs are managed per CPU, so threads of the same task running on
 * different CPUs can have different ASIDs. Global ASID management would only
 * be needed if the hardware supported TLB shootdown and/or a TLB shared
 * across cores, which ARC doesn't.
 *
 * Each task is assigned a unique ASID by a simple round-robin allocator
 * tracked in @asid_cpu. When the 8-bit value rolls over, a new cycle is
 * started from 0 and the TLB is flushed.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32-bit @asid_cpu (and mm->asid) holds the 8-bit MMU PID in its lower
 * bits; the upper 24 bits serve as a cycle/generation indicator, and natural
 * 32-bit unsigned math automagically increments the generation when the
 * lower 8 bits roll over.
 */

#define MM_CTXT_ASID_MASK	0x000000ff	/* MMU PID reg: 8-bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
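
/*
 * Worked example of the cycle/generation scheme above (values illustrative):
 *
 *   asid_cpu(cpu)   == 0x00000304 -> generation 0x000003, hw PID 0x04
 *   mm A: asid[cpu] == 0x00000302 -> same generation, PID 0x02 still valid
 *   mm B: asid[cpu] == 0x000002ff -> stale generation, gets 0x00000305
 *                                    (hw PID 0x05) on its next allocation
 *
 * When the allocator steps from 0x000003ff to 0x00000400, the low 8 bits
 * wrap to 0, the generation advances via plain unsigned arithmetic, and the
 * TLB is flushed, so PIDs recycled in the new cycle cannot hit stale entries
 * left over from the previous one.
 */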

/*
 * Get a new ASID if the task doesn't have a valid one (unallocated or from a
 * previous cycle). Also set the MMU PID register to the existing/updated ASID.
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to a new ASID if the current one is not from the current
	 * alloc-cycle/generation, i.e. check that the generation bits in
	 * mm->asid and the cpu's ASID counter are exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *       generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *       first need to destroy the context, setting it to the invalid
	 *       value.
	 */
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* Move to the next ASID and handle rollover of the 8-bit counter */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * The check above detected rollover of the 8-bit ASID in the
		 * 32-bit container. If the container itself wrapped around,
		 * set it to a non-zero "generation" to distinguish it from
		 * the "no context" value.
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign the new ASID to this task's mm */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	mmu_setup_asid(mm, hw_pid(mm, cpu));

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct instance.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide a CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}

/*
 * Prepare the MMU for the task: set up the PID register with the allocated
 * ASID. If the task doesn't have an ASID yet (never allocated, or stolen),
 * get a new one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist CPUs for sending TLB flush IPIs; not sending
	 * the IPI to CPUs where a task once ran could cause stale TLB entry
	 * re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/* Program the page table root, then the (possibly new) ASID */
	mmu_setup_pgd(next, next->pgd);

	get_new_mmu_context(next);
}

/*
 * activate_mm() defaults (in asm-generic) to switch_mm() and is called at
 * execve() time to get a new ASID. Note the subtlety here:
 * get_new_mmu_context() behaves differently here vs. in switch_mm(): here it
 * always hands out a new ASID, because the mm still has the unallocated
 * "initial" value, while in the latter it moves to a new ASID only if the
 * existing one is unallocated or stale.
 */

/*
 * It seemed that deactivate_mm() would be a reasonable place to do the
 * book-keeping for a retiring mm. However destroy_context() still needs to
 * do it, because between mm_release() => deactivate_mm() and
 * mmput() => .. => __mmdrop() => destroy_context() there is a good chance
 * that the task gets sched-out/in, making its ASID valid again (this teased
 * me for a whole day).
 */

#include <asm-generic/mmu_context.h>

#endif	/* _ASM_ARC_MMU_CONTEXT_H */