// SPDX-License-Identifier: GPL-2.0
/*
 * Alpha TLB shootdown helpers
 *
 * Copyright (C) 2025 Magnus Lindholm <[email protected]>
 *
 * Alpha-specific TLB flush helpers that cannot be expressed purely
 * as inline functions.
 *
 * These helpers provide combined MM context handling (ASN rollover)
 * and immediate TLB invalidation for page migration and memory
 * compaction paths, where lazy shootdowns are insufficient.
 */

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/pal.h>
#include <asm/mmu_context.h>

/* True while this CPU is inside an ASN update and must not reload one. */
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

/*
 * Migration/compaction helper: combine mm context (ASN) handling with an
 * immediate per-page TLB invalidate and (for exec) an instruction barrier.
 *
 * This mirrors the SMP combined IPI handler semantics, but runs locally on UP.
 */
#ifndef CONFIG_SMP
/*
 * migrate_flush_tlb_page() - UP variant.
 * @vma:  VMA the page being migrated belongs to; supplies vm_mm and VM_EXEC.
 * @addr: user virtual address whose translation must be killed now.
 *
 * Runs entirely on the local CPU: first roll the mm's context (ASN), then
 * issue a TBI PALcode call for @addr.  tbi type 3 hits the instruction and
 * data TBs, type 2 the data TB only.
 */
void migrate_flush_tlb_page(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	/* 3 = ITB+DTB for executable mappings, 2 = DTB only otherwise. */
	int tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2;

	/*
	 * First do the mm-context side:
	 * If we're currently running this mm, reload a fresh context ASN.
	 * Otherwise, mark context invalid.
	 *
	 * On UP, this is mostly about matching the SMP semantics and ensuring
	 * exec/i-cache tagging assumptions hold when compaction migrates pages.
	 */
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);

	/*
	 * Then do the immediate translation kill for this VA.
	 * For exec mappings, order instruction fetch after invalidation.
	 *
	 * NOTE(review): no imb() is issued here even though the comment above
	 * promises instruction-fetch ordering; tbi(3, ...) invalidates the
	 * ITB/DTB but does not flush the i-stream.  Confirm whether callers
	 * (e.g. flush_icache_user_page paths) provide the imb, or whether one
	 * is needed here for VM_EXEC mappings.
	 */
	tbi(tbi_type, addr);
}

#else
/* Argument bundle passed by pointer through the on_each_cpu() rendezvous. */
struct tlb_mm_and_addr {
	struct mm_struct *mm;   /* mm whose mapping is being migrated */
	unsigned long addr;     /* user VA to invalidate on every CPU */
	int tbi_type;           /* 2 = DTB, 3 = ITB+DTB */
};

/*
 * IPI handler: runs on every online CPU (including the sender) with a
 * struct tlb_mm_and_addr.  Combines the flush_tlb_mm-style context action
 * with an immediate per-VA TBI on the executing CPU.
 */
static void ipi_flush_mm_and_page(void *x)
{
	struct tlb_mm_and_addr *d = x;

	/*
	 * Part 1: mm context side (Alpha uses ASN/context as a key mechanism).
	 * If this CPU is running the target mm (and is not mid-ASN-update),
	 * load a fresh context/ASN; otherwise just mark the mm's context on
	 * this CPU invalid so a fresh ASN is taken on next activation.
	 */
	if (d->mm == current->active_mm && !asn_locked())
		__load_new_mm_context(d->mm);
	else
		flush_tlb_other(d->mm);

	/* Part 2: immediate per-VA invalidation on this CPU. */
	tbi(d->tbi_type, d->addr);
}

/*
 * migrate_flush_tlb_page() - SMP variant.
 * @vma:  VMA the page being migrated belongs to; supplies vm_mm and VM_EXEC.
 * @addr: user virtual address whose translation must be killed everywhere.
 *
 * Broadcasts one synchronous cross-call so every CPU performs both the mm
 * context action and the per-VA TBI before this function returns.  @d lives
 * on the stack; the wait=1 rendezvous guarantees no CPU touches it after we
 * return.
 */
void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct tlb_mm_and_addr d = {
		.mm = mm,
		.addr = addr,
		.tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2,
	};

	/*
	 * One synchronous rendezvous: every CPU runs ipi_flush_mm_and_page().
	 * This is the "combined" version of flush_tlb_mm + per-page invalidate.
	 *
	 * preempt_disable() keeps us on one CPU so the smp_processor_id()
	 * read below stays valid across the whole sequence.
	 */
	preempt_disable();
	on_each_cpu(ipi_flush_mm_and_page, &d, 1);

	/*
	 * mimic flush_tlb_mm()'s mm_users<=1 optimization.
	 *
	 * NOTE(review): in flush_tlb_mm() this check runs *before* the
	 * cross-call precisely to avoid sending it, and only when
	 * mm == current->active_mm (so the sole user is known to be here).
	 * Here it runs unconditionally after the IPI has already been sent,
	 * so it saves nothing and may zero a context[] slot that the IPI
	 * handler just refreshed on a remote CPU.  Confirm this ordering and
	 * the missing active_mm guard are intentional.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		int cpu, this_cpu;
		this_cpu = smp_processor_id();

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!cpu_online(cpu) || cpu == this_cpu)
				continue;
			/* Tear down remote ASN bindings so the mm gets a
			 * fresh context if it ever runs there again. */
			if (READ_ONCE(mm->context[cpu]))
				WRITE_ONCE(mm->context[cpu], 0);
		}
	}
	preempt_enable();
}

#endif