// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/prctl.h>
#include <asm/acpi.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}

void flush_icache_all(void)
{
	local_flush_icache_all();

	if (num_online_cpus() < 2)
		return;

	/*
	 * Make sure all previous writes to the D$ are ordered before making
	 * the IPI. The RISC-V spec states that a hart must execute a data fence
	 * before triggering a remote fence.i in order to make the modification
	 * visible for remote harts.
	 *
	 * IPIs on RISC-V are triggered by MMIO writes to either CLINT or
	 * S-IMSIC, so the fence ensures previous data writes "happen before"
	 * the MMIO.
	 */
	RISCV_FENCE(w, o);

	if (riscv_use_sbi_for_rfence())
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);
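/*
 * Illustrative sketch only (not an interface defined in this file): a
 * kernel caller that modifies instruction memory is expected to write the
 * new instructions first and only then call flush_icache_all(), which
 * supplies the data fence and the remote fence.i described above, e.g.:
 *
 *	memcpy(patch_addr, insns, len);	// hypothetical patch site
 *	flush_icache_all();		// order the writes, then shoot down I$
 *
 * Real code patching goes through patch_text() and friends, which layer
 * locking and fixmap handling on top of this primitive.
 */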
/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
void flush_icache_pte(struct mm_struct *mm, pte_t pte)
{
	struct folio *folio = page_folio(pte_page(pte));

	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_icache_mm(mm, false);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
#endif /* CONFIG_MMU */

unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);

unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);

unsigned int riscv_cbop_block_size;
EXPORT_SYMBOL_GPL(riscv_cbop_block_size);

static void __init cbo_get_block_size(struct device_node *node,
				      const char *name, u32 *block_size,
				      unsigned long *first_hartid)
{
	unsigned long hartid;
	u32 val;

	if (riscv_of_processor_hartid(node, &hartid))
		return;

	if (of_property_read_u32(node, name, &val))
		return;

	if (!*block_size) {
		*block_size = val;
		*first_hartid = hartid;
	} else if (*block_size != val) {
		pr_warn("%s mismatched between harts %lu and %lu\n",
			name, *first_hartid, hartid);
	}
}

void __init riscv_init_cbo_blocksizes(void)
{
	unsigned long cbom_hartid, cboz_hartid, cbop_hartid;
	u32 cbom_block_size = 0, cboz_block_size = 0, cbop_block_size = 0;
	struct device_node *node;
	struct acpi_table_header *rhct;
	acpi_status status;

	if (acpi_disabled) {
		for_each_of_cpu_node(node) {
			/* set block-size for cbom and/or cboz extension if available */
			cbo_get_block_size(node, "riscv,cbom-block-size",
					   &cbom_block_size, &cbom_hartid);
			cbo_get_block_size(node, "riscv,cboz-block-size",
					   &cboz_block_size, &cboz_hartid);
			cbo_get_block_size(node, "riscv,cbop-block-size",
					   &cbop_block_size, &cbop_hartid);
		}
	} else {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;

		acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, &cbop_block_size);
		acpi_put_table((struct acpi_table_header *)rhct);
	}

	if (cbom_block_size)
		riscv_cbom_block_size = cbom_block_size;

	if (cboz_block_size)
		riscv_cboz_block_size = cboz_block_size;

	if (cbop_block_size)
		riscv_cbop_block_size = cbop_block_size;
}
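/*
 * Illustrative sketch only: the exported block sizes above are the stride
 * that cache-management users step by when operating on an address range.
 * Assuming an assembler with Zicbom support (the kernel itself encodes
 * these operations via macros so it can build with older toolchains), a
 * flush over [start, end) would look roughly like:
 *
 *	unsigned long addr = ALIGN_DOWN(start, riscv_cbom_block_size);
 *
 *	for (; addr < end; addr += riscv_cbom_block_size)
 *		asm volatile("cbo.flush 0(%0)" : : "r" (addr) : "memory");
 */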
#ifdef CONFIG_SMP
static void set_icache_stale_mask(void)
{
	int cpu = get_cpu();
	cpumask_t *mask;
	bool stale_cpu;

	/*
	 * Mark every other hart's icache as needing a flush for
	 * this MM. Maintain the previous value of the current
	 * cpu to handle the case when this function is called
	 * concurrently on different harts.
	 */
	mask = &current->mm->context.icache_stale_mask;
	stale_cpu = cpumask_test_cpu(cpu, mask);

	cpumask_setall(mask);
	__assign_cpu(cpu, mask, stale_cpu);
	put_cpu();
}
#endif

/**
 * riscv_set_icache_flush_ctx() - Enable/disable icache flushing instructions in
 * userspace.
 * @ctx: Set the type of icache flushing instructions permitted/prohibited in
 *	 userspace. Supported values described below.
 *
 * Supported values for ctx:
 *
 * * %PR_RISCV_CTX_SW_FENCEI_ON: Allow fence.i in user space.
 *
 * * %PR_RISCV_CTX_SW_FENCEI_OFF: Disallow fence.i in user space. All threads in
 *   a process will be affected when ``scope == PR_RISCV_SCOPE_PER_PROCESS``.
 *   Therefore, caution must be taken; use this flag only when you can guarantee
 *   that no thread in the process will emit fence.i from this point onward.
 *
 * @scope: Set scope of where icache flushing instructions are allowed to be
 *	   emitted. Supported values described below.
 *
 * Supported values for scope:
 *
 * * %PR_RISCV_SCOPE_PER_PROCESS: Ensure the icache of any thread in this process
 *                                is coherent with instruction storage upon
 *                                migration.
 *
 * * %PR_RISCV_SCOPE_PER_THREAD: Ensure the icache of the current thread is
 *                               coherent with instruction storage upon
 *                               migration.
 *
 * When ``scope == PR_RISCV_SCOPE_PER_PROCESS``, all threads in the process are
 * permitted to emit icache flushing instructions. Whenever any thread in the
 * process is migrated, the corresponding hart's icache will be guaranteed to be
 * consistent with instruction storage. This does not enforce any guarantees
 * outside of migration. If a thread modifies an instruction that another thread
 * may attempt to execute, the other thread must still emit an icache flushing
 * instruction before attempting to execute the potentially modified
 * instruction. This must be performed by the user-space program.
 *
 * In per-thread context (e.g. ``scope == PR_RISCV_SCOPE_PER_THREAD``) only the
 * thread calling this function is permitted to emit icache flushing
 * instructions. When the thread is migrated, the corresponding hart's icache
 * will be guaranteed to be consistent with instruction storage.
 *
 * On kernels configured without SMP, this function is a nop as migrations
 * across harts will not occur.
 */
int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long scope)
{
#ifdef CONFIG_SMP
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			current->mm->context.force_icache_flush = true;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			current->thread.force_icache_flush = true;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		switch (scope) {
		case PR_RISCV_SCOPE_PER_PROCESS:
			set_icache_stale_mask();
			current->mm->context.force_icache_flush = false;
			break;
		case PR_RISCV_SCOPE_PER_THREAD:
			set_icache_stale_mask();
			current->thread.force_icache_flush = false;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
#else
	switch (ctx) {
	case PR_RISCV_CTX_SW_FENCEI_ON:
	case PR_RISCV_CTX_SW_FENCEI_OFF:
		return 0;
	default:
		return -EINVAL;
	}
#endif
}
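/*
 * Illustrative user-space usage sketch (this snippet belongs in an
 * application, not in this file): the function above is reached through
 * prctl(2) via the PR_RISCV_SET_ICACHE_FLUSH_CTX option, so a JIT that
 * wants fence.i available to every thread in the process would do:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON,
 *		  PR_RISCV_SCOPE_PER_PROCESS))
 *		perror("PR_RISCV_SET_ICACHE_FLUSH_CTX");
 *
 * and switch back with PR_RISCV_CTX_SW_FENCEI_OFF once it can guarantee
 * that no thread will emit fence.i again.
 */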