// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;
	u64			tcr;
	u64			sctlr;
};

static void enter_vmid_context(struct kvm_s2_mmu *mmu,
			       struct tlb_inv_context *cxt,
			       bool nsh)
{
	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;
	cxt->mmu = NULL;

	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can mess with the MM
	 *   registers out of context, for which dsb(nsh) is enough
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	/*
	 * If we're already in the desired context, then there's nothing to do.
	 */
	if (vcpu) {
		/*
		 * We're in guest context. However, for this to work, this needs
		 * to be called from within __kvm_vcpu_run(), which ensures that
		 * __hyp_running_vcpu is set to the current guest vcpu.
		 */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;

		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* We're in host context. */
		if (mmu == host_s2_mmu)
			return;

		cxt->mmu = host_s2_mmu;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a Stage-1 walk with the old VMID while we have
		 * the new VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the host S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill. For guests, we ensure that the S1 MMU is
		 * temporarily enabled in the next context.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();

		if (vcpu) {
			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
			if (!(val & SCTLR_ELx_M)) {
				val |= SCTLR_ELx_M;
				write_sysreg_el1(val, SYS_SCTLR);
				isb();
			}
		} else {
			/* The host S1 MMU is always enabled. */
			cxt->sctlr = SCTLR_ELx_M;
		}
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	if (vcpu)
		__load_host_stage2();
	else
		__load_stage2(mmu, kern_hyp_va(mmu->arch));

	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *mmu = cxt->mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (!mmu)
		return;

	if (vcpu)
		__load_stage2(mmu, kern_hyp_va(mmu->arch));
	else
		__load_host_stage2();

	/* Ensure write of the old VMID */
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		if (!(cxt->sctlr & SCTLR_ELx_M)) {
			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
			isb();
		}

		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, true);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst case as PAGE_SIZE
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
				TLBI_TTL_UNKNOWN);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_flush_vm_context(void)
{
	/* Same remark as in enter_vmid_context() */
	dsb(ish);
	__tlbi(alle1is);
	dsb(ish);
}