// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	if (!system_supports_fpsimd())
		return;

	/*
	 * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
	 * that the host kernel is responsible for restoring this state upon
	 * return to userspace, and the hyp code doesn't need to save anything.
	 *
	 * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
	 * that PSTATE.{SM,ZA} == {0,0}.
	 */
	fpsimd_save_and_flush_cpu_state();
	*host_data_ptr(fp_owner) = FP_STATE_FREE;

	WARN_ON_ONCE(system_supports_sme() && read_sysreg_s(SYS_SVCR));
}

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest owns the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		*host_data_ptr(fp_owner) = FP_STATE_FREE;
}
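
/*
 * Note: guest_owns_fp_regs(), used below, is defined in the shared KVM
 * headers rather than in this file. As a minimal sketch (assuming the
 * fp_owner tristate used above, whose other values are
 * FP_STATE_HOST_OWNED and FP_STATE_GUEST_OWNED), it behaves roughly as:
 *
 *	static inline bool guest_owns_fp_regs(void)
 *	{
 *		return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
 *	}
 *
 * i.e. it is true only while the vcpu's FP/SVE state is live in the CPU
 * registers and has not yet been written back.
 */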

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	struct cpu_fp_state fp_state;

	WARN_ON_ONCE(!irqs_disabled());

	if (guest_owns_fp_regs()) {
		/*
		 * Currently we do not support SME guests so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fp_state.st = &vcpu->arch.ctxt.fp_regs;
		fp_state.sve_state = vcpu->arch.sve_state;
		fp_state.sve_vl = vcpu->arch.sve_max_vl;
		fp_state.sme_state = NULL;
		fp_state.svcr = __ctxt_sys_reg(&vcpu->arch.ctxt, SVCR);
		fp_state.fpmr = __ctxt_sys_reg(&vcpu->arch.ctxt, FPMR);
		fp_state.fp_type = &vcpu->arch.fp_type;

		if (vcpu_has_sve(vcpu))
			fp_state.to_save = FP_STATE_SVE;
		else
			fp_state.to_save = FP_STATE_FPSIMD;

		fpsimd_bind_state_to_cpu(&fp_state);

		clear_thread_flag(TIF_FOREIGN_FPSTATE);
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (guest_owns_fp_regs()) {
		/*
		 * Flush (save and invalidate) the fpsimd/sve state so that if
		 * the host tries to use fpsimd/sve, it's not using stale data
		 * from the guest.
		 *
		 * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
		 * context unconditionally, in both nVHE and VHE. This allows
		 * the kernel to restore the fpsimd/sve state, including ZCR_EL1
		 * when needed.
		 */
		fpsimd_save_and_flush_cpu_state();
	}

	local_irq_restore(flags);
}
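
/*
 * For orientation, a sketch of where these hooks sit on the vcpu-run
 * path, assuming the generic arm64 KVM run loop; everything here other
 * than the four helpers defined above is simplified pseudocode:
 *
 *	kvm_arch_vcpu_load_fp(vcpu);		// vcpu_load: save/unbind host state
 *	...
 *	local_irq_disable();
 *	kvm_arch_vcpu_ctxflush_fp(vcpu);	// final ownership check before entry
 *	ret = __kvm_vcpu_run(vcpu);		// FPSIMD access trap may load guest regs
 *	kvm_arch_vcpu_ctxsync_fp(vcpu);		// rebind guest state if it was loaded
 *	local_irq_enable();
 *	...
 *	kvm_arch_vcpu_put_fp(vcpu);		// vcpu_put: write back dirty guest state
 */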