/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
    vmm_host_xcr0;
static struct xsave_limits vmm_xsave_limits;

void
vmm_host_state_init(void)
{
	int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	/*
	 * On machines without PCID, or with PCID but without INVPCID,
	 * kernel (i.e. global) TLB entries are flushed by temporarily
	 * clearing the CR4.PGE bit; see invltlb_glob().  If preemption
	 * occurs at the wrong time, the cached vmm_host_cr4 might store
	 * the value with CR4.PGE cleared.  Since FreeBSD requires
	 * support for PG_G on amd64, just set the bit unconditionally.
	 */
	vmm_host_cr4 = rcr4() | CR4_PGE;

	/*
	 * Only permit a guest to use XSAVE if the host is using XSAVE,
	 * and only permit the guest to use XSAVE features supported by
	 * the host.  This ensures that the FPU state used by the guest
	 * is always a subset of the saved guest FPU state.
	 *
	 * In addition, only permit XSAVE features whose inter-feature
	 * dependency rules are known, so that xsetbv can be emulated
	 * properly.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);

		/*
		 * CPUID leaf 0xD, sub-leaf 0 reports in %ebx the size of
		 * the XSAVE area required by the currently enabled XCR0
		 * features.
		 */
		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}

uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}

uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_fsbase(void)
{

	return (0);
}

uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}

const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}
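/*
 * Usage sketch (illustrative; not part of the original file): a hypervisor
 * backend is expected to load these cached values into its host-state
 * context at vcpu setup time, rather than re-reading the control registers
 * and MSRs on every VM entry.  A VMX-style backend, for instance, might
 * populate its host-state fields roughly as follows.  The vmcs_write()
 * call and the VMCS_HOST_* field names below are assumptions modeled on
 * the Intel VMX backend (sys/amd64/vmm/intel/vmcs.c); the real code
 * differs.
 *
 *	vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
 *	vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());
 *	vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
 *	vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4());
 *	vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());
 *	vmcs_write(VMCS_HOST_DS_SELECTOR, vmm_get_host_datasel());
 *	vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());
 *	vmcs_write(VMCS_HOST_FS_BASE, vmm_get_host_fsbase());
 *	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
 */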