/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014, Neel Natu ([email protected])
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, struct svm_vcpu *vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_vcpu *vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}

void
svm_msr_guest_exit(struct svm_vcpu *vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

int
svm_rdmsr(struct svm_vcpu *vcpu, u_int num, uint64_t *result, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
	case MSR_AMDK8_IPM:
	case MSR_EXTFEATURES:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

int
svm_wrmsr(struct svm_vcpu *vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
			vm_inject_gp(vcpu->vcpu);
		}
		break;
	case MSR_SYSCFG:
		break;		/* ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to microcode update register.
		 */
		break;
#ifdef BHYVE_SNAPSHOT
	case MSR_TSC:
		svm_set_tsc_offset(vcpu, val - rdtsc());
		break;
#endif
	case MSR_EXTFEATURES:
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
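
/*
 * A minimal sketch (not part of this file) of how the catch-all
 * svm_rdmsr() above is typically consumed from the #VMEXIT path:
 * MSRs in the local APIC range are dispatched to the APIC emulation
 * first, everything else falls through to svm_rdmsr(), and EINVAL
 * signals an unknown MSR for which the caller injects #GP into the
 * guest.  The wrapper name example_emulate_rdmsr and the exact
 * lapic_msr()/lapic_rdmsr() dispatch shown are assumptions for
 * illustration (the real call sites live in svm.c, which also copies
 * the result back into the guest's RAX:RDX, omitted here).  Guarded
 * by #if 0 so it is never compiled.
 */
#if 0
static int
example_emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
{
	uint64_t result;
	int error;

	if (lapic_msr(num))	/* APIC-range MSR? (assumed helper) */
		error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
	else
		error = svm_rdmsr(vcpu, num, &result, retu);

	if (error != 0) {
		/* Unknown MSR: reflect a #GP back into the guest. */
		vm_inject_gp(vcpu->vcpu);
	}
	return (error);
}
#endif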