/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#include "mmu.h"

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))
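/*
 * For example, with the 9 bits per level used by 64-bit paging:
 * __PT_LEVEL_SHIFT(2, 9) == PAGE_SHIFT + 9 == 21, so __PT_INDEX(addr, 2, 9)
 * selects bits 29:21 of the address, and __PT_ENT_PER_PAGE(9) == 512 entries
 * per table page.
 */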
/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging).  In
	 * both cases the result of the translation is a GPA and a set of
	 * access constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits.  Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};

	bool has_mapped_host_mmio;

	union {
		/* These two members aren't used for TDP MMU */
		struct {
			unsigned int unsync_children;
			/*
			 * Number of writes since the last time traversal
			 * visited this page.
			 */
			atomic_t write_flooding_count;
		};
		/*
		 * Page table page of external PT.
		 * Passed to TDX module, not accessed by KVM.
		 */
		void *external_spt;
	};
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool is_mirror_sp(const struct kvm_mmu_page *sp)
{
	return sp->role.is_mirror;
}

static inline void kvm_mmu_alloc_external_spt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	/*
	 * external_spt is allocated for the TDX module to hold private EPT
	 * mappings; the TDX module initializes the page itself, so KVM does
	 * not need to initialize or access external_spt.  KVM only interacts
	 * with sp->spt for private EPT operations.
	 */
	sp->external_spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_external_spt_cache);
}

static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mmu_page *root)
{
	/*
	 * Since mirror SPs are used only for TDX, which maps private memory
	 * at its "natural" GFN, no mask needs to be applied to them - and,
	 * dually, the bits are expected to be used only for the shared PT.
	 */
	if (is_mirror_sp(root))
		return 0;
	return kvm_gfn_direct_bits(kvm);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
						      struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm->arch.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
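/*
 * For example, for a 2MiB mapping (level == PG_LEVEL_2M),
 * KVM_PAGES_PER_HPAGE(level) == 512, so gfn_round_for_level() clears the low
 * 9 bits of the gfn, and kvm_flush_remote_tlbs_gfn() below flushes the 512
 * GFNs backing that huge page.
 */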
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool synchronizing, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* Arguments to kvm_mmu_do_page_fault(). */
	const gpa_t addr;
	const u64 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool is_private;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/*
	 * Shifted addr, or result of the guest page table walk if addr is a
	 * gva.  For VMs whose memslots can be mapped at multiple GPA aliases
	 * (i.e. TDX), the gfn field does not contain the bit that selects
	 * between the aliases (i.e. the shared bit for TDX).
	 */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_mmu_faultin_pfn(). */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	struct page *refcounted_page;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(),
 * fast_page_fault(), and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: MMIO page fault, emulate the instruction directly.
 * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
 *			   gfn and retry, or emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_WRITE_PROTECTED,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

/*
 * Define RET_PF_CONTINUE as 0 to allow for
 * - efficient machine code when checking for CONTINUE, e.g.
 *   "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero,
 * - kvm_mmu_do_page_fault() to return other RET_PF_* as a positive value.
 */
static_assert(RET_PF_CONTINUE == 0);
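/*
 * A minimal sketch of the intended caller pattern (some_fault_helper() is a
 * hypothetical name, not a real call site): because every "stop" value is
 * non-zero, fault handlers can bail out with a single zero check, e.g.
 *
 *	r = some_fault_helper(vcpu, fault);
 *	if (r != RET_PF_CONTINUE)
 *		return r;
 */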
static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						     struct kvm_page_fault *fault)
{
	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
				      PAGE_SIZE, fault->write, fault->exec,
				      fault->is_private);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u64 err, bool prefetch,
					int *emulation_type, u8 *level)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = err & PFERR_PRIVATE_ACCESS,

		.pfn = KVM_PFN_ERR_FAULT,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		/*
		 * Things like memslots don't understand the concept of a
		 * shared bit.  Strip it so that the GFN can be used like
		 * normal, and the fault.addr can be used when the shared bit
		 * is needed.
		 */
		fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * With retpoline being active an indirect call is rather expensive,
	 * so do a direct call in the most common case.
	 */
	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/*
	 * Not sure what's happening, but punt to userspace and hope that
	 * they can fix it by changing memory to shared, or they can
	 * provide a better error.
	 */
	if (r == RET_PF_EMULATE && fault.is_private) {
		pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n");
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		return -EFAULT;
	}

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
	if (level)
		*level = fault.goal_level;

	return r;
}
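/*
 * A minimal usage sketch (illustrative, not a verbatim call site): the
 * generic fault path decodes the hardware-provided error code and forwards
 * it, e.g.
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, error_code, false,
 *				  &emulation_type, NULL);
 *	if (r < 0)
 *		return r;
 *
 * with the positive RET_PF_* values handled by the caller afterwards.
 */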
int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */