// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	/*
	 * Invalidate all roots, which besides the obvious, schedules all roots
	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
	 * ultimately frees all roots.
	 */
	kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
	kvm_tdp_mmu_zap_invalidated_roots(kvm, false);

#ifdef CONFIG_KVM_PROVE_MMU
	KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
#endif
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down. Putting the last reference to
	 * zapped roots will create new callbacks.
	 */
	rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->external_spt);
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	/*
	 * The TDP MMU itself holds a reference to each root until the root is
	 * explicitly invalidated, i.e. the final reference should never be
	 * put for a valid root.
	 */
	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
			       enum kvm_tdp_mmu_root_types types)
{
	if (WARN_ON_ONCE(!(types & KVM_VALID_ROOTS)))
		return false;

	if (root->role.invalid && !(types & KVM_INVALID_ROOTS))
		return false;

	if (likely(!is_mirror_sp(root)))
		return types & KVM_DIRECT_ROOTS;
	return types & KVM_MIRROR_ROOTS;
}

/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL) that matches with @types.
 * A reference to the returned root is acquired, and the reference to
 * @prev_root is released (the caller obviously must hold a reference to
 * @prev_root if it's non-NULL).
 *
 * Roots that don't match with @types are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      enum kvm_tdp_mmu_root_types types)
{
	struct kvm_mmu_page *next_root;

	/*
	 * While the roots themselves are RCU-protected, fields such as
	 * role.invalid are protected by mmu_lock.
	 */
	lockdep_assert_held(&kvm->mmu_lock);

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if (tdp_mmu_root_match(next_root, types) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types)		\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _types);			\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;		\
	     _root = tdp_mmu_next_root(_kvm, _root, _types))			\
		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_VALID_ROOTS)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ALL_ROOTS);	\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
	     _root = tdp_mmu_next_root(_kvm, _root, KVM_ALL_ROOTS))

/*
 * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
		    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
		     !tdp_mmu_root_match((_root), (_types)))) {			\
		} else

/*
 * Iterate over all TDP MMU roots in an RCU read-side critical section.
 * It is safe to iterate over the SPTEs under the root, but their values will
 * be unstable, so all writes must be atomic.  As this routine is meant to be
 * used without holding the mmu_lock at all, any bits that are flipped must
 * be reflected in kvm_tdp_mmu_spte_need_atomic_write().
 */
#define for_each_tdp_mmu_root_rcu(_kvm, _root, _as_id, _types)			\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link)		\
		if ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
		    !tdp_mmu_root_match((_root), (_types))) {			\
		} else

#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)		\
	__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)

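/*
 * Allocate a shadow page and its backing page table from the vCPU's
 * pre-filled memory caches, i.e. without sleeping; callers are expected to
 * have topped up the caches before acquiring mmu_lock, and must initialize
 * the page via tdp_mmu_init_sp()/tdp_mmu_init_child_sp().
 */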
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	union kvm_mmu_page_role role = mmu->root_role;
	int as_id = kvm_mmu_role_as_id(role);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	if (mirror)
		role.is_mirror = true;

	/*
	 * Check for an existing root before acquiring the pages lock to avoid
	 * unnecessary serialization if multiple vCPUs are loading a new root.
	 * E.g. when bringing up secondary vCPUs, KVM will already have created
	 * a valid root on behalf of the primary vCPU.
	 */
	read_lock(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
		if (root->role.word == role.word)
			goto out_read_unlock;
	}

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);

	/*
	 * Recheck for an existing root after acquiring the pages lock, another
	 * vCPU may have raced ahead and created a new usable root.  Manually
	 * walk the list of roots as the standard macros assume that the pages
	 * lock is *not* held.  WARN if grabbing a reference to a usable root
	 * fails, as the last reference to a root can only be put *after* the
	 * root has been invalidated, which requires holding mmu_lock for write.
	 */
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (root->role.word == role.word &&
		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
			goto out_spin_unlock;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	/*
	 * TDP MMU roots are kept until they are explicitly invalidated, either
	 * by a memslot update or by the destruction of the VM.
	 * Initialize the refcount to two; one reference for the vCPU, and one
	 * reference for the TDP MMU itself, which is held until the root is
	 * invalidated and is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
	 */
	refcount_set(&root->tdp_mmu_root_count, 2);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);

out_spin_unlock:
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
out_read_unlock:
	read_unlock(&kvm->mmu_lock);
	/*
	 * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
	 * and actually consuming the root if it's invalidated after dropping
	 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
	 */
	if (mirror) {
		mmu->mirror_root_hpa = __pa(root->spt);
	} else {
		mmu->root.hpa = __pa(root->spt);
		mmu->root.pgd = 0;
	}
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, +1);
#ifdef CONFIG_KVM_PROVE_MMU
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
#endif
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
#ifdef CONFIG_KVM_PROVE_MMU
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
#endif
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	tdp_unaccount_mmu_page(kvm, sp);

	if (!sp->nx_huge_page_disallowed)
		return;

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp, KVM_TDP_MMU);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
				 int level)
{
	/*
	 * External (TDX) SPTEs are limited to PG_LEVEL_4K, and external
	 * PTs are removed in a special order, involving free_external_spt().
	 * But remove_external_spte() will be called on non-leaf PTEs via
	 * __tdp_mmu_zap_root(), so avoid the error the former would return
	 * in this case.
	 */
	if (!is_last_spte(old_spte, level))
		return;

	/* Zapping leaf spte is allowed only when write lock is held. */
	lockdep_assert_held_write(&kvm->mmu_lock);

	kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_spte);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed.
 * Hence the early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp);

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		tdp_ptep_t sptep = pt + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as frozen then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the frozen SPTE value.
			 */
			for (;;) {
				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
				if (!is_frozen_spte(old_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_spte = kvm_tdp_mmu_read_spte(sptep);
			if (!is_shadow_present_pte(old_spte))
				continue;

			/*
			 * Use the common helper instead of a raw WRITE_ONCE as
			 * the SPTE needs to be updated atomically if it can be
			 * modified by a different vCPU outside of mmu_lock.
			 * Even though the parent SPTE is !PRESENT, the TLB
			 * hasn't yet been flushed, and both Intel and AMD
			 * document that A/D assists can use upper-level PxE
			 * entries that are cached in the TLB, i.e. the CPU can
			 * still access the page and mark it dirty.
			 *
			 * No retry is needed in the atomic update path as the
			 * sole concern is dropping a Dirty bit, i.e. no other
			 * task can zap/remove the SPTE as mmu_lock is held for
			 * write.
			 * Marking the SPTE as a frozen SPTE is not
			 * strictly necessary for the same reason, but using
			 * the frozen SPTE value keeps the shared/exclusive
			 * paths consistent and allows the handle_changed_spte()
			 * call below to hardcode the new value to FROZEN_SPTE.
			 *
			 * Note, even though dropping a Dirty bit is the only
			 * scenario where a non-atomic update could result in a
			 * functional bug, simply checking the Dirty bit isn't
			 * sufficient as a fast page fault could read the upper
			 * level SPTE before it is zapped, and then make this
			 * target SPTE writable, resume the guest, and set the
			 * Dirty bit between reading the SPTE above and writing
			 * it here.
			 */
			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
							  FROZEN_SPTE, level);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_spte, FROZEN_SPTE, level, shared);

		if (is_mirror_sp(sp)) {
			KVM_BUG_ON(shared, kvm);
			remove_external_spte(kvm, gfn, old_spte, level);
		}
	}

	if (is_mirror_sp(sp) &&
	    WARN_ON(kvm_x86_call(free_external_spt)(kvm, base_gfn, sp->role.level,
						    sp->external_spt))) {
		/*
		 * Failed to free page table page in mirror page table and
		 * there is nothing to do further.
		 * Intentionally leak the page to prevent the kernel from
		 * accessing the encrypted page.
		 */
		sp->external_spt = NULL;
	}

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

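/*
 * Return the external page table that backs the to-be-installed non-leaf
 * SPTE, or NULL if @new_spte is a leaf (or not shadow-present) and thus has
 * no external page table of its own.
 */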
static void *get_external_spt(gfn_t gfn, u64 new_spte, int level)
{
	if (is_shadow_present_pte(new_spte) && !is_last_spte(new_spte, level)) {
		struct kvm_mmu_page *sp = spte_to_child_sp(new_spte);

		WARN_ON_ONCE(sp->role.level + 1 != level);
		WARN_ON_ONCE(sp->gfn != gfn);
		return sp->external_spt;
	}

	return NULL;
}

static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sptep,
						  gfn_t gfn, u64 old_spte,
						  u64 new_spte, int level)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	int ret = 0;

	KVM_BUG_ON(was_present, kvm);

	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * We need to lock out other updates to the SPTE until the external
	 * page table has been modified.  Use FROZEN_SPTE similar to
	 * the zapping case.
	 */
	if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
		return -EBUSY;

	/*
	 * Use different call to either set up middle level
	 * external page table, or leaf.
	 */
	if (is_leaf) {
		ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_spte);
	} else {
		void *external_spt = get_external_spt(gfn, new_spte, level);

		KVM_BUG_ON(!external_spt, kvm);
		ret = kvm_x86_call(link_external_spt)(kvm, gfn, level, external_spt);
	}
	if (ret)
		__kvm_tdp_mmu_write_spte(sptep, old_spte);
	else
		__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return ret;
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE. Note,
 * dirty logging updates are handled in common code, not here (see make_spte()
 * and fast_pf_fix_direct_spte()).
 */
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON_ONCE(level < PG_LEVEL_4K);
	WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or frozen SPTE,
		 * it is unexpected.
		 * Log the change, though it should not impact the guest
		 * since both the former and current SPTEs are nonpresent.
		 */
		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
				 !is_mmio_spte(kvm, new_spte) &&
				 !is_frozen_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary frozen SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.  Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
							 struct tdp_iter *iter,
							 u64 new_spte)
{
	/*
	 * The caller is responsible for ensuring the old SPTE is not a FROZEN
	 * SPTE.  KVM should never attempt to zap or manipulate a FROZEN SPTE,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));

	if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
		int ret;

		/*
		 * Users of atomic zapping don't operate on mirror roots,
		 * so don't handle it and bug the VM if it's seen.
		 */
		if (KVM_BUG_ON(!is_shadow_present_pte(new_spte), kvm))
			return -EBUSY;

		ret = set_external_spte_present(kvm, iter->sptep, iter->gfn,
						iter->old_spte, new_spte, iter->level);
		if (ret)
			return ret;
	} else {
		u64 *sptep = rcu_dereference(iter->sptep);

		/*
		 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs
		 * and does not hold the mmu_lock.  On failure, i.e. if a
		 * different logical CPU modified the SPTE, try_cmpxchg64()
		 * updates iter->old_spte with the current value, so the caller
		 * operates on fresh data, e.g. if it retries
		 * tdp_mmu_set_spte_atomic()
		 */
		if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
			return -EBUSY;
	}

	return 0;
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0      - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *	      no side-effects other than setting iter->old_spte to the last
 *	      known value of the spte.
 */
static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
							struct tdp_iter *iter,
							u64 new_spte)
{
	int ret;

	lockdep_assert_held_read(&kvm->mmu_lock);

	ret = __tdp_mmu_set_spte_atomic(kvm, iter, new_spte);
	if (ret)
		return ret;

	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			    new_spte, iter->level, true);

	return 0;
}

/*
 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:	KVM instance
 * @as_id:	Address space ID, i.e. regular vs. SMM
 * @sptep:	Pointer to the SPTE
 * @old_spte:	The current value of the SPTE
 * @new_spte:	The new value that will be set for the SPTE
 * @gfn:	The base GFN that was (or will be) mapped by the SPTE
 * @level:	The level _containing_ the SPTE (its parent PT's level)
 *
 * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits.
 */
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary frozen SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the frozen SPTE should not be necessary.
	 */
	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));

	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

	/*
	 * Users that do non-atomic setting of PTEs don't operate on mirror
	 * roots, so don't handle it and bug the VM if it's seen.
	 */
	if (is_mirror_sptep(sptep)) {
		KVM_BUG_ON(is_shadow_present_pte(new_spte), kvm);
		remove_external_spte(kvm, gfn, old_spte, level);
	}

	return old_spte;
}

static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
					 u64 new_spte)
{
	WARN_ON_ONCE(iter->yielded);
	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
					  iter->old_spte, new_spte,
					  iter->gfn, iter->level);
}

#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)	\
	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

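/*
 * Returns true if the walk should yield, i.e. if a reschedule is needed or
 * mmu_lock is contended, and the iterator has made forward progress since
 * the last time it yielded.
 */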
static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
							  struct tdp_iter *iter)
{
	if (!need_resched() && !rwlock_needbreak(&kvm->mmu_lock))
		return false;

	/* Ensure forward progress has been made before yielding. */
	return iter->next_last_level_gfn != iter->yielded_gfn;
}

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	KVM_MMU_WARN_ON(iter->yielded);

	if (!tdp_mmu_iter_need_resched(kvm, iter))
		return false;

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	rcu_read_unlock();

	if (shared)
		cond_resched_rwlock_read(&kvm->mmu_lock);
	else
		cond_resched_rwlock_write(&kvm->mmu_lock);

	rcu_read_lock();

	WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);

	iter->yielded = true;
	return true;
}

static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
	 * a gpa range that would exceed the max gfn, and KVM does not create
	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
	 * the slow emulation path every time.
	 */
	return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			       bool shared, int zap_level)
{
	struct tdp_iter iter;

	for_each_tdp_pte_min_level_all(iter, root, zap_level) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
			goto retry;
	}
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{

	/*
	 * The root must have an elevated refcount so that it's reachable via
	 * mmu_notifier callbacks, which allows this path to yield and drop
	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
	 * must drop all references to relevant pages prior to completing the
	 * callback.  Dropping mmu_lock with an unreachable root would result
	 * in zapping SPTEs after a relevant mmu_notifier callback completes
	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
	 * dirty accessed bits to the SPTE's associated struct page.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * Zap roots in multiple passes of decreasing granularity, i.e. zap at
	 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
	 * preempt models) or mmu_lock contention (full or real-time models).
	 * Zapping at finer granularity marginally increases the total time of
	 * the zap, but in most cases the zap itself isn't latency sensitive.
	 *
	 * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
	 * in order to mimic the page fault path, which can replace a 1GiB page
	 * table with an equivalent 1GiB hugepage, i.e. can get saddled with
	 * zapping a 1GiB region that's fully populated with 4KiB SPTEs.
This906* allows verifying that KVM can safely zap 1GiB regions, e.g. without907* inducing RCU stalls, without relying on a relatively rare event908* (zapping roots is orders of magnitude more common). Note, because909* zapping a SP recurses on its children, stepping down to PG_LEVEL_4K910* in the iterator itself is unnecessary.911*/912if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {913__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);914__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);915}916__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);917__tdp_mmu_zap_root(kvm, root, shared, root->role.level);918919rcu_read_unlock();920}921922bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,923struct kvm_mmu_page *sp)924{925struct tdp_iter iter = {926.old_spte = sp->ptep ? kvm_tdp_mmu_read_spte(sp->ptep) : 0,927.sptep = sp->ptep,928.level = sp->role.level + 1,929.gfn = sp->gfn,930.as_id = kvm_mmu_page_as_id(sp),931};932933lockdep_assert_held_read(&kvm->mmu_lock);934935if (WARN_ON_ONCE(!is_tdp_mmu_page(sp)))936return false;937938/*939* Root shadow pages don't have a parent page table and thus no940* associated entry, but they can never be possible NX huge pages.941*/942if (WARN_ON_ONCE(!sp->ptep))943return false;944945/*946* Since mmu_lock is held in read mode, it's possible another task has947* already modified the SPTE. Zap the SPTE if and only if the SPTE948* points at the SP's page table, as checking shadow-present isn't949* sufficient, e.g. the SPTE could be replaced by a leaf SPTE, or even950* another SP. Note, spte_to_child_pt() also checks that the SPTE is951* shadow-present, i.e. guards against zapping a frozen SPTE.952*/953if ((tdp_ptep_t)sp->spt != spte_to_child_pt(iter.old_spte, iter.level))954return false;955956/*957* If a different task modified the SPTE, then it should be impossible958* for the SPTE to still be used for the to-be-zapped SP. Non-leaf959* SPTEs don't have Dirty bits, KVM always sets the Accessed bit when960* creating non-leaf SPTEs, and all other bits are immutable for non-961* leaf SPTEs, i.e. the only legal operations for non-leaf SPTEs are962* zapping and replacement.963*/964if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE)) {965WARN_ON_ONCE((tdp_ptep_t)sp->spt == spte_to_child_pt(iter.old_spte, iter.level));966return false;967}968969return true;970}971972/*973* If can_yield is true, will release the MMU lock and reschedule if the974* scheduler needs the CPU or there is contention on the MMU lock. 
If this975* function cannot yield, it will not release the MMU lock or reschedule and976* the caller must ensure it does not supply too large a GFN range, or the977* operation can cause a soft lockup.978*/979static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,980gfn_t start, gfn_t end, bool can_yield, bool flush)981{982struct tdp_iter iter;983984end = min(end, tdp_mmu_max_gfn_exclusive());985986lockdep_assert_held_write(&kvm->mmu_lock);987988rcu_read_lock();989990for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {991if (can_yield &&992tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {993flush = false;994continue;995}996997if (!is_shadow_present_pte(iter.old_spte) ||998!is_last_spte(iter.old_spte, iter.level))999continue;10001001tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);10021003/*1004* Zappings SPTEs in invalid roots doesn't require a TLB flush,1005* see kvm_tdp_mmu_zap_invalidated_roots() for details.1006*/1007if (!root->role.invalid)1008flush = true;1009}10101011rcu_read_unlock();10121013/*1014* Because this flow zaps _only_ leaf SPTEs, the caller doesn't need1015* to provide RCU protection as no 'struct kvm_mmu_page' will be freed.1016*/1017return flush;1018}10191020/*1021* Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID** roots.1022* Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if1023* one or more SPTEs were zapped since the MMU lock was last acquired.1024*/1025bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)1026{1027struct kvm_mmu_page *root;10281029lockdep_assert_held_write(&kvm->mmu_lock);1030for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)1031flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);10321033return flush;1034}10351036void kvm_tdp_mmu_zap_all(struct kvm *kvm)1037{1038struct kvm_mmu_page *root;10391040/*1041* Zap all direct roots, including invalid direct roots, as all direct1042* SPTEs must be dropped before returning to the caller. For TDX, mirror1043* roots don't need handling in response to the mmu notifier (the caller).1044*1045* Zap directly even if the root is also being zapped by a concurrent1046* "fast zap". Walking zapped top-level SPTEs isn't all that expensive1047* and mmu_lock is already held, which means the other thread has yielded.1048*1049* A TLB flush is unnecessary, KVM zaps everything if and only the VM1050* is being destroyed or the userspace VMM has exited. In both cases,1051* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.1052*/1053lockdep_assert_held_write(&kvm->mmu_lock);1054__for_each_tdp_mmu_root_yield_safe(kvm, root, -1,1055KVM_DIRECT_ROOTS | KVM_INVALID_ROOTS)1056tdp_mmu_zap_root(kvm, root, false);1057}10581059/*1060* Zap all invalidated roots to ensure all SPTEs are dropped before the "fast1061* zap" completes.1062*/1063void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)1064{1065struct kvm_mmu_page *root;10661067if (shared)1068read_lock(&kvm->mmu_lock);1069else1070write_lock(&kvm->mmu_lock);10711072for_each_tdp_mmu_root_yield_safe(kvm, root) {1073if (!root->tdp_mmu_scheduled_root_to_zap)1074continue;10751076root->tdp_mmu_scheduled_root_to_zap = false;1077KVM_BUG_ON(!root->role.invalid, kvm);10781079/*1080* A TLB flush is not necessary as KVM performs a local TLB1081* flush when allocating a new root (see kvm_mmu_load()), and1082* when migrating a vCPU to a different pCPU. 
		 * Note, the local TLB flush on reuse also invalidates
		 * paging-structure-cache entries, i.e. TLB entries for
		 * intermediate paging structures, that may be zapped, as such
		 * entries are associated with the ASID on both VMX and SVM.
		 */
		tdp_mmu_zap_root(kvm, root, shared);

		/*
		 * The reference needs to be put *after* zapping the root, as
		 * the root must be reachable by mmu_notifiers while it's being
		 * zapped.
		 */
		kvm_tdp_mmu_put_root(kvm, root);
	}

	if (shared)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);
}

/*
 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
 * is about to be zapped, e.g. in response to a memslots update.  The actual
 * zapping is done separately so that it happens with mmu_lock held for read,
 * whereas invalidating roots must be done with mmu_lock held for write (unless
 * the VM is being destroyed).
 *
 * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
 * See kvm_tdp_mmu_alloc_root().
 */
void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
				  enum kvm_tdp_mmu_root_types root_types)
{
	struct kvm_mmu_page *root;

	/*
	 * Invalidating invalid roots doesn't make sense, prevent developers from
	 * having to think about it.
	 */
	if (WARN_ON_ONCE(root_types & KVM_INVALID_ROOTS))
		root_types &= ~KVM_INVALID_ROOTS;

	/*
	 * mmu_lock must be held for write to ensure that a root doesn't become
	 * invalid while there are active readers (invalidating a root while
	 * there are active readers may or may not be problematic in practice,
	 * but it's uncharted territory and not supported).
	 *
	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
	 * being destroyed after all references have been put, or if no vCPUs
	 * have been created (which means there are no roots), i.e. the VM is
	 * being destroyed in an error path of KVM_CREATE_VM.
	 */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
		lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * As above, mmu_lock isn't held when destroying the VM!  There can't
	 * be other references to @kvm, i.e. nothing else can invalidate roots
	 * or get/put references to roots.
	 */
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (!tdp_mmu_root_match(root, root_types))
			continue;

		/*
		 * Note, invalid roots can outlive a memslot update!
Invalid1152* roots must be *zapped* before the memslot update completes,1153* but a different task can acquire a reference and keep the1154* root alive after its been zapped.1155*/1156if (!root->role.invalid) {1157root->tdp_mmu_scheduled_root_to_zap = true;1158root->role.invalid = true;1159}1160}1161}11621163/*1164* Installs a last-level SPTE to handle a TDP page fault.1165* (NPT/EPT violation/misconfiguration)1166*/1167static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,1168struct kvm_page_fault *fault,1169struct tdp_iter *iter)1170{1171struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));1172u64 new_spte;1173int ret = RET_PF_FIXED;1174bool wrprot = false;11751176if (WARN_ON_ONCE(sp->role.level != fault->goal_level))1177return RET_PF_RETRY;11781179if (is_shadow_present_pte(iter->old_spte) &&1180(fault->prefetch || is_access_allowed(fault, iter->old_spte)) &&1181is_last_spte(iter->old_spte, iter->level)) {1182WARN_ON_ONCE(fault->pfn != spte_to_pfn(iter->old_spte));1183return RET_PF_SPURIOUS;1184}11851186if (unlikely(!fault->slot))1187new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);1188else1189wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,1190fault->pfn, iter->old_spte, fault->prefetch,1191false, fault->map_writable, &new_spte);11921193if (new_spte == iter->old_spte)1194ret = RET_PF_SPURIOUS;1195else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))1196return RET_PF_RETRY;1197else if (is_shadow_present_pte(iter->old_spte) &&1198(!is_last_spte(iter->old_spte, iter->level) ||1199WARN_ON_ONCE(leaf_spte_change_needs_tlb_flush(iter->old_spte, new_spte))))1200kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);12011202/*1203* If the page fault was caused by a write but the page is write1204* protected, emulation is needed. If the emulation was skipped,1205* the vCPU would have the same fault again.1206*/1207if (wrprot && fault->write)1208ret = RET_PF_WRITE_PROTECTED;12091210/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */1211if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {1212vcpu->stat.pf_mmio_spte_created++;1213trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,1214new_spte);1215ret = RET_PF_EMULATE;1216} else {1217trace_kvm_mmu_set_spte(iter->level, iter->gfn,1218rcu_dereference(iter->sptep));1219}12201221return ret;1222}12231224/*1225* tdp_mmu_link_sp - Replace the given spte with an spte pointing to the1226* provided page table.1227*1228* @kvm: kvm instance1229* @iter: a tdp_iter instance currently on the SPTE that should be set1230* @sp: The new TDP page table to install.1231* @shared: This operation is running under the MMU lock in read mode.1232*1233* Returns: 0 if the new page table was installed. Non-0 if the page table1234* could not be installed (e.g. 
 *	    could not be installed (e.g. the atomic compare-exchange failed).
 */
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
			   struct kvm_mmu_page *sp, bool shared)
{
	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
	int ret = 0;

	if (shared) {
		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
		if (ret)
			return ret;
	} else {
		tdp_mmu_iter_set_spte(kvm, iter, spte);
	}

	tdp_account_mmu_page(kvm, sp);

	return 0;
}

static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared);

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu_page *root = tdp_mmu_get_root_for_fault(vcpu, fault);
	struct kvm *kvm = vcpu->kvm;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_RETRY;

	KVM_MMU_WARN_ON(!root || root->role.invalid);

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	for_each_tdp_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
		int r;

		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		/*
		 * If SPTE has been frozen by another thread, just give up and
		 * retry, avoiding unnecessary page table allocation and free.
		 */
		if (is_frozen_spte(iter.old_spte))
			goto retry;

		if (iter.level == fault->goal_level)
			goto map_target_level;

		/* Step down into the lower level page table if it exists. */
		if (is_shadow_present_pte(iter.old_spte) &&
		    !is_large_pte(iter.old_spte))
			continue;

		/*
		 * The SPTE is either non-present or points to a huge page that
		 * needs to be split.
		 */
		sp = tdp_mmu_alloc_sp(vcpu);
		tdp_mmu_init_child_sp(sp, &iter);
		if (is_mirror_sp(sp))
			kvm_mmu_alloc_external_spt(vcpu, sp);

		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;

		if (is_shadow_present_pte(iter.old_spte)) {
			/* Don't support large page for mirrored roots (TDX) */
			KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
		} else {
			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
		}

		/*
		 * Force the guest to retry if installing an upper level SPTE
		 * failed, e.g. because a different task modified the SPTE.
		 */
		if (r) {
			tdp_mmu_free_sp(sp);
			goto retry;
		}

		if (fault->huge_page_disallowed &&
		    fault->req_level >= iter.level) {
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
			if (sp->nx_huge_page_disallowed)
				track_possible_nx_huge_page(kvm, sp, KVM_TDP_MMU);
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
		}
	}

	/*
	 * The walk aborted before reaching the target level, e.g. because the
	 * iterator detected an upper level SPTE was frozen during traversal.
	 */
	WARN_ON_ONCE(iter.level == fault->goal_level);
	goto retry;

map_target_level:
	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

retry:
	rcu_read_unlock();
	return ret;
}

/* Used by mmu notifier via kvm_unmap_gfn_range() */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	enum kvm_tdp_mmu_root_types types;
	struct kvm_mmu_page *root;

	types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter) | KVM_INVALID_ROOTS;

	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
					  range->may_block, flush);

	return flush;
}

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 *
 * No need to mark the corresponding PFN as accessed as this call is coming
 * from the clear_young() or clear_flush_young() notifier, which uses the
 * return value to determine if the page has been accessed.
 */
static void kvm_tdp_mmu_age_spte(struct kvm *kvm, struct tdp_iter *iter)
{
	u64 new_spte;

	if (spte_ad_enabled(iter->old_spte)) {
		iter->old_spte = tdp_mmu_clear_spte_bits_atomic(iter->sptep,
								shadow_accessed_mask);
		new_spte = iter->old_spte & ~shadow_accessed_mask;
	} else {
		new_spte = mark_spte_for_access_track(iter->old_spte);
		/*
		 * It is safe for the following cmpxchg to fail. Leave the
		 * Accessed bit set, as the spte is most likely young anyway.
		 */
		if (__tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
			return;
	}

	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
				       iter->old_spte, new_spte);
}

static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
					struct kvm_gfn_range *range,
					bool test_only)
{
	enum kvm_tdp_mmu_root_types types;
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter);

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.  Note,
	 * this helper must NOT be used to unmap GFNs, as it processes only
	 * valid roots!
	 */
	WARN_ON(types & ~KVM_VALID_ROOTS);

	guard(rcu)();
	for_each_tdp_mmu_root_rcu(kvm, root, range->slot->as_id, types) {
		tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
			if (!is_accessed_spte(iter.old_spte))
				continue;

			if (test_only)
				return true;

			ret = true;
			kvm_tdp_mmu_age_spte(kvm, &iter);
		}
	}

	return ret;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return __kvm_tdp_mmu_age_gfn_range(kvm, range, false);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return __kvm_tdp_mmu_age_gfn_range(kvm, range, true);
}

/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end).
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
					     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

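/*
 * Allocate a shadow page for huge page splitting.  Unlike tdp_mmu_alloc_sp(),
 * this may run outside of vCPU context, so it cannot use the per-vCPU memory
 * caches and instead allocates directly with GFP_KERNEL_ACCOUNT; callers are
 * expected to drop mmu_lock across the call and to handle allocation failure.
 */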
static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
{
	struct kvm_mmu_page *sp;

	sp = kmem_cache_zalloc(mmu_page_header_cache, GFP_KERNEL_ACCOUNT);
	if (!sp)
		return NULL;

	sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sp->spt) {
		kmem_cache_free(mmu_page_header_cache, sp);
		return NULL;
	}

	return sp;
}

/* Note, the caller is responsible for initializing @sp. */
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared)
{
	const u64 huge_spte = iter->old_spte;
	const int level = iter->level;
	int ret, i;

	/*
	 * No need for atomics when writing to sp->spt since the page table has
	 * not been linked in yet and thus is not reachable from any other CPU.
	 */
	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
		sp->spt[i] = make_small_spte(kvm, huge_spte, sp->role, i);

	/*
	 * Replace the huge spte with a pointer to the populated lower level
	 * page table. Since we are making this change without a TLB flush vCPUs
	 * will see a mix of the split mappings and the original huge mapping,
	 * depending on what's currently in their TLB. This is fine from a
	 * correctness standpoint since the translation will be the same either
	 * way.
	 */
	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
	if (ret)
		goto out;

	/*
	 * tdp_mmu_link_sp_atomic() will handle subtracting the huge page we
	 * are overwriting from the page stats. But we have to manually update
	 * the page stats with the new present child pages.
	 */
	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);

out:
	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
	return ret;
}

static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
					 struct kvm_mmu_page *root,
					 gfn_t start, gfn_t end,
					 int target_level, bool shared)
{
	struct kvm_mmu_page *sp = NULL;
	struct tdp_iter iter;

	rcu_read_lock();

	/*
	 * Traverse the page table splitting all huge pages above the target
	 * level into one lower level. For example, if we encounter a 1GB page
	 * we split it into 512 2MB pages.
	 *
	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
	 * to visit an SPTE before ever visiting its children, which means we
	 * will correctly recursively split huge pages that are more than one
	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
	 * and then splitting each of those to 512 4KB pages).
	 */
	for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
			continue;

		if (!sp) {
			rcu_read_unlock();

			if (shared)
				read_unlock(&kvm->mmu_lock);
			else
				write_unlock(&kvm->mmu_lock);

			sp = tdp_mmu_alloc_sp_for_split();

			if (shared)
				read_lock(&kvm->mmu_lock);
			else
				write_lock(&kvm->mmu_lock);

			if (!sp) {
				trace_kvm_mmu_split_huge_page(iter.gfn,
							      iter.old_spte,
							      iter.level, -ENOMEM);
				return -ENOMEM;
			}

			rcu_read_lock();

			iter.yielded = true;
			continue;
		}

		tdp_mmu_init_child_sp(sp, &iter);

		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
			goto retry;

		sp = NULL;
	}

	rcu_read_unlock();

	/*
	 * It's possible to exit the loop having never used the last sp if, for
	 * example, a vCPU doing HugePage NX splitting wins the race and
	 * installs its own sp in place of the last sp we tried to split.
	 */
	if (sp)
		tdp_mmu_free_sp(sp);

	return 0;
}

/*
 * Try to split all huge pages mapped by the TDP MMU down to the target level.
 */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared)
{
	struct kvm_mmu_page *root;
	int r = 0;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
		if (r) {
			kvm_tdp_mmu_put_root(kvm, root);
			break;
		}
	}
}

static bool tdp_mmu_need_write_protect(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/*
	 * All TDP MMU shadow pages share the same role as their root, aside
	 * from level, so it is valid to key off any shadow page to determine if
	 * write protection is needed for an entire tree.
	 */
	return kvm_mmu_page_ad_need_write_protect(kvm, sp) || !kvm_ad_enabled;
}

static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	const u64 dbit = tdp_mmu_need_write_protect(kvm, root) ?
			 PT_WRITABLE_MASK : shadow_dirty_mask;
	struct tdp_iter iter;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, kvm, root, start, end) {
retry:
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (!(iter.old_spte & dbit))
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
 * memslot.
 */
void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				      slot->base_gfn + slot->npages);
}

static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	const u64 dbit = (wrprot || tdp_mmu_need_write_protect(kvm, root)) ?
			 PT_WRITABLE_MASK : shadow_dirty_mask;
	struct tdp_iter iter;

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (!(iter.old_spte & dbit))
			continue;

		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
							iter.old_spte, dbit,
							iter.level);

		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
					       iter.old_spte,
					       iter.old_spte & ~dbit);
	}

	rcu_read_unlock();
}

/*
 * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
 * which a bit is set in mask, starting at gfn. The given memslot is expected to
 * contain all the GFNs represented by set bits in the mask.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

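/*
 * Build a huge SPTE to replace the non-leaf SPTE at @parent by propagating
 * the mapping of the first leaf SPTE found in the child page table.  Returns
 * -EAGAIN if the walk should yield first, or -ENOENT if the child page table
 * contains no leaf SPTEs.
 */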
static int tdp_mmu_make_huge_spte(struct kvm *kvm,
				  struct tdp_iter *parent,
				  u64 *huge_spte)
{
	struct kvm_mmu_page *root = spte_to_child_sp(parent->old_spte);
	gfn_t start = parent->gfn;
	gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
	struct tdp_iter iter;

	tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
		/*
		 * Use the parent iterator when checking for forward progress so
		 * that KVM doesn't get stuck continuously trying to yield (i.e.
		 * returning -EAGAIN here and then failing the forward progress
		 * check in the caller ad nauseam).
		 */
		if (tdp_mmu_iter_need_resched(kvm, parent))
			return -EAGAIN;

		*huge_spte = make_huge_spte(kvm, iter.old_spte, parent->level);
		return 0;
	}

	return -ENOENT;
}

static void recover_huge_pages_range(struct kvm *kvm,
				     struct kvm_mmu_page *root,
				     const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	int max_mapping_level;
	bool flush = false;
	u64 huge_spte;
	int r;

	if (WARN_ON_ONCE(kvm_slot_dirty_track_enabled(slot)))
		return;

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}

		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
		    !is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with
		 * a large page size, then its parent would have been zapped
		 * instead of stepping down.
		 */
		if (is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * If iter.gfn resides outside of the slot, i.e. the page for
		 * the current level overlaps but is not contained by the slot,
		 * then the SPTE can't be made huge.
		 * More importantly, trying to query that info from
		 * slot->arch.lpage_info will cause an out-of-bounds access.
		 */
		if (iter.gfn < start || iter.gfn >= end)
			continue;

		max_mapping_level = kvm_mmu_max_mapping_level(kvm, NULL, slot, iter.gfn);
		if (max_mapping_level < iter.level)
			continue;

		r = tdp_mmu_make_huge_spte(kvm, &iter, &huge_spte);
		if (r == -EAGAIN)
			goto retry;
		else if (r)
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, huge_spte))
			goto retry;

		flush = true;
	}

	if (flush)
		kvm_flush_remote_tlbs_memslot(kvm, slot);

	rcu_read_unlock();
}

/*
 * Recover huge page mappings within the slot by replacing non-leaf SPTEs with
 * huge SPTEs where possible.
 */
void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm,
				    const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		recover_huge_pages_range(kvm, root, slot);
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
	struct tdp_iter iter;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->root_role.level;

	for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present.
If no1959* walk could be performed, returns NULL and *spte does not contain valid data.1960*1961* Contract:1962* - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.1963* - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.1964*1965* WARNING: This function is only intended to be called during fast_page_fault.1966*/1967u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,1968u64 *spte)1969{1970/* Fast pf is not supported for mirrored roots */1971struct kvm_mmu_page *root = tdp_mmu_get_root(vcpu, KVM_DIRECT_ROOTS);1972struct tdp_iter iter;1973tdp_ptep_t sptep = NULL;19741975for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {1976*spte = iter.old_spte;1977sptep = iter.sptep;1978}19791980/*1981* Perform the rcu_dereference to get the raw spte pointer value since1982* we are passing it up to fast_page_fault, which is shared with the1983* legacy MMU and thus does not retain the TDP MMU-specific __rcu1984* annotation.1985*1986* This is safe since fast_page_fault obeys the contracts of this1987* function as well as all TDP MMU contracts around modifying SPTEs1988* outside of mmu_lock.1989*/1990return rcu_dereference(sptep);1991}199219931994