// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
}

/* Arbitrarily returns true so that this may be used in if statements. */
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
							     bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	return true;
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	/*
	 * Invalidate all roots, which besides the obvious, schedules all roots
	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
	 * ultimately frees all roots.
	 */
	kvm_tdp_mmu_invalidate_roots(kvm, KVM_VALID_ROOTS);
	kvm_tdp_mmu_zap_invalidated_roots(kvm, false);

#ifdef CONFIG_KVM_PROVE_MMU
	KVM_MMU_WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
#endif
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.  Putting the last reference to
	 * zapped roots will create new callbacks.
	 */
	rcu_barrier();
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->external_spt);
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * By only accessing TDP MMU page table memory in an RCU read critical
 * section, and freeing it after a grace period, lockless access to that
 * memory won't use it after it is freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	/*
	 * The TDP MMU itself holds a reference to each root until the root is
	 * explicitly invalidated, i.e. the final reference should never be
	 * put for a valid root.
	 */
	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
			       enum kvm_tdp_mmu_root_types types)
{
	if (WARN_ON_ONCE(!(types & KVM_VALID_ROOTS)))
		return false;

	if (root->role.invalid && !(types & KVM_INVALID_ROOTS))
		return false;

	if (likely(!is_mirror_sp(root)))
		return types & KVM_DIRECT_ROOTS;
	return types & KVM_MIRROR_ROOTS;
}
/*
 * Returns the next root after @prev_root (or the first root if @prev_root is
 * NULL) that matches with @types.  A reference to the returned root is
 * acquired, and the reference to @prev_root is released (the caller obviously
 * must hold a reference to @prev_root if it's non-NULL).
 *
 * Roots that don't match with @types are skipped.
 *
 * Returns NULL if the end of tdp_mmu_roots was reached.
 */
static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
					      struct kvm_mmu_page *prev_root,
					      enum kvm_tdp_mmu_root_types types)
{
	struct kvm_mmu_page *next_root;

	/*
	 * While the roots themselves are RCU-protected, fields such as
	 * role.invalid are protected by mmu_lock.
	 */
	lockdep_assert_held(&kvm->mmu_lock);

	rcu_read_lock();

	if (prev_root)
		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						  &prev_root->link,
						  typeof(*prev_root), link);
	else
		next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
						   typeof(*next_root), link);

	while (next_root) {
		if (tdp_mmu_root_match(next_root, types) &&
		    kvm_tdp_mmu_get_root(next_root))
			break;

		next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
				&next_root->link, typeof(*next_root), link);
	}

	rcu_read_unlock();

	if (prev_root)
		kvm_tdp_mmu_put_root(kvm, prev_root);

	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 *
 * If shared is set, this function is operating under the MMU lock in read
 * mode.
 */
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types)	\
	for (_root = tdp_mmu_next_root(_kvm, NULL, _types);		\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
	     _root = tdp_mmu_next_root(_kvm, _root, _types))		\
		if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {	\
		} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)	\
	__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_VALID_ROOTS)

#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ALL_ROOTS);	\
	     ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;	\
	     _root = tdp_mmu_next_root(_kvm, _root, KVM_ALL_ROOTS))

/*
 * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
 * the implication being that any flow that holds mmu_lock for read is
 * inherently yield-friendly and should use the yield-safe variant above.
 * Holding mmu_lock for write obviates the need for RCU protection as the list
 * is guaranteed to be stable.
 */
#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types)			\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)		\
		if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&		\
		    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
		     !tdp_mmu_root_match((_root), (_types)))) {			\
		} else
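/*
 * Illustrative sketch (not upstream code): a typical walker built on the
 * yield-safe iterator above.  The hypothetical per-root helper
 * frob_root_range() stands in for real work such as zapping or write-
 * protecting a GFN range; everything else follows the contract documented
 * above, i.e. take mmu_lock (read mode here), let the iterator get/put root
 * references, and explicitly put the root if the loop is exited early.
 *
 *	static bool frob_all_roots(struct kvm *kvm, gfn_t start, gfn_t end)
 *	{
 *		struct kvm_mmu_page *root;
 *		bool ret = false;
 *
 *		read_lock(&kvm->mmu_lock);
 *		for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1) {
 *			ret |= frob_root_range(kvm, root, start, end);
 *			if (fatal_signal_pending(current)) {
 *				kvm_tdp_mmu_put_root(kvm, root);
 *				break;
 *			}
 *		}
 *		read_unlock(&kvm->mmu_lock);
 *		return ret;
 *	}
 */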
/*
 * Iterate over all TDP MMU roots in an RCU read-side critical section.
 * It is safe to iterate over the SPTEs under the root, but their values will
 * be unstable, so all writes must be atomic.  As this routine is meant to be
 * used without holding the mmu_lock at all, any bits that are flipped must
 * be reflected in kvm_tdp_mmu_spte_need_atomic_write().
 */
#define for_each_tdp_mmu_root_rcu(_kvm, _root, _as_id, _types)			\
	list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link)		\
		if ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||	\
		    !tdp_mmu_root_match((_root), (_types))) {			\
		} else

#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)		\
	__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);

	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);
}

static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
				  struct tdp_iter *iter)
{
	struct kvm_mmu_page *parent_sp;
	union kvm_mmu_page_role role;

	parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));

	role = parent_sp->role;
	role.level--;

	tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

void kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu, bool mirror)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	union kvm_mmu_page_role role = mmu->root_role;
	int as_id = kvm_mmu_role_as_id(role);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	if (mirror)
		role.is_mirror = true;

	/*
	 * Check for an existing root before acquiring the pages lock to avoid
	 * unnecessary serialization if multiple vCPUs are loading a new root.
	 * E.g. when bringing up secondary vCPUs, KVM will already have created
	 * a valid root on behalf of the primary vCPU.
	 */
	read_lock(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
		if (root->role.word == role.word)
			goto out_read_unlock;
	}

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);

	/*
	 * Recheck for an existing root after acquiring the pages lock, another
	 * vCPU may have raced ahead and created a new usable root.  Manually
	 * walk the list of roots as the standard macros assume that the pages
	 * lock is *not* held.  WARN if grabbing a reference to a usable root
	 * fails, as the last reference to a root can only be put *after* the
	 * root has been invalidated, which requires holding mmu_lock for write.
	 */
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (root->role.word == role.word &&
		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
			goto out_spin_unlock;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	/*
	 * TDP MMU roots are kept until they are explicitly invalidated, either
	 * by a memslot update or by the destruction of the VM.  Initialize the
	 * refcount to two; one reference for the vCPU, and one reference for
	 * the TDP MMU itself, which is held until the root is invalidated and
	 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
	 */
	refcount_set(&root->tdp_mmu_root_count, 2);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);

out_spin_unlock:
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
out_read_unlock:
	read_unlock(&kvm->mmu_lock);
	/*
	 * Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
	 * and actually consuming the root if it's invalidated after dropping
	 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
	 */
	if (mirror) {
		mmu->mirror_root_hpa = __pa(root->spt);
	} else {
		mmu->root.hpa = __pa(root->spt);
		mmu->root.pgd = 0;
	}
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, +1);
#ifdef CONFIG_KVM_PROVE_MMU
	atomic64_inc(&kvm->arch.tdp_mmu_pages);
#endif
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	kvm_account_pgtable_pages((void *)sp->spt, -1);
#ifdef CONFIG_KVM_PROVE_MMU
	atomic64_dec(&kvm->arch.tdp_mmu_pages);
#endif
}

/**
 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 */
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	tdp_unaccount_mmu_page(kvm, sp);

	if (!sp->nx_huge_page_disallowed)
		return;

	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	sp->nx_huge_page_disallowed = false;
	untrack_possible_nx_huge_page(kvm, sp);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

static void remove_external_spte(struct kvm *kvm, gfn_t gfn, u64 old_spte,
				 int level)
{
	kvm_pfn_t old_pfn = spte_to_pfn(old_spte);
	int ret;

	/*
	 * External (TDX) SPTEs are limited to PG_LEVEL_4K, and external
	 * PTs are removed in a special order, involving free_external_spt().
	 * But remove_external_spte() will be called on non-leaf PTEs via
	 * __tdp_mmu_zap_root(), so avoid the error the former would return
	 * in this case.
	 */
	if (!is_last_spte(old_spte, level))
		return;

	/* Zapping leaf spte is allowed only when write lock is held. */
	lockdep_assert_held_write(&kvm->mmu_lock);
	/* Because write lock is held, the operation should succeed. */
	ret = kvm_x86_call(remove_external_spte)(kvm, gfn, level, old_pfn);
	KVM_BUG_ON(ret, kvm);
}

/**
 * handle_removed_pt() - handle a page table removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 *
 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
 * protection. Since this thread removed it from the paging structure,
 * this thread will be responsible for ensuring the page is freed.  Hence the
 * early rcu_dereferences in the function.
 */
static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_sp(kvm, sp);

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		tdp_ptep_t sptep = pt + i;
		gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
		u64 old_spte;

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as frozen then another thread
			 * handling a page fault could overwrite it, so
			 * set the SPTE until it is set from some other
			 * value to the frozen SPTE value.
			 */
			for (;;) {
				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
				if (!is_frozen_spte(old_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_spte = kvm_tdp_mmu_read_spte(sptep);
			if (!is_shadow_present_pte(old_spte))
				continue;

			/*
			 * Use the common helper instead of a raw WRITE_ONCE as
			 * the SPTE needs to be updated atomically if it can be
			 * modified by a different vCPU outside of mmu_lock.
			 * Even though the parent SPTE is !PRESENT, the TLB
			 * hasn't yet been flushed, and both Intel and AMD
			 * document that A/D assists can use upper-level PxE
			 * entries that are cached in the TLB, i.e. the CPU can
			 * still access the page and mark it dirty.
			 *
			 * No retry is needed in the atomic update path as the
			 * sole concern is dropping a Dirty bit, i.e. no other
			 * task can zap/remove the SPTE as mmu_lock is held for
			 * write.  Marking the SPTE as a frozen SPTE is not
			 * strictly necessary for the same reason, but using
			 * the frozen SPTE value keeps the shared/exclusive
			 * paths consistent and allows the handle_changed_spte()
			 * call below to hardcode the new value to FROZEN_SPTE.
			 *
			 * Note, even though dropping a Dirty bit is the only
			 * scenario where a non-atomic update could result in a
			 * functional bug, simply checking the Dirty bit isn't
			 * sufficient as a fast page fault could read the upper
			 * level SPTE before it is zapped, and then make this
			 * target SPTE writable, resume the guest, and set the
			 * Dirty bit between reading the SPTE above and writing
			 * it here.
			 */
			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
							  FROZEN_SPTE, level);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_spte, FROZEN_SPTE, level, shared);

		if (is_mirror_sp(sp)) {
			KVM_BUG_ON(shared, kvm);
			remove_external_spte(kvm, gfn, old_spte, level);
		}
	}

	if (is_mirror_sp(sp) &&
	    WARN_ON(kvm_x86_call(free_external_spt)(kvm, base_gfn, sp->role.level,
						    sp->external_spt))) {
		/*
		 * Failed to free the page table page in the mirror page table,
		 * and there is nothing to do further.
		 * Intentionally leak the page to prevent the kernel from
		 * accessing the encrypted page.
		 */
		sp->external_spt = NULL;
	}

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

static void *get_external_spt(gfn_t gfn, u64 new_spte, int level)
{
	if (is_shadow_present_pte(new_spte) && !is_last_spte(new_spte, level)) {
		struct kvm_mmu_page *sp = spte_to_child_sp(new_spte);

		WARN_ON_ONCE(sp->role.level + 1 != level);
		WARN_ON_ONCE(sp->gfn != gfn);
		return sp->external_spt;
	}

	return NULL;
}

static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sptep,
						  gfn_t gfn, u64 old_spte,
						  u64 new_spte, int level)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	kvm_pfn_t new_pfn = spte_to_pfn(new_spte);
	int ret = 0;

	KVM_BUG_ON(was_present, kvm);

	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * We need to lock out other updates to the SPTE until the external
	 * page table has been modified.  Use FROZEN_SPTE similar to
	 * the zapping case.
	 */
	if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
		return -EBUSY;

	/*
	 * Use different calls to either set up a middle level
	 * external page table, or a leaf.
	 */
	if (is_leaf) {
		ret = kvm_x86_call(set_external_spte)(kvm, gfn, level, new_pfn);
	} else {
		void *external_spt = get_external_spt(gfn, new_spte, level);

		KVM_BUG_ON(!external_spt, kvm);
		ret = kvm_x86_call(link_external_spt)(kvm, gfn, level, external_spt);
	}
	if (ret)
		__kvm_tdp_mmu_write_spte(sptep, old_spte);
	else
		__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return ret;
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.  Note,
 * dirty logging updates are handled in common code, not here (see make_spte()
 * and fast_pf_fix_direct_spte()).
 */
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON_ONCE(level < PG_LEVEL_4K);
	WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	if (is_leaf)
		check_spte_writable_invariants(new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or frozen SPTE,
		 * it is unexpected.  Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
				 !is_mmio_spte(kvm, new_spte) &&
				 !is_frozen_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary frozen SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (is_leaf != was_leaf)
		kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.  Note the WARN on the PFN changing without the
	 * SPTE being converted to a hugepage (leaf) or being zapped.  Shadow
	 * pages are kernel allocations and should never be migrated.
	 */
	if (was_present && !was_leaf &&
	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
}

static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
							 struct tdp_iter *iter,
							 u64 new_spte)
{
	/*
	 * The caller is responsible for ensuring the old SPTE is not a FROZEN
	 * SPTE.  KVM should never attempt to zap or manipulate a FROZEN SPTE,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));

	if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
		int ret;

		/*
		 * Users of atomic zapping don't operate on mirror roots,
		 * so don't handle it and bug the VM if it's seen.
		 */
		if (KVM_BUG_ON(!is_shadow_present_pte(new_spte), kvm))
			return -EBUSY;

		ret = set_external_spte_present(kvm, iter->sptep, iter->gfn,
						iter->old_spte, new_spte, iter->level);
		if (ret)
			return ret;
	} else {
		u64 *sptep = rcu_dereference(iter->sptep);

		/*
		 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs
		 * and does not hold the mmu_lock.  On failure, i.e. if a
		 * different logical CPU modified the SPTE, try_cmpxchg64()
		 * updates iter->old_spte with the current value, so the caller
		 * operates on fresh data, e.g. if it retries
		 * tdp_mmu_set_spte_atomic().
		 */
		if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
			return -EBUSY;
	}

	return 0;
}
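/*
 * Illustrative sketch (an assumption about typical callers, not upstream
 * code): the canonical retry pattern around the atomic update helper defined
 * below.  Walkers that run with mmu_lock held for read retry the current SPTE
 * when -EBUSY is returned, relying on iter->old_spte having been refreshed by
 * the failed cmpxchg.  The new value computed here (clearing
 * PT_WRITABLE_MASK) is just an example.
 *
 *	for_each_tdp_pte(iter, kvm, root, start, end) {
 *	retry:
 *		if (!is_shadow_present_pte(iter.old_spte))
 *			continue;
 *
 *		if (tdp_mmu_set_spte_atomic(kvm, &iter,
 *					    iter.old_spte & ~PT_WRITABLE_MASK))
 *			goto retry;
 *	}
 */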
/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
 * and handle the associated bookkeeping.  Do not mark the page dirty
 * in KVM's dirty bitmaps.
 *
 * If setting the SPTE fails because it has changed, iter->old_spte will be
 * refreshed to the current value of the spte.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Return:
 * * 0 - If the SPTE was set.
 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
 *	      no side-effects other than setting iter->old_spte to the last
 *	      known value of the spte.
 */
static inline int __must_check tdp_mmu_set_spte_atomic(struct kvm *kvm,
						       struct tdp_iter *iter,
						       u64 new_spte)
{
	int ret;

	lockdep_assert_held_read(&kvm->mmu_lock);

	ret = __tdp_mmu_set_spte_atomic(kvm, iter, new_spte);
	if (ret)
		return ret;

	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
			    new_spte, iter->level, true);

	return 0;
}

/*
 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm:	      KVM instance
 * @as_id:	      Address space ID, i.e. regular vs. SMM
 * @sptep:	      Pointer to the SPTE
 * @old_spte:	      The current value of the SPTE
 * @new_spte:	      The new value that will be set for the SPTE
 * @gfn:	      The base GFN that was (or will be) mapped by the SPTE
 * @level:	      The level _containing_ the SPTE (its parent PT's level)
 *
 * Returns the old SPTE value, which _may_ be different than @old_spte if the
 * SPTE had volatile bits.
 */
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
			    u64 old_spte, u64 new_spte, gfn_t gfn, int level)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to or from the
	 * temporary frozen SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the frozen SPTE should not be necessary.
	 */
	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));

	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);

	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);

	/*
	 * Users that do non-atomic setting of PTEs don't operate on mirror
	 * roots, so don't handle it and bug the VM if it's seen.
	 */
	if (is_mirror_sptep(sptep)) {
		KVM_BUG_ON(is_shadow_present_pte(new_spte), kvm);
		remove_external_spte(kvm, gfn, old_spte, level);
	}

	return old_spte;
}

static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
					 u64 new_spte)
{
	WARN_ON_ONCE(iter->yielded);
	iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
					  iter->old_spte, new_spte,
					  iter->gfn, iter->level);
}

#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)	\
	for_each_tdp_pte(_iter, _kvm, _root, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
							  struct tdp_iter *iter)
{
	if (!need_resched() && !rwlock_needbreak(&kvm->mmu_lock))
		return false;

	/* Ensure forward progress has been made before yielding. */
	return iter->next_last_level_gfn != iter->yielded_gfn;
}

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, iter->yielded is set and the caller must skip to
 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
 * over the paging structures to allow the iterator to continue its traversal
 * from the paging structure root.
 *
 * Returns true if this function yielded.
 */
static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
							  struct tdp_iter *iter,
							  bool flush, bool shared)
{
	KVM_MMU_WARN_ON(iter->yielded);

	if (!tdp_mmu_iter_need_resched(kvm, iter))
		return false;

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	rcu_read_unlock();

	if (shared)
		cond_resched_rwlock_read(&kvm->mmu_lock);
	else
		cond_resched_rwlock_write(&kvm->mmu_lock);

	rcu_read_lock();

	WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);

	iter->yielded = true;
	return true;
}

static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
	/*
	 * Bound TDP MMU walks at host.MAXPHYADDR.  KVM disallows memslots with
	 * a gpa range that would exceed the max gfn, and KVM does not create
	 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
	 * the slow emulation path every time.
	 */
	return kvm_mmu_max_gfn() + 1;
}

static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			       bool shared, int zap_level)
{
	struct tdp_iter iter;

	for_each_tdp_pte_min_level_all(iter, root, zap_level) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		if (iter.level > zap_level)
			continue;

		if (!shared)
			tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
		else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
			goto retry;
	}
}

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{

	/*
	 * The root must have an elevated refcount so that it's reachable via
	 * mmu_notifier callbacks, which allows this path to yield and drop
	 * mmu_lock.  When handling an unmap/release mmu_notifier command, KVM
	 * must drop all references to relevant pages prior to completing the
	 * callback.  Dropping mmu_lock with an unreachable root would result
	 * in zapping SPTEs after a relevant mmu_notifier callback completes
	 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
	 * dirty accessed bits to the SPTE's associated struct page.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * Zap roots in multiple passes of decreasing granularity, i.e. zap at
	 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
	 * preempt models) or mmu_lock contention (full or real-time models).
	 * Zapping at finer granularity marginally increases the total time of
	 * the zap, but in most cases the zap itself isn't latency sensitive.
	 *
	 * If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
	 * in order to mimic the page fault path, which can replace a 1GiB page
	 * table with an equivalent 1GiB hugepage, i.e. can get saddled with
	 * zapping a 1GiB region that's fully populated with 4KiB SPTEs.  This
	 * allows verifying that KVM can safely zap 1GiB regions, e.g. without
	 * inducing RCU stalls, without relying on a relatively rare event
	 * (zapping roots is orders of magnitude more common).  Note, because
	 * zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
	 * in the iterator itself is unnecessary.
	 */
	if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
		__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
	}
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}

bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * This helper intentionally doesn't allow zapping a root shadow page,
	 * which doesn't have a parent page table and thus no associated entry.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
		return false;

	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
			 SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);

	return true;
}

/*
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t start, gfn_t end, bool can_yield, bool flush)
{
	struct tdp_iter iter;

	end = min(end, tdp_mmu_max_gfn_exclusive());

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
			flush = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);

		/*
		 * Zapping SPTEs in invalid roots doesn't require a TLB flush,
		 * see kvm_tdp_mmu_zap_invalidated_roots() for details.
		 */
		if (!root->role.invalid)
			flush = true;
	}

	rcu_read_unlock();

	/*
	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
	 */
	return flush;
}

/*
 * Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID* roots.
 * Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
 * one or more SPTEs were zapped since the MMU lock was last acquired.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
		flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	/*
	 * Zap all direct roots, including invalid direct roots, as all direct
	 * SPTEs must be dropped before returning to the caller. For TDX, mirror
	 * roots don't need handling in response to the mmu notifier (the caller).
	 *
	 * Zap directly even if the root is also being zapped by a concurrent
	 * "fast zap".  Walking zapped top-level SPTEs isn't all that expensive
	 * and mmu_lock is already held, which means the other thread has yielded.
	 *
	 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
	 * is being destroyed or the userspace VMM has exited.  In both cases,
	 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	__for_each_tdp_mmu_root_yield_safe(kvm, root, -1,
					   KVM_DIRECT_ROOTS | KVM_INVALID_ROOTS)
		tdp_mmu_zap_root(kvm, root, false);
}

/*
 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
 * zap" completes.
 */
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)
{
	struct kvm_mmu_page *root;

	if (shared)
		read_lock(&kvm->mmu_lock);
	else
		write_lock(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		if (!root->tdp_mmu_scheduled_root_to_zap)
			continue;

		root->tdp_mmu_scheduled_root_to_zap = false;
		KVM_BUG_ON(!root->role.invalid, kvm);

		/*
		 * A TLB flush is not necessary as KVM performs a local TLB
		 * flush when allocating a new root (see kvm_mmu_load()), and
		 * when migrating a vCPU to a different pCPU.  Note, the local
		 * TLB flush on reuse also invalidates paging-structure-cache
		 * entries, i.e. TLB entries for intermediate paging structures,
		 * that may be zapped, as such entries are associated with the
		 * ASID on both VMX and SVM.
		 */
		tdp_mmu_zap_root(kvm, root, shared);

		/*
		 * The reference needs to be put *after* zapping the root, as
		 * the root must be reachable by mmu_notifiers while it's being
		 * zapped.
		 */
		kvm_tdp_mmu_put_root(kvm, root);
	}

	if (shared)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);
}

/*
 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
 * is about to be zapped, e.g. in response to a memslots update.  The actual
 * zapping is done separately so that it happens with mmu_lock held for read,
 * whereas invalidating roots must be done with mmu_lock held for write (unless
 * the VM is being destroyed).
 *
 * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
 * See kvm_tdp_mmu_alloc_root().
 */
void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
				  enum kvm_tdp_mmu_root_types root_types)
{
	struct kvm_mmu_page *root;

	/*
	 * Invalidating invalid roots doesn't make sense, prevent developers from
	 * having to think about it.
	 */
	if (WARN_ON_ONCE(root_types & KVM_INVALID_ROOTS))
		root_types &= ~KVM_INVALID_ROOTS;

	/*
	 * mmu_lock must be held for write to ensure that a root doesn't become
	 * invalid while there are active readers (invalidating a root while
	 * there are active readers may or may not be problematic in practice,
	 * but it's uncharted territory and not supported).
	 *
	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
	 * being destroyed after all references have been put, or if no vCPUs
	 * have been created (which means there are no roots), i.e. the VM is
	 * being destroyed in an error path of KVM_CREATE_VM.
	 */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
		lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * As above, mmu_lock isn't held when destroying the VM!  There can't
	 * be other references to @kvm, i.e. nothing else can invalidate roots
	 * or get/put references to roots.
	 */
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		if (!tdp_mmu_root_match(root, root_types))
			continue;

		/*
		 * Note, invalid roots can outlive a memslot update!  Invalid
		 * roots must be *zapped* before the memslot update completes,
		 * but a different task can acquire a reference and keep the
		 * root alive after it's been zapped.
		 */
		if (!root->role.invalid) {
			root->tdp_mmu_scheduled_root_to_zap = true;
			root->role.invalid = true;
		}
	}
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
					   struct kvm_page_fault *fault,
					   struct tdp_iter *iter)
{
	struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
	u64 new_spte;
	int ret = RET_PF_FIXED;
	bool wrprot = false;

	if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
		return RET_PF_RETRY;

	if (is_shadow_present_pte(iter->old_spte) &&
	    (fault->prefetch || is_access_allowed(fault, iter->old_spte)) &&
	    is_last_spte(iter->old_spte, iter->level)) {
		WARN_ON_ONCE(fault->pfn != spte_to_pfn(iter->old_spte));
		return RET_PF_SPURIOUS;
	}

	if (unlikely(!fault->slot))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
				   fault->pfn, iter->old_spte, fault->prefetch,
				   false, fault->map_writable, &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;
	else if (is_shadow_present_pte(iter->old_spte) &&
		 (!is_last_spte(iter->old_spte, iter->level) ||
		  WARN_ON_ONCE(leaf_spte_change_needs_tlb_flush(iter->old_spte, new_spte))))
		kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (wrprot && fault->write)
		ret = RET_PF_WRITE_PROTECTED;

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(vcpu->kvm, new_spte))) {
		vcpu->stat.pf_mmio_spte_created++;
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else {
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));
	}

	return ret;
}

/*
 * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
 * provided page table.
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @sp: The new TDP page table to install.
 * @shared: This operation is running under the MMU lock in read mode.
 *
 * Returns: 0 if the new page table was installed. Non-0 if the page table
 *	    could not be installed (e.g. the atomic compare-exchange failed).
 */
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
			   struct kvm_mmu_page *sp, bool shared)
{
	u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled);
	int ret = 0;

	if (shared) {
		ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
		if (ret)
			return ret;
	} else {
		tdp_mmu_iter_set_spte(kvm, iter, spte);
	}

	tdp_account_mmu_page(kvm, sp);

	return 0;
}

static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared);

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu_page *root = tdp_mmu_get_root_for_fault(vcpu, fault);
	struct kvm *kvm = vcpu->kvm;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_RETRY;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	rcu_read_lock();

	for_each_tdp_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
		int r;

		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);

		/*
		 * If SPTE has been frozen by another thread, just give up and
		 * retry, avoiding unnecessary page table allocation and free.
		 */
		if (is_frozen_spte(iter.old_spte))
			goto retry;

		if (iter.level == fault->goal_level)
			goto map_target_level;

		/* Step down into the lower level page table if it exists. */
		if (is_shadow_present_pte(iter.old_spte) &&
		    !is_large_pte(iter.old_spte))
			continue;

		/*
		 * The SPTE is either non-present or points to a huge page that
		 * needs to be split.
		 */
		sp = tdp_mmu_alloc_sp(vcpu);
		tdp_mmu_init_child_sp(sp, &iter);
		if (is_mirror_sp(sp))
			kvm_mmu_alloc_external_spt(vcpu, sp);

		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;

		if (is_shadow_present_pte(iter.old_spte)) {
			/* Don't support large page for mirrored roots (TDX) */
			KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
		} else {
			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
		}

		/*
		 * Force the guest to retry if installing an upper level SPTE
		 * failed, e.g. because a different task modified the SPTE.
		 */
		if (r) {
			tdp_mmu_free_sp(sp);
			goto retry;
		}

		if (fault->huge_page_disallowed &&
		    fault->req_level >= iter.level) {
			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
			if (sp->nx_huge_page_disallowed)
				track_possible_nx_huge_page(kvm, sp);
			spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
		}
	}

	/*
	 * The walk aborted before reaching the target level, e.g. because the
	 * iterator detected an upper level SPTE was frozen during traversal.
	 */
	WARN_ON_ONCE(iter.level == fault->goal_level);
	goto retry;

map_target_level:
	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

retry:
	rcu_read_unlock();
	return ret;
}

/* Used by mmu notifier via kvm_unmap_gfn_range() */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush)
{
	enum kvm_tdp_mmu_root_types types;
	struct kvm_mmu_page *root;

	types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter) | KVM_INVALID_ROOTS;

	__for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, types)
		flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
					  range->may_block, flush);

	return flush;
}

/*
 * Mark the SPTEs in the range of GFNs [start, end) unaccessed and return
 * non-zero if any of the GFNs in the range have been accessed.
 *
 * No need to mark the corresponding PFN as accessed as this call is coming
 * from the clear_young() or clear_flush_young() notifier, which uses the
 * return value to determine if the page has been accessed.
 */
static void kvm_tdp_mmu_age_spte(struct kvm *kvm, struct tdp_iter *iter)
{
	u64 new_spte;

	if (spte_ad_enabled(iter->old_spte)) {
		iter->old_spte = tdp_mmu_clear_spte_bits_atomic(iter->sptep,
								shadow_accessed_mask);
		new_spte = iter->old_spte & ~shadow_accessed_mask;
	} else {
		new_spte = mark_spte_for_access_track(iter->old_spte);
		/*
		 * It is safe for the following cmpxchg to fail. Leave the
		 * Accessed bit set, as the spte is most likely young anyway.
		 */
		if (__tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
			return;
	}

	trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
				       iter->old_spte, new_spte);
}

static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
					struct kvm_gfn_range *range,
					bool test_only)
{
	enum kvm_tdp_mmu_root_types types;
	struct kvm_mmu_page *root;
	struct tdp_iter iter;
	bool ret = false;

	types = kvm_gfn_range_filter_to_root_types(kvm, range->attr_filter);

	/*
	 * Don't support rescheduling, none of the MMU notifiers that funnel
	 * into this helper allow blocking; it'd be dead, wasteful code.  Note,
	 * this helper must NOT be used to unmap GFNs, as it processes only
	 * valid roots!
	 */
	WARN_ON(types & ~KVM_VALID_ROOTS);

	guard(rcu)();
	for_each_tdp_mmu_root_rcu(kvm, root, range->slot->as_id, types) {
		tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
			if (!is_accessed_spte(iter.old_spte))
				continue;

			if (test_only)
				return true;

			ret = true;
			kvm_tdp_mmu_age_spte(kvm, &iter);
		}
	}

	return ret;
}

bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return __kvm_tdp_mmu_age_gfn_range(kvm, range, false);
}

bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return __kvm_tdp_mmu_age_gfn_range(kvm, range, true);
}
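/*
 * Illustrative sketch (an assumption about a typical caller, not upstream
 * code): how a dirty-logging enable path could drive the write-protection
 * helpers defined below.  kvm_tdp_mmu_wrprot_slot() runs with mmu_lock held
 * for read and only reports whether any SPTE changed; the caller owns the
 * TLB flush.
 *
 *	read_lock(&kvm->mmu_lock);
 *	if (kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K))
 *		kvm_flush_remote_tlbs_memslot(kvm, slot);
 *	read_unlock(&kvm->mmu_lock);
 */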
/*
 * Remove write access from all SPTEs at or above min_level that map GFNs
 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
			goto retry;

		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);

	return spte_set;
}

static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
{
	struct kvm_mmu_page *sp;

	sp = kmem_cache_zalloc(mmu_page_header_cache, GFP_KERNEL_ACCOUNT);
	if (!sp)
		return NULL;

	sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sp->spt) {
		kmem_cache_free(mmu_page_header_cache, sp);
		return NULL;
	}

	return sp;
}

/* Note, the caller is responsible for initializing @sp. */
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
				   struct kvm_mmu_page *sp, bool shared)
{
	const u64 huge_spte = iter->old_spte;
	const int level = iter->level;
	int ret, i;

	/*
	 * No need for atomics when writing to sp->spt since the page table has
	 * not been linked in yet and thus is not reachable from any other CPU.
	 */
	for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
		sp->spt[i] = make_small_spte(kvm, huge_spte, sp->role, i);

	/*
	 * Replace the huge spte with a pointer to the populated lower level
	 * page table. Since we are making this change without a TLB flush vCPUs
	 * will see a mix of the split mappings and the original huge mapping,
	 * depending on what's currently in their TLB. This is fine from a
	 * correctness standpoint since the translation will be the same either
	 * way.
	 */
	ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
	if (ret)
		goto out;

	/*
	 * tdp_mmu_link_sp() will handle subtracting the huge page we are
	 * overwriting from the page stats.  But we have to manually update
	 * the page stats with the new present child pages.
	 */
	kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);

out:
	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
	return ret;
}

static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
					 struct kvm_mmu_page *root,
					 gfn_t start, gfn_t end,
					 int target_level, bool shared)
{
	struct kvm_mmu_page *sp = NULL;
	struct tdp_iter iter;

	rcu_read_lock();

	/*
	 * Traverse the page table splitting all huge pages above the target
	 * level into one lower level. For example, if we encounter a 1GB page
	 * we split it into 512 2MB pages.
	 *
	 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
	 * to visit an SPTE before ever visiting its children, which means we
	 * will correctly recursively split huge pages that are more than one
	 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
	 * and then splitting each of those to 512 4KB pages).
	 */
	for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
			continue;

		if (!sp) {
			rcu_read_unlock();

			if (shared)
				read_unlock(&kvm->mmu_lock);
			else
				write_unlock(&kvm->mmu_lock);

			sp = tdp_mmu_alloc_sp_for_split();

			if (shared)
				read_lock(&kvm->mmu_lock);
			else
				write_lock(&kvm->mmu_lock);

			if (!sp) {
				trace_kvm_mmu_split_huge_page(iter.gfn,
							      iter.old_spte,
							      iter.level, -ENOMEM);
				return -ENOMEM;
			}

			rcu_read_lock();

			iter.yielded = true;
			continue;
		}

		tdp_mmu_init_child_sp(sp, &iter);

		if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
			goto retry;

		sp = NULL;
	}

	rcu_read_unlock();

	/*
	 * It's possible to exit the loop having never used the last sp if, for
	 * example, a vCPU doing HugePage NX splitting wins the race and
	 * installs its own sp in place of the last sp we tried to split.
	 */
	if (sp)
		tdp_mmu_free_sp(sp);

	return 0;
}


/*
 * Try to split all huge pages mapped by the TDP MMU down to the target level.
 */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared)
{
	struct kvm_mmu_page *root;
	int r = 0;

	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
		if (r) {
			kvm_tdp_mmu_put_root(kvm, root);
			break;
		}
	}
}

static bool tdp_mmu_need_write_protect(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/*
	 * All TDP MMU shadow pages share the same role as their root, aside
	 * from level, so it is valid to key off any shadow page to determine if
	 * write protection is needed for an entire tree.
	 */
	return kvm_mmu_page_ad_need_write_protect(kvm, sp) || !kvm_ad_enabled;
}

static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end)
{
	const u64 dbit = tdp_mmu_need_write_protect(kvm, root) ?
			 PT_WRITABLE_MASK : shadow_dirty_mask;
	struct tdp_iter iter;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, kvm, root, start, end) {
retry:
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
			continue;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (!(iter.old_spte & dbit))
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
			goto retry;
	}

	rcu_read_unlock();
}

/*
 * Clear the dirty status (D-bit or W-bit) of all the SPTEs mapping GFNs in the
 * memslot.
 */
void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				      slot->base_gfn + slot->npages);
}

static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	const u64 dbit = (wrprot || tdp_mmu_need_write_protect(kvm, root)) ?
			 PT_WRITABLE_MASK : shadow_dirty_mask;
	struct tdp_iter iter;

	lockdep_assert_held_write(&kvm->mmu_lock);

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
				   gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		KVM_MMU_WARN_ON(dbit == shadow_dirty_mask &&
				spte_ad_need_write_protect(iter.old_spte));

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (!(iter.old_spte & dbit))
			continue;

		iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
							iter.old_spte, dbit,
							iter.level);

		trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
					       iter.old_spte,
					       iter.old_spte & ~dbit);
	}

	rcu_read_unlock();
}
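/*
 * Illustrative worked example (an assumption about a typical caller, not a
 * requirement imposed here): the dirty-log rearm path hands the exported
 * wrapper below the base GFN of a 64-page chunk and a bitmap of pages to
 * re-protect.  Bit i of @mask corresponds to @gfn + i, so for gfn = 0x1000
 * and mask = 0x5, only GFNs 0x1000 and 0x1002 have their D-bit (or W-bit)
 * cleared:
 *
 *	write_lock(&kvm->mmu_lock);
 *	kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, 0x1000, 0x5, false);
 *	write_unlock(&kvm->mmu_lock);
 */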
/*
 * Clear the dirty status (D-bit or W-bit) of all the 4k SPTEs mapping GFNs for
 * which a bit is set in mask, starting at gfn. The given memslot is expected to
 * contain all the GFNs represented by set bits in the mask.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;

	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

static int tdp_mmu_make_huge_spte(struct kvm *kvm,
				  struct tdp_iter *parent,
				  u64 *huge_spte)
{
	struct kvm_mmu_page *root = spte_to_child_sp(parent->old_spte);
	gfn_t start = parent->gfn;
	gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
	struct tdp_iter iter;

	tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
		/*
		 * Use the parent iterator when checking for forward progress so
		 * that KVM doesn't get stuck continuously trying to yield (i.e.
		 * returning -EAGAIN here and then failing the forward progress
		 * check in the caller ad nauseam).
		 */
		if (tdp_mmu_iter_need_resched(kvm, parent))
			return -EAGAIN;

		*huge_spte = make_huge_spte(kvm, iter.old_spte, parent->level);
		return 0;
	}

	return -ENOENT;
}

static void recover_huge_pages_range(struct kvm *kvm,
				     struct kvm_mmu_page *root,
				     const struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	int max_mapping_level;
	bool flush = false;
	u64 huge_spte;
	int r;

	if (WARN_ON_ONCE(kvm_slot_dirty_track_enabled(slot)))
		return;

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}

		if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
		    !is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * Don't zap leaf SPTEs, if a leaf SPTE could be replaced with
		 * a large page size, then its parent would have been zapped
		 * instead of stepping down.
		 */
		if (is_last_spte(iter.old_spte, iter.level))
			continue;

		/*
		 * If iter.gfn resides outside of the slot, i.e. the page for
		 * the current level overlaps but is not contained by the slot,
		 * then the SPTE can't be made huge.  More importantly, trying
		 * to query that info from slot->arch.lpage_info will cause an
		 * out-of-bounds access.
		 */
		if (iter.gfn < start || iter.gfn >= end)
			continue;

		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, iter.gfn);
		if (max_mapping_level < iter.level)
			continue;

		r = tdp_mmu_make_huge_spte(kvm, &iter, &huge_spte);
		if (r == -EAGAIN)
			goto retry;
		else if (r)
			continue;

		if (tdp_mmu_set_spte_atomic(kvm, &iter, huge_spte))
			goto retry;

		flush = true;
	}

	if (flush)
		kvm_flush_remote_tlbs_memslot(kvm, slot);

	rcu_read_unlock();
}

/*
 * Recover huge page mappings within the slot by replacing non-leaf SPTEs with
 * huge SPTEs where possible.
 */
void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm,
				    const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;

	lockdep_assert_held_read(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
		recover_huge_pages_range(kvm, root, slot);
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	rcu_read_lock();

	for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

		if (new_spte == iter.old_spte)
			break;

		tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * MMU-writable bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
				  struct kvm_mmu_page *root)
{
	struct tdp_iter iter;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
	*root_level = vcpu->arch.mmu->root_role.level;

	return __kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root);
}

bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
{
	struct kvm *kvm = vcpu->kvm;
	bool is_direct = kvm_is_addr_direct(kvm, gpa);
	hpa_t root = is_direct ? vcpu->arch.mmu->root.hpa :
				 vcpu->arch.mmu->mirror_root_hpa;
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
	int leaf;

	lockdep_assert_held(&kvm->mmu_lock);
	rcu_read_lock();
	leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));
	rcu_read_unlock();
	if (leaf < 0)
		return false;

	spte = sptes[leaf];
	return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
}
EXPORT_SYMBOL_GPL(kvm_tdp_mmu_gpa_is_mapped);

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte)
{
	/* Fast pf is not supported for mirrored roots. */
	struct kvm_mmu_page *root = tdp_mmu_get_root(vcpu, KVM_DIRECT_ROOTS);
	struct tdp_iter iter;
	tdp_ptep_t sptep = NULL;

	for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}
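/*
 * Illustrative sketch (assumptions about the caller, not upstream code): the
 * lockless-walk contract documented above, as a fast_page_fault()-style user
 * would obey it.  kvm_tdp_mmu_walk_lockless_{begin,end}() bracket the walk in
 * an RCU read-side critical section, and the returned sptep must not be
 * dereferenced once the section ends.
 *
 *	u64 spte;
 *	u64 *sptep;
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gfn, &spte);
 *	if (sptep && is_shadow_present_pte(spte)) {
 *		// e.g. attempt a single atomic fixup of the SPTE
 *		// (cf. fast_pf_fix_direct_spte() in mmu.c)
 *	}
 *	kvm_tdp_mmu_walk_lockless_end();
 */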