#include <linux/cc_platform.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/iommu.h>
#include <linux/amd-iommu.h>
#include <linux/nospec.h>
#include <asm/sev.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/cpuid/api.h>
#include <asm/cmdline.h>
#include <asm/iommu.h>
#include <asm/msr.h>
/*
 * RMP table entry in the architecture-defined format produced by the
 * RMPREAD instruction (see get_rmpentry()). This layout is stable and
 * independent of the raw in-memory entry format (struct rmpentry_raw).
 */
struct rmpentry {
	u64 gpa;			/* Guest physical address the page is assigned to */
	u8 assigned	:1,		/* Non-zero if the page is assigned in the RMP */
	   rsvd1	:7;
	u8 pagesize	:1,		/* RMP page-size encoding, consumed via RMP_TO_PG_LEVEL() */
	   hpage_region_status	:1,
	   rsvd2	:6;
	u8 immutable	:1,
	   rsvd3	:7;
	u8 rsvd4;
	u32 asid;			/* ASID owning the page (0 when unassigned) */
} __packed;
/*
 * Raw 16-byte in-memory layout of an RMP table entry, used when entries
 * are read directly from the mapped RMP segments on CPUs without the
 * RMPREAD instruction. The 'lo' member overlays the bitfields so a whole
 * entry can be captured/dumped as two u64s (see dump_rmpentry()).
 */
struct rmpentry_raw {
	union {
		struct {
			u64 assigned	: 1,
			    pagesize	: 1,
			    immutable	: 1,
			    rsvd1	: 9,
			    gpa		: 39,	/* Guest PFN, shifted up by PAGE_SHIFT when decoded */
			    asid	: 10,
			    vmsa	: 1,
			    validated	: 1,
			    rsvd2	: 1;
		};
		u64 lo;
	};
	u64 hi;
} __packed;
/* Fixed-size processor bookkeeping area at the start of the RMP reservation. */
#define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000

/* Segment shift used when the RMP is a single non-segmented table. */
#define RMPTABLE_NON_SEGMENTED_SHIFT	52

/*
 * Kernel-side view of one mapped RMP segment: the mapping of its entries,
 * the number of entries, and the mapped size in bytes.
 */
struct rmp_segment_desc {
	struct rmpentry_raw *rmp_entry;
	u64 max_index;
	u64 size;
};

/*
 * RMP Segment Table (RST) entry accessors: bits 19:0 hold the mapped size
 * (in GB units — callers shift by 30), bits 51:20 the segment's physical base.
 */
#define RST_ENTRY_MAPPED_SIZE(x)	((x) & GENMASK_ULL(19, 0))
#define RST_ENTRY_SEGMENT_BASE(x)	((x) & GENMASK_ULL(51, 20))

/* The RST occupies a single 4K page (512 eight-byte entries). */
#define RST_SIZE			SZ_4K

static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
/* Defaults to the RST capacity (RST_SIZE / 8); trimmed once the RST is parsed. */
static unsigned int rst_max_index __ro_after_init = 512;

/* Segment geometry, established by set_rmp_segment_info(). */
static unsigned int rmp_segment_shift;
static u64 rmp_segment_size;
static u64 rmp_segment_mask;

/* Map a system physical address to an RST index / an index within a segment. */
#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_shift)
#define RMP_ENTRY_INDEX(x)	((u64)(PHYS_PFN((x) & rmp_segment_mask)))

/* Cached MSR_AMD64_RMP_CFG contents (segmented-RMP configuration). */
static u64 rmp_cfg;

/* Keeps the PMD(2M)-aligned portion of a PFN, clearing the sub-2M bits. */
#define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)

/* RMP location/size as discovered from the RMP_BASE/RMP_END MSRs. */
static u64 probed_rmp_base, probed_rmp_size;

/* Pages whose RMP state change failed; leaked forever rather than reused. */
static LIST_HEAD(snp_leaked_pages_list);
static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
static unsigned long snp_nr_leaked_pages;

#undef pr_fmt
#define pr_fmt(fmt)	"SEV-SNP: " fmt
/*
 * Set the MFDM bit in MSR_AMD64_SYSCFG on the local CPU. Invoked on every
 * CPU before SNP itself is enabled (see snp_rmptable_init()). @cpu is
 * unused; the signature matches the cpu-callback convention used here.
 */
static int __mfd_enable(unsigned int cpu)
{
	u64 val;

	/* Nothing to do unless this host supports SEV-SNP. */
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return 0;

	rdmsrq(MSR_AMD64_SYSCFG, val);

	val |= MSR_AMD64_SYSCFG_MFDM;

	wrmsrq(MSR_AMD64_SYSCFG, val);

	return 0;
}
/* on_each_cpu() adapter for __mfd_enable(); @arg is unused. */
static __init void mfd_enable(void *arg)
{
	__mfd_enable(smp_processor_id());
}
/*
 * Enable SNP (and VMPL) in MSR_AMD64_SYSCFG on the local CPU. Also used
 * as a CPU hotplug online callback so newly onlined CPUs get SNP enabled
 * (see the cpuhp_setup_state() call in snp_rmptable_init()).
 */
static int __snp_enable(unsigned int cpu)
{
	u64 val;

	/* Nothing to do unless this host supports SEV-SNP. */
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return 0;

	rdmsrq(MSR_AMD64_SYSCFG, val);

	val |= MSR_AMD64_SYSCFG_SNP_EN;
	val |= MSR_AMD64_SYSCFG_SNP_VMPL_EN;

	wrmsrq(MSR_AMD64_SYSCFG, val);

	return 0;
}
/* on_each_cpu() adapter for __snp_enable(); @arg is unused. */
static __init void snp_enable(void *arg)
{
	__snp_enable(smp_processor_id());
}
/*
 * If @pa (an RMP region boundary) is not 2MB aligned and the surrounding
 * 2MB region is RAM, convert that whole 2MB region to reserved in the e820
 * tables (including the kexec copy) and in memblock, so no usable RAM page
 * straddles the boundary of the RMP reservation.
 */
static void __init __snp_fixup_e820_tables(u64 pa)
{
	if (IS_ALIGNED(pa, PMD_SIZE))
		return;

	pa = ALIGN_DOWN(pa, PMD_SIZE);

	if (e820__mapped_any(pa, pa + PMD_SIZE, E820_TYPE_RAM)) {
		pr_info("Reserving start/end of RMP table on a 2MB boundary [0x%016llx]\n", pa);
		e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
		/* Keep the kexec e820 copy consistent with the live table. */
		e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
		if (!memblock_is_region_reserved(pa, PMD_SIZE))
			memblock_reserve(pa, PMD_SIZE);
	}
}
/*
 * Apply the 2MB-boundary e820 fixups for a segmented RMP: the base of the
 * reservation, the end of the bookkeeping+RST header, and the start/end of
 * every mapped RMP segment listed in the RST.
 */
static void __init fixup_e820_tables_for_segmented_rmp(void)
{
	u64 pa, *rst, size, mapped_size;
	unsigned int i;

	__snp_fixup_e820_tables(probed_rmp_base);

	pa = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;

	__snp_fixup_e820_tables(pa + RST_SIZE);

	/* Early boot: the RST must be temporarily mapped to be parsed. */
	rst = early_memremap(pa, RST_SIZE);
	if (!rst)
		return;

	for (i = 0; i < rst_max_index; i++) {
		pa = RST_ENTRY_SEGMENT_BASE(rst[i]);
		mapped_size = RST_ENTRY_MAPPED_SIZE(rst[i]);
		if (!mapped_size)
			continue;

		__snp_fixup_e820_tables(pa);

		/* RST mapped size is in GB units. */
		mapped_size <<= 30;
		if (mapped_size > rmp_segment_size)
			mapped_size = rmp_segment_size;

		/* 16 bytes of RMP entry per 4K page of coverage. */
		size = PHYS_PFN(mapped_size) << 4;

		__snp_fixup_e820_tables(pa + size);
	}

	early_memunmap(rst, RST_SIZE);
}
/* Apply the 2MB-boundary e820 fixups at both ends of a contiguous RMP. */
static void __init fixup_e820_tables_for_contiguous_rmp(void)
{
	__snp_fixup_e820_tables(probed_rmp_base);
	__snp_fixup_e820_tables(probed_rmp_base + probed_rmp_size);
}
/*
 * Reserve the RMP table's 2MB boundary regions in the e820/memblock maps,
 * dispatching on whether the probed configuration uses a segmented RMP.
 */
void __init snp_fixup_e820_tables(void)
{
	bool segmented = !!(rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED);

	if (segmented)
		fixup_e820_tables_for_segmented_rmp();
	else
		fixup_e820_tables_for_contiguous_rmp();
}
/*
 * Zero the processor bookkeeping area at the start of the RMP reservation.
 * Returns false if the area could not be mapped.
 */
static bool __init clear_rmptable_bookkeeping(void)
{
	void *bk;

	bk = memremap(probed_rmp_base, RMPTABLE_CPU_BOOKKEEPING_SZ, MEMREMAP_WB);
	if (!bk) {
		pr_err("Failed to map RMP bookkeeping area\n");
		return false;
	}

	memset(bk, 0, RMPTABLE_CPU_BOOKKEEPING_SZ);

	memunmap(bk);

	return true;
}
/*
 * Map one RMP segment and record its descriptor in rmp_segment_table.
 *
 * @segment_pa:   physical address of the segment's RMP entries
 * @segment_size: size in bytes of the segment's RMP entries
 * @pa:           first system physical address the segment covers
 *                (determines the RST index)
 *
 * Returns true on success; on failure nothing is left mapped or allocated.
 */
static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
{
	u64 rst_index, rmp_segment_size_max;
	struct rmp_segment_desc *desc;
	void *rmp_segment;

	/* 16 bytes of RMP entry per 4K page of segment coverage. */
	rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;

	/* Reject a segment larger than the configured segment coverage allows. */
	if (segment_size > rmp_segment_size_max) {
		pr_err("Invalid RMP size 0x%llx for configured segment size 0x%llx\n",
		segment_size, rmp_segment_size_max);
		return false;
	}

	rst_index = RST_ENTRY_INDEX(pa);
	if (rst_index >= rst_max_index) {
		pr_err("Invalid RMP segment base address 0x%llx for configured segment size 0x%llx\n",
		pa, rmp_segment_size);
		return false;
	}

	/* Each RST slot may be populated at most once. */
	if (rmp_segment_table[rst_index]) {
		pr_err("RMP segment descriptor already exists at index %llu\n", rst_index);
		return false;
	}

	rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
	if (!rmp_segment) {
		pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
		segment_pa, segment_size);
		return false;
	}

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		memunmap(rmp_segment);
		return false;
	}

	desc->rmp_entry = rmp_segment;
	desc->max_index = segment_size / sizeof(*desc->rmp_entry);
	desc->size = segment_size;

	rmp_segment_table[rst_index] = desc;

	return true;
}
/*
 * Tear down all segment descriptors (unmapping each segment's entries) and
 * release the page backing rmp_segment_table itself.
 */
static void __init free_rmp_segment_table(void)
{
	unsigned int i;

	for (i = 0; i < rst_max_index; i++) {
		struct rmp_segment_desc *desc;

		desc = rmp_segment_table[i];
		if (!desc)
			continue;

		memunmap(desc->rmp_entry);

		kfree(desc);
	}

	free_page((unsigned long)rmp_segment_table);

	rmp_segment_table = NULL;
}
/*
 * Allocate one zeroed page to hold the array of rmp_segment_desc pointers.
 * Returns false on allocation failure.
 */
static bool __init alloc_rmp_segment_table(void)
{
	struct page *page;

	page = alloc_page(__GFP_ZERO);
	if (!page)
		return false;

	rmp_segment_table = page_address(page);

	return true;
}
/*
 * Set up a traditional, contiguous RMP: verify the BIOS reservation is big
 * enough to cover all of system RAM (16 bytes of entry per 4K page, plus
 * the bookkeeping area), then treat the whole entry region as a single
 * segment rooted at system physical address 0.
 */
static bool __init setup_contiguous_rmptable(void)
{
	u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end;

	if (!probed_rmp_size)
		return false;

	rmp_end = probed_rmp_base + probed_rmp_size - 1;

	/*
	 * If the RMP reservation itself lies above max_pfn, size the coverage
	 * check by the reservation's end rather than by max_pfn.
	 */
	max_rmp_pfn = max_pfn;
	if (PFN_UP(rmp_end) > max_pfn)
		max_rmp_pfn = PFN_UP(rmp_end);

	calc_rmp_sz = (max_rmp_pfn << 4) + RMPTABLE_CPU_BOOKKEEPING_SZ;
	if (calc_rmp_sz > probed_rmp_size) {
		pr_err("Memory reserved for the RMP table does not cover full system RAM (expected 0x%llx got 0x%llx)\n",
		calc_rmp_sz, probed_rmp_size);
		return false;
	}

	if (!alloc_rmp_segment_table())
		return false;

	/* The RMP entries start right after the bookkeeping area. */
	rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;

	if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
		free_rmp_segment_table();
		return false;
	}

	return true;
}
/*
 * Set up a segmented RMP: walk the RMP Segment Table (RST), create a
 * descriptor for every populated entry, verify the segments collectively
 * cover all of system RAM, and trim rst_max_index to the highest populated
 * slot. Cleans up fully (via goto-unwind) on any failure.
 */
static bool __init setup_segmented_rmptable(void)
{
	u64 rst_pa, *rst, pa, ram_pa_end, ram_pa_max;
	unsigned int i, max_index;

	if (!probed_rmp_base)
		return false;

	if (!alloc_rmp_segment_table())
		return false;

	/* The RST sits right after the bookkeeping area. */
	rst_pa = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
	rst = memremap(rst_pa, RST_SIZE, MEMREMAP_WB);
	if (!rst) {
		pr_err("Failed to map RMP segment table addr 0x%llx\n", rst_pa);
		goto e_free;
	}

	pr_info("Segmented RMP using %lluGB segments\n", rmp_segment_size >> 30);

	ram_pa_max = max_pfn << PAGE_SHIFT;

	max_index = 0;
	ram_pa_end = 0;
	for (i = 0; i < rst_max_index; i++) {
		u64 rmp_segment, rmp_size, mapped_size;

		mapped_size = RST_ENTRY_MAPPED_SIZE(rst[i]);
		if (!mapped_size)
			continue;

		max_index = i;

		/*
		 * Mapped size in GB. Clamp to the configured segment size so a
		 * bogus RST entry cannot claim more coverage than a segment has.
		 */
		mapped_size <<= 30;
		if (mapped_size > rmp_segment_size) {
			pr_info("RMP segment %u mapped size (0x%llx) reduced to 0x%llx\n",
			i, mapped_size, rmp_segment_size);
			mapped_size = rmp_segment_size;
		}

		rmp_segment = RST_ENTRY_SEGMENT_BASE(rst[i]);

		/* 16 bytes of RMP entry per 4K page of coverage. */
		rmp_size = PHYS_PFN(mapped_size) << 4;

		/* Segment i covers system PA starting at (i << rmp_segment_shift). */
		pa = (u64)i << rmp_segment_shift;

		/*
		 * Track the highest RAM address covered; only segments that start
		 * below the top of RAM advance the coverage end.
		 */
		if (pa < ram_pa_max)
			ram_pa_end = pa + mapped_size;

		if (!alloc_rmp_segment_desc(rmp_segment, rmp_size, pa))
			goto e_unmap;

		pr_info("RMP segment %u physical address [0x%llx - 0x%llx] covering [0x%llx - 0x%llx]\n",
		i, rmp_segment, rmp_segment + rmp_size - 1, pa, pa + mapped_size - 1);
	}

	if (ram_pa_max > ram_pa_end) {
		pr_err("Segmented RMP does not cover full system RAM (expected 0x%llx got 0x%llx)\n",
		ram_pa_max, ram_pa_end);
		goto e_unmap;
	}

	/* Shrink the RST iteration bound to the populated portion. */
	rst_max_index = max_index + 1;

	memunmap(rst);

	return true;

e_unmap:
	memunmap(rst);

e_free:
	free_rmp_segment_table();

	return false;
}
/*
 * Map and validate the RMP table(s) found during probing, choosing the
 * segmented or contiguous setup path based on the cached RMP_CFG state.
 */
static bool __init setup_rmptable(void)
{
	bool segmented = !!(rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED);

	return segmented ? setup_segmented_rmptable()
			 : setup_contiguous_rmptable();
}
/*
 * Set up the RMP table(s), zero them, and enable SNP on all CPUs.
 *
 * Returns 0 on success (including when SNP was already enabled, in which
 * case initialization of the table contents is skipped), -ENOSYS on any
 * failure. Requires host SNP support and SNP-enabled AMD IOMMU.
 */
int __init snp_rmptable_init(void)
{
	unsigned int i;
	u64 val;

	if (WARN_ON_ONCE(!cc_platform_has(CC_ATTR_HOST_SEV_SNP)))
		return -ENOSYS;

	if (WARN_ON_ONCE(!amd_iommu_snp_en))
		return -ENOSYS;

	if (!setup_rmptable())
		return -ENOSYS;

	/*
	 * If SNP is already enabled (e.g. a previous kernel enabled it before
	 * kexec — NOTE(review): inferred, confirm), the RMP contents are live
	 * and must not be re-zeroed.
	 */
	rdmsrq(MSR_AMD64_SYSCFG, val);
	if (val & MSR_AMD64_SYSCFG_SNP_EN)
		goto skip_enable;

	if (!clear_rmptable_bookkeeping()) {
		free_rmp_segment_table();
		return -ENOSYS;
	}

	/* Zero out the RMP entries of every mapped segment. */
	for (i = 0; i < rst_max_index; i++) {
		struct rmp_segment_desc *desc;

		desc = rmp_segment_table[i];
		if (!desc)
			continue;

		memset(desc->rmp_entry, 0, desc->size);
	}

	/* Flush caches so the zeroed table is in memory before SNP is enabled. */
	wbinvd_on_all_cpus();

	/* MFDM must be set on all CPUs before SNP is enabled. */
	on_each_cpu(mfd_enable, NULL, 1);

	on_each_cpu(snp_enable, NULL, 1);

skip_enable:
	/* Keep SNP enabled on CPUs that come online later. */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);

	/*
	 * Run panic notifiers (see kdump_sev_callback()) before the crash
	 * kernel boots.
	 */
	crash_kexec_post_notifiers = true;

	return 0;
}
/*
 * Record the RMP segment geometry (shift, byte size, intra-segment address
 * mask) consumed by the RST_ENTRY_INDEX()/RMP_ENTRY_INDEX() macros.
 */
static void set_rmp_segment_info(unsigned int segment_shift)
{
	rmp_segment_shift = segment_shift;
	rmp_segment_size = 1ULL << segment_shift;
	rmp_segment_mask = rmp_segment_size - 1;
}
#define RMP_ADDR_MASK GENMASK_ULL(51, 13)
static bool probe_contiguous_rmptable_info(void)
{
u64 rmp_sz, rmp_base, rmp_end;
rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
rdmsrq(MSR_AMD64_RMP_END, rmp_end);
if (!(rmp_base & RMP_ADDR_MASK) || !(rmp_end & RMP_ADDR_MASK)) {
pr_err("Memory for the RMP table has not been reserved by BIOS\n");
return false;
}
if (rmp_base > rmp_end) {
pr_err("RMP configuration not valid: base=%#llx, end=%#llx\n", rmp_base, rmp_end);
return false;
}
rmp_sz = rmp_end - rmp_base + 1;
rst_max_index = 1;
set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
probed_rmp_base = rmp_base;
probed_rmp_size = rmp_sz;
pr_info("RMP table physical range [0x%016llx - 0x%016llx]\n",
rmp_base, rmp_end);
return true;
}
/*
 * Discover a segmented RMP: validate RMP_BASE, sanity-check the segment
 * shift programmed in RMP_CFG against the bounds advertised by CPUID leaf
 * 0x80000025, pick up the RST max index if advertised, and record the base.
 * probed_rmp_size stays 0 — the per-segment sizes come from the RST.
 */
static bool probe_segmented_rmptable_info(void)
{
	unsigned int eax, ebx, segment_shift, segment_shift_min, segment_shift_max;
	u64 rmp_base, rmp_end;

	rdmsrq(MSR_AMD64_RMP_BASE, rmp_base);
	if (!(rmp_base & RMP_ADDR_MASK)) {
		pr_err("Memory for the RMP table has not been reserved by BIOS\n");
		return false;
	}

	/* With a segmented RMP, RMP_END is expected to be unprogrammed. */
	rdmsrq(MSR_AMD64_RMP_END, rmp_end);
	WARN_ONCE(rmp_end & RMP_ADDR_MASK,
	"Segmented RMP enabled but RMP_END MSR is non-zero\n");

	/* Obtain the min and max supported RMP segment size (leaf 0x80000025). */
	eax = cpuid_eax(0x80000025);
	segment_shift_min = eax & GENMASK(5, 0);
	segment_shift_max = (eax & GENMASK(11, 6)) >> 6;

	/* Verify the segment size programmed by BIOS is within bounds. */
	segment_shift = MSR_AMD64_RMP_SEGMENT_SHIFT(rmp_cfg);
	if (segment_shift > segment_shift_max || segment_shift < segment_shift_min) {
		pr_err("RMP segment size (%u) is not within advertised bounds (min=%u, max=%u)\n",
		segment_shift, segment_shift_min, segment_shift_max);
		return false;
	}

	/* EBX bit 10 indicates that bits 9:0 hold a valid RST max index. */
	ebx = cpuid_ebx(0x80000025);
	if (ebx & BIT(10))
		rst_max_index = ebx & GENMASK(9, 0);

	set_rmp_segment_info(segment_shift);

	probed_rmp_base = rmp_base;
	probed_rmp_size = 0;

	pr_info("Segmented RMP base table physical range [0x%016llx - 0x%016llx]\n",
	rmp_base, rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ + RST_SIZE);

	return true;
}
/*
 * Probe the RMP configuration. Reads RMP_CFG when the CPU supports
 * segmented RMPs, then dispatches to the segmented or contiguous probe.
 * Returns true when a usable RMP reservation was found.
 */
bool snp_probe_rmptable_info(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SEGMENTED_RMP))
		rdmsrq(MSR_AMD64_RMP_CFG, rmp_cfg);

	if (rmp_cfg & MSR_AMD64_SEG_RMP_ENABLED)
		return probe_segmented_rmptable_info();

	return probe_contiguous_rmptable_info();
}
/*
 * Return a pointer to the raw RMP entry for @pfn, or an ERR_PTR:
 * -ENODEV if no RMP table is set up, -EFAULT if @pfn falls outside the
 * mapped segments. Both index lookups are hardened with
 * array_index_nospec() since @pfn may be attacker-influenced.
 */
static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)
{
	u64 paddr, rst_index, segment_index;
	struct rmp_segment_desc *desc;

	if (!rmp_segment_table)
		return ERR_PTR(-ENODEV);

	paddr = pfn << PAGE_SHIFT;

	rst_index = RST_ENTRY_INDEX(paddr);
	if (unlikely(rst_index >= rst_max_index))
		return ERR_PTR(-EFAULT);

	rst_index = array_index_nospec(rst_index, rst_max_index);

	desc = rmp_segment_table[rst_index];
	if (unlikely(!desc))
		return ERR_PTR(-EFAULT);

	segment_index = RMP_ENTRY_INDEX(paddr);
	if (unlikely(segment_index >= desc->max_index))
		return ERR_PTR(-EFAULT);

	segment_index = array_index_nospec(segment_index, desc->max_index);

	return desc->rmp_entry + segment_index;
}
/*
 * Read the RMP entry for @pfn into @e (architecture-defined format).
 *
 * On CPUs with RMPREAD, use the instruction: it takes the target system
 * physical address in RAX and the output buffer in RCX, and returns a
 * status in RAX. Otherwise read the raw in-memory entry and convert the
 * relevant fields by hand (remaining fields of @e are zeroed).
 */
static int get_rmpentry(u64 pfn, struct rmpentry *e)
{
	struct rmpentry_raw *e_raw;

	if (cpu_feature_enabled(X86_FEATURE_RMPREAD)) {
		int ret;

		/* RMPREAD as raw opcode bytes, for assemblers without the mnemonic. */
		asm volatile(".byte 0xf2, 0x0f, 0x01, 0xfd"
		: "=a" (ret)
		: "a" (pfn << PAGE_SHIFT), "c" (e)
		: "memory", "cc");

		return ret;
	}

	e_raw = get_raw_rmpentry(pfn);
	if (IS_ERR(e_raw))
		return PTR_ERR(e_raw);

	/*
	 * Map the raw RMP table entry onto the RMPREAD output format.
	 * The 2MB region status of a PFN is not tracked in the raw layout,
	 * so hpage_region_status stays zero here.
	 */
	memset(e, 0, sizeof(*e));
	e->gpa = e_raw->gpa << PAGE_SHIFT;
	e->asid = e_raw->asid;
	e->assigned = e_raw->assigned;
	e->pagesize = e_raw->pagesize;
	e->immutable = e_raw->immutable;

	return 0;
}
/*
 * Look up @pfn's RMP entry into @e and report the RMP page level in
 * @level. The level comes from the entry of the 2MB-aligned base PFN,
 * since that entry carries the page-size information for the region.
 */
static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *e, int *level)
{
	struct rmpentry e_large;
	int ret;

	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return -ENODEV;

	ret = get_rmpentry(pfn, e);
	if (ret)
		return ret;

	/* Read the 2MB-aligned entry to determine the mapping level. */
	ret = get_rmpentry(pfn & PFN_PMD_MASK, &e_large);
	if (ret)
		return ret;

	*level = RMP_TO_PG_LEVEL(e_large.pagesize);

	return 0;
}
/*
 * Public RMP lookup: report whether @pfn is assigned in the RMP and at
 * which page level. Returns 0 on success or a negative errno from the
 * underlying lookup (-ENODEV when SNP is not supported on this host).
 */
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
{
	struct rmpentry entry;
	int rc = __snp_lookup_rmpentry(pfn, &entry, level);

	if (rc)
		return rc;

	*assigned = !!entry.assigned;

	return 0;
}
EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
/*
 * Dump RMP state for @pfn, for diagnostics (e.g. after an RMPUPDATE
 * failure). An assigned PFN gets its raw entry printed directly; an
 * unassigned PFN instead gets every non-zero raw entry in its surrounding
 * 2MB PFN region printed, which helps spot a conflicting neighbor entry.
 */
static void dump_rmpentry(u64 pfn)
{
	struct rmpentry_raw *e_raw;
	u64 pfn_i, pfn_end;
	struct rmpentry e;
	int level, ret;

	ret = __snp_lookup_rmpentry(pfn, &e, &level);
	if (ret) {
		pr_err("Failed to read RMP entry for PFN 0x%llx, error %d\n",
		pfn, ret);
		return;
	}

	if (e.assigned) {
		e_raw = get_raw_rmpentry(pfn);
		if (IS_ERR(e_raw)) {
			pr_err("Failed to read RMP contents for PFN 0x%llx, error %ld\n",
			pfn, PTR_ERR(e_raw));
			return;
		}

		pr_info("PFN 0x%llx, RMP entry: [0x%016llx - 0x%016llx]\n",
		pfn, e_raw->lo, e_raw->hi);
		return;
	}

	/* Walk the 2MB-aligned PFN region containing @pfn. */
	pfn_i = ALIGN_DOWN(pfn, PTRS_PER_PMD);
	pfn_end = pfn_i + PTRS_PER_PMD;

	pr_info("PFN 0x%llx unassigned, dumping non-zero entries in 2M PFN region: [0x%llx - 0x%llx]\n",
	pfn, pfn_i, pfn_end);

	while (pfn_i < pfn_end) {
		e_raw = get_raw_rmpentry(pfn_i);
		if (IS_ERR(e_raw)) {
			pr_err("Error %ld reading RMP contents for PFN 0x%llx\n",
			PTR_ERR(e_raw), pfn_i);
			pfn_i++;
			continue;
		}

		if (e_raw->lo || e_raw->hi)
			pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n", pfn_i, e_raw->lo, e_raw->hi);
		pfn_i++;
	}
}
/*
 * Dump the RMP entry backing host virtual address @hva. Walks the page
 * tables rooted at the current CR3 (so user mappings of the current task
 * are resolvable, not just the kernel's), then dumps the entry for the
 * resolved physical address.
 */
void snp_dump_hva_rmpentry(unsigned long hva)
{
	unsigned long paddr;
	unsigned int level;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd += pgd_index(hva);
	pte = lookup_address_in_pgd(pgd, hva, &level);

	if (!pte) {
		pr_err("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
		return;
	}

	/* Combine the PFN with the sub-page offset bits of @hva for this level. */
	paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level));
	dump_rmpentry(PHYS_PFN(paddr));
}
/*
 * PSMASH a 2MB-aligned page: per the AMD APM, the PSMASH instruction
 * splits the 2MB RMP entry at @pfn into corresponding 4KB entries.
 * Returns the instruction's status code, -ENODEV without host SNP, or
 * -EINVAL for an invalid PFN.
 */
int psmash(u64 pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;
	int ret;

	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return -ENODEV;

	if (!pfn_valid(pfn))
		return -EINVAL;

	/* PSMASH as raw opcode bytes; target system PA goes in RAX. */
	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
	: "=a" (ret)
	: "a" (paddr)
	: "memory", "cc");

	return ret;
}
EXPORT_SYMBOL_GPL(psmash);
/*
 * Ensure the kernel direct map does not map the page(s) at @pfn with a
 * larger page size than the RMP granularity being set: for a 4K RMP
 * update under a huge direct mapping, split that mapping down to 4K.
 * (NOTE(review): rationale — mismatched sizes can trigger RMP faults on
 * the direct map; confirm against the SNP host enabling docs.)
 *
 * Returns 0 on success/no-op, -EINVAL for bad input, or the error from
 * the split.
 */
static int adjust_direct_map(u64 pfn, int rmp_level)
{
	unsigned long vaddr;
	unsigned int level;
	int npages, ret;
	pte_t *pte;

	/* Only 4K or 2M RMP granularity is handled here. */
	vaddr = (unsigned long)pfn_to_kaddr(pfn);
	if (WARN_ON_ONCE(rmp_level > PG_LEVEL_2M))
		return -EINVAL;

	if (!pfn_valid(pfn))
		return -EINVAL;

	/* A 2M update must be 2M-aligned and fully backed by valid PFNs. */
	if (rmp_level == PG_LEVEL_2M &&
	(!IS_ALIGNED(pfn, PTRS_PER_PMD) || !pfn_valid(pfn + PTRS_PER_PMD - 1)))
		return -EINVAL;

	/* 2M RMP granularity never needs the direct map split further. */
	if (rmp_level == PG_LEVEL_2M)
		return 0;

	pte = lookup_address(vaddr, &level);
	if (!pte || pte_none(*pte))
		return 0;

	/* Direct map already at 4K — nothing to split. */
	if (level == PG_LEVEL_4K)
		return 0;

	npages = page_level_size(rmp_level) / PAGE_SIZE;
	ret = set_memory_4k(vaddr, npages);
	if (ret)
		pr_warn("Failed to split direct map for PFN 0x%llx, ret: %d\n",
		pfn, ret);

	return ret;
}
/*
 * Issue RMPUPDATE for @pfn with the requested @state. The direct map is
 * split first (see adjust_direct_map()) so its granularity matches.
 * Retries while the instruction reports RMPUPDATE_FAIL_OVERLAP, which is
 * treated as a transient conflict with a concurrent in-use overlapping
 * entry; any other non-zero status is logged (with an RMP entry dump and
 * stack trace) and turned into -EFAULT.
 */
static int rmpupdate(u64 pfn, struct rmp_state *state)
{
	unsigned long paddr = pfn << PAGE_SHIFT;
	int ret, level;

	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return -ENODEV;

	level = RMP_TO_PG_LEVEL(state->pagesize);
	if (adjust_direct_map(pfn, level))
		return -EFAULT;

	do {
		/* RMPUPDATE as raw opcode bytes: RAX = target PA, RCX = state. */
		asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
		: "=a" (ret)
		: "a" (paddr), "c" ((unsigned long)state)
		: "memory", "cc");
	} while (ret == RMPUPDATE_FAIL_OVERLAP);

	if (ret) {
		pr_err("RMPUPDATE failed for PFN %llx, pg_level: %d, ret: %d\n",
		pfn, level, ret);
		dump_rmpentry(pfn);
		dump_stack();
		return -EFAULT;
	}

	return 0;
}
/*
 * Transition @pfn to a guest-owned (assigned) RMP state: assigned to
 * guest physical address @gpa under ASID @asid at page level @level,
 * optionally marked immutable. Returns 0 or a negative errno from
 * rmpupdate().
 */
int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable)
{
	struct rmp_state rmp;

	/* memset (not an initializer) so reserved/padding bytes are zeroed too. */
	memset(&rmp, 0, sizeof(rmp));

	rmp.assigned = 1;
	rmp.gpa = gpa;
	rmp.asid = asid;
	rmp.pagesize = PG_LEVEL_TO_RMP(level);
	rmp.immutable = immutable;

	return rmpupdate(pfn, &rmp);
}
EXPORT_SYMBOL_GPL(rmp_make_private);
/*
 * Transition @pfn back to the shared (unassigned) RMP state at page level
 * @level. All other rmp_state fields stay zero. Returns 0 or a negative
 * errno from rmpupdate().
 */
int rmp_make_shared(u64 pfn, enum pg_level level)
{
	struct rmp_state rmp;

	/* memset (not an initializer) so reserved/padding bytes are zeroed too. */
	memset(&rmp, 0, sizeof(rmp));

	rmp.pagesize = PG_LEVEL_TO_RMP(level);

	return rmpupdate(pfn, &rmp);
}
EXPORT_SYMBOL_GPL(rmp_make_shared);
/*
 * Mark @npages starting at @pfn as leaked: pages whose RMP state could not
 * be restored are never returned to the allocator. Each page's RMP entry
 * is dumped for diagnostics and the page is chained (via its buddy_list,
 * which is free for reuse since the page never re-enters the buddy
 * allocator) onto snp_leaked_pages_list.
 */
void snp_leak_pages(u64 pfn, unsigned int npages)
{
	struct page *page = pfn_to_page(pfn);

	pr_warn("Leaking PFN range 0x%llx-0x%llx\n", pfn, pfn + npages);

	spin_lock(&snp_leaked_pages_list_lock);
	while (npages--) {

		/*
		 * Only chain non-compound pages, or head pages whose whole
		 * compound extent lies within the leaked range — tail pages
		 * cannot be list-linked independently of their head.
		 */
		if (likely(!PageCompound(page)) ||

			/*
			 * Skip inserting a head page when the remaining range
			 * does not cover the entire compound page.
			 */
			(PageHead(page) && compound_nr(page) <= npages))
			list_add_tail(&page->buddy_list, &snp_leaked_pages_list);

		dump_rmpentry(pfn);
		snp_nr_leaked_pages++;
		pfn++;
		page++;
	}
	spin_unlock(&snp_leaked_pages_list_lock);
}
EXPORT_SYMBOL_GPL(snp_leak_pages);
/*
 * Crash/kdump hook: on SNP-capable hosts, write back and invalidate the
 * local CPU's caches so memory contents are consistent for the crash
 * kernel. No-op on hosts without SEV-SNP support.
 */
void kdump_sev_callback(void)
{
	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		return;

	wbinvd();
}