/* drivers/firmware/efi/unaccepted_memory.c */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

/*
 * One in-flight acceptance request, in unit_size granules. Entries live on
 * the accepting CPU's stack (see accept_memory()) and sit on accepting_list
 * only while that CPU is working on the range, so that concurrent callers
 * touching the same granules can detect the overlap and wait.
 */
struct accept_range {
	struct list_head list;
	unsigned long start;	/* first unit index, inclusive */
	unsigned long end;	/* last unit index, exclusive */
};

/* Ranges currently being accepted; protected by unaccepted_memory_lock */
static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory that addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	phys_addr_t end = start + size;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
	 *    checks up to the next unit_size if 'start+size' is aligned on a
	 *    unit_size boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, size) to the next unit_size
	 *    if 'size+end' is aligned on a unit_size boundary. (immediately
	 *    following this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	/* Convert byte offsets to unit-granule indices: [start, end) */
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody works on accepting the same range of the memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap on physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else accepting the range. Or at least part of it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 *
	 * 'range' is on this function's stack; it is removed from the list
	 * below before the function returns.
	 */
	list_add(&range.list, &accepting_list);

	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will continuously
		 * spin with interrupts disabled, preventing other task from
		 * making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		/* Accepted: clear the corresponding bitmap bits */
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);

	/* Accepting a large range may take a while; pet the watchdog */
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

/*
 * range_contains_unaccepted_memory() -- Check whether any part of
 * [start, start + size) is still marked unaccepted in the bitmap.
 *
 * Returns false if the system has no unaccepted table, or if the range lies
 * entirely outside the region the bitmap describes.
 */
bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	phys_addr_t end = start + size;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	/* Scan one unit-size granule at a time; stop at the first hit */
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}

#ifdef CONFIG_PROC_VMCORE
/*
 * vmcore callback: report a pfn as RAM only if it is not unaccepted memory,
 * so kdump does not touch (and thereby fault on) unaccepted pages.
 */
static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
						unsigned long pfn)
{
	return !pfn_is_unaccepted_memory(pfn);
}

static struct vmcore_cb vmcore_cb = {
	.pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
};

static int __init unaccepted_memory_init_kdump(void)
{
	register_vmcore_cb(&vmcore_cb);
	return 0;
}
core_initcall(unaccepted_memory_init_kdump);
#endif /* CONFIG_PROC_VMCORE */