// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>

#include <asm/sev.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("Memory Encryption Features active: ");

	switch (cc_vendor) {
	case CC_VENDOR_INTEL:
		pr_cont("Intel TDX\n");
		break;
	case CC_VENDOR_AMD:
		pr_cont("AMD");

		/* Secure Memory Encryption */
		if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
			/*
			 * SME is mutually exclusive with any of the SEV
			 * features below.
			 */
			pr_cont(" SME\n");
			return;
		}

		/* Secure Encrypted Virtualization */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pr_cont(" SEV");

		/* Encrypted Register State */
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
			pr_cont(" SEV-ES");

		/* Secure Nested Paging */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			pr_cont(" SEV-SNP");

		pr_cont("\n");

		sev_show_status();

		break;
	default:
		pr_cont("Unknown\n");
	}
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	snp_secure_tsc_prepare();

	print_mem_encrypt_feature_info();
}

void __init mem_encrypt_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	/*
	 * Do RMP table fixups after the e820 tables have been setup by
	 * e820__memory_setup().
	 */
	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		snp_fixup_e820_tables();

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
	 * Kernel uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB using a percentage of guest
	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
	 * memory is allocated from low memory, ensure that the adjusted size
	 * is within the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * is more of an approximation of the static adjustment, which is
	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%.
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}
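
/*
 * Illustrative sketch only -- not part of the kernel file above. It mimics
 * the 6% SWIOTLB sizing and clamping done in mem_encrypt_setup_arch() as a
 * standalone userspace program, assuming the 64MB IO_TLB_DEFAULT_SIZE noted
 * in the comment above and the 1GB (SZ_1G) upper clamp.
 */
#include <stdio.h>

#define MB(x)	((unsigned long long)(x) << 20)
#define GB(x)	((unsigned long long)(x) << 30)

/* Approximation of: clamp_val(total_mem * 6 / 100, IO_TLB_DEFAULT_SIZE, SZ_1G) */
static unsigned long long swiotlb_size_for(unsigned long long total_mem)
{
	unsigned long long size = total_mem * 6 / 100;	/* 6% of guest memory */

	if (size < MB(64))		/* never below the 64MB default */
		size = MB(64);
	if (size > GB(1))		/* never above 1GB */
		size = GB(1);

	return size;
}

int main(void)
{
	unsigned long long mem[] = { GB(1), GB(4), GB(32) };

	/* Prints roughly: 1G -> 64MB, 4G -> 245MB, 32G -> 1024MB */
	for (int i = 0; i < 3; i++)
		printf("%lluG guest -> %lluMB SWIOTLB\n",
		       mem[i] >> 30, swiotlb_size_for(mem[i]) >> 20);

	return 0;
}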