Path: blob/master/tools/testing/selftests/kvm/lib/x86/sev.c
49657 views
// SPDX-License-Identifier: GPL-2.0-only
#include <stdint.h>
#include <stdbool.h>

#include "sev.h"

/*
 * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
 * -1 would then cause an underflow back to 2**64 - 1. This is expected and
 * correct.
 *
 * If the last range in the sparsebit is [x, y] and we try to iterate,
 * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try
 * and find the first range, but that's correct because the condition
 * expression would cause us to quit the loop.
 */
/*
 * Encrypt every protected page of @region for measurement into the guest.
 *
 * For each set range in the region's protected_phy_pages sparsebit, the
 * corresponding GPA range is (optionally) converted to private memory and
 * then fed to the appropriate launch-update ioctl: SNP VMs use
 * snp_launch_update_data() with @page_type, legacy SEV/SEV-ES VMs use
 * sev_launch_update_data() (and must first register the region's host
 * memory via sev_register_encrypted_memory()).
 *
 * @page_type is only consumed on the SNP path; @private selects whether
 * the GPA range is flipped to private (guest_memfd) before encryption.
 */
static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
			   uint8_t page_type, bool private)
{
	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
	sparsebit_idx_t i, j;

	/* Nothing to encrypt if the region has no protected pages. */
	if (!sparsebit_any_set(protected_phy_pages))
		return;

	/* Legacy SEV/SEV-ES requires the HVA range to be pinned/registered. */
	if (!is_sev_snp_vm(vm))
		sev_register_encrypted_memory(vm, region);

	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
		/* [i, j] is an inclusive range of page indices. */
		const uint64_t size = (j - i + 1) * vm->page_size;
		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;

		if (private)
			vm_mem_set_private(vm, gpa_base + offset, size);

		if (is_sev_snp_vm(vm))
			snp_launch_update_data(vm, gpa_base + offset,
					       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
					       size, page_type);
		else
			sev_launch_update_data(vm, gpa_base + offset, size);

	}
}

/*
 * Initialize SEV for @vm.  Legacy (KVM_X86_DEFAULT_VM) VMs open the SEV
 * device and use the old KVM_SEV_INIT ioctl; dedicated SEV VM types use
 * KVM_SEV_INIT2 with a zeroed kvm_sev_init (i.e. KVM's defaults).
 */
void sev_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		/* The fd must not already be open for this VM. */
		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };
		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

/*
 * Initialize SEV-ES for @vm.  Mirrors sev_vm_init() but uses
 * KVM_SEV_ES_INIT on the legacy path and expects KVM_X86_SEV_ES_VM
 * for the INIT2 path.
 */
void sev_es_vm_init(struct kvm_vm *vm)
{
	if (vm->type == KVM_X86_DEFAULT_VM) {
		TEST_ASSERT_EQ(vm->arch.sev_fd, -1);
		vm->arch.sev_fd = open_sev_dev_path_or_exit();
		vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL);
	} else {
		struct kvm_sev_init init = { 0 };
		TEST_ASSERT_EQ(vm->type, KVM_X86_SEV_ES_VM);
		vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
	}
}

/*
 * Initialize SEV-SNP for @vm.  SNP has no legacy init path; only
 * KVM_SEV_INIT2 with default parameters is supported.
 */
void snp_vm_init(struct kvm_vm *vm)
{
	struct kvm_sev_init init = { 0 };

	TEST_ASSERT_EQ(vm->type, KVM_X86_SNP_VM);
	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
}

/*
 * Begin the SEV launch flow: issue KVM_SEV_LAUNCH_START with @policy,
 * verify the guest entered LAUNCH_UPDATE state, then encrypt all
 * protected pages in every memslot.  For SEV-ES policies the VMSA is
 * also encrypted/measured via KVM_SEV_LAUNCH_UPDATE_VMSA.
 *
 * Page tables are encrypted along with the rest of guest memory, so
 * mark them protected to keep the test infra from touching them.
 */
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
{
	struct kvm_sev_launch_start launch_start = {
		.policy = policy,
	};
	struct userspace_mem_region *region;
	struct kvm_sev_guest_status status;
	int ctr;

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);

	TEST_ASSERT_EQ(status.policy, policy);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);

	/* Page type and private conversion are SNP-only concepts. */
	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region, KVM_SEV_PAGE_TYPE_INVALID, false);

	if (policy & SEV_POLICY_ES)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	vm->arch.is_pt_protected = true;
}

/*
 * Retrieve the launch measurement into @measurement (caller-supplied,
 * must hold at least 256 bytes to match .len below) and verify the
 * guest transitioned to LAUNCH_SECRET state.
 */
void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
{
	struct kvm_sev_launch_measure launch_measure;
	struct kvm_sev_guest_status guest_status;

	launch_measure.len = 256;
	launch_measure.uaddr = (__u64)measurement;
	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
	TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
}

/*
 * Finalize the SEV launch.  The guest may legitimately be in either
 * LAUNCH_UPDATE (no measurement taken) or LAUNCH_SECRET (measurement
 * taken) before LAUNCH_FINISH; afterwards it must be RUNNING.
 */
void sev_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_guest_status status;

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
		    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
		    "Unexpected guest state: %d", status.state);

	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
}

/* Begin the SNP launch flow with the given 64-bit SNP @policy. */
void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
{
	struct kvm_sev_snp_launch_start launch_start = {
		.policy = policy,
	};

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start);
}

/*
 * Encrypt all protected pages in every memslot as NORMAL SNP pages,
 * converting the ranges to private first (SNP guest memory is backed
 * by private memory).  Page tables become inaccessible to the host
 * afterwards, so mark them protected.
 */
void snp_vm_launch_update(struct kvm_vm *vm)
{
	struct userspace_mem_region *region;
	int ctr;

	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
		encrypt_region(vm, region, KVM_SEV_SNP_PAGE_TYPE_NORMAL, true);

	vm->arch.is_pt_protected = true;
}

/* Finalize the SNP launch with default (zeroed) finish parameters. */
void snp_vm_launch_finish(struct kvm_vm *vm)
{
	struct kvm_sev_snp_launch_finish launch_finish = { 0 };

	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
}

/*
 * Create a VM of the given SEV flavor (@type) with a single vCPU running
 * @guest_code.  The vCPU is returned via @cpu.  Note: launch/encryption
 * is NOT performed here; see vm_sev_launch().
 */
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
					   struct kvm_vcpu **cpu)
{
	struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = type,
	};
	struct kvm_vm *vm;
	struct kvm_vcpu *cpus[1];

	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
	*cpu = cpus[0];

	return vm;
}

/*
 * Run the full launch sequence for @vm with @policy.
 *
 * SNP path: enable the MAP_GPA_RANGE hypercall exit (used by SNP guests
 * for shared<->private conversion), then start/update/finish; SNP has
 * no launch measurement ioctl, so @measurement is ignored.
 *
 * SEV/SEV-ES path: start, measure, finish.  If the caller doesn't care
 * about the measurement, a 256-byte scratch buffer is used (size must
 * match sev_vm_launch_measure()'s launch_measure.len).
 */
void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
{
	if (is_sev_snp_vm(vm)) {
		vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));

		snp_vm_launch_start(vm, policy);

		snp_vm_launch_update(vm);

		snp_vm_launch_finish(vm);

		return;
	}

	sev_vm_launch(vm, policy);

	if (!measurement)
		measurement = alloca(256);

	sev_vm_launch_measure(vm, measurement);

	sev_vm_launch_finish(vm);
}