Path: blob/master/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
 *
 * Copyright (C) 2022, Red Hat, Inc.
 *
 */
#include <asm/barrier.h>
#include <pthread.h>
#include <inttypes.h>

#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"

#define WORKER_VCPU_ID_1 2
#define WORKER_VCPU_ID_2 65

#define NTRY 100
#define NTEST_PAGES 2

struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[];
};

enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
	HV_GENERIC_SET_ALL,
};

#define HV_FLUSH_ALL_PROCESSORS			BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES	BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY	BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT	BIT(3)

/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_tlb_flush {
	u64 address_space;
	u64 flags;
	u64 processor_mask;
	u64 gva_list[];
} __packed;

/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
	u64 address_space;
	u64 flags;
	struct hv_vpset hv_vp_set;
	u64 gva_list[];
} __packed;

/*
 * Pass the following info to 'workers' and 'sender'
 * - Hypercall page's GVA
 * - Hypercall page's GPA
 * - Test pages GVA
 * - GVAs of the test pages' PTEs
 */
struct test_data {
	vm_vaddr_t hcall_gva;
	vm_paddr_t hcall_gpa;
	vm_vaddr_t test_pages;
	vm_vaddr_t test_pages_pte[NTEST_PAGES];
};

/* 'Worker' vCPU code checking the contents of the test page */
static void worker_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
	void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
	u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));
	u64 expected, val;

	x2apic_enable();
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);

	for (;;) {
		cpu_relax();

		expected = READ_ONCE(*this_cpu);

		/*
		 * Make sure the value in the test page is read after reading
		 * the expectation for the first time. Pairs with wmb() in
		 * prepare_to_test().
		 */
		rmb();

		val = READ_ONCE(*(u64 *)data->test_pages);

		/*
		 * Make sure the value in the test page is read before reading
		 * the expectation for the second time. Pairs with wmb() in
		 * post_test().
		 */
		rmb();

		/*
		 * '0' indicates the sender is between iterations, wait until
		 * the sender is ready for this vCPU to start checking again.
		 */
		if (!expected)
			continue;

		/*
		 * Re-read the per-vCPU value to ensure the sender didn't move
		 * onto a new iteration.
		 */
		if (expected != READ_ONCE(*this_cpu))
			continue;

		GUEST_ASSERT(val == expected);
	}
}
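/*
 * Overall flow of one test iteration (see prepare_to_test()/post_test()
 * below): the sender writes '0' to both expectation slots, swaps the two
 * test pages' PTEs, issues the TLB flush hypercall under test and only
 * then publishes the value the workers must see through the new mapping.
 * A worker that still translates through a stale TLB entry reads the old
 * page's fill pattern instead and trips the GUEST_ASSERT() above.
 */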
/*
 * Write per-CPU info indicating what each 'worker' CPU is supposed to see in
 * test page. '0' means don't check.
 */
static void set_expected_val(void *addr, u64 val, int vcpu_id)
{
	void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;

	*(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;
}

/*
 * Update PTEs swapping two test pages.
 * TODO: use swap()/xchg() when these are provided.
 */
static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2)
{
	uint64_t tmp = *(uint64_t *)pte_gva1;

	*(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
	*(uint64_t *)pte_gva2 = tmp;
}

/*
 * TODO: replace the silly NOP loop with a proper udelay() implementation.
 */
static inline void do_delay(void)
{
	int i;

	for (i = 0; i < 1000000; i++)
		asm volatile("nop");
}

/*
 * Prepare to test: 'disable' workers by setting the expectation to '0',
 * clear hypercall input page and then swap two test pages.
 */
static inline void prepare_to_test(struct test_data *data)
{
	/* Clear hypercall input page */
	memset((void *)data->hcall_gva, 0, PAGE_SIZE);

	/* 'Disable' workers */
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);

	/* Make sure workers are 'disabled' before we swap PTEs. */
	wmb();

	/* Make sure workers have enough time to notice */
	do_delay();

	/* Swap test page mappings */
	swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
}

/*
 * Finalize the test: check the hypercall result, set the expected val for
 * 'worker' CPUs and give them some time to test.
 */
static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
{
	/* Make sure we change the expectation after swapping PTEs */
	wmb();

	/* Set the expectation for workers, '0' means don't test */
	set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);

	/* Make sure workers have enough time to test */
	do_delay();
}

#define TESTVAL1 0x0101010101010101
#define TESTVAL2 0x0202020202020202
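/*
 * A note on the hypercall 'control' values built below, following the TLFS
 * layout mirrored by the selftests' hyperv.h (where HV_HYPERCALL_FAST_BIT
 * is BIT(16), HV_HYPERCALL_VARHEAD_OFFSET is 17 and
 * HV_HYPERCALL_REP_COMP_OFFSET is 32): bits 15:0 hold the call code, bit 16
 * selects the 'fast' register-based convention, bits 26:17 give the
 * variable header size in 8-byte chunks and bits 43:32 the rep count. E.g.
 * HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST | (1UL << HV_HYPERCALL_REP_COMP_OFFSET)
 * encodes a rep hypercall with a one-entry GVA list.
 */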
/* Main vCPU doing the test */
static void sender_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
	struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
	vm_paddr_t hcall_gpa = data->hcall_gpa;
	int i, stage = 1;

	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);

	/* "Slow" hypercalls */

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->processor_mask = 0;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);
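	/*
	 * The 'Ex' variants take a sparse VP set instead of a plain mask:
	 * valid_bank_mask has one bit per 64-vCPU 'bank' and bank_contents[]
	 * holds one u64 per set bank, so vCPU N is bank N / 64, bit N % 64.
	 * WORKER_VCPU_ID_2 (65) lands in the second bank, so these loops
	 * also exercise a multi-bank, variable-sized header.
	 */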
	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}
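	/*
	 * The "fast" variants below pass input in registers rather than via
	 * the hypercall page: the first two u64s of input (address_space,
	 * here 0x0, and the flush flags) travel in the two hyperv_hypercall()
	 * arguments that normally carry the input/output GPAs, and any
	 * remaining input is loaded into XMM registers (16 bytes each) by
	 * hyperv_write_xmm_input(), per the TLFS 'XMM fast hypercall input'
	 * extension.
	 */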
	/* "Fast" hypercalls */

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);
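	/*
	 * For the 'Ex' fast calls the XMM payload starts at hv_vp_set: its
	 * 16-byte header (format + valid_bank_mask) fills one register and
	 * every further pair of u64s (sparse banks and/or the GVA list)
	 * needs another, hence hyperv_write_xmm_input() is passed 2
	 * registers while the payload fits in 32 bytes and 3 once the GVA
	 * list pushes it past that.
	 */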
	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);
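	/*
	 * HV_GENERIC_SET_ALL addresses every virtual processor, so no sparse
	 * banks (and no VARHEAD size in the control value) are needed and
	 * both workers are expected to observe the flush.
	 */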
	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT,
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_DONE();
}

static void *vcpu_thread(void *arg)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
	struct ucall uc;
	int old;
	int r;

	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
		    vcpu->id, r);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* NOT REACHED */
	default:
		TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
	}

	return NULL;
}

static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
	void *retval;
	int r;

	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
		    retval);
}
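/*
 * Host side of the test: create the VM with the sender vCPU, set up the
 * shared test_data, hypercall and test pages, expose the test pages' PTEs
 * to the guest, add the two worker vCPUs on their own threads and then
 * step the sender through its GUEST_SYNC() stages.
 */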
int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu[3];
	pthread_t threads[2];
	vm_vaddr_t test_data_page, gva;
	vm_paddr_t gpa;
	uint64_t *pte;
	struct test_data *data;
	struct ucall uc;
	int stage = 1, r, i;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TLBFLUSH));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Test data page */
	test_data_page = vm_vaddr_alloc_page(vm);
	data = (struct test_data *)addr_gva2hva(vm, test_data_page);

	/* Hypercall input/output */
	data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
	data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
	memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);

	/*
	 * Test pages: the first one is filled with '0x01's, the second with
	 * '0x02's and the test will swap their mappings. The third page keeps
	 * the indication about the current state of mappings.
	 */
	data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
	for (i = 0; i < NTEST_PAGES; i++)
		memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
		       (u8)(i + 1), PAGE_SIZE);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);

	/*
	 * Get PTE pointers for test pages and map them inside the guest.
	 * Use separate page for each PTE for simplicity.
	 */
	gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
	for (i = 0; i < NTEST_PAGES; i++) {
		pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
		gpa = addr_hva2gpa(vm, pte);
		virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK);
		data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
	}

	/*
	 * Sender vCPU which performs the test: swaps test pages, sets
	 * expectation for 'workers' and issues TLB flush hypercalls.
	 */
	vcpu_args_set(vcpu[0], 1, test_data_page);
	vcpu_set_hv_cpuid(vcpu[0]);

	/* Create worker vCPUs which check the contents of the test pages */
	vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
	vcpu_args_set(vcpu[1], 1, test_data_page);
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
	vcpu_args_set(vcpu[2], 1, test_data_page);
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create() failed");

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create() failed");

	while (true) {
		vcpu_run(vcpu[0]);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

		switch (get_ucall(vcpu[0], &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		stage++;
	}

done:
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);
	kvm_vm_free(vm);

	return 0;
}
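/*
 * Like the other KVM selftests, this builds into a standalone binary, e.g.
 * (paths assuming a current kernel tree; older trees used x86_64/ instead
 * of x86/):
 *
 *   make -C tools/testing/selftests TARGETS=kvm
 *   ./tools/testing/selftests/kvm/x86/hyperv_tlb_flush
 *
 * TEST_REQUIRE() above skips the test when KVM_CAP_HYPERV_TLBFLUSH is not
 * available.
 */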