GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/x86/hyperv_tlb_flush.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
 *
 * Copyright (C) 2022, Red Hat, Inc.
 *
 */
#include <asm/barrier.h>
#include <pthread.h>
#include <inttypes.h>

#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
#include "test_util.h"
#include "vmx.h"

#define WORKER_VCPU_ID_1 2
#define WORKER_VCPU_ID_2 65
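
/*
 * Note: keeping WORKER_VCPU_ID_2 >= 64 forces the sparse
 * HV_GENERIC_SET_SPARSE_4K VP sets below to span a second 64-bit bank
 * (valid_bank_mask bit 1, bank_contents[1]).
 */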

#define NTRY 100
#define NTEST_PAGES 2

struct hv_vpset {
	u64 format;
	u64 valid_bank_mask;
	u64 bank_contents[];
};

enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
	HV_GENERIC_SET_ALL,
};
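
/*
 * With HV_GENERIC_SET_SPARSE_4K, each set bit in valid_bank_mask selects a
 * 64-vCPU bank and contributes one u64 entry to bank_contents[];
 * HV_GENERIC_SET_ALL targets every vCPU and carries no banks.
 */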

#define HV_FLUSH_ALL_PROCESSORS			BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES	BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY	BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT	BIT(3)

/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_tlb_flush {
	u64 address_space;
	u64 flags;
	u64 processor_mask;
	u64 gva_list[];
} __packed;

/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
	u64 address_space;
	u64 flags;
	struct hv_vpset hv_vp_set;
	u64 gva_list[];
} __packed;
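
/*
 * Since bank_contents[] is a flexible array inside hv_vp_set, gva_list[]
 * overlays it: rep list entries start after the last vpset bank, hence the
 * gva_list[1]/gva_list[2] indexing in the _EX loops below.
 */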

/*
 * Pass the following info to 'workers' and 'sender':
 * - Hypercall page's GVA
 * - Hypercall page's GPA
 * - Test pages' GVA
 * - GVAs of the test pages' PTEs
 */
struct test_data {
	vm_vaddr_t hcall_gva;
	vm_paddr_t hcall_gpa;
	vm_vaddr_t test_pages;
	vm_vaddr_t test_pages_pte[NTEST_PAGES];
};
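
/*
 * The test_pages area holds NTEST_PAGES data pages whose PTEs get swapped,
 * plus one extra page at offset PAGE_SIZE * NTEST_PAGES storing each vCPU's
 * expected value at vcpu_id * sizeof(u64) (see set_expected_val()).
 */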

/* 'Worker' vCPU code checking the contents of the test page */
static void worker_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
	void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
	u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));
	u64 expected, val;

	x2apic_enable();
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);

	for (;;) {
		cpu_relax();

		expected = READ_ONCE(*this_cpu);

		/*
		 * Make sure the value in the test page is read after reading
		 * the expectation for the first time. Pairs with wmb() in
		 * prepare_to_test().
		 */
		rmb();

		val = READ_ONCE(*(u64 *)data->test_pages);

		/*
		 * Make sure the value in the test page is read before reading
		 * the expectation for the second time. Pairs with wmb() in
		 * post_test().
		 */
		rmb();

		/*
		 * '0' indicates the sender is between iterations; wait until
		 * the sender is ready for this vCPU to start checking again.
		 */
		if (!expected)
			continue;

		/*
		 * Re-read the per-vCPU value to ensure the sender didn't move
		 * onto a new iteration.
		 */
		if (expected != READ_ONCE(*this_cpu))
			continue;

		GUEST_ASSERT(val == expected);
	}
}

/*
 * Write per-CPU info indicating what each 'worker' CPU is supposed to see in
 * test page. '0' means don't check.
 */
static void set_expected_val(void *addr, u64 val, int vcpu_id)
{
	void *exp_page = addr + PAGE_SIZE * NTEST_PAGES;

	*(u64 *)(exp_page + vcpu_id * sizeof(u64)) = val;
}

/*
 * Update PTEs, swapping the two test pages.
 * TODO: use swap()/xchg() when these are provided.
 */
static void swap_two_test_pages(vm_paddr_t pte_gva1, vm_paddr_t pte_gva2)
{
	uint64_t tmp = *(uint64_t *)pte_gva1;

	*(uint64_t *)pte_gva1 = *(uint64_t *)pte_gva2;
	*(uint64_t *)pte_gva2 = tmp;
}

/*
 * TODO: replace the silly NOP loop with a proper udelay() implementation.
 */
static inline void do_delay(void)
{
	int i;

	for (i = 0; i < 1000000; i++)
		asm volatile("nop");
}

/*
 * Prepare to test: 'disable' workers by setting the expectation to '0',
 * clear hypercall input page and then swap two test pages.
 */
static inline void prepare_to_test(struct test_data *data)
{
	/* Clear hypercall input page */
	memset((void *)data->hcall_gva, 0, PAGE_SIZE);

	/* 'Disable' workers */
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);

	/* Make sure workers are 'disabled' before we swap PTEs. */
	wmb();

	/* Make sure workers have enough time to notice */
	do_delay();

	/* Swap test page mappings */
	swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
}

/*
 * Finalize the test: check hypercall result, set the expected val for
 * 'worker' CPUs and give them some time to test.
 */
static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
{
	/* Make sure we change the expectation after swapping PTEs */
	wmb();

	/* Set the expectation for workers, '0' means don't test */
	set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);

	/* Make sure workers have enough time to test */
	do_delay();
}
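
/*
 * Each test iteration thus runs: prepare_to_test() parks the workers and
 * swaps the PTEs, the sender issues the flush hypercall under test, and
 * post_test() arms the workers with the value the new mapping must yield.
 */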

#define TESTVAL1 0x0101010101010101
#define TESTVAL2 0x0202020202020202
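
/*
 * These values match the memset() fill patterns of the two test pages (0x01
 * and 0x02 in every byte), so the expectation directly encodes which page a
 * GVA must map to after the PTE swap.
 */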

/* Main vCPU doing the test */
static void sender_guest_code(vm_vaddr_t test_data)
{
	struct test_data *data = (struct test_data *)test_data;
	struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
	struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
	vm_paddr_t hcall_gpa = data->hcall_gpa;
	int i, stage = 1;

	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);

	/* "Slow" hypercalls */
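
	/*
	 * "Slow" (memory-based) hypercalls pass input and output by GPA: the
	 * first hypercall page holds the input block, the second
	 * (hcall_gpa + PAGE_SIZE) serves as the output page.
	 */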

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}
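
	/*
	 * HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST is a "rep" hypercall: the rep
	 * count (1 here, for the single entry in gva_list[]) is encoded into
	 * the hypercall input value at HV_HYPERCALL_REP_COMP_OFFSET.
	 */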

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->processor_mask = 0;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}
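
	/*
	 * The _EX variants carry a variable-sized header: the number of extra
	 * 8-byte chunks (the vpset banks above) is encoded into the hypercall
	 * input value at HV_HYPERCALL_VARHEAD_OFFSET.
	 */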

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}
/* "Fast" hypercalls */
367
368
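
	/*
	 * "Fast" hypercalls (HV_HYPERCALL_FAST_BIT) pass input in registers
	 * rather than memory; input beyond the first two quadwords goes in
	 * XMM registers, pre-loaded via hyperv_write_xmm_input() below.
	 */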

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);
	}
395
396
GUEST_SYNC(stage++);
397
398
/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
399
for (i = 0; i < NTRY; i++) {
400
prepare_to_test(data);
401
hyperv_write_xmm_input(&flush->processor_mask, 1);
402
hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
403
HV_HYPERCALL_FAST_BIT, 0x0,
404
HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
405
HV_FLUSH_ALL_PROCESSORS);
406
post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
407
i % 2 ? TESTVAL1 : TESTVAL2);
408
}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT,
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_SYNC(stage++);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
	}

	GUEST_DONE();
}

static void *vcpu_thread(void *arg)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
	struct ucall uc;
	int old;
	int r;

	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
		    vcpu->id, r);

	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* NOT REACHED */
	default:
		TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
	}

	return NULL;
}
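
/*
 * Worker vCPUs spin in worker_guest_code() forever, so their threads never
 * return on their own; PTHREAD_CANCEL_ASYNCHRONOUS above lets main() stop
 * them with pthread_cancel() once the sender is done.
 */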

static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
	void *retval;
	int r;

	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
		    vcpu->id, r);
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
		    retval);
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu[3];
	pthread_t threads[2];
	vm_vaddr_t test_data_page, gva;
	vm_paddr_t gpa;
	uint64_t *pte;
	struct test_data *data;
	struct ucall uc;
	int stage = 1, r, i;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TLBFLUSH));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Test data page */
	test_data_page = vm_vaddr_alloc_page(vm);
	data = (struct test_data *)addr_gva2hva(vm, test_data_page);

	/* Hypercall input/output */
	data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
	data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
	memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);

	/*
	 * Test pages: the first one is filled with '0x01's, the second with
	 * '0x02's and the test will swap their mappings. The third page keeps
	 * the indication about the current state of mappings.
	 */
	data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
	for (i = 0; i < NTEST_PAGES; i++)
		memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
		       (u8)(i + 1), PAGE_SIZE);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);

	/*
	 * Get PTE pointers for test pages and map them inside the guest.
	 * Use separate page for each PTE for simplicity.
	 */
	gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
	for (i = 0; i < NTEST_PAGES; i++) {
		pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
		gpa = addr_hva2gpa(vm, pte);
		virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK);
		data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);
	}
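
	/*
	 * With the PTE pages mapped into the guest, sender_guest_code() can
	 * rewrite translations directly; only the TLB flush hypercall under
	 * test can then bring the workers' stale TLB entries up to date.
	 */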

	/*
	 * Sender vCPU which performs the test: swaps test pages, sets
	 * expectation for 'workers' and issues TLB flush hypercalls.
	 */
	vcpu_args_set(vcpu[0], 1, test_data_page);
	vcpu_set_hv_cpuid(vcpu[0]);

	/* Create worker vCPUs which check the contents of the test pages */
	vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
	vcpu_args_set(vcpu[1], 1, test_data_page);
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
	vcpu_args_set(vcpu[2], 1, test_data_page);
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);
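
	/*
	 * HV_X64_MSR_VP_INDEX is set explicitly so the VP index each worker
	 * reads in worker_guest_code() matches its KVM vCPU id, which is what
	 * the hypercalls' processor masks and VP sets are built from.
	 */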

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create() failed");

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create() failed");

	while (true) {
		vcpu_run(vcpu[0]);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

		switch (get_ucall(vcpu[0], &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
				    uc.args[1], stage);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		stage++;
	}

done:
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);
	kvm_vm_free(vm);

	return 0;
}