GitHub Repository: torvalds/linux
Path: tools/testing/selftests/kvm/memslot_modification_stress_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM memslot modification stress test
 * Adapted from demand_paging_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <time.h>
#include <poll.h>
#include <pthread.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/userfaultfd.h>

#include "memstress.h"
#include "processor.h"
#include "test_util.h"
#include "guest_modes.h"
#include "ucall_common.h"

#define DUMMY_MEMSLOT_INDEX 7

#define DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS 10

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	struct kvm_run *run;
	int ret;

	run = vcpu->run;

	/* Let the guest access its memory until a stop signal is received */
	while (!READ_ONCE(memstress_args.stop_vcpus)) {
		ret = _vcpu_run(vcpu);
		TEST_ASSERT(ret == 0, "vcpu_run failed: %d", ret);

		if (get_ucall(vcpu, NULL) == UCALL_SYNC)
			continue;

		TEST_ASSERT(false,
			    "Invalid guest sync status: exit_reason=%s\n",
			    exit_reason_str(run->exit_reason));
	}
}

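/*
 * Stress the memslot update path: repeatedly add and then delete a small
 * dummy memslot while the vCPU worker threads are accessing guest memory,
 * sleeping for the requested delay (in usec) before each modification.
 */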
static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
			       uint64_t nr_modifications)
{
	uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;
	uint64_t gpa;
	int i;

	/*
	 * Add the dummy memslot just below the memstress memslot, which is
	 * at the top of the guest physical address space.
	 */
	gpa = memstress_args.gpa - pages * vm->page_size;

	for (i = 0; i < nr_modifications; i++) {
		usleep(delay);
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa,
					    DUMMY_MEMSLOT_INDEX, pages, 0);

		vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX);
	}
}

struct test_params {
	useconds_t delay;
	uint64_t nr_iterations;
	bool partition_vcpu_memory_access;
	bool disable_slot_zap_quirk;
};

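/*
 * Per-mode test body: create the memstress VM, optionally disable the x86
 * memslot zap quirk (KVM_X86_QUIRK_SLOT_ZAP_ALL), start the vCPU worker
 * threads, add/delete the dummy memslot nr_iterations times, then stop the
 * vCPUs and tear the VM down.
 */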
static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;

	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
				 VM_MEM_SRC_ANONYMOUS,
				 p->partition_vcpu_memory_access);
#ifdef __x86_64__
	if (p->disable_slot_zap_quirk)
		vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);

	pr_info("Memslot zap quirk %s\n", p->disable_slot_zap_quirk ?
		"disabled" : "enabled");
#endif

	pr_info("Finished creating vCPUs\n");

	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	pr_info("Started all vCPUs\n");

	add_remove_memslot(vm, p->delay, p->nr_iterations);

	memstress_join_vcpu_threads(nr_vcpus);
	pr_info("All vCPU threads joined\n");

	memstress_destroy_vm(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-m mode] [-d delay_usec] [-q]\n"
	       "          [-b memory] [-v vcpus] [-o] [-i iterations]\n", name);
	guest_modes_help();
	printf(" -d: add a delay between each iteration of adding and\n"
	       "     deleting a memslot in usec.\n");
	printf(" -q: Disable memslot zap quirk.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     accessed by each vCPU. e.g. 10M or 3G.\n"
	       "     Default: 1G\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       "     them into a separate region of memory for each vCPU.\n");
	printf(" -i: specify the number of iterations of adding and removing\n"
	       "     a memslot.\n"
	       "     Default: %d\n", DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS);
	puts("");
	exit(0);
}

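/*
 * Example invocation (the binary name assumes the standard kselftest build,
 * which names the executable after this source file):
 *
 *   ./memslot_modification_stress_test -v 4 -b 256M -i 100 -d 10
 *
 * i.e. run 4 vCPUs, each accessing 256M of guest memory, while the dummy
 * memslot is added and deleted 100 times with a 10 usec delay before each
 * modification.
 */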
int main(int argc, char *argv[])
{
	int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
	int opt;
	struct test_params p = {
		.delay = 0,
		.nr_iterations = DEFAULT_MEMSLOT_MODIFICATION_ITERATIONS,
		.partition_vcpu_memory_access = true
	};

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "hm:d:qb:v:oi:")) != -1) {
		switch (opt) {
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'd':
			p.delay = atoi_non_negative("Delay", optarg);
			break;
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'v':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			TEST_ASSERT(nr_vcpus <= max_vcpus,
				    "Invalid number of vcpus, must be between 1 and %d",
				    max_vcpus);
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
			break;
		case 'i':
			p.nr_iterations = atoi_positive("Number of iterations", optarg);
			break;
#ifdef __x86_64__
		case 'q':
			p.disable_slot_zap_quirk = true;

			TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) &
				     KVM_X86_QUIRK_SLOT_ZAP_ALL);
			break;
#endif
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	for_each_guest_mode(run_test, &p);

	return 0;
}