GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/mmu_stress_test.c
// SPDX-License-Identifier: GPL-2.0
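/*
 * mmu_stress_test - KVM memslot/MMU stress test.
 *
 * Overview (see the code below for details): the test creates a VM whose
 * memslots are all backed by a single shared memfd mapping, spawns one
 * worker thread per vCPU, and drives the guest through a series of synced
 * stages: write all memory (twice), read it all back, hammer writes while
 * the host flips the backing to read-only via mprotect() (expecting KVM_RUN
 * to fail with EFAULT), then write everything again once the mapping is
 * writable.  Finally, half of the memslots are deleted and half of the
 * backing is unmapped, and the test exits without tearing down the VM to
 * exercise cleanup via mmu_notifier.release.
 *
 * Illustrative invocation (flags per the usage string in main()), e.g.:
 *   ./mmu_stress_test -c 16 -m 64 -s 2 -H
 * runs 16 vCPUs against 64gb of guest memory in 2gb slots backed by
 * hugepages.
 */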
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sizes.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#include "ucall_common.h"

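/*
 * Host-to-guest signals, copied into the guest via sync_global_to_guest():
 * mprotect_ro_done is set by main() once mprotect(PROT_READ) on the backing
 * has completed; all_vcpus_hit_ro_fault is set by the last vCPU worker to
 * observe an EFAULT exit.  guest_code() keeps writing until both are set.
 */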
static bool mprotect_ro_done;
static bool all_vcpus_hit_ro_fault;

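/*
 * Guest entry point.  Stages reported via GUEST_SYNC(): 0 and 1 write every
 * page, 2 reads every page, 3 writes while the backing is read-only (only
 * reached on architectures that can skip the faulting instruction), and 4
 * writes every page again once the backing is writable.
 */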
static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
        uint64_t gpa;
        int i;

        for (i = 0; i < 2; i++) {
                for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
                        vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
                GUEST_SYNC(i);
        }

        for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
                *((volatile uint64_t *)gpa);
        GUEST_SYNC(2);

        /*
         * Write to the region while mprotect(PROT_READ) is underway.  Keep
         * looping until the memory is guaranteed to be read-only and a fault
         * has occurred, otherwise vCPUs may complete their writes and advance
         * to the next stage prematurely.
         *
         * For architectures that support skipping the faulting instruction,
         * generate the store via inline assembly to ensure the exact length
         * of the instruction is known and stable (vcpu_arch_put_guest() on
         * fixed-length architectures should work, but the cost of paranoia
         * is low in this case).  For x86, hand-code the exact opcode so that
         * there is no room for variability in the generated instruction.
         */
        do {
                for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
#ifdef __x86_64__
                        asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */
#elif defined(__aarch64__)
                        asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
#else
                        vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
#endif
        } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault));

        /*
         * Only architectures that write the entire range can explicitly sync,
         * as other architectures will be stuck on the write fault.
         */
#if defined(__x86_64__) || defined(__aarch64__)
        GUEST_SYNC(3);
#endif

        for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
                vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
        GUEST_SYNC(4);

        GUEST_ASSERT(0);
}

struct vcpu_info {
        struct kvm_vcpu *vcpu;
        uint64_t start_gpa;
        uint64_t end_gpa;
};

static int nr_vcpus;
static atomic_t rendezvous;
static atomic_t nr_ro_faults;

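/*
 * Rendezvous protocol: main() primes 'rendezvous' to nr_vcpus + 1 (or its
 * negation).  Each vCPU moves the count one step toward zero and spins until
 * the boss flips the sign, i.e. a magnitude of 1 means every vCPU has
 * arrived, and a sign flip releases them for the next stage.
 */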
static void rendezvous_with_boss(void)
{
        int orig = atomic_read(&rendezvous);

        if (orig > 0) {
                atomic_dec_and_test(&rendezvous);
                while (atomic_read(&rendezvous) > 0)
                        cpu_relax();
        } else {
                atomic_inc(&rendezvous);
                while (atomic_read(&rendezvous) < 0)
                        cpu_relax();
        }
}

static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
{
        struct ucall uc;

        TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
        TEST_ASSERT_EQ(uc.args[1], stage);
}

static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
        vcpu_run(vcpu);
        assert_sync_stage(vcpu, stage);
}

static void *vcpu_worker(void *data)
{
        struct kvm_sregs __maybe_unused sregs;
        struct vcpu_info *info = data;
        struct kvm_vcpu *vcpu = info->vcpu;
        struct kvm_vm *vm = vcpu->vm;
        int r;

        vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

        rendezvous_with_boss();

        /* Stage 0, write all of guest memory. */
        run_vcpu(vcpu, 0);
        rendezvous_with_boss();
#ifdef __x86_64__
        vcpu_sregs_get(vcpu, &sregs);
        /* Toggle CR0.WP to trigger a MMU context reset. */
        sregs.cr0 ^= X86_CR0_WP;
        vcpu_sregs_set(vcpu, &sregs);
#endif
        rendezvous_with_boss();

        /* Stage 1, re-write all of guest memory. */
        run_vcpu(vcpu, 1);
        rendezvous_with_boss();

        /* Stage 2, read all of guest memory, which is now read-only. */
        run_vcpu(vcpu, 2);

        /*
         * Stage 3, write guest memory and verify KVM returns -EFAULT once the
         * mprotect(PROT_READ) lands.  Only architectures that support
         * validating *all* of guest memory sync for this stage, as vCPUs on
         * other architectures will be stuck on the faulting instruction.  Go
         * to stage 3 without a rendezvous.
         */
        r = _vcpu_run(vcpu);
        TEST_ASSERT(r == -1 && errno == EFAULT,
                    "Expected EFAULT on write to RO memory, got r = %d, errno = %d", r, errno);

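        /*
         * Count this vCPU's -EFAULT; the last vCPU to fault publishes
         * all_vcpus_hit_ro_fault to the guest so that guest_code() can stop
         * hammering writes and (on x86/arm64) sync at stage 3.
         */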
        atomic_inc(&nr_ro_faults);
        if (atomic_read(&nr_ro_faults) == nr_vcpus) {
                WRITE_ONCE(all_vcpus_hit_ro_fault, true);
                sync_global_to_guest(vm, all_vcpus_hit_ro_fault);
        }

#if defined(__x86_64__) || defined(__aarch64__)
        /*
         * Verify *all* writes from the guest hit EFAULT due to the VMA now
         * being read-only.  x86 and arm64 only at this time as skipping the
         * instruction that hits the EFAULT requires advancing the program
         * counter, which is arch specific and relies on inline assembly.
         */
#ifdef __x86_64__
        vcpu->run->kvm_valid_regs = KVM_SYNC_X86_REGS;
#endif
        for (;;) {
                r = _vcpu_run(vcpu);
                if (!r)
                        break;
                TEST_ASSERT_EQ(errno, EFAULT);
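                /*
                 * Skip the faulting store: the hand-coded x86 "mov %rax,(%rax)"
                 * is exactly 3 bytes, and arm64 instructions are a fixed 4
                 * bytes, so bump the program counter by the known length.
                 */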
#if defined(__x86_64__)
                WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS);
                vcpu->run->s.regs.regs.rip += 3;
#elif defined(__aarch64__)
                vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc),
                             vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)) + 4);
#endif

        }
        assert_sync_stage(vcpu, 3);
#endif /* __x86_64__ || __aarch64__ */
        rendezvous_with_boss();

        /*
         * Stage 4.  Run to completion, waiting for mprotect(PROT_WRITE) to
         * make the memory writable again.
         */
        do {
                r = _vcpu_run(vcpu);
        } while (r && errno == EFAULT);
        TEST_ASSERT_EQ(r, 0);
        assert_sync_stage(vcpu, 4);
        rendezvous_with_boss();

        return NULL;
}

static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
                                uint64_t start_gpa, uint64_t end_gpa)
{
        struct vcpu_info *info;
        uint64_t gpa, nr_bytes;
        pthread_t *threads;
        int i;

        threads = malloc(nr_vcpus * sizeof(*threads));
        TEST_ASSERT(threads, "Failed to allocate vCPU threads");

        info = malloc(nr_vcpus * sizeof(*info));
        TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

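        /*
         * Carve the GPA range into one contiguous, page-aligned chunk per
         * vCPU; any remainder past the last aligned chunk is simply unused.
         */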
        nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
                   ~((uint64_t)vm->page_size - 1);
        TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

        for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
                info[i].vcpu = vcpus[i];
                info[i].start_gpa = gpa;
                info[i].end_gpa = gpa + nr_bytes;
                pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
        }
        return threads;
}

static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
        int i, rendezvoused;

        pr_info("Waiting for vCPUs to finish %s...\n", name);

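        /*
         * Poll until the count's magnitude drops to 1, i.e. until all
         * nr_vcpus vCPUs have checked in (see rendezvous_with_boss()).
         */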
        rendezvoused = atomic_read(&rendezvous);
        for (i = 0; abs(rendezvoused) != 1; i++) {
                usleep(100);
                if (!(i & 0x3f))
                        pr_info("\r%d vCPUs haven't rendezvoused...",
                                abs(rendezvoused) - 1);
                rendezvoused = atomic_read(&rendezvous);
        }

        clock_gettime(CLOCK_MONOTONIC, time);

        /* Release the vCPUs after getting the time of the previous action. */
        pr_info("\rAll vCPUs finished %s, releasing...\n", name);
        if (rendezvoused > 0)
                atomic_set(&rendezvous, -nr_vcpus - 1);
        else
                atomic_set(&rendezvous, nr_vcpus + 1);
}

static void calc_default_nr_vcpus(void)
{
        cpu_set_t possible_mask;
        int r;

        r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
        TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
                    errno, strerror(errno));

        nr_vcpus = CPU_COUNT(&possible_mask);
        TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
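        /*
         * Default to ~3/4 of the available CPUs, presumably to leave headroom
         * for the boss thread and the rest of the system.
         */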
        if (nr_vcpus >= 2)
                nr_vcpus = nr_vcpus * 3/4;
}

int main(int argc, char *argv[])
{
        /*
         * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
         * the guest's code, stack, and page tables.  Because selftests creates
         * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
         * just below the 4gb boundary.  This test could create memory at
         * 1gb-3gb, but it's simpler to skip straight to 4gb.
         */
        const uint64_t start_gpa = SZ_4G;
        const int first_slot = 1;

        struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw;
        uint64_t max_gpa, gpa, slot_size, max_mem, i;
        int max_slots, slot, opt, fd;
        bool hugepages = false;
        struct kvm_vcpu **vcpus;
        pthread_t *threads;
        struct kvm_vm *vm;
        void *mem;

        /*
         * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
         * are quite common for x86, requires changing only max_mem (KVM allows
         * 32k memslots, 32k * 2gb == ~64tb of guest memory).
         */
        slot_size = SZ_2G;

        max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
        TEST_ASSERT(max_slots > first_slot, "KVM is broken");

        /* All KVM MMUs should be able to survive a 128gb guest. */
        max_mem = 128ull * SZ_1G;

        calc_default_nr_vcpus();

        while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
                switch (opt) {
                case 'c':
                        nr_vcpus = atoi_positive("Number of vCPUs", optarg);
                        break;
                case 'm':
                        max_mem = 1ull * atoi_positive("Memory size", optarg) * SZ_1G;
                        break;
                case 's':
                        slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G;
                        break;
                case 'H':
                        hugepages = true;
                        break;
                case 'h':
                default:
                        printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
                        exit(1);
                }
        }

        vcpus = malloc(nr_vcpus * sizeof(*vcpus));
        TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

        vm = __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus,
#ifdef __x86_64__
                                    max_mem / SZ_1G,
#else
                                    max_mem / vm_guest_mode_params[VM_MODE_DEFAULT].page_size,
#endif
                                    guest_code, vcpus);

        max_gpa = vm->max_gfn << vm->page_shift;
        TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");

        fd = kvm_memfd_alloc(slot_size, hugepages);
        mem = kvm_mmap(slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd);

        TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

        /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
        for (i = 0; i < slot_size; i += vm->page_size)
                ((uint8_t *)mem)[i] = 0xaa;

        gpa = 0;
        for (slot = first_slot; slot < max_slots; slot++) {
                gpa = start_gpa + ((slot - first_slot) * slot_size);
                if (gpa + slot_size > max_gpa)
                        break;

                if ((gpa - start_gpa) >= max_mem)
                        break;

                vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
                /* Identity map memory in the guest using 1gb pages. */
                virt_map_level(vm, gpa, gpa, slot_size, PG_LEVEL_1G);
#else
                virt_map(vm, gpa, gpa, slot_size >> vm->page_shift);
#endif
        }

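        /*
         * Prime the rendezvous count to nr_vcpus + 1 so that it bottoms out
         * at 1, not 0, once every vCPU has checked in (see
         * rendezvous_with_boss() and rendezvous_with_vcpus()).
         */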
        atomic_set(&rendezvous, nr_vcpus + 1);
        threads = spawn_workers(vm, vcpus, start_gpa, gpa);

        free(vcpus);
        vcpus = NULL;

        pr_info("Running with %lugb of guest memory and %u vCPUs\n",
                (gpa - start_gpa) / SZ_1G, nr_vcpus);

        rendezvous_with_vcpus(&time_start, "spawning");
        rendezvous_with_vcpus(&time_run1, "run 1");
        rendezvous_with_vcpus(&time_reset, "reset");
        rendezvous_with_vcpus(&time_run2, "run 2");

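        /*
         * Revoke write access to the backing while the vCPUs are writing to
         * it; subsequent guest stores make KVM_RUN fail with -EFAULT, which
         * drives stage 3 in vcpu_worker()/guest_code().
         */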
        mprotect(mem, slot_size, PROT_READ);
        mprotect_ro_done = true;
        sync_global_to_guest(vm, mprotect_ro_done);

        rendezvous_with_vcpus(&time_ro, "mprotect RO");
        mprotect(mem, slot_size, PROT_READ | PROT_WRITE);
        rendezvous_with_vcpus(&time_rw, "mprotect RW");

        time_rw = timespec_sub(time_rw, time_ro);
        time_ro = timespec_sub(time_ro, time_run2);
        time_run2 = timespec_sub(time_run2, time_reset);
        time_reset = timespec_sub(time_reset, time_run1);
        time_run1 = timespec_sub(time_run1, time_start);

        pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds, "
                "ro = %ld.%.9lds, rw = %ld.%.9lds\n",
                time_run1.tv_sec, time_run1.tv_nsec,
                time_reset.tv_sec, time_reset.tv_nsec,
                time_run2.tv_sec, time_run2.tv_nsec,
                time_ro.tv_sec, time_ro.tv_nsec,
                time_rw.tv_sec, time_rw.tv_nsec);

        /*
         * Delete even numbered slots (arbitrary) and unmap the first half of
         * the backing (also arbitrary) to verify KVM correctly drops all
         * references to the removed regions.
         */
        for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
                vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

        kvm_munmap(mem, slot_size / 2);

        /* Sanity check that the vCPUs actually ran. */
        for (i = 0; i < nr_vcpus; i++)
                pthread_join(threads[i], NULL);

        /*
         * Deliberately exit without deleting the remaining memslots or closing
         * kvm_fd to test cleanup via mmu_notifier.release.
         */
}