Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/hypervisor/tests/kvm/main.rs
5394 views
1
// Copyright 2022 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
#![cfg(any(target_os = "android", target_os = "linux"))]
6
7
#[cfg(target_arch = "aarch64")]
8
mod aarch64;
9
10
#[cfg(target_arch = "x86_64")]
11
mod x86_64;
12
13
use std::thread;
14
15
use base::pagesize;
16
use base::Event;
17
use base::FromRawDescriptor;
18
use base::IntoRawDescriptor;
19
use base::MappedRegion;
20
use base::MemoryMappingArena;
21
use base::MemoryMappingBuilder;
22
use hypervisor::kvm::dirty_log_bitmap_size;
23
use hypervisor::kvm::Kvm;
24
use hypervisor::kvm::KvmCap;
25
use hypervisor::kvm::KvmVm;
26
use hypervisor::Datamatch;
27
use hypervisor::Hypervisor;
28
use hypervisor::HypervisorCap;
29
use hypervisor::IoEventAddress;
30
use hypervisor::MemCacheType::CacheCoherent;
31
use hypervisor::Vm;
32
#[cfg(target_arch = "aarch64")]
33
use hypervisor::VmAArch64;
34
#[cfg(target_arch = "riscv64")]
35
use hypervisor::VmRiscv64;
36
#[cfg(target_arch = "x86_64")]
37
use hypervisor::VmX86_64;
38
use vm_memory::GuestAddress;
39
use vm_memory::GuestMemory;
40
41
#[test]
fn dirty_log_size() {
    // One bitmap byte tracks 8 pages; partial bytes round up to a whole byte.
    let page_size = pagesize();
    let cases = [
        (0, 0),
        (page_size, 1),
        (page_size * 8, 1),
        (page_size * 8 + 1, 2),
        (page_size * 100, 13),
    ];
    for (memory_size, expected_bytes) in cases {
        assert_eq!(dirty_log_bitmap_size(memory_size), expected_bytes);
    }
}
50
51
#[test]
fn new() {
    // Opening the KVM device must succeed on a KVM-capable host.
    let _kvm = Kvm::new().unwrap();
}
55
56
#[test]
fn check_capability() {
    let hypervisor = Kvm::new().unwrap();
    // KVM always supports user-supplied memory regions.
    assert!(hypervisor.check_capability(HypervisorCap::UserMemory));
    // KVM never provides a hypervisor-initialized boot context.
    assert!(!hypervisor.check_capability(HypervisorCap::HypervisorInitializedBootContext));
}
62
63
#[test]
fn create_vm() {
    // Creating a VM backed by a single page of guest memory must succeed.
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let _vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
}
69
70
#[test]
fn clone_vm() {
    // A KvmVm handle must be duplicable via try_clone.
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let _cloned = vm.try_clone().unwrap();
}
77
78
#[test]
fn send_vm() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // A KvmVm must be Send: move it into another thread and drop it there.
    let handle = thread::spawn(move || drop(vm));
    handle.join().unwrap();
}
89
90
#[test]
fn check_vm_capability() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // User memory is universally available; an s390-only capability is not.
    assert!(vm.check_raw_capability(KvmCap::UserMemory));
    assert!(!vm.check_raw_capability(KvmCap::S390Psw));
}
98
99
#[test]
fn create_vcpu() {
    // Creating vcpu 0 on a fresh VM must succeed.
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let _vcpu = vm.create_vcpu(0).unwrap();
}
106
107
#[test]
fn get_memory() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // Round-trip a byte through the VM's view of guest memory.
    let addr = GuestAddress(0xf0);
    let written: u8 = 67;
    vm.get_memory().write_obj_at_addr(written, addr).unwrap();
    let read: u8 = vm.get_memory().read_obj_from_addr(addr).unwrap();
    assert_eq!(read, written);
}
117
118
#[test]
fn add_memory() {
    let page = pagesize() as u64;
    let hypervisor = Kvm::new().unwrap();
    // Guest memory with a hole between the first page and page 5.
    let guest_mem =
        GuestMemory::new(&[(GuestAddress(0), page), (GuestAddress(page * 5), page * 5)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let mapping_size = 0x1000;

    // A region in the hole right after the first page must be accepted.
    let mapping = MemoryMappingBuilder::new(mapping_size).build().unwrap();
    vm.add_memory_region(
        GuestAddress(page),
        Box::new(mapping),
        false,
        false,
        CacheCoherent,
    )
    .unwrap();

    // So must a region past the end of all existing memory.
    let mapping = MemoryMappingBuilder::new(mapping_size).build().unwrap();
    vm.add_memory_region(
        GuestAddress(0x10 * page),
        Box::new(mapping),
        false,
        false,
        CacheCoherent,
    )
    .unwrap();
}
147
148
#[test]
fn add_memory_ro() {
    let page = pagesize() as u64;
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), page)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // Registering a read-only region (read_only = true) must be accepted.
    let mapping = MemoryMappingBuilder::new(0x1000).build().unwrap();
    vm.add_memory_region(
        GuestAddress(page),
        Box::new(mapping),
        true,
        false,
        CacheCoherent,
    )
    .unwrap();
}
164
165
#[test]
fn remove_memory() {
    let page = pagesize() as u64;
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), page)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let mapping_size = 0x1000;
    let mapping = MemoryMappingBuilder::new(mapping_size).build().unwrap();
    // Record the host pointer so we can verify the same mapping comes back.
    let host_ptr = mapping.as_ptr();
    let slot = vm
        .add_memory_region(
            GuestAddress(page),
            Box::new(mapping),
            false,
            false,
            CacheCoherent,
        )
        .unwrap();
    // Removal must return the identical mapping that was inserted.
    let returned = vm.remove_memory_region(slot).unwrap();
    assert_eq!(returned.size(), mapping_size);
    assert_eq!(returned.as_ptr(), host_ptr);
}
186
187
#[test]
fn remove_invalid_memory() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // Slot 0 was never allocated via add_memory_region, so removal must fail.
    assert!(vm.remove_memory_region(0).is_err());
}
194
195
#[test]
fn overlap_memory() {
    let page = pagesize() as u64;
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), 0x10 * page)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    // This mapping overlaps the initial 16-page region, so adding it must fail.
    let mapping = MemoryMappingBuilder::new(2 * pagesize()).build().unwrap();
    let result = vm.add_memory_region(
        GuestAddress(2 * page),
        Box::new(mapping),
        false,
        false,
        CacheCoherent,
    );
    assert!(result.is_err());
}
212
213
#[test]
fn sync_memory() {
    let page = pagesize() as u64;
    let hypervisor = Kvm::new().unwrap();
    let guest_mem =
        GuestMemory::new(&[(GuestAddress(0), page), (GuestAddress(5 * page), 5 * page)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let arena_size = pagesize();
    let arena = MemoryMappingArena::new(arena_size).unwrap();
    let slot = vm
        .add_memory_region(
            GuestAddress(page),
            Box::new(arena),
            false,
            false,
            CacheCoherent,
        )
        .unwrap();
    // An in-bounds msync succeeds; an oversized length or unknown slot must fail.
    vm.msync_memory_region(slot, arena_size, 0).unwrap();
    assert!(vm.msync_memory_region(slot, arena_size + 1, 0).is_err());
    assert!(vm.msync_memory_region(slot + 1, arena_size, 0).is_err());
}
237
238
#[test]
fn register_irqfd() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let evt_a = Event::new().unwrap();
    let evt_b = Event::new().unwrap();
    let evt_c = Event::new().unwrap();
    // An in-kernel irqchip must exist before irqfds can be wired up.
    vm.create_irq_chip().unwrap();
    vm.register_irqfd(4, &evt_a, None).unwrap();
    vm.register_irqfd(8, &evt_b, None).unwrap();
    // Distinct events may share a GSI...
    vm.register_irqfd(4, &evt_c, None).unwrap();
    // ...but registering the exact same (GSI, event) pair twice must fail.
    vm.register_irqfd(4, &evt_c, None).unwrap_err();
}
252
253
#[test]
fn unregister_irqfd() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let evt_a = Event::new().unwrap();
    let evt_b = Event::new().unwrap();
    let evt_c = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    // Register three irqfds (two sharing GSI 4), then tear each one down.
    vm.register_irqfd(4, &evt_a, None).unwrap();
    vm.register_irqfd(8, &evt_b, None).unwrap();
    vm.register_irqfd(4, &evt_c, None).unwrap();
    vm.unregister_irqfd(4, &evt_a).unwrap();
    vm.unregister_irqfd(8, &evt_b).unwrap();
    vm.unregister_irqfd(4, &evt_c).unwrap();
}
269
270
#[test]
fn irqfd_resample() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let trigger_evt = Event::new().unwrap();
    let ack_evt = Event::new().unwrap();
    vm.create_irq_chip().unwrap();
    // A valid resample event registers and unregisters cleanly.
    vm.register_irqfd(4, &trigger_evt, Some(&ack_evt)).unwrap();
    vm.unregister_irqfd(4, &trigger_evt).unwrap();

    // Ensures the ioctl is actually reading the resamplefd by providing an invalid fd and expecting
    // an error. File descriptor numbers are allocated sequentially, so this very large fd should
    // never practically be in use.
    // SAFETY: This is a bad idea! Don't try this at home! Professional driver on a closed course.
    let bogus_resample_evt = unsafe { Event::from_raw_descriptor(2147483647) };
    vm.register_irqfd(4, &trigger_evt, Some(&bogus_resample_evt))
        .unwrap_err();
    // Leak the descriptor on purpose so Drop never tries to close the invalid fd.
    let _ = bogus_resample_evt.into_raw_descriptor();
}
290
291
#[test]
fn register_ioevent() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let evt = Event::new().unwrap();
    // Cover both address spaces (Pio/Mmio) and every datamatch width.
    let registrations = vec![
        (IoEventAddress::Pio(0xf4), Datamatch::AnyLength),
        (IoEventAddress::Mmio(0x1000), Datamatch::AnyLength),
        (IoEventAddress::Pio(0xc1), Datamatch::U8(Some(0x7fu8))),
        (IoEventAddress::Pio(0xc2), Datamatch::U16(Some(0x1337u16))),
        (IoEventAddress::Pio(0xc4), Datamatch::U32(Some(0xdeadbeefu32))),
        (IoEventAddress::Pio(0xc8), Datamatch::U64(Some(0xdeadbeefdeadbeefu64))),
    ];
    for (addr, datamatch) in registrations {
        vm.register_ioevent(&evt, addr, datamatch).unwrap();
    }
}
326
327
#[test]
fn unregister_ioevent() {
    let hypervisor = Kvm::new().unwrap();
    let guest_mem = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
    let mut vm = KvmVm::new(&hypervisor, guest_mem, Default::default()).unwrap();
    let evt = Event::new().unwrap();
    // Register three ioevents with distinct addresses/datamatches...
    vm.register_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.register_ioevent(
        &evt,
        IoEventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
    // ...then tear each of them down with matching parameters.
    vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
        .unwrap();
    vm.unregister_ioevent(
        &evt,
        IoEventAddress::Mmio(0x1004),
        Datamatch::U8(Some(0x7fu8)),
    )
    .unwrap();
}
354
355