Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/devices/src/virtio/vhost_user_backend/vsock.rs
5394 views
1
// Copyright 2021 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
use std::convert::TryInto;
6
use std::fs::File;
7
use std::fs::OpenOptions;
8
use std::mem::size_of;
9
use std::num::Wrapping;
10
use std::os::unix::fs::OpenOptionsExt;
11
use std::path::Path;
12
use std::str;
13
14
use anyhow::Context;
15
use argh::FromArgs;
16
use base::AsRawDescriptor;
17
use base::Event;
18
use base::RawDescriptor;
19
use base::SafeDescriptor;
20
use cros_async::Executor;
21
use data_model::Le64;
22
use vhost::Vhost;
23
use vhost::Vsock;
24
use vm_memory::GuestMemory;
25
use vmm_vhost::connection::Connection;
26
use vmm_vhost::message::VhostUserConfigFlags;
27
use vmm_vhost::message::VhostUserInflight;
28
use vmm_vhost::message::VhostUserMemoryRegion;
29
use vmm_vhost::message::VhostUserMigrationPhase;
30
use vmm_vhost::message::VhostUserProtocolFeatures;
31
use vmm_vhost::message::VhostUserSingleMemoryRegion;
32
use vmm_vhost::message::VhostUserTransferDirection;
33
use vmm_vhost::message::VhostUserVringAddrFlags;
34
use vmm_vhost::message::VhostUserVringState;
35
use vmm_vhost::Error;
36
use vmm_vhost::Result;
37
use vmm_vhost::SharedMemoryRegion;
38
use vmm_vhost::VHOST_USER_F_PROTOCOL_FEATURES;
39
use zerocopy::IntoBytes;
40
41
use super::BackendConnection;
42
use crate::virtio::device_constants::vsock::NUM_QUEUES;
43
use crate::virtio::vhost_user_backend::handler::vmm_va_to_gpa;
44
use crate::virtio::vhost_user_backend::handler::MappingInfo;
45
use crate::virtio::vhost_user_backend::handler::VhostUserRegularOps;
46
use crate::virtio::vhost_user_backend::VhostUserDeviceBuilder;
47
use crate::virtio::Queue;
48
use crate::virtio::QueueConfig;
49
50
// Index of the event virtqueue: the last of the NUM_QUEUES vrings. It is an
// event-only queue that is not handled by the kernel vhost driver, so most
// vring operations below skip it.
const EVENT_QUEUE: usize = NUM_QUEUES - 1;
51
52
/// vhost-user backend that forwards virtqueue setup to an in-kernel
/// vhost-vsock device, except for the event queue which stays in userspace.
struct VsockBackend {
    // Per-queue configuration; the entry at EVENT_QUEUE is userspace-only.
    queues: [QueueConfig; NUM_QUEUES],
    // VMM virtual-address mappings, populated by `set_mem_table`.
    vmm_maps: Option<Vec<MappingInfo>>,
    // Guest memory, populated by `set_mem_table`.
    mem: Option<GuestMemory>,

    // Handle to the opened kernel vhost-vsock device.
    handle: Vsock,
    // Guest context id for this vsock device.
    cid: u64,
    // vhost-user protocol features this backend advertises.
    protocol_features: VhostUserProtocolFeatures,
}
61
62
/// A vhost-vsock device which handle is already opened. This allows the parent process to open the
/// vhost-vsock device, create this structure, and pass it to the child process so it doesn't need
/// the rights to open the vhost-vsock device itself.
pub struct VhostUserVsockDevice {
    // Guest context id to assign to the device.
    cid: u64,
    // Already-opened kernel vhost-vsock handle.
    handle: Vsock,
}
69
70
impl VhostUserVsockDevice {
71
pub fn new<P: AsRef<Path>>(cid: u64, vhost_device: P) -> anyhow::Result<Self> {
72
let handle = Vsock::new(
73
OpenOptions::new()
74
.read(true)
75
.write(true)
76
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
77
.open(vhost_device.as_ref())
78
.with_context(|| {
79
format!(
80
"failed to open vhost-vsock device {}",
81
vhost_device.as_ref().display()
82
)
83
})?,
84
);
85
86
Ok(Self { cid, handle })
87
}
88
}
89
90
impl AsRawDescriptor for VhostUserVsockDevice {
91
fn as_raw_descriptor(&self) -> base::RawDescriptor {
92
self.handle.as_raw_descriptor()
93
}
94
}
95
96
impl VhostUserDeviceBuilder for VhostUserVsockDevice {
97
fn build(self: Box<Self>, _ex: &Executor) -> anyhow::Result<Box<dyn vmm_vhost::Backend>> {
98
let backend = VsockBackend {
99
queues: [
100
QueueConfig::new(Queue::MAX_SIZE, 0),
101
QueueConfig::new(Queue::MAX_SIZE, 0),
102
QueueConfig::new(Queue::MAX_SIZE, 0),
103
],
104
vmm_maps: None,
105
mem: None,
106
handle: self.handle,
107
cid: self.cid,
108
protocol_features: VhostUserProtocolFeatures::MQ | VhostUserProtocolFeatures::CONFIG,
109
};
110
111
Ok(Box::new(backend))
112
}
113
}
114
115
fn convert_vhost_error(err: vhost::Error) -> Error {
116
use vhost::Error::*;
117
match err {
118
IoctlError(e) => Error::ReqHandlerError(e),
119
_ => Error::BackendInternalError,
120
}
121
}
122
123
impl vmm_vhost::Backend for VsockBackend {
    // Ownership of the vhost device is managed by the kernel driver; forward
    // both calls directly.
    fn set_owner(&mut self) -> Result<()> {
        self.handle.set_owner().map_err(convert_vhost_error)
    }

    fn reset_owner(&mut self) -> Result<()> {
        self.handle.reset_owner().map_err(convert_vhost_error)
    }

    fn get_features(&mut self) -> Result<u64> {
        // Add the vhost-user features that we support.
        let features = self.handle.get_features().map_err(convert_vhost_error)?
            | 1 << VHOST_USER_F_PROTOCOL_FEATURES;
        Ok(features)
    }

    fn set_features(&mut self, features: u64) -> Result<()> {
        // Unset the vhost-user feature flags as they are not supported by the underlying vhost
        // device.
        let features = features & !(1 << VHOST_USER_F_PROTOCOL_FEATURES);
        self.handle
            .set_features(features)
            .map_err(convert_vhost_error)
    }

    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
        Ok(self.protocol_features)
    }

    fn set_protocol_features(&mut self, features: u64) -> Result<()> {
        // Reject any feature bit the frontend requests that we did not
        // advertise in `protocol_features`.
        let unrequested_features = features & !self.protocol_features.bits();
        if unrequested_features != 0 {
            Err(Error::InvalidParam("unsupported protocol feature"))
        } else {
            Ok(())
        }
    }

    fn set_mem_table(
        &mut self,
        contexts: &[VhostUserMemoryRegion],
        files: Vec<File>,
    ) -> Result<()> {
        // Build the guest memory and VMM address mappings, then hand the
        // memory layout to the kernel vhost device.
        let (guest_mem, vmm_maps) = VhostUserRegularOps::set_mem_table(contexts, files)?;

        self.handle
            .set_mem_table(&guest_mem)
            .map_err(convert_vhost_error)?;

        self.mem = Some(guest_mem);
        self.vmm_maps = Some(vmm_maps);

        Ok(())
    }

    fn get_queue_num(&mut self) -> Result<u64> {
        Ok(NUM_QUEUES as u64)
    }

    fn set_vring_num(&mut self, index: u32, num: u32) -> Result<()> {
        if index >= NUM_QUEUES as u32 || num == 0 || num > Queue::MAX_SIZE.into() {
            return Err(Error::InvalidParam(
                "set_vring_num: vring index or size out of range",
            ));
        }

        // We checked these values already.
        let index = index as usize;
        let num = num as u16;
        self.queues[index].set_size(num);

        // The last vq is an event-only vq that is not handled by the kernel.
        if index == EVENT_QUEUE {
            return Ok(());
        }

        self.handle
            .set_vring_num(index, num)
            .map_err(convert_vhost_error)
    }

    fn set_vring_addr(
        &mut self,
        index: u32,
        flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        log: u64,
    ) -> Result<()> {
        if index >= NUM_QUEUES as u32 {
            return Err(Error::InvalidParam("set_vring_addr: index out of range"));
        }

        let index = index as usize;

        // Both guest memory and the VMM mappings must have been provided by a
        // prior `set_mem_table` call.
        let mem = self
            .mem
            .as_ref()
            .ok_or(Error::InvalidParam("set_vring_addr: could not get mem"))?;
        let maps = self.vmm_maps.as_ref().ok_or(Error::InvalidParam(
            "set_vring_addr: could not get vmm_maps",
        ))?;

        // The addresses arrive as VMM virtual addresses; translate them to
        // guest-physical addresses before programming the queue.
        let queue = &mut self.queues[index];
        queue.set_desc_table(vmm_va_to_gpa(maps, descriptor)?);
        queue.set_avail_ring(vmm_va_to_gpa(maps, available)?);
        queue.set_used_ring(vmm_va_to_gpa(maps, used)?);
        // The log address is only meaningful when the LOG flag is set.
        let log_addr = if flags.contains(VhostUserVringAddrFlags::VHOST_VRING_F_LOG) {
            vmm_va_to_gpa(maps, log).map(Some)?
        } else {
            None
        };

        // The event queue is not handled by the kernel; the userspace queue
        // state above is all it needs.
        if index == EVENT_QUEUE {
            return Ok(());
        }

        self.handle
            .set_vring_addr(
                mem,
                queue.size(),
                index,
                flags.bits(),
                queue.desc_table(),
                queue.used_ring(),
                queue.avail_ring(),
                log_addr,
            )
            .map_err(convert_vhost_error)
    }

    fn set_vring_base(&mut self, index: u32, base: u32) -> Result<()> {
        if index >= NUM_QUEUES as u32 {
            return Err(Error::InvalidParam("set_vring_base: index out of range"));
        }

        let index = index as usize;
        let base = base as u16;

        let queue = &mut self.queues[index];
        queue.set_next_avail(Wrapping(base));
        queue.set_next_used(Wrapping(base));

        // Event queue state is tracked only in userspace.
        if index == EVENT_QUEUE {
            return Ok(());
        }

        self.handle
            .set_vring_base(index, base)
            .map_err(convert_vhost_error)
    }

    fn get_vring_base(&mut self, index: u32) -> Result<VhostUserVringState> {
        if index >= NUM_QUEUES as u32 {
            return Err(Error::InvalidParam("get_vring_base: index out of range"));
        }

        // The event queue's position is kept in userspace; the others are read
        // back from the kernel device.
        let index = index as usize;
        let next_avail = if index == EVENT_QUEUE {
            self.queues[index].next_avail().0
        } else {
            self.handle
                .get_vring_base(index)
                .map_err(convert_vhost_error)?
        };

        Ok(VhostUserVringState::new(index as u32, next_avail.into()))
    }

    fn set_vring_kick(&mut self, index: u8, fd: Option<File>) -> Result<()> {
        if index >= NUM_QUEUES as u8 {
            return Err(Error::InvalidParam("set_vring_kick: index out of range"));
        }

        // A kick eventfd is mandatory for this backend.
        let file = fd.ok_or(Error::InvalidParam("set_vring_kick: missing fd"))?;
        let event = Event::from(SafeDescriptor::from(file));
        let index = usize::from(index);
        // The kernel only handles kicks for the non-event queues.
        if index != EVENT_QUEUE {
            self.handle
                .set_vring_kick(index, &event)
                .map_err(convert_vhost_error)?;
        }

        Ok(())
    }

    fn set_vring_call(&mut self, index: u8, fd: Option<File>) -> Result<()> {
        if index >= NUM_QUEUES as u8 {
            return Err(Error::InvalidParam("set_vring_call: index out of range"));
        }

        // A call eventfd is mandatory for this backend.
        let file = fd.ok_or(Error::InvalidParam("set_vring_call: missing fd"))?;
        let event = Event::from(SafeDescriptor::from(file));
        let index = usize::from(index);
        // The kernel only signals completions for the non-event queues.
        if index != EVENT_QUEUE {
            self.handle
                .set_vring_call(index, &event)
                .map_err(convert_vhost_error)?;
        }

        Ok(())
    }

    fn set_vring_err(&mut self, index: u8, fd: Option<File>) -> Result<()> {
        if index >= NUM_QUEUES as u8 {
            return Err(Error::InvalidParam("set_vring_err: index out of range"));
        }

        let index = usize::from(index);
        let file = fd.ok_or(Error::InvalidParam("set_vring_err: missing fd"))?;

        let event = Event::from(SafeDescriptor::from(file));

        // No kernel-side error reporting for the event queue.
        if index == EVENT_QUEUE {
            return Ok(());
        }

        self.handle
            .set_vring_err(index, &event)
            .map_err(convert_vhost_error)
    }

    fn set_vring_enable(&mut self, index: u32, enable: bool) -> Result<()> {
        if index >= NUM_QUEUES as u32 {
            return Err(Error::InvalidParam("vring index out of range"));
        }

        self.queues[index as usize].set_ready(enable);

        // Enabling/disabling the event queue never touches the kernel device.
        if index == (EVENT_QUEUE) as u32 {
            return Ok(());
        }

        if self.queues[..EVENT_QUEUE].iter().all(|q| q.ready()) {
            // All queues are ready. Start the device.
            self.handle.set_cid(self.cid).map_err(convert_vhost_error)?;
            self.handle.start().map_err(convert_vhost_error)
        } else if !enable {
            // If we just disabled a vring then stop the device.
            self.handle.stop().map_err(convert_vhost_error)
        } else {
            Ok(())
        }
    }

    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        _flags: VhostUserConfigFlags,
    ) -> Result<Vec<u8>> {
        // The whole config space is a single little-endian u64 holding the cid,
        // so requests are validated against size_of::<Le64>().
        let start: usize = offset
            .try_into()
            .map_err(|_| Error::InvalidParam("offset does not fit in usize"))?;
        let end: usize = offset
            .checked_add(size)
            .and_then(|e| e.try_into().ok())
            .ok_or(Error::InvalidParam("offset + size does not fit in usize"))?;

        if start >= size_of::<Le64>() || end > size_of::<Le64>() {
            return Err(Error::InvalidParam(
                "get_config: offset and/or size out of range",
            ));
        }

        Ok(Le64::from(self.cid).as_bytes()[start..end].to_vec())
    }

    fn set_config(
        &mut self,
        _offset: u32,
        _buf: &[u8],
        _flags: VhostUserConfigFlags,
    ) -> Result<()> {
        // The config space (the cid) is read-only.
        Err(Error::InvalidOperation)
    }

    fn set_backend_req_fd(&mut self, _vu_req: Connection) {
        // We didn't set VhostUserProtocolFeatures::BACKEND_REQ
        unreachable!("unexpected set_backend_req_fd");
    }

    // Inflight descriptor tracking is not supported by this backend.
    fn get_inflight_fd(
        &mut self,
        _inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)> {
        Err(Error::InvalidOperation)
    }

    fn set_inflight_fd(&mut self, _inflight: &VhostUserInflight, _file: File) -> Result<()> {
        Err(Error::InvalidOperation)
    }

    // Dynamic memory-slot management is not supported; memory must be set up
    // front via `set_mem_table`.
    fn get_max_mem_slots(&mut self) -> Result<u64> {
        Err(Error::InvalidOperation)
    }

    fn add_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion, _fd: File) -> Result<()> {
        Err(Error::InvalidOperation)
    }

    fn remove_mem_region(&mut self, _region: &VhostUserSingleMemoryRegion) -> Result<()> {
        Err(Error::InvalidOperation)
    }

    // Device-state migration is not supported by this backend.
    fn set_device_state_fd(
        &mut self,
        _transfer_direction: VhostUserTransferDirection,
        _migration_phase: VhostUserMigrationPhase,
        _fd: File,
    ) -> Result<Option<File>> {
        Err(Error::InvalidOperation)
    }

    fn check_device_state(&mut self) -> Result<()> {
        Err(Error::InvalidOperation)
    }

    fn get_shmem_config(&mut self) -> Result<Vec<SharedMemoryRegion>> {
        // No shared memory regions are exposed.
        Ok(Vec::new())
    }
}
446
447
// NOTE: the `///` doc comments on the struct and its fields are consumed by
// `argh` and become the command-line help text, so they are user-visible.
#[derive(FromArgs)]
#[argh(subcommand, name = "vsock")]
/// Vsock device
pub struct Options {
    #[argh(option, arg_name = "PATH", hidden_help)]
    /// deprecated - please use --socket-path instead
    socket: Option<String>,
    #[argh(option, arg_name = "PATH")]
    /// path to the vhost-user socket to bind to.
    /// If this flag is set, --fd cannot be specified.
    socket_path: Option<String>,
    #[argh(option, arg_name = "FD")]
    /// file descriptor of a connected vhost-user socket.
    /// If this flag is set, --socket-path cannot be specified.
    fd: Option<RawDescriptor>,

    #[argh(option, arg_name = "INT")]
    /// the vsock context id for this device
    cid: u64,
    #[argh(
        option,
        default = "String::from(\"/dev/vhost-vsock\")",
        arg_name = "PATH"
    )]
    /// path to the vhost-vsock control socket
    vhost_socket: String,
}
474
475
/// Returns an error if the given `args` is invalid or the device fails to run.
476
pub fn run_vsock_device(opts: Options) -> anyhow::Result<()> {
477
let ex = Executor::new().context("failed to create executor")?;
478
479
let conn =
480
BackendConnection::from_opts(opts.socket.as_deref(), opts.socket_path.as_deref(), opts.fd)?;
481
482
let vsock_device = Box::new(VhostUserVsockDevice::new(opts.cid, opts.vhost_socket)?);
483
484
conn.run_device(ex, vsock_device)
485
}
486
487