// Source: crosvm — devices/src/virtio/interrupt.rs
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
#[cfg(target_arch = "x86_64")]
use std::time::Instant;

#[cfg(target_arch = "x86_64")]
use base::error;
use base::Event;
use base::EventToken;
use base::WaitContext;
use base::WorkerThread;
#[cfg(target_arch = "x86_64")]
use metrics::log_metric;
#[cfg(target_arch = "x86_64")]
use metrics::MetricEventType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;

use super::INTERRUPT_STATUS_CONFIG_CHANGED;
use super::INTERRUPT_STATUS_USED_RING;
use super::VIRTIO_MSI_NO_VECTOR;
#[cfg(target_arch = "x86_64")]
use crate::acpi::PmWakeupEvent;
use crate::irq_event::IrqEdgeEvent;
use crate::irq_event::IrqLevelEvent;
use crate::pci::MsixConfig;
/// Interrupt state for the virtio-over-PCI transport: a legacy INTx
/// level-triggered event plus optional MSI-X configuration.
struct TransportPci {
    // Level-triggered (INTx) interrupt event pair; also provides the resample
    // event used by the resample worker thread.
    irq_evt_lvl: IrqLevelEvent,
    // MSI-X state shared with the device's PCI configuration; `None` when the
    // device has no MSI-X capability.
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
    // MSI-X vector used for configuration-change notifications
    // (see `Interrupt::signal_config_changed`).
    config_msix_vector: u16,
}
/// How this interrupt reaches the guest driver (or, for vhost-user, the
/// frontend process).
enum Transport {
    /// Virtio-over-PCI: MSI-X when enabled, otherwise legacy INTx + ISR.
    Pci {
        pci: TransportPci,
    },
    /// Virtio-over-MMIO: a single edge-triggered interrupt plus ISR.
    Mmio {
        irq_evt_edge: IrqEdgeEvent,
    },
    /// Vhost-user device backend: notifications are forwarded to the frontend
    /// rather than injected directly.
    VhostUser {
        // Vring call event, signaled for used-ring notifications.
        call_evt: Event,
        // Callback that sends the config-change message to the frontend
        // (see `Interrupt::new_vhost_user`).
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    },
}
/// State shared by every clone of an `Interrupt` handle.
struct InterruptInner {
    // Virtio ISR status bits (INTERRUPT_STATUS_* masks); read and reset by
    // the guest through the transport's ISR register accessors below.
    interrupt_status: AtomicUsize,
    // Delivery mechanism: PCI, MMIO, or vhost-user.
    transport: Transport,
    // When true, the hypervisor observes `interrupt_status` asynchronously,
    // so an interrupt is injected even if one already appears pending
    // (see `update_interrupt_status`).
    async_intr_status: bool,
    // Power-management bookkeeping: defers signals while the device is
    // suspended and optionally triggers wakeups.
    pm_state: Mutex<PmState>,
}
impl InterruptInner {
62
/// Add `interrupt_status_mask` to any existing interrupt status.
63
///
64
/// Returns `true` if the interrupt should be triggered after this update.
65
fn update_interrupt_status(&self, interrupt_status_mask: u32) -> bool {
66
// Set bit in ISR and inject the interrupt if it was not already pending.
67
// Don't need to inject the interrupt if the guest hasn't processed it.
68
// In hypervisors where interrupt_status is updated asynchronously, inject the
69
// interrupt even if the previous interrupt appears to be already pending.
70
self.interrupt_status
71
.fetch_or(interrupt_status_mask as usize, Ordering::SeqCst)
72
== 0
73
|| self.async_intr_status
74
}
75
}
76
77
/// Cheaply clonable handle a virtio device uses to signal interrupts; all
/// clones share the same `InterruptInner` via the `Arc`.
#[derive(Clone)]
pub struct Interrupt {
    inner: Arc<InterruptInner>,
}
impl fmt::Debug for Interrupt {
83
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
84
write!(f, "Interrupt")
85
}
86
}
87
88
/// Serializable snapshot of an `Interrupt`'s restorable state, produced by
/// `Interrupt::snapshot` and consumed by `Interrupt::new_from_snapshot`.
#[derive(Serialize, Deserialize)]
pub struct InterruptSnapshot {
    // Saved ISR status bits.
    interrupt_status: usize,
}
impl Interrupt {
    /// Writes to the irqfd to VMM to deliver virtual interrupt to the guest.
    ///
    /// If MSI-X is enabled in this device, MSI-X interrupt is preferred.
    /// Write to the irqfd to VMM to deliver virtual interrupt to the guest
    pub fn signal(&self, vector: u16, interrupt_status_mask: u32) {
        // If the device is suspended, `handle_interrupt` queues the signal
        // (and triggers a wakeup if one is armed) and we must not deliver it.
        if self
            .inner
            .pm_state
            .lock()
            .handle_interrupt(vector, interrupt_status_mask)
        {
            return;
        }

        match &self.inner.transport {
            Transport::Pci { pci } => {
                // Don't need to set ISR for MSI-X interrupts
                if let Some(msix_config) = &pci.msix_config {
                    let mut msix_config = msix_config.lock();
                    if msix_config.enabled() {
                        if vector != VIRTIO_MSI_NO_VECTOR {
                            msix_config.trigger(vector);
                        }
                        // MSI-X enabled: never fall through to INTx, even for
                        // VIRTIO_MSI_NO_VECTOR.
                        return;
                    }
                }

                // Legacy INTx path: set the ISR bit and pulse the level irq
                // only when an injection is actually needed.
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    pci.irq_evt_lvl.trigger().unwrap();
                }
            }
            Transport::Mmio { irq_evt_edge } => {
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    irq_evt_edge.trigger().unwrap();
                }
            }
            Transport::VhostUser { call_evt, .. } => {
                // TODO(b/187487351): To avoid sending unnecessary events, we might want to support
                // interrupt status. For this purpose, we need a mechanism to share interrupt status
                // between the vmm and the device process.
                call_evt.signal().unwrap();
            }
        }
    }

    /// Notify the driver that buffers have been placed in the used queue.
    pub fn signal_used_queue(&self, vector: u16) {
        self.signal(vector, INTERRUPT_STATUS_USED_RING)
    }

    /// Notify the driver that the device configuration has changed.
    pub fn signal_config_changed(&self) {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => {
                // PCI uses the dedicated config MSI-X vector.
                self.signal(pci.config_msix_vector, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::Mmio { .. } => {
                self.signal(VIRTIO_MSI_NO_VECTOR, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::VhostUser {
                signal_config_changed_fn,
                ..
            } => signal_config_changed_fn(),
        }
    }

    /// Get the event to signal resampling is needed if it exists.
    ///
    /// Only the PCI transport's level-triggered interrupt carries a resample
    /// event; other transports return `None`.
    fn get_resample_evt(&self) -> Option<&Event> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => Some(pci.irq_evt_lvl.get_resample()),
            _ => None,
        }
    }

    /// Spawns a worker thread that re-asserts the interrupt whenever the VMM
    /// signals a resample while ISR bits are still set. Returns `None` for
    /// transports without a resample event.
    pub fn spawn_resample_thread(&self) -> Option<WorkerThread<()>> {
        if self.get_resample_evt().is_some() {
            let interrupt = self.clone();
            // TODO(dverkamp): investigate using a smaller-than-default stack size for this thread
            Some(WorkerThread::start("crosvm_resample", move |kill_evt| {
                interrupt_resample_thread(kill_evt, interrupt)
            }))
        } else {
            None
        }
    }
}
impl Interrupt {
    /// Create an `Interrupt` for the PCI transport with an empty ISR.
    ///
    /// `wakeup_event` (x86_64 only), when present, lets deferred interrupts
    /// wake a suspended VM and is logged under the given metric event type.
    pub fn new(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }

    /// Create a new `Interrupt`, restoring internal state to match `snapshot`.
    ///
    /// The other arguments are assumed to be snapshot'd and restore'd elsewhere.
    pub fn new_from_snapshot(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
        snapshot: InterruptSnapshot,
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                // Only the ISR bits are restored from the snapshot.
                interrupt_status: AtomicUsize::new(snapshot.interrupt_status),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    wakeup_event,
                ),
            }),
        }
    }

    /// Create an `Interrupt` for the MMIO transport.
    ///
    /// `async_intr_status` selects injection behavior for hypervisors that
    /// read the ISR asynchronously (see `InterruptInner`).
    pub fn new_mmio(irq_evt_edge: IrqEdgeEvent, async_intr_status: bool) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::Mmio { irq_evt_edge },
                async_intr_status,
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }

    /// Create an `Interrupt` wrapping a vhost-user vring call event and function that sends a
    /// VHOST_USER_BACKEND_CONFIG_CHANGE_MSG to the frontend.
    pub fn new_vhost_user(
        call_evt: Event,
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::VhostUser {
                    call_evt,
                    signal_config_changed_fn,
                },
                async_intr_status: false,
                pm_state: PmState::new(
                    #[cfg(target_arch = "x86_64")]
                    None,
                ),
            }),
        }
    }

    /// Test-only PCI `Interrupt` with no MSI-X and no wakeup event.
    #[cfg(test)]
    pub fn new_for_test() -> Interrupt {
        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            None,
            VIRTIO_MSI_NO_VECTOR,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }

    /// Test-only PCI `Interrupt` with a two-vector MSI-X config attached.
    #[cfg(test)]
    pub fn new_for_test_with_msix() -> Interrupt {
        // The other end of the config tube is dropped; config messages are
        // not exercised by these tests.
        let (_, unused_config_tube) = base::Tube::pair().unwrap();
        let msix_vectors = 2;
        let msix_cfg = MsixConfig::new(
            msix_vectors,
            unused_config_tube,
            0,
            "test_device".to_owned(),
        );

        Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            Some(Arc::new(Mutex::new(msix_cfg))),
            msix_vectors,
            #[cfg(target_arch = "x86_64")]
            None,
        )
    }

    /// Get a reference to the interrupt event.
    pub fn get_interrupt_evt(&self) -> &Event {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => pci.irq_evt_lvl.get_trigger(),
            Transport::Mmio { irq_evt_edge } => irq_evt_edge.get_trigger(),
            Transport::VhostUser { call_evt, .. } => call_evt,
        }
    }

    /// Get a reference to the msix configuration
    pub fn get_msix_config(&self) -> &Option<Arc<Mutex<MsixConfig>>> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => &pci.msix_config,
            // Non-PCI transports have no MSI-X; `&None` is promoted to a
            // 'static reference.
            _ => &None,
        }
    }

    /// Reads the current value of the interrupt status.
    pub fn read_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.load(Ordering::SeqCst) as u8
    }

    /// Reads the current value of the interrupt status and resets it to 0.
    pub fn read_and_reset_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.swap(0, Ordering::SeqCst) as u8
    }

    /// Clear the bits set in `mask` in the interrupt status.
    pub fn clear_interrupt_status_bits(&self, mask: u8) {
        self.inner
            .interrupt_status
            .fetch_and(!(mask as usize), Ordering::SeqCst);
    }

    /// Snapshot internal state. Can be restored with `Interrupt::new_from_snapshot`.
    pub fn snapshot(&self) -> InterruptSnapshot {
        InterruptSnapshot {
            interrupt_status: self.inner.interrupt_status.load(Ordering::SeqCst),
        }
    }

    /// Suspend or resume the device's notifications. On resume, any signals
    /// deferred while suspended are re-delivered in order.
    pub fn set_suspended(&self, suspended: bool) {
        let retrigger_evts = self.inner.pm_state.lock().set_suspended(suspended);
        for (vector, interrupt_status_mask) in retrigger_evts.into_iter() {
            self.signal(vector, interrupt_status_mask);
        }
    }

    /// Arm (`true`) or disarm (`false`) the wakeup event for a suspended VM.
    #[cfg(target_arch = "x86_64")]
    pub fn set_wakeup_event_active(&self, active: bool) {
        self.inner.pm_state.lock().set_wakeup_event_active(active);
    }
}
/// Wakeup bookkeeping for interrupts that arrive while the VM is suspended
/// (x86_64 only).
#[cfg(target_arch = "x86_64")]
struct WakeupState {
    // Event used to wake the VM when a deferred interrupt arrives.
    wakeup_event: PmWakeupEvent,
    // Whether wakeups are currently armed (set via `set_wakeup_event_active`).
    wakeup_enabled: bool,
    // When the wakeup was armed; used to report arm-to-trigger latency.
    armed_time: Instant,
    // Metric event type logged when a wakeup is triggered.
    metrics_event: MetricEventType,
    // Event to signal to clear a triggered wakeup. `Some` while a wakeup is
    // outstanding; also suppresses duplicate triggers.
    wakeup_clear_evt: Option<Event>,
}
#[cfg(target_arch = "x86_64")]
impl WakeupState {
    /// Wraps the optional wakeup event/metric pair; `None` when the device
    /// was constructed without a wakeup event.
    fn new(wakeup_event: Option<(PmWakeupEvent, MetricEventType)>) -> Option<Self> {
        wakeup_event.map(|(wakeup_event, metrics_event)| Self {
            wakeup_event,
            wakeup_enabled: false,
            // Not actually armed, but simpler than wrapping with an Option.
            armed_time: Instant::now(),
            metrics_event,
            wakeup_clear_evt: None,
        })
    }

    /// Trigger the wakeup event and log the elapsed time since arming.
    ///
    /// No-op if a previous wakeup is still outstanding (not yet cleared).
    fn trigger_wakeup(&mut self) {
        if self.wakeup_clear_evt.is_some() {
            return;
        }

        let elapsed = self.armed_time.elapsed().as_millis();
        log_metric(
            self.metrics_event.clone(),
            // u128 millis -> i64 metric value, saturating on overflow.
            elapsed.try_into().unwrap_or(i64::MAX),
        );

        match self.wakeup_event.trigger_wakeup() {
            // NOTE(review): `trigger_wakeup` may return Ok(None), in which
            // case repeated triggers are not suppressed — presumed intended.
            Ok(clear_evt) => self.wakeup_clear_evt = clear_evt,
            Err(err) => error!("Wakeup trigger failed {:?}", err),
        }
    }
}
// Power management state of the interrupt.
struct PmState {
    // Whether or not the virtio device that owns this interrupt is suspended. A
    // suspended virtio device MUST NOT send notifications (i.e. interrupts) to the
    // driver.
    suspended: bool,
    // The queue of interrupts that the virtio device has generated while suspended.
    // These are deferred and sent in order when the device is un-suspended.
    pending_signals: Vec<(u16, u32)>,
    // Wakeup bookkeeping; `None` when no wakeup event was supplied at
    // construction time.
    #[cfg(target_arch = "x86_64")]
    wakeup_state: Option<WakeupState>,
}
impl PmState {
    /// Creates a `PmState` in the running (not suspended) state, wrapped in
    /// the `Mutex` that `InterruptInner` stores.
    fn new(
        #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
    ) -> Mutex<Self> {
        Mutex::new(Self {
            suspended: false,
            pending_signals: Vec::new(),
            #[cfg(target_arch = "x86_64")]
            wakeup_state: WakeupState::new(wakeup_event),
        })
    }

    /// Intercepts an interrupt before delivery.
    ///
    /// Returns `true` if the device is suspended; in that case the signal has
    /// been queued (and a wakeup triggered if armed) and the caller must not
    /// deliver it now.
    fn handle_interrupt(&mut self, vector: u16, mask: u32) -> bool {
        if self.suspended {
            self.pending_signals.push((vector, mask));
            #[cfg(target_arch = "x86_64")]
            if let Some(wakeup_state) = self.wakeup_state.as_mut() {
                if wakeup_state.wakeup_enabled {
                    wakeup_state.trigger_wakeup();
                }
            }
        }
        self.suspended
    }

    /// Updates the suspended flag and hands back the deferred signals so the
    /// caller can re-deliver them in order.
    fn set_suspended(&mut self, suspended: bool) -> Vec<(u16, u32)> {
        self.suspended = suspended;
        std::mem::take(&mut self.pending_signals)
    }

    /// Arms (`active == true`) or disarms the wakeup event.
    ///
    /// Arming records the arm time for latency metrics and immediately fires
    /// a wakeup if signals are already pending; disarming signals the clear
    /// event of any outstanding wakeup.
    #[cfg(target_arch = "x86_64")]
    fn set_wakeup_event_active(&mut self, active: bool) {
        let Some(wakeup_state) = self.wakeup_state.as_mut() else {
            return;
        };

        wakeup_state.wakeup_enabled = active;
        if active {
            wakeup_state.armed_time = Instant::now();
            if !self.pending_signals.is_empty() {
                wakeup_state.trigger_wakeup();
            }
        } else if let Some(clear_evt) = wakeup_state.wakeup_clear_evt.take() {
            if let Err(e) = clear_evt.signal() {
                error!("failed to signal clear event {}", e);
            }
        }
    }
}
/// Worker loop for the resample thread: each time the resample event fires,
/// re-assert the interrupt event if any ISR bits are still set. Exits when
/// `kill_evt` is signaled.
fn interrupt_resample_thread(kill_evt: Event, interrupt: Interrupt) {
    #[derive(EventToken)]
    enum Token {
        Resample,
        Kill,
    }

    let interrupt_status = &interrupt.inner.interrupt_status;
    let interrupt_evt = interrupt.get_interrupt_evt();
    // Only spawned for transports with a resample event (see
    // `spawn_resample_thread`), so this expect cannot fire in practice.
    let resample_evt = interrupt
        .get_resample_evt()
        .expect("must have resample evt in interrupt_resample_thread");

    let wait_ctx =
        WaitContext::build_with(&[(resample_evt, Token::Resample), (&kill_evt, Token::Kill)])
            .expect("failed to create WaitContext");

    loop {
        let events = wait_ctx.wait().expect("WaitContext::wait() failed");
        for event in events {
            match event.token {
                Token::Resample => {
                    // Consume the resample event before checking the ISR;
                    // failure to read is ignored deliberately.
                    let _ = resample_evt.wait();
                    if interrupt_status.load(Ordering::SeqCst) != 0 {
                        // ISR still pending: re-assert the level interrupt.
                        interrupt_evt.signal().unwrap();
                    }
                }
                Token::Kill => return,
            }
        }
    }
}