// crosvm: devices/src/irqchip/ioapic.rs (web-viewer chrome removed from this capture)
1
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implementation of an Intel ICH10 Input/Output Advanced Programmable Interrupt Controller.
// See https://www.intel.com/content/dam/doc/datasheet/io-controller-hub-10-family-datasheet.pdf
// for a specification.
9
use anyhow::Context;
10
use base::error;
11
use base::warn;
12
use base::Error;
13
use base::Event;
14
use base::Result;
15
use base::Tube;
16
use base::TubeError;
17
use hypervisor::IoapicRedirectionTableEntry;
18
use hypervisor::IoapicState;
19
use hypervisor::MsiAddressMessage;
20
use hypervisor::MsiDataMessage;
21
use hypervisor::TriggerMode;
22
use hypervisor::NUM_IOAPIC_PINS;
23
use remain::sorted;
24
use serde::Deserialize;
25
use serde::Serialize;
26
use snapshot::AnySnapshot;
27
use thiserror::Error;
28
use vm_control::DeviceId;
29
use vm_control::PlatformDeviceId;
30
use vm_control::VmIrqRequest;
31
use vm_control::VmIrqResponse;
32
33
use super::IrqEvent;
34
use crate::bus::BusAccessInfo;
35
use crate::BusDevice;
36
use crate::IrqEventSource;
37
use crate::Suspendable;
38
39
// ICH10 I/O APIC version: 0x20
const IOAPIC_VERSION_ID: u32 = 0x00000020;
// Guest-physical base address of the IOAPIC MMIO window.
pub const IOAPIC_BASE_ADDRESS: u64 = 0xfec00000;
// Size of the IOAPIC MMIO window in bytes.
// The Intel manual does not specify this size, but KVM uses it.
pub const IOAPIC_MEM_LENGTH_BYTES: u64 = 0x100;

// Constants for IOAPIC direct register offset (values written to IOREGSEL).
const IOAPIC_REG_ID: u8 = 0x00;
const IOAPIC_REG_VERSION: u8 = 0x01;
const IOAPIC_REG_ARBITRATION_ID: u8 = 0x02;

// Register offsets within the MMIO window.
const IOREGSEL_OFF: u8 = 0x0;
const IOREGSEL_DUMMY_UPPER_32_BITS_OFF: u8 = 0x4;
const IOWIN_OFF: u8 = 0x10;
const IOEOIR_OFF: u8 = 0x40;

// Each 64-bit redirection table entry occupies two 32-bit selectors.
const IOWIN_SCALE: u8 = 0x2;
/// Given an IRQ and whether or not the selector should refer to the high bits, return a selector
59
/// suitable to use as an offset to read to/write from.
60
#[allow(dead_code)]
61
fn encode_selector_from_irq(irq: usize, is_high_bits: bool) -> u8 {
62
(irq as u8) * IOWIN_SCALE + IOWIN_OFF + (is_high_bits as u8)
63
}
64
65
/// Given an offset that was read from/written to, return a tuple of the relevant IRQ and whether
66
/// the offset refers to the high bits of that register.
67
fn decode_irq_from_selector(selector: u8) -> (usize, bool) {
68
(
69
((selector - IOWIN_OFF) / IOWIN_SCALE) as usize,
70
selector & 1 != 0,
71
)
72
}
73
74
// The RTC needs special treatment to work properly for Windows (or other OSs that use tick
// stuffing). In order to avoid time drift, we need to guarantee that the correct number of RTC
// interrupts are injected into the guest. This hack essentially treats RTC interrupts as level
// triggered, which allows the IOAPIC to be responsible for interrupt coalescing and allows the
// IOAPIC to pass back whether or not the interrupt was coalesced to the CMOS (which allows the
// CMOS to perform tick stuffing). This deviates from the IOAPIC spec in ways very similar to (but
// not exactly the same as) KVM's IOAPIC.
const RTC_IRQ: usize = 0x8;
82
83
/// This struct is essentially the complete serialized form of [IrqEvent] as used in
/// [Ioapic::out_events].
///
/// [Ioapic] stores MSIs used to back GSIs, but not enough information to re-create these MSIs
/// (it is missing the address & data). It also includes data that is unused by the userspace
/// ioapic (the per gsi resample event, [IrqEvent::resample_event], is always None). This
/// struct incorporates the necessary information for snapshotting, and excludes that which
/// is not required.
#[derive(Clone, Serialize, Deserialize)]
struct OutEventSnapshot {
    // GSI allocated for this ioapic pin's outgoing MSI.
    gsi: u32,
    // MSI address message used to route the GSI.
    msi_address: u64,
    // MSI data message used to route the GSI.
    msi_data: u32,
    // Debug/identification info for the IRQ source.
    source: IrqEventSource,
}
98
99
/// Snapshot of [Ioapic] state. Some fields were intentionally excluded:
/// * [Ioapic::resample_events]: these will get re-registered when the VM is created (e.g. prior to
///   restoring a snapshot).
/// * [Ioapic::out_events]: this isn't serializable as it contains Events. Replaced by
///   [IoapicSnapshot::out_event_snapshots].
/// * [Ioapic::irq_tube]: will be set up as part of creating the VM.
///
/// See [Ioapic] for descriptions of fields by the same names.
#[derive(Serialize, Deserialize)]
struct IoapicSnapshot {
    num_pins: usize,
    ioregsel: u8,
    ioapicid: u32,
    rtc_remote_irr: bool,
    out_event_snapshots: Vec<Option<OutEventSnapshot>>,
    redirect_table: Vec<IoapicRedirectionTableEntry>,
    interrupt_level: Vec<bool>,
}
117
118
/// Stores the outbound IRQ line in runtime & serializable forms.
struct OutEvent {
    /// The actual IrqEvent used to dispatch IRQs when the VM is running.
    irq_event: IrqEvent,
    /// Serializable form of this IRQ line so that it can be re-created when
    /// the VM is snapshotted & resumed. Will be None until the line is
    /// completely set up.
    snapshot: Option<OutEventSnapshot>,
}
127
128
/// Userspace implementation of an ICH10 I/O APIC. Incoming IRQ lines are
/// forwarded to the APICs as MSIs routed via `irq_tube`.
pub struct Ioapic {
    /// Number of supported IO-APIC inputs / redirection entries.
    num_pins: usize,
    /// ioregsel register. Used for selecting which entry of the redirect table to read/write.
    ioregsel: u8,
    /// ioapicid register. Bits 24 - 27 contain the APIC ID for this device.
    ioapicid: u32,
    /// Remote IRR for Edge Triggered Real Time Clock interrupts, which allows the CMOS to know
    /// when one of its interrupts is being coalesced.
    rtc_remote_irr: bool,
    /// Outgoing irq events that are used to inject MSI interrupts.
    /// Also contains the serializable form used for snapshotting.
    out_events: Vec<Option<OutEvent>>,
    /// Events that should be triggered on an EOI. The outer Vec is indexed by GSI, and the inner
    /// Vec is an unordered list of registered resample events for the GSI.
    resample_events: Vec<Vec<Event>>,
    /// Redirection settings for each irq line.
    redirect_table: Vec<IoapicRedirectionTableEntry>,
    /// Interrupt activation state.
    interrupt_level: Vec<bool>,
    /// Tube used to route MSI irqs.
    irq_tube: Tube,
}
151
152
impl BusDevice for Ioapic {
153
fn debug_label(&self) -> String {
154
"userspace IOAPIC".to_string()
155
}
156
157
fn device_id(&self) -> DeviceId {
158
PlatformDeviceId::Ioapic.into()
159
}
160
161
fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
162
if data.len() > 8 || data.is_empty() {
163
warn!("IOAPIC: Bad read size: {}", data.len());
164
return;
165
}
166
if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
167
warn!("IOAPIC: Bad read from {}", info);
168
}
169
let out = match info.offset as u8 {
170
IOREGSEL_OFF => self.ioregsel.into(),
171
IOREGSEL_DUMMY_UPPER_32_BITS_OFF => 0,
172
IOWIN_OFF => self.ioapic_read(),
173
IOEOIR_OFF => 0,
174
_ => {
175
warn!("IOAPIC: Bad read from {}", info);
176
return;
177
}
178
};
179
let out_arr = out.to_ne_bytes();
180
for i in 0..4 {
181
if i < data.len() {
182
data[i] = out_arr[i];
183
}
184
}
185
}
186
187
fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
188
if data.len() > 8 || data.is_empty() {
189
warn!("IOAPIC: Bad write size: {}", data.len());
190
return;
191
}
192
if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
193
warn!("IOAPIC: Bad write to {}", info);
194
}
195
match info.offset as u8 {
196
IOREGSEL_OFF => self.ioregsel = data[0],
197
IOREGSEL_DUMMY_UPPER_32_BITS_OFF => {} // Ignored.
198
IOWIN_OFF => {
199
if data.len() != 4 {
200
warn!("IOAPIC: Bad write size for iowin: {}", data.len());
201
return;
202
}
203
let data_arr = [data[0], data[1], data[2], data[3]];
204
let val = u32::from_ne_bytes(data_arr);
205
self.ioapic_write(val);
206
}
207
IOEOIR_OFF => self.end_of_interrupt(data[0]),
208
_ => {
209
warn!("IOAPIC: Bad write to {}", info);
210
}
211
}
212
}
213
}
214
215
impl Ioapic {
216
pub fn new(irq_tube: Tube, num_pins: usize) -> Result<Ioapic> {
217
// TODO(dverkamp): clean this up once we are sure all callers use 24 pins.
218
assert_eq!(num_pins, NUM_IOAPIC_PINS);
219
let mut entry = IoapicRedirectionTableEntry::new();
220
entry.set_interrupt_mask(true);
221
Ok(Ioapic {
222
num_pins,
223
ioregsel: 0,
224
ioapicid: 0,
225
rtc_remote_irr: false,
226
out_events: (0..num_pins).map(|_| None).collect(),
227
resample_events: Vec::new(),
228
redirect_table: (0..num_pins).map(|_| entry).collect(),
229
interrupt_level: (0..num_pins).map(|_| false).collect(),
230
irq_tube,
231
})
232
}
233
234
pub fn get_ioapic_state(&self) -> IoapicState {
235
// Convert vector of first NUM_IOAPIC_PINS active interrupts into an u32 value.
236
let level_bitmap = self
237
.interrupt_level
238
.iter()
239
.take(NUM_IOAPIC_PINS)
240
.rev()
241
.fold(0, |acc, &l| acc * 2 + l as u32);
242
let mut state = IoapicState {
243
base_address: IOAPIC_BASE_ADDRESS,
244
ioregsel: self.ioregsel,
245
ioapicid: self.ioapicid,
246
current_interrupt_level_bitmap: level_bitmap,
247
..Default::default()
248
};
249
for (dst, src) in state
250
.redirect_table
251
.iter_mut()
252
.zip(self.redirect_table.iter())
253
{
254
*dst = *src;
255
}
256
state
257
}
258
259
pub fn set_ioapic_state(&mut self, state: &IoapicState) {
260
self.ioregsel = state.ioregsel;
261
self.ioapicid = state.ioapicid & 0x0f00_0000;
262
for (src, dst) in state
263
.redirect_table
264
.iter()
265
.zip(self.redirect_table.iter_mut())
266
{
267
*dst = *src;
268
}
269
for (i, level) in self
270
.interrupt_level
271
.iter_mut()
272
.take(NUM_IOAPIC_PINS)
273
.enumerate()
274
{
275
*level = state.current_interrupt_level_bitmap & (1 << i) != 0;
276
}
277
}
278
279
    /// Replaces the set of per-GSI resample events. The outer Vec is indexed by
    /// GSI; each inner Vec holds the events signalled when that GSI is EOI'd.
    pub fn register_resample_events(&mut self, resample_events: Vec<Vec<Event>>) {
        self.resample_events = resample_events;
    }
282
283
    // The ioapic must be informed about EOIs in order to avoid sending multiple interrupts of the
    // same type at the same time.
    /// Handles a guest EOI for interrupt `vector`: clears remote IRR state for
    /// matching level-triggered pins (and the RTC special case), signals any
    /// registered resample events, and re-injects still-asserted lines.
    pub fn end_of_interrupt(&mut self, vector: u8) {
        if self.redirect_table[RTC_IRQ].get_vector() == vector && self.rtc_remote_irr {
            // Specifically clear RTC IRQ field
            self.rtc_remote_irr = false;
        }

        for i in 0..self.num_pins {
            if self.redirect_table[i].get_vector() == vector
                && self.redirect_table[i].get_trigger_mode() == TriggerMode::Level
            {
                // If resample listeners exist for this pin, de-assert the line
                // first so the listener observes a clean level transition.
                if self
                    .resample_events
                    .get(i)
                    .is_some_and(|events| !events.is_empty())
                {
                    self.service_irq(i, false);
                }

                if let Some(resample_events) = self.resample_events.get(i) {
                    for resample_evt in resample_events {
                        resample_evt.signal().unwrap();
                    }
                }
                self.redirect_table[i].set_remote_irr(false);
            }
            // There is an inherent race condition in hardware if the OS is finished processing an
            // interrupt and a new interrupt is delivered between issuing an EOI and the EOI being
            // completed. When that happens the ioapic is supposed to re-inject the interrupt.
            if self.interrupt_level[i] {
                self.service_irq(i, true);
            }
        }
    }
318
319
    /// Drives IRQ line `irq` to `level` (true = assert, false = de-assert).
    ///
    /// Returns true for any de-assert, or when an interrupt was successfully
    /// injected; returns false when the interrupt was masked, coalesced, or
    /// failed to signal.
    pub fn service_irq(&mut self, irq: usize, level: bool) -> bool {
        let entry = &mut self.redirect_table[irq];

        // De-assert the interrupt.
        if !level {
            self.interrupt_level[irq] = false;
            return true;
        }

        // If it's an edge-triggered interrupt that's already high we ignore it.
        if entry.get_trigger_mode() == TriggerMode::Edge && self.interrupt_level[irq] {
            return false;
        }

        self.interrupt_level[irq] = true;

        // Interrupts are masked, so don't inject.
        if entry.get_interrupt_mask() {
            return false;
        }

        // Level-triggered and remote irr is already active, so we don't inject a new interrupt.
        // (Coalesce with the prior one(s)).
        if entry.get_trigger_mode() == TriggerMode::Level && entry.get_remote_irr() {
            return false;
        }

        // Coalesce RTC interrupt to make tick stuffing work.
        if irq == RTC_IRQ && self.rtc_remote_irr {
            return false;
        }

        // Inject by signalling the outgoing MSI event, if one has been set up.
        let injected = match self.out_events.get(irq) {
            Some(Some(out_event)) => out_event.irq_event.event.signal().is_ok(),
            _ => false,
        };

        // Record the in-flight interrupt so later asserts coalesce with it.
        if entry.get_trigger_mode() == TriggerMode::Level && level && injected {
            entry.set_remote_irr(true);
        } else if irq == RTC_IRQ && injected {
            self.rtc_remote_irr = true;
        }

        injected
    }
364
365
    /// Handles a 32-bit write to the register currently selected by `ioregsel`:
    /// either a direct register (ID/version/arbitration) or one half of a
    /// redirection table entry, in which case the MSI route is updated.
    fn ioapic_write(&mut self, val: u32) {
        match self.ioregsel {
            IOAPIC_REG_VERSION => { /* read-only register */ }
            // Only bits 27:24 of the IOAPICID register are writable.
            IOAPIC_REG_ID => self.ioapicid = val & 0x0f00_0000,
            IOAPIC_REG_ARBITRATION_ID => { /* read-only register */ }
            _ => {
                if self.ioregsel < IOWIN_OFF {
                    // Invalid write; ignore.
                    return;
                }
                let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
                if index >= self.num_pins {
                    // Invalid write; ignore.
                    return;
                }

                let entry = &mut self.redirect_table[index];
                if is_high_bits {
                    entry.set(32, 32, val.into());
                } else {
                    let before = *entry;
                    entry.set(0, 32, val.into());

                    // respect R/O bits.
                    entry.set_delivery_status(before.get_delivery_status());
                    entry.set_remote_irr(before.get_remote_irr());

                    // Clear remote_irr when switching to edge_triggered.
                    if entry.get_trigger_mode() == TriggerMode::Edge {
                        entry.set_remote_irr(false);
                    }

                    // NOTE: on pre-4.0 kernels, there's a race we would need to work around.
                    // "KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race"
                    // is the fix for this.
                }

                // If the pin is level-triggered, currently asserted, and now
                // unmasked, deliver any interrupt that was pending.
                if self.redirect_table[index].get_trigger_mode() == TriggerMode::Level
                    && self.interrupt_level[index]
                    && !self.redirect_table[index].get_interrupt_mask()
                {
                    self.service_irq(index, true);
                }

                // Rebuild the MSI address/data from the (possibly updated)
                // redirection entry and reprogram the route.
                let mut address = MsiAddressMessage::new();
                let mut data = MsiDataMessage::new();
                let entry = &self.redirect_table[index];
                address.set_destination_mode(entry.get_dest_mode());
                address.set_destination_id(entry.get_dest_id());
                address.set_always_0xfee(0xfee);
                data.set_vector(entry.get_vector());
                data.set_delivery_mode(entry.get_delivery_mode());
                data.set_trigger(entry.get_trigger_mode());

                let msi_address = address.get(0, 32);
                let msi_data = data.get(0, 32);
                if let Err(e) = self.setup_msi(index, msi_address, msi_data as u32) {
                    error!("IOAPIC failed to set up MSI for index {}: {}", index, e);
                }
            }
        }
    }
427
428
    /// Programs (or reprograms) the MSI backing ioapic pin `index` with the
    /// given address/data, allocating a GSI and event on first use.
    ///
    /// A `msi_data` of 0 is treated as "route not yet usable" and skipped.
    fn setup_msi(
        &mut self,
        index: usize,
        msi_address: u64,
        msi_data: u32,
    ) -> std::result::Result<(), IoapicError> {
        if msi_data == 0 {
            // During boot, Linux first configures all ioapic pins with msi_data == 0; the routes
            // aren't yet assigned to devices and aren't usable. We skip MSI setup if msi_data is
            // 0.
            return Ok(());
        }

        // Allocate a GSI and event for the outgoing route, if we haven't already done it.
        // The event will be used on the "outgoing" end of the ioapic to send an interrupt to the
        // apics: when an incoming ioapic irq line gets signalled, the ioapic writes to the
        // corresponding outgoing event. The GSI number is used to update the routing info (MSI
        // data and addr) for the event. The GSI and event are allocated only once for each ioapic
        // irq line, when the guest first sets up the ioapic with a valid route. If the guest
        // later reconfigures an ioapic irq line, the same GSI and event are reused, and we change
        // the GSI's route to the new MSI data+addr destination.
        let name = self.debug_label();
        let gsi = if let Some(evt) = &self.out_events[index] {
            evt.irq_event.gsi
        } else {
            let event = Event::new().map_err(IoapicError::CreateEvent)?;
            let request = VmIrqRequest::AllocateOneMsi {
                irqfd: event,
                device_id: self.device_id(),
                queue_id: index, // Use out_events index as queue_id for outgoing ioapic MSIs
                device_name: name.clone(),
            };
            self.irq_tube
                .send(&request)
                .map_err(IoapicError::AllocateOneMsiSend)?;
            match self
                .irq_tube
                .recv()
                .map_err(IoapicError::AllocateOneMsiRecv)?
            {
                VmIrqResponse::AllocateOneMsi { gsi, .. } => {
                    self.out_events[index] = Some(OutEvent {
                        irq_event: IrqEvent {
                            gsi,
                            // Reclaim the event we sent; `request` is still
                            // owned locally since `send` takes a reference.
                            event: match request {
                                VmIrqRequest::AllocateOneMsi { irqfd, .. } => irqfd,
                                _ => unreachable!(),
                            },
                            resample_event: None,
                            // This source isn't currently used for anything, we already sent the
                            // relevant source information to the main thread via the AllocateOneMsi
                            // request, but we populate it anyways for debugging.
                            source: IrqEventSource {
                                device_id: self.device_id(),
                                queue_id: index,
                                device_name: name,
                            },
                        },
                        snapshot: None,
                    });
                    gsi
                }
                VmIrqResponse::Err(e) => return Err(IoapicError::AllocateOneMsi(e)),
                _ => unreachable!(),
            }
        };

        // Set the MSI route for the GSI. This controls which apic(s) get the interrupt when the
        // ioapic's outgoing event is written, and various attributes of how the interrupt is
        // delivered.
        let request = VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AddMsiRouteSend)?;
        if let VmIrqResponse::Err(e) = self.irq_tube.recv().map_err(IoapicError::AddMsiRouteRecv)? {
            return Err(IoapicError::AddMsiRoute(e));
        }

        // Track this MSI route for snapshotting so it can be restored.
        self.out_events[index]
            .as_mut()
            .expect("IRQ is guaranteed initialized")
            .snapshot = Some(OutEventSnapshot {
            gsi,
            msi_address,
            msi_data,
            source: IrqEventSource {
                device_id: self.device_id(),
                queue_id: index,
                device_name: self.debug_label(),
            },
        });
        Ok(())
    }
526
527
    /// Similar to [Ioapic::setup_msi], but used only to re-create an MSI as
    /// part of the snapshot restore process, which allows us to assume certain
    /// invariants (like msi_data != 0) already hold.
    fn restore_msi(
        &mut self,
        index: usize,
        gsi: u32,
        msi_address: u64,
        msi_data: u32,
    ) -> std::result::Result<(), IoapicError> {
        // Re-allocate the outgoing event at the exact GSI recorded in the
        // snapshot, so guest-visible routing is unchanged across restore.
        let event = Event::new().map_err(IoapicError::CreateEvent)?;
        let name = self.debug_label();
        let request = VmIrqRequest::AllocateOneMsiAtGsi {
            irqfd: event,
            gsi,
            device_id: self.device_id(),
            queue_id: index, // Use out_events index as queue_id for outgoing ioapic MSIs
            device_name: name.clone(),
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AllocateOneMsiSend)?;
        if let VmIrqResponse::Err(e) = self
            .irq_tube
            .recv()
            .map_err(IoapicError::AllocateOneMsiRecv)?
        {
            return Err(IoapicError::AllocateOneMsi(e));
        }

        self.out_events[index] = Some(OutEvent {
            irq_event: IrqEvent {
                gsi,
                // Reclaim the event we sent; `request` is still owned locally
                // since `send` takes a reference.
                event: match request {
                    VmIrqRequest::AllocateOneMsiAtGsi { irqfd, .. } => irqfd,
                    _ => unreachable!(),
                },
                resample_event: None,
                // This source isn't currently used for anything, we already sent the
                // relevant source information to the main thread via the AllocateOneMsi
                // request, but we populate it anyways for debugging.
                source: IrqEventSource {
                    device_id: self.device_id(),
                    queue_id: index,
                    device_name: name,
                },
            },
            snapshot: None,
        });

        // Set the MSI route for the GSI. This controls which apic(s) get the interrupt when the
        // ioapic's outgoing event is written, and various attributes of how the interrupt is
        // delivered.
        let request = VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AddMsiRouteSend)?;
        if let VmIrqResponse::Err(e) = self.irq_tube.recv().map_err(IoapicError::AddMsiRouteRecv)? {
            return Err(IoapicError::AddMsiRoute(e));
        }

        // Track this MSI route for snapshotting so it can be restored.
        self.out_events[index]
            .as_mut()
            .expect("IRQ is guaranteed initialized")
            .snapshot = Some(OutEventSnapshot {
            gsi,
            msi_address,
            msi_data,
            source: IrqEventSource {
                device_id: self.device_id(),
                queue_id: index,
                device_name: self.debug_label(),
            },
        });
        Ok(())
    }
608
609
    /// On warm restore, there could already be MSIs registered. We need to
    /// release them in case the routing has changed (e.g. different
    /// data <-> GSI).
    fn release_all_msis(&mut self) -> std::result::Result<(), IoapicError> {
        // `drain` removes every out_event from the Vec even if we bail out of
        // the loop early (remaining elements are dropped with the iterator),
        // so no stale local state survives an error return.
        for out_event in self.out_events.drain(..).flatten() {
            let request = VmIrqRequest::ReleaseOneIrq {
                gsi: out_event.irq_event.gsi,
                irqfd: out_event.irq_event.event,
            };

            self.irq_tube
                .send(&request)
                .map_err(IoapicError::ReleaseOneIrqSend)?;
            if let VmIrqResponse::Err(e) = self
                .irq_tube
                .recv()
                .map_err(IoapicError::ReleaseOneIrqRecv)?
            {
                return Err(IoapicError::ReleaseOneIrq(e));
            }
        }
        Ok(())
    }
632
633
fn ioapic_read(&mut self) -> u32 {
634
match self.ioregsel {
635
IOAPIC_REG_VERSION => ((self.num_pins - 1) as u32) << 16 | IOAPIC_VERSION_ID,
636
IOAPIC_REG_ID | IOAPIC_REG_ARBITRATION_ID => self.ioapicid,
637
_ => {
638
if self.ioregsel < IOWIN_OFF {
639
// Invalid read; ignore and return 0.
640
0
641
} else {
642
let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
643
if index < self.num_pins {
644
let offset = if is_high_bits { 32 } else { 0 };
645
self.redirect_table[index].get(offset, 32) as u32
646
} else {
647
!0 // Invalid index - return all 1s
648
}
649
}
650
}
651
}
652
}
653
}
654
655
impl Suspendable for Ioapic {
    /// Serializes the ioapic's registers and MSI route descriptions into an
    /// [IoapicSnapshot]. Live Events are not serialized; only their
    /// [OutEventSnapshot] descriptions are kept.
    fn snapshot(&mut self) -> anyhow::Result<AnySnapshot> {
        AnySnapshot::to_any(IoapicSnapshot {
            num_pins: self.num_pins,
            ioregsel: self.ioregsel,
            ioapicid: self.ioapicid,
            rtc_remote_irr: self.rtc_remote_irr,
            out_event_snapshots: self
                .out_events
                .iter()
                .map(|out_event_opt| {
                    if let Some(out_event) = out_event_opt {
                        out_event.snapshot.clone()
                    } else {
                        None
                    }
                })
                .collect(),
            redirect_table: self.redirect_table.clone(),
            interrupt_level: self.interrupt_level.clone(),
        })
        .context("failed serializing Ioapic")
    }

    /// Restores registers from a snapshot, releases any currently-registered
    /// MSIs, and re-creates each recorded MSI route at its original GSI.
    fn restore(&mut self, data: AnySnapshot) -> anyhow::Result<()> {
        let snap: IoapicSnapshot =
            AnySnapshot::from_any(data).context("failed to deserialize Ioapic snapshot")?;

        self.num_pins = snap.num_pins;
        self.ioregsel = snap.ioregsel;
        self.ioapicid = snap.ioapicid;
        self.rtc_remote_irr = snap.rtc_remote_irr;
        self.redirect_table = snap.redirect_table;
        self.interrupt_level = snap.interrupt_level;
        // Drop stale MSI registrations before re-creating routes, in case the
        // data <-> GSI mapping changed.
        self.release_all_msis()
            .context("failed to clear MSIs prior to restore")?;
        self.out_events = (0..snap.num_pins).map(|_| None).collect();

        for (index, maybe_out_event) in snap.out_event_snapshots.iter().enumerate() {
            if let Some(out_event) = maybe_out_event {
                self.restore_msi(
                    index,
                    out_event.gsi,
                    out_event.msi_address,
                    out_event.msi_data,
                )?;
            }
        }
        Ok(())
    }

    // No work is needed to quiesce this device.
    fn sleep(&mut self) -> anyhow::Result<()> {
        Ok(())
    }

    // No work is needed to resume this device.
    fn wake(&mut self) -> anyhow::Result<()> {
        Ok(())
    }
}
714
715
/// Errors from allocating, routing, or releasing MSIs over the irq tube.
// Variants are kept alphabetical; enforced by #[sorted].
#[sorted]
#[derive(Error, Debug)]
enum IoapicError {
    #[error("AddMsiRoute failed: {0}")]
    AddMsiRoute(Error),
    #[error("failed to receive AddMsiRoute response: {0}")]
    AddMsiRouteRecv(TubeError),
    #[error("failed to send AddMsiRoute request: {0}")]
    AddMsiRouteSend(TubeError),
    #[error("AllocateOneMsi failed: {0}")]
    AllocateOneMsi(Error),
    #[error("failed to receive AllocateOneMsi response: {0}")]
    AllocateOneMsiRecv(TubeError),
    #[error("failed to send AllocateOneMsi request: {0}")]
    AllocateOneMsiSend(TubeError),
    #[error("failed to create event object: {0}")]
    CreateEvent(Error),
    #[error("ReleaseOneIrq failed: {0}")]
    ReleaseOneIrq(Error),
    #[error("failed to receive ReleaseOneIrq response: {0}")]
    ReleaseOneIrqRecv(TubeError),
    #[error("failed to send ReleaseOneIrq request: {0}")]
    ReleaseOneIrqSend(TubeError),
}
739
740
#[cfg(test)]
741
mod tests {
742
use std::thread;
743
744
use hypervisor::DeliveryMode;
745
use hypervisor::DeliveryStatus;
746
use hypervisor::DestinationMode;
747
748
use super::*;
749
750
const DEFAULT_VECTOR: u8 = 0x3a;
751
const DEFAULT_DESTINATION_ID: u8 = 0x5f;
752
753
fn new() -> Ioapic {
754
let (_, irq_tube) = Tube::pair().unwrap();
755
Ioapic::new(irq_tube, NUM_IOAPIC_PINS).unwrap()
756
}
757
758
fn ioapic_bus_address(offset: u8) -> BusAccessInfo {
759
let offset = offset as u64;
760
BusAccessInfo {
761
offset,
762
address: IOAPIC_BASE_ADDRESS + offset,
763
id: 0,
764
}
765
}
766
767
fn set_up(trigger: TriggerMode) -> (Ioapic, usize) {
768
let irq = NUM_IOAPIC_PINS - 1;
769
let ioapic = set_up_with_irq(irq, trigger);
770
(ioapic, irq)
771
}
772
773
fn set_up_with_irq(irq: usize, trigger: TriggerMode) -> Ioapic {
774
let mut ioapic = self::new();
775
set_up_redirection_table_entry(&mut ioapic, irq, trigger);
776
ioapic.out_events[irq] = Some(OutEvent {
777
irq_event: IrqEvent {
778
gsi: NUM_IOAPIC_PINS as u32,
779
event: Event::new().unwrap(),
780
resample_event: None,
781
source: IrqEventSource {
782
device_id: ioapic.device_id(),
783
queue_id: irq,
784
device_name: ioapic.debug_label(),
785
},
786
},
787
788
snapshot: Some(OutEventSnapshot {
789
gsi: NUM_IOAPIC_PINS as u32,
790
msi_address: 0xa,
791
msi_data: 0xd,
792
source: IrqEventSource {
793
device_id: ioapic.device_id(),
794
queue_id: irq,
795
device_name: ioapic.debug_label(),
796
},
797
}),
798
});
799
ioapic
800
}
801
802
fn read_reg(ioapic: &mut Ioapic, selector: u8) -> u32 {
803
let mut data = [0; 4];
804
ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
805
ioapic.read(ioapic_bus_address(IOWIN_OFF), &mut data);
806
u32::from_ne_bytes(data)
807
}
808
809
fn write_reg(ioapic: &mut Ioapic, selector: u8, value: u32) {
810
ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
811
ioapic.write(ioapic_bus_address(IOWIN_OFF), &value.to_ne_bytes());
812
}
813
814
fn read_entry(ioapic: &mut Ioapic, irq: usize) -> IoapicRedirectionTableEntry {
815
let mut entry = IoapicRedirectionTableEntry::new();
816
entry.set(
817
0,
818
32,
819
read_reg(ioapic, encode_selector_from_irq(irq, false)).into(),
820
);
821
entry.set(
822
32,
823
32,
824
read_reg(ioapic, encode_selector_from_irq(irq, true)).into(),
825
);
826
entry
827
}
828
829
fn write_entry(ioapic: &mut Ioapic, irq: usize, entry: IoapicRedirectionTableEntry) {
830
write_reg(
831
ioapic,
832
encode_selector_from_irq(irq, false),
833
entry.get(0, 32) as u32,
834
);
835
write_reg(
836
ioapic,
837
encode_selector_from_irq(irq, true),
838
entry.get(32, 32) as u32,
839
);
840
}
841
842
fn set_up_redirection_table_entry(ioapic: &mut Ioapic, irq: usize, trigger_mode: TriggerMode) {
843
let mut entry = IoapicRedirectionTableEntry::new();
844
entry.set_vector(DEFAULT_DESTINATION_ID);
845
entry.set_delivery_mode(DeliveryMode::Startup);
846
entry.set_delivery_status(DeliveryStatus::Pending);
847
entry.set_dest_id(DEFAULT_VECTOR);
848
entry.set_trigger_mode(trigger_mode);
849
write_entry(ioapic, irq, entry);
850
}
851
852
fn set_mask(ioapic: &mut Ioapic, irq: usize, mask: bool) {
853
let mut entry = read_entry(ioapic, irq);
854
entry.set_interrupt_mask(mask);
855
write_entry(ioapic, irq, entry);
856
}
857
858
#[test]
859
fn write_read_ioregsel() {
860
let mut ioapic = self::new();
861
let data_write = [0x0f, 0xf0, 0x01, 0xff];
862
let mut data_read = [0; 4];
863
864
for i in 0..data_write.len() {
865
ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &data_write[i..i + 1]);
866
ioapic.read(ioapic_bus_address(IOREGSEL_OFF), &mut data_read[i..i + 1]);
867
assert_eq!(data_write[i], data_read[i]);
868
}
869
}
870
871
// Verify that version register is actually read-only.
872
#[test]
873
fn write_read_ioaic_reg_version() {
874
let mut ioapic = self::new();
875
let before = read_reg(&mut ioapic, IOAPIC_REG_VERSION);
876
let data_write = !before;
877
878
write_reg(&mut ioapic, IOAPIC_REG_VERSION, data_write);
879
assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_VERSION), before);
880
}
881
882
// Verify that only bits 27:24 of the IOAPICID are readable/writable.
883
#[test]
884
fn write_read_ioapic_reg_id() {
885
let mut ioapic = self::new();
886
887
write_reg(&mut ioapic, IOAPIC_REG_ID, 0x1f3e5d7c);
888
assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_ID), 0x0f000000);
889
}
890
891
// Write to read-only register IOAPICARB.
892
#[test]
893
fn write_read_ioapic_arbitration_id() {
894
let mut ioapic = self::new();
895
896
let data_write_id = 0x1f3e5d7c;
897
let expected_result = 0x0f000000;
898
899
// Write to IOAPICID. This should also change IOAPICARB.
900
write_reg(&mut ioapic, IOAPIC_REG_ID, data_write_id);
901
902
// Read IOAPICARB
903
assert_eq!(
904
read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
905
expected_result
906
);
907
908
// Try to write to IOAPICARB and verify unchanged result.
909
write_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID, !data_write_id);
910
assert_eq!(
911
read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
912
expected_result
913
);
914
}
915
916
#[test]
917
#[should_panic(expected = "index out of bounds: the len is 24 but the index is 24")]
918
fn service_invalid_irq() {
919
let mut ioapic = self::new();
920
ioapic.service_irq(NUM_IOAPIC_PINS, false);
921
}
922
923
// Test a level triggered IRQ interrupt.
924
#[test]
925
fn service_level_irq() {
926
let (mut ioapic, irq) = set_up(TriggerMode::Level);
927
928
// TODO(mutexlox): Check that interrupt is fired once.
929
ioapic.service_irq(irq, true);
930
ioapic.service_irq(irq, false);
931
}
932
933
#[test]
934
fn service_multiple_level_irqs() {
935
let (mut ioapic, irq) = set_up(TriggerMode::Level);
936
// TODO(mutexlox): Check that interrupt is fired twice.
937
ioapic.service_irq(irq, true);
938
ioapic.service_irq(irq, false);
939
ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
940
ioapic.service_irq(irq, true);
941
}
942
943
// Test multiple level interrupts without an EOI and verify that only one interrupt is
944
// delivered.
945
#[test]
946
fn coalesce_multiple_level_irqs() {
947
let (mut ioapic, irq) = set_up(TriggerMode::Level);
948
949
// TODO(mutexlox): Test that only one interrupt is delivered.
950
ioapic.service_irq(irq, true);
951
ioapic.service_irq(irq, false);
952
ioapic.service_irq(irq, true);
953
}
954
955
// Test multiple RTC interrupts without an EOI and verify that only one interrupt is delivered.
956
#[test]
957
fn coalesce_multiple_rtc_irqs() {
958
let irq = RTC_IRQ;
959
let mut ioapic = set_up_with_irq(irq, TriggerMode::Edge);
960
961
// TODO(mutexlox): Verify that only one IRQ is delivered.
962
ioapic.service_irq(irq, true);
963
ioapic.service_irq(irq, false);
964
ioapic.service_irq(irq, true);
965
}
966
967
// Test that a level interrupt that has been coalesced is re-raised if a guest issues an
968
// EndOfInterrupt after the interrupt was coalesced while the line is still asserted.
969
#[test]
970
fn reinject_level_interrupt() {
971
let (mut ioapic, irq) = set_up(TriggerMode::Level);
972
973
// TODO(mutexlox): Verify that only one IRQ is delivered.
974
ioapic.service_irq(irq, true);
975
ioapic.service_irq(irq, false);
976
ioapic.service_irq(irq, true);
977
978
// TODO(mutexlox): Verify that this last interrupt occurs as a result of the EOI, rather
979
// than in response to the last service_irq.
980
ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
981
}
982
983
#[test]
984
fn service_edge_triggered_irq() {
985
let (mut ioapic, irq) = set_up(TriggerMode::Edge);
986
987
// TODO(mutexlox): Verify that one interrupt is delivered.
988
ioapic.service_irq(irq, true);
989
ioapic.service_irq(irq, true); // Repeated asserts before a deassert should be ignored.
990
ioapic.service_irq(irq, false);
991
}
992
993
// The line state of an edge-triggered interrupt must keep being tracked while
// the entry is masked, so edges seen after unmasking behave correctly.
#[test]
fn edge_trigger_unmask_test() {
    let (mut ioapic, irq) = set_up(TriggerMode::Edge);

    // TODO(mutexlox): Expect an IRQ.
    ioapic.service_irq(irq, true);

    // Mask the entry and bring the line low.
    set_mask(&mut ioapic, irq, true);
    ioapic.service_irq(irq, false);

    // Pulse the line while masked: nothing should be delivered.
    ioapic.service_irq(irq, true);
    ioapic.service_irq(irq, false);

    set_mask(&mut ioapic, irq, false);

    // TODO(mutexlox): Expect another interrupt.
    // A fresh edge after unmasking fires even though the line was high at some
    // point while the entry was masked.
    ioapic.service_irq(irq, true);
    ioapic.service_irq(irq, false);
}
// A level-triggered interrupt asserted while masked must fire as soon as the
// entry is unmasked.
#[test]
fn level_trigger_unmask_test() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    // Raise the line while the entry is masked.
    set_mask(&mut ioapic, irq, true);
    ioapic.service_irq(irq, true);

    // TODO(mutexlox): expect an interrupt after this.
    set_mask(&mut ioapic, irq, false);
}
// Repeated asserts of an edge-triggered line before any deassert are ignored,
// even when an EOI arrives between the two asserts.
#[test]
fn end_of_interrupt_edge_triggered_irq() {
    let (mut ioapic, irq) = set_up(TriggerMode::Edge);

    // TODO(mutexlox): Expect 1 interrupt.
    ioapic.service_irq(irq, true);
    ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
    // Still no deassert yet, so this assert must be ignored despite the EOI.
    ioapic.service_irq(irq, true);
    ioapic.service_irq(irq, false);
}
// Two complete assert/deassert cycles on an edge-triggered line should each
// produce an interrupt.
#[test]
fn service_multiple_edge_irqs() {
    let (mut ioapic, irq) = set_up(TriggerMode::Edge);

    // First rising edge.
    ioapic.service_irq(irq, true);
    // TODO(mutexlox): Verify that an interrupt occurs here.
    ioapic.service_irq(irq, false);

    // Second rising edge.
    ioapic.service_irq(irq, true);
    // TODO(mutexlox): Verify that an interrupt occurs here.
    ioapic.service_irq(irq, false);
}
// Test an interrupt line configured with negative (active-low) polarity: a
// deassert of the line should be what triggers delivery.
#[test]
fn service_negative_polarity_irq() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    // Flip the entry's polarity bit.
    let mut rte = read_entry(&mut ioapic, irq);
    rte.set_polarity(1);
    write_entry(&mut ioapic, irq, rte);

    // TODO(mutexlox): Expect an interrupt to fire.
    ioapic.service_irq(irq, false);
}
// Ensure that remote IRR can't be edited via mmio: it is read-only from the
// guest's point of view.
#[test]
fn remote_irr_read_only() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    ioapic.redirect_table[irq].set_remote_irr(true);

    // Attempt to clear remote IRR through the mmio read/modify/write path...
    let mut entry = read_entry(&mut ioapic, irq);
    entry.set_remote_irr(false);
    write_entry(&mut ioapic, irq, entry);

    // ...and verify the write was ignored. (assert! instead of
    // assert_eq!(…, true) per clippy::bool_assert_comparison.)
    assert!(read_entry(&mut ioapic, irq).get_remote_irr());
}
// Delivery status, like remote IRR, must not be writable through mmio.
#[test]
fn delivery_status_read_only() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    ioapic.redirect_table[irq].set_delivery_status(DeliveryStatus::Pending);

    // Try to force the status back to Idle via the mmio interface...
    let mut rte = read_entry(&mut ioapic, irq);
    rte.set_delivery_status(DeliveryStatus::Idle);
    write_entry(&mut ioapic, irq, rte);

    // ...and confirm the write had no effect.
    assert_eq!(
        read_entry(&mut ioapic, irq).get_delivery_status(),
        DeliveryStatus::Pending
    );
}
// Reprogramming an entry from level- to edge-triggered must clear the
// remote IRR bit (see also implicit_eoi below, which relies on this).
#[test]
fn level_to_edge_transition_clears_remote_irr() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    ioapic.redirect_table[irq].set_remote_irr(true);

    // Switch the entry to edge-triggered via mmio.
    let mut entry = read_entry(&mut ioapic, irq);
    entry.set_trigger_mode(TriggerMode::Edge);
    write_entry(&mut ioapic, irq, entry);

    // assert!(!…) instead of assert_eq!(…, false) per
    // clippy::bool_assert_comparison.
    assert!(!read_entry(&mut ioapic, irq).get_remote_irr());
}
// Masking and unmasking an entry must not disturb its remote IRR bit.
#[test]
fn masking_preserves_remote_irr() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    ioapic.redirect_table[irq].set_remote_irr(true);

    // Round-trip the mask bit.
    set_mask(&mut ioapic, irq, true);
    set_mask(&mut ioapic, irq, false);

    // assert! instead of assert_eq!(…, true) per
    // clippy::bool_assert_comparison.
    assert!(read_entry(&mut ioapic, irq).get_remote_irr());
}
// Exercise a guest reconfiguring the redirection entry concurrently with an
// EOI: the entry is read before the EOI and written back after it.
#[test]
fn reconfiguration_race() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    // Deliver one level-triggered interrupt.
    // TODO(mutexlox): Check that it fires.
    ioapic.service_irq(irq, true);

    // Snapshot the redirection table entry before the EOI and flip it to
    // edge-triggered (not yet written back).
    let mut rte = read_entry(&mut ioapic, irq);
    rte.set_trigger_mode(TriggerMode::Edge);

    ioapic.service_irq(irq, false);
    ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);

    // Now write back the stale, modified entry.
    write_entry(&mut ioapic, irq, rte);

    // A full edge cycle should deliver exactly one interrupt.
    // TODO(mutexlox): Assert that the interrupt fires once.
    ioapic.service_irq(irq, true);
    ioapic.service_irq(irq, false);
}
// Cycling an entry from level-triggered to edge-triggered and back acts as an
// implicit EOI (clearing remote IRR), allowing the next level interrupt to be
// delivered without an explicit end_of_interrupt.
#[test]
fn implicit_eoi() {
    let (mut ioapic, irq) = set_up(TriggerMode::Level);

    // Deliver one level-triggered interrupt.
    ioapic.service_irq(irq, true);
    // TODO(mutexlox): Verify that one interrupt was fired.
    ioapic.service_irq(irq, false);

    // Implicit EOI: flip to edge-triggered, then back to level-triggered.
    let mut rte = read_entry(&mut ioapic, irq);
    rte.set_trigger_mode(TriggerMode::Edge);
    write_entry(&mut ioapic, irq, rte);
    rte.set_trigger_mode(TriggerMode::Level);
    write_entry(&mut ioapic, irq, rte);

    // A second level-triggered interrupt should now be deliverable.
    ioapic.service_irq(irq, true);
    // TODO(mutexlox): Verify that one interrupt fires.
    ioapic.service_irq(irq, false);
}
// Decode a raw 64-bit redirection table entry value and verify every bitfield
// accessor, then write the entry via mmio and confirm it round-trips.
#[test]
fn set_redirection_entry_by_bits() {
    let mut entry = IoapicRedirectionTableEntry::new();
    //                                                           destination_mode
    //                                                          polarity |
    //                                                     trigger_mode | |
    //                                                                | | |
    // 0011 1010 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 1001 0110 0101 1111
    // |_______| |______________________________________________|| | |  |_|  |_______|
    //  dest_id                  reserved                        | | |   |     vector
    //                                            interrupt_mask | | |   |
    //                                                 remote_irr | |   |
    //                                            delivery_status |    |
    //                                                     delivery_mode
    entry.set(0, 64, 0x3a0000000000965f);
    assert_eq!(entry.get_vector(), 0x5f);
    assert_eq!(entry.get_delivery_mode(), DeliveryMode::Startup);
    assert_eq!(entry.get_dest_mode(), DestinationMode::Physical);
    assert_eq!(entry.get_delivery_status(), DeliveryStatus::Pending);
    assert_eq!(entry.get_polarity(), 0);
    // assert!(!…)/assert! instead of assert_eq!(…, false/true) per
    // clippy::bool_assert_comparison.
    assert!(!entry.get_remote_irr());
    assert_eq!(entry.get_trigger_mode(), TriggerMode::Level);
    assert!(!entry.get_interrupt_mask());
    assert_eq!(entry.get_reserved(), 0);
    assert_eq!(entry.get_dest_id(), 0x3a);

    // Writing the entry through mmio should overwrite the edge trigger mode
    // the ioapic was set up with.
    let (mut ioapic, irq) = set_up(TriggerMode::Edge);
    write_entry(&mut ioapic, irq, entry);
    assert_eq!(
        read_entry(&mut ioapic, irq).get_trigger_mode(),
        TriggerMode::Level
    );

    // TODO(mutexlox): Verify that this actually fires an interrupt.
    ioapic.service_irq(irq, true);
}
#[track_caller]
1213
fn recv_allocate_msi(t: &Tube) -> u32 {
1214
match t.recv::<VmIrqRequest>().unwrap() {
1215
VmIrqRequest::AllocateOneMsiAtGsi { gsi, .. } => gsi,
1216
msg => panic!("unexpected irqchip message: {msg:?}"),
1217
}
1218
}
1219
1220
/// The fields of a `VmIrqRequest::AddMsiRoute` message, captured by
/// `recv_add_msi_route` for the snapshot/restore tests to assert on.
struct MsiRouteDetails {
    // GSI the route was added for.
    gsi: u32,
    // MSI address from the request.
    msi_address: u64,
    // MSI data payload from the request.
    msi_data: u32,
}
#[track_caller]
1227
fn recv_add_msi_route(t: &Tube) -> MsiRouteDetails {
1228
match t.recv::<VmIrqRequest>().unwrap() {
1229
VmIrqRequest::AddMsiRoute {
1230
gsi,
1231
msi_address,
1232
msi_data,
1233
} => MsiRouteDetails {
1234
gsi,
1235
msi_address,
1236
msi_data,
1237
},
1238
msg => panic!("unexpected irqchip message: {msg:?}"),
1239
}
1240
}
1241
1242
#[track_caller]
1243
fn recv_release_one_irq(t: &Tube) -> u32 {
1244
match t.recv::<VmIrqRequest>().unwrap() {
1245
VmIrqRequest::ReleaseOneIrq { gsi, irqfd: _ } => gsi,
1246
msg => panic!("unexpected irqchip message: {msg:?}"),
1247
}
1248
}
1249
1250
// Reply to the ioapic under test with a successful VmIrqResponse.
#[track_caller]
fn send_ok(t: &Tube) {
    t.send(&VmIrqResponse::Ok).unwrap();
}
/// Simulates restoring the ioapic as if the VM had never booted a guest.
/// This is the "cold" restore case: the target device is freshly created and
/// has no MSIs allocated, so restore only needs to allocate and route the MSI
/// captured in the snapshot.
#[test]
fn verify_ioapic_restore_cold_smoke() {
    let (irqchip_tube, ioapic_irq_tube) = Tube::pair().unwrap();
    let gsi_num = NUM_IOAPIC_PINS as u32;

    // Build an ioapic with one MSI: GSI = NUM_IOAPIC_PINS, MSI address 0xa,
    // MSI data 0xd. The pin number (10) is arbitrary.
    let mut saved_ioapic = set_up_with_irq(10, TriggerMode::Level);

    // Snapshot it so we can restore into a brand new device below.
    let snapshot = saved_ioapic.snapshot().unwrap();

    // Fake irqchip thread that answers the restore-time requests in order.
    let irqchip_fake = thread::spawn(move || {
        assert_eq!(recv_allocate_msi(&irqchip_tube), gsi_num);
        send_ok(&irqchip_tube);
        let route = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route.gsi, gsi_num);
        assert_eq!(route.msi_address, 0xa);
        assert_eq!(route.msi_data, 0xd);
        send_ok(&irqchip_tube);
        irqchip_tube
    });

    let mut restored_ioapic = Ioapic::new(ioapic_irq_tube, NUM_IOAPIC_PINS).unwrap();
    restored_ioapic.restore(snapshot).unwrap();

    irqchip_fake.join().unwrap();
}
/// The "warm" restore case: the target Ioapic already exists and may have
/// MSIs allocated. Restore must first release any existing MSIs before
/// re-registering the MSIs captured in the snapshot.
#[test]
fn verify_ioapic_restore_warm_smoke() {
    let (irqchip_tube, ioapic_irq_tube) = Tube::pair().unwrap();
    let gsi_num = NUM_IOAPIC_PINS as u32;

    // Build an ioapic with one MSI: GSI = NUM_IOAPIC_PINS, MSI address 0xa,
    // MSI data 0xd. The pin number (10) is arbitrary.
    let mut ioapic = set_up_with_irq(10, TriggerMode::Level);

    // Swap in the test Tube only after IRQ setup: the setup traffic concerns
    // ioapic functionality, not snapshotting, and we don't want to assert on
    // it.
    ioapic.irq_tube = ioapic_irq_tube;

    // Snapshot the device so we can restore it onto itself.
    let snapshot = ioapic.snapshot().unwrap();

    // Fake irqchip thread that answers the restore-time requests in order.
    let irqchip_fake = thread::spawn(move || {
        // Restore's first step must release the pre-existing MSI.
        assert_eq!(recv_release_one_irq(&irqchip_tube), gsi_num);
        send_ok(&irqchip_tube);

        // Then the MSI is re-allocated and re-routed from the snapshot.
        assert_eq!(recv_allocate_msi(&irqchip_tube), gsi_num);
        send_ok(&irqchip_tube);
        let route = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route.gsi, gsi_num);
        assert_eq!(route.msi_address, 0xa);
        assert_eq!(route.msi_data, 0xd);
        send_ok(&irqchip_tube);
        irqchip_tube
    });

    ioapic.restore(snapshot).unwrap();

    irqchip_fake.join().unwrap();
}
}
1332
1333