Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
google
GitHub Repository: google/crosvm
Path: blob/main/devices/src/virtio/video/decoder/backend/vaapi.rs
5394 views
1
// Copyright 2022 The ChromiumOS Authors
2
// Use of this source code is governed by a BSD-style license that can be
3
// found in the LICENSE file.
4
5
#![deny(missing_docs)]
6
7
use std::collections::btree_map::Entry;
8
use std::collections::BTreeMap;
9
use std::collections::VecDeque;
10
use std::os::fd::FromRawFd;
11
use std::os::fd::OwnedFd;
12
use std::rc::Rc;
13
14
use anyhow::anyhow;
15
use anyhow::Result;
16
use base::IntoRawDescriptor;
17
use base::MappedRegion;
18
use base::MemoryMappingArena;
19
use cros_codecs::decoder::stateless::h264::H264;
20
use cros_codecs::decoder::stateless::h265::H265;
21
use cros_codecs::decoder::stateless::vp8::Vp8;
22
use cros_codecs::decoder::stateless::vp9::Vp9;
23
use cros_codecs::decoder::stateless::DecodeError;
24
use cros_codecs::decoder::stateless::StatelessVideoDecoder;
25
use cros_codecs::decoder::DecodedHandle;
26
use cros_codecs::libva;
27
use cros_codecs::libva::Display;
28
use cros_codecs::multiple_desc_type;
29
use cros_codecs::utils::DmabufFrame;
30
use cros_codecs::DecodedFormat;
31
use cros_codecs::FrameLayout;
32
use cros_codecs::PlaneLayout;
33
34
use crate::virtio::video::decoder::Capability;
35
use crate::virtio::video::decoder::DecoderBackend;
36
use crate::virtio::video::decoder::DecoderEvent;
37
use crate::virtio::video::decoder::DecoderSession;
38
use crate::virtio::video::error::VideoError;
39
use crate::virtio::video::error::VideoResult;
40
use crate::virtio::video::format::Format;
41
use crate::virtio::video::format::FormatDesc;
42
use crate::virtio::video::format::FormatRange;
43
use crate::virtio::video::format::FrameFormat;
44
use crate::virtio::video::format::Level;
45
use crate::virtio::video::format::Profile;
46
use crate::virtio::video::format::Rect;
47
use crate::virtio::video::resource::BufferHandle;
48
use crate::virtio::video::resource::GuestMemHandle;
49
use crate::virtio::video::resource::GuestResource;
50
use crate::virtio::video::resource::GuestResourceHandle;
51
use crate::virtio::video::utils::EventQueue;
52
53
/// A guest memory descriptor that uses a managed buffer as a shadow that will be copied into the
/// guest memory once decoding is over.
struct GuestMemDescriptor(GuestMemHandle);

impl libva::SurfaceMemoryDescriptor for GuestMemDescriptor {
    fn add_attrs(
        &mut self,
        attrs: &mut Vec<libva::VASurfaceAttrib>,
    ) -> Option<Box<dyn std::any::Any>> {
        // Delegate to `()`, i.e. add no attributes of our own, so the surface is backed by a
        // VA-managed buffer that we copy into guest memory after decoding (see
        // `VaapiDecoderSession::output_picture`).
        ().add_attrs(attrs)
    }
}
66
67
// The two kinds of output buffer backing we support: a guest-memory shadow buffer that is
// copied back after decode, or a DMABUF imported directly into the decoder.
multiple_desc_type! {
    enum BufferDescriptor {
        GuestMem(GuestMemDescriptor),
        Dmabuf(DmabufFrame),
    }
}
73
74
/// A buffer descriptor paired with the virtio-video `picture_buffer_id` of the output buffer it
/// backs, so decoded frames can be reported back to the client under the right ID.
struct BufferDescWithPicId {
    desc: BufferDescriptor,
    picture_buffer_id: i32,
}

impl libva::SurfaceMemoryDescriptor for BufferDescWithPicId {
    fn add_attrs(
        &mut self,
        attrs: &mut Vec<libva::VASurfaceAttrib>,
    ) -> Option<Box<dyn std::any::Any>> {
        // The picture buffer ID plays no role in surface creation; forward to the inner
        // descriptor.
        self.desc.add_attrs(attrs)
    }
}
87
88
/// Represents a buffer we have not yet sent to the accelerator.
struct PendingJob {
    /// ID of the input resource, echoed back in `NotifyEndOfBitstreamBuffer` once consumed.
    resource_id: u32,
    /// Timestamp passed to the codec alongside the bitstream data.
    timestamp: u64,
    /// Handle to the memory containing the bitstream data.
    resource: GuestResourceHandle,
    /// Byte offset of the bitstream data within `resource`.
    offset: usize,
    /// Total number of bitstream bytes in this buffer.
    bytes_used: usize,
    /// Number of bytes the codec has not consumed yet; the job is complete when this reaches 0.
    remaining: usize,
}
97
98
impl TryFrom<DecodedFormat> for Format {
99
type Error = anyhow::Error;
100
101
fn try_from(value: DecodedFormat) -> Result<Self, Self::Error> {
102
match value {
103
DecodedFormat::NV12 => Ok(Format::NV12),
104
_ => Err(anyhow!("Unsupported format")),
105
}
106
}
107
}
108
109
impl TryFrom<Format> for DecodedFormat {
110
type Error = anyhow::Error;
111
112
fn try_from(value: Format) -> Result<Self, Self::Error> {
113
match value {
114
Format::NV12 => Ok(DecodedFormat::NV12),
115
_ => Err(anyhow!("Unsupported format")),
116
}
117
}
118
}
119
120
impl TryFrom<libva::VAProfile::Type> for Profile {
    type Error = anyhow::Error;

    /// Maps a libva profile constant onto the corresponding virtio-video profile.
    ///
    /// Profiles with no virtio-video equivalent produce an error so callers can skip them.
    fn try_from(value: libva::VAProfile::Type) -> Result<Self, Self::Error> {
        match value {
            libva::VAProfile::VAProfileH264Baseline => Ok(Self::H264Baseline),
            libva::VAProfile::VAProfileH264Main => Ok(Self::H264Main),
            libva::VAProfile::VAProfileH264High => Ok(Self::H264High),
            libva::VAProfile::VAProfileH264StereoHigh => Ok(Self::H264StereoHigh),
            libva::VAProfile::VAProfileH264MultiviewHigh => Ok(Self::H264MultiviewHigh),
            libva::VAProfile::VAProfileHEVCMain => Ok(Self::HevcMain),
            libva::VAProfile::VAProfileHEVCMain10 => Ok(Self::HevcMain10),
            // VA exposes a single profile for all VP8 versions.
            libva::VAProfile::VAProfileVP8Version0_3 => Ok(Self::VP8Profile0),
            libva::VAProfile::VAProfileVP9Profile0 => Ok(Self::VP9Profile0),
            libva::VAProfile::VAProfileVP9Profile1 => Ok(Self::VP9Profile1),
            libva::VAProfile::VAProfileVP9Profile2 => Ok(Self::VP9Profile2),
            libva::VAProfile::VAProfileVP9Profile3 => Ok(Self::VP9Profile3),
            _ => Err(anyhow!(
                "Conversion failed for unexpected VAProfile: {}",
                value
            )),
        }
    }
}
144
145
/// The state for the output queue containing the buffers that will receive the
/// decoded data.
enum OutputQueueState {
    /// Waiting for the client to call `set_output_buffer_count`.
    AwaitingBufferCount,
    /// Codec is capable of decoding frames.
    Decoding,
    /// Dynamic Resolution Change - we can still accept buffers in the old
    /// format, but are waiting for new parameters before doing any decoding.
    Drc,
}
156
157
/// A safe decoder abstraction over libva for a single vaContext.
pub struct VaapiDecoder {
    /// The capabilities for the decoder, probed once at construction time.
    caps: Capability,
}
162
163
// The VA capabilities for the coded side: the maximum bitstream picture size supported for a
// given VA profile.
struct CodedCap {
    profile: libva::VAProfile::Type,
    max_width: u32,
    max_height: u32,
}
169
170
// The VA capabilities for the raw side: the resolution range supported for a given decoded
// pixel format (fourcc).
struct RawCap {
    fourcc: u32,
    min_width: u32,
    min_height: u32,
    max_width: u32,
    max_height: u32,
}
178
179
impl VaapiDecoder {
    // Query the capabilities for the coded format: the maximum picture size the driver can
    // decode for `profile` at the VLD (full slice-level decode) entrypoint.
    fn get_coded_cap(
        display: &libva::Display,
        profile: libva::VAProfile::Type,
    ) -> Result<CodedCap> {
        // Ask the driver to fill in the two attributes we care about.
        let mut attrs = vec![
            libva::VAConfigAttrib {
                type_: libva::VAConfigAttribType::VAConfigAttribMaxPictureWidth,
                value: 0,
            },
            libva::VAConfigAttrib {
                type_: libva::VAConfigAttribType::VAConfigAttribMaxPictureHeight,
                value: 0,
            },
        ];

        display.get_config_attributes(profile, libva::VAEntrypoint::VAEntrypointVLD, &mut attrs)?;

        // Fall back to 1x1 if the driver does not report a maximum.
        let mut max_width = 1u32;
        let mut max_height = 1u32;

        for attr in &attrs {
            if attr.value == libva::constants::VA_ATTRIB_NOT_SUPPORTED {
                continue;
            }

            match attr.type_ {
                libva::VAConfigAttribType::VAConfigAttribMaxPictureWidth => max_width = attr.value,
                libva::VAConfigAttribType::VAConfigAttribMaxPictureHeight => {
                    max_height = attr.value
                }

                // We only requested the two attribute types above, so anything else is a bug.
                _ => panic!("Unexpected VAConfigAttribType {}", attr.type_),
            }
        }

        Ok(CodedCap {
            profile,
            max_width,
            max_height,
        })
    }

    // Query the capabilities for the raw format: for each pixel format the driver can output
    // for `coded_cap.profile`, the supported resolution range. Missing bounds default to 1x1
    // on the low end and the coded maximum on the high end.
    fn get_raw_caps(display: Rc<libva::Display>, coded_cap: &CodedCap) -> Result<Vec<RawCap>> {
        let mut raw_caps = Vec::new();

        // A config is needed in order to query surface attributes.
        let mut config = display.create_config(
            vec![],
            coded_cap.profile,
            libva::VAEntrypoint::VAEntrypointVLD,
        )?;

        let fourccs = config.query_surface_attributes_by_type(
            libva::VASurfaceAttribType::VASurfaceAttribPixelFormat,
        )?;

        for fourcc in fourccs {
            let fourcc = match fourcc {
                libva::GenericValue::Integer(i) => i as u32,
                // Per VA-API, pixel formats are always integer fourccs.
                other => panic!("Unexpected VAGenericValue {other:?}"),
            };

            let min_width = config.query_surface_attributes_by_type(
                libva::VASurfaceAttribType::VASurfaceAttribMinWidth,
            )?;

            let min_width = match min_width.first() {
                Some(libva::GenericValue::Integer(i)) => *i as u32,
                Some(other) => panic!("Unexpected VAGenericValue {other:?}"),
                None => 1,
            };

            let min_height = config.query_surface_attributes_by_type(
                libva::VASurfaceAttribType::VASurfaceAttribMinHeight,
            )?;
            let min_height = match min_height.first() {
                Some(libva::GenericValue::Integer(i)) => *i as u32,
                Some(other) => panic!("Unexpected VAGenericValue {other:?}"),
                None => 1,
            };

            let max_width = config.query_surface_attributes_by_type(
                libva::VASurfaceAttribType::VASurfaceAttribMaxWidth,
            )?;
            let max_width = match max_width.first() {
                Some(libva::GenericValue::Integer(i)) => *i as u32,
                Some(other) => panic!("Unexpected VAGenericValue {other:?}"),
                None => coded_cap.max_width,
            };

            let max_height = config.query_surface_attributes_by_type(
                libva::VASurfaceAttribType::VASurfaceAttribMaxHeight,
            )?;
            let max_height = match max_height.first() {
                Some(libva::GenericValue::Integer(i)) => *i as u32,
                Some(other) => panic!("Unexpected VAGenericValue {other:?}"),
                None => coded_cap.max_height,
            };

            raw_caps.push(RawCap {
                fourcc,
                min_width,
                min_height,
                max_width,
                max_height,
            });
        }

        Ok(raw_caps)
    }

    /// Creates a new instance of the Vaapi decoder.
    ///
    /// Opens the VA display and probes every VLD-capable profile to build the input/output
    /// format and profile capability tables exposed to the guest.
    pub fn new() -> Result<Self> {
        let display = libva::Display::open().ok_or_else(|| anyhow!("failed to open VA display"))?;

        let va_profiles = display.query_config_profiles()?;

        let mut in_fmts = Vec::new();
        let mut out_fmts = Vec::new();
        let mut profiles_map: BTreeMap<Format, Vec<Profile>> = Default::default();

        // VA has no API for querying the levels supported by the driver.
        // vaQueryProcessingRate is close, but not quite a solution here
        // for all codecs.
        let levels: BTreeMap<Format, Vec<Level>> = Default::default();

        for va_profile in va_profiles {
            let mut profiles = Vec::new();

            let entrypoints = display.query_config_entrypoints(va_profile)?;
            if !entrypoints.contains(&libva::VAEntrypoint::VAEntrypointVLD) {
                // All formats we are aiming to support require
                // VAEntrypointVLD.
                continue;
            }

            let profile = match Profile::try_from(va_profile) {
                Ok(p) => p,
                // Skip if we cannot convert to a valid virtio format
                Err(_) => continue,
            };

            // Manually push all VP8 profiles, since VA exposes only a single
            // VP8 profile for all of these
            if va_profile == libva::VAProfile::VAProfileVP8Version0_3 {
                profiles.push(Profile::VP8Profile0);
                profiles.push(Profile::VP8Profile1);
                profiles.push(Profile::VP8Profile2);
                profiles.push(Profile::VP8Profile3);
            } else {
                profiles.push(profile);
            }

            let coded_cap = VaapiDecoder::get_coded_cap(display.as_ref(), va_profile)?;
            let raw_caps = VaapiDecoder::get_raw_caps(Rc::clone(&display), &coded_cap)?;

            let coded_frame_fmt = FrameFormat {
                width: FormatRange {
                    min: 1,
                    max: coded_cap.max_width,
                    step: 1,
                },

                height: FormatRange {
                    min: 1,
                    max: coded_cap.max_height,
                    step: 1,
                },

                bitrates: Default::default(),
            };

            let coded_format = profile.to_format();
            match profiles_map.entry(coded_format) {
                Entry::Vacant(e) => {
                    e.insert(profiles);
                }
                Entry::Occupied(mut ps) => {
                    ps.get_mut().push(profile);
                }
            }

            // Count how many output formats this profile contributes so we can build the
            // input format's output mask below.
            let mut n_out = 0;
            for raw_cap in raw_caps {
                if raw_cap.fourcc != libva::constants::VA_FOURCC_NV12 {
                    // Apparently only NV12 is currently supported by virtio video
                    continue;
                }

                let raw_frame_fmt = FrameFormat {
                    width: FormatRange {
                        min: raw_cap.min_width,
                        max: raw_cap.max_width,
                        step: 1,
                    },

                    height: FormatRange {
                        min: raw_cap.min_height,
                        max: raw_cap.max_height,
                        step: 1,
                    },

                    bitrates: Default::default(),
                };

                out_fmts.push(FormatDesc {
                    mask: 0,
                    format: Format::NV12,
                    frame_formats: vec![raw_frame_fmt],
                    plane_align: 1,
                });

                n_out += 1;
            }

            // Mask with one bit set per output format just pushed, at its index in
            // `out_fmts`: `n_out` low bits, shifted up to the index of the first new entry.
            let mask = !(u64::MAX << n_out) << (out_fmts.len() - n_out);

            if mask != 0 {
                in_fmts.push(FormatDesc {
                    mask,
                    format: coded_format,
                    frame_formats: vec![coded_frame_fmt],
                    plane_align: 1,
                });
            }
        }

        Ok(Self {
            caps: Capability::new(in_fmts, out_fmts, profiles_map, levels),
        })
    }
}
413
414
/// A width/height pair.
///
/// NOTE(review): this type is not referenced anywhere in this chunk of the file — presumably
/// used elsewhere or kept for future use; confirm before removing.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct Resolution {
    width: u32,
    height: u32,
}
419
420
/// Uniform access to the `BufferHandle` backing the various resource types, so that
/// `BufferMapping` can map any of them.
trait AsBufferHandle {
    type BufferHandle: BufferHandle;
    fn as_buffer_handle(&self) -> &Self::BufferHandle;
}
424
425
impl AsBufferHandle for GuestResource {
    type BufferHandle = GuestResourceHandle;

    // A resource's buffer handle is simply its `handle` field.
    fn as_buffer_handle(&self) -> &Self::BufferHandle {
        &self.handle
    }
}
432
433
impl AsBufferHandle for GuestMemHandle {
    type BufferHandle = Self;

    // A handle is trivially its own buffer handle.
    fn as_buffer_handle(&self) -> &Self::BufferHandle {
        self
    }
}
440
441
impl AsBufferHandle for GuestResourceHandle {
    type BufferHandle = Self;

    // A handle is trivially its own buffer handle.
    fn as_buffer_handle(&self) -> &Self::BufferHandle {
        self
    }
}
448
449
/// A convenience type implementing persistent slice access for BufferHandles.
struct BufferMapping<'a, T: AsBufferHandle> {
    #[allow(dead_code)]
    /// The underlying resource. Must be kept so as not to drop the BufferHandle
    resource: &'a T,
    /// The mapping that backs the underlying slices returned by AsRef and AsMut
    mapping: MemoryMappingArena,
}
457
458
impl<'a, T: AsBufferHandle> BufferMapping<'a, T> {
    /// Creates a new BufferMap covering `size` bytes of `resource` starting at `offset`.
    ///
    /// Fails if the underlying handle cannot be mapped.
    pub fn new(resource: &'a T, offset: usize, size: usize) -> Result<Self> {
        let mapping = resource.as_buffer_handle().get_mapping(offset, size)?;

        Ok(Self { resource, mapping })
    }
}
466
467
impl<T: AsBufferHandle> AsRef<[u8]> for BufferMapping<'_, T> {
    /// Returns the whole mapping as an immutable byte slice.
    fn as_ref(&self) -> &[u8] {
        let mapping = &self.mapping;
        // SAFETY:
        // Safe because the mapping is linear and we own it, so it will not be unmapped during
        // the lifetime of this slice.
        unsafe { std::slice::from_raw_parts(mapping.as_ptr(), mapping.size()) }
    }
}
476
477
impl<T: AsBufferHandle> AsMut<[u8]> for BufferMapping<'_, T> {
    /// Returns the whole mapping as a mutable byte slice.
    fn as_mut(&mut self) -> &mut [u8] {
        let mapping = &self.mapping;
        // SAFETY:
        // Safe because the mapping is linear and we own it, so it will not be unmapped during
        // the lifetime of this slice.
        unsafe { std::slice::from_raw_parts_mut(mapping.as_ptr(), mapping.size()) }
    }
}
486
487
/// A frame that is currently not available for being decoded into, either because it has been
/// decoded and is waiting for us to release it (`Decoded`), or because we temporarily removed it
/// from the decoder pool after a reset and are waiting for the client to tell us we can use it
/// (`Held`).
#[allow(dead_code)] // TODO: b/344974550
enum BorrowedFrame {
    Decoded(Box<dyn DecodedHandle<Descriptor = BufferDescWithPicId>>),
    Held(Box<dyn AsRef<BufferDescWithPicId>>),
}
496
497
/// A decoder session for the libva backend
pub struct VaapiDecoderSession {
    /// The implementation for the codec specific logic.
    codec: Box<dyn StatelessVideoDecoder<BufferDescWithPicId>>,
    /// The state for the output queue. Updated when `set_output_buffer_count`
    /// is called or when we detect a dynamic resolution change.
    output_queue_state: OutputQueueState,
    /// Frames currently held by us, indexed by `picture_buffer_id`.
    held_frames: BTreeMap<i32, BorrowedFrame>,
    /// Queue containing the buffers we have not yet submitted to the codec.
    submit_queue: VecDeque<PendingJob>,
    /// The event queue we can use to signal new events.
    event_queue: EventQueue<DecoderEvent>,
    /// Whether the decoder is currently flushing.
    flushing: bool,
}
513
514
impl VaapiDecoderSession {
515
/// Copy raw decoded data from `image` into the output buffer
516
fn output_picture(
517
decoded_frame: &dyn DecodedHandle<Descriptor = BufferDescWithPicId>,
518
event_queue: &mut EventQueue<DecoderEvent>,
519
) -> Result<()> {
520
let timestamp = decoded_frame.timestamp();
521
522
let buffer_desc = decoded_frame.resource();
523
let picture_buffer_id = buffer_desc.picture_buffer_id;
524
525
// Sync the frame if it is in guest memory, as we are going to map and read it.
526
// This statement is in its own block so we can drop the `buffer_desc` reference
527
// before calling `sync`, which does a mutable borrow.
528
if let BufferDescriptor::GuestMem(_) = &buffer_desc.desc {
529
drop(buffer_desc);
530
decoded_frame.sync()?;
531
}
532
533
// Copy guest memory buffers into their destination.
534
if let BufferDescriptor::GuestMem(handle) = &decoded_frame.resource().desc {
535
let picture = decoded_frame.dyn_picture();
536
let mut backend_handle = picture.dyn_mappable_handle()?;
537
let buffer_size = backend_handle.image_size();
538
539
// Get a mapping from the start of the buffer to the size of the
540
// underlying decoded data in the Image.
541
let mut output_map = BufferMapping::new(&handle.0, 0, buffer_size)?;
542
let output_bytes = output_map.as_mut();
543
544
backend_handle.read(output_bytes)?;
545
}
546
547
// Say that we are done decoding this picture.
548
event_queue
549
.queue_event(DecoderEvent::PictureReady {
550
picture_buffer_id,
551
timestamp,
552
})
553
.map_err(|e| {
554
VideoError::BackendFailure(anyhow!("Can't queue the PictureReady event {}", e))
555
})?;
556
557
Ok(())
558
}
559
560
fn try_emit_flush_completed(&mut self) -> Result<()> {
561
if self.submit_queue.is_empty() {
562
self.flushing = false;
563
564
let event_queue = &mut self.event_queue;
565
566
event_queue
567
.queue_event(DecoderEvent::FlushCompleted(Ok(())))
568
.map_err(|e| anyhow!("Can't queue the PictureReady event {}", e))
569
} else {
570
Ok(())
571
}
572
}
573
574
fn drain_submit_queue(&mut self) -> VideoResult<()> {
575
while let Some(job) = self.submit_queue.front_mut() {
576
let bitstream_map = BufferMapping::new(&job.resource, job.offset, job.bytes_used)
577
.map_err(VideoError::BackendFailure)?;
578
579
let slice_start = job.bytes_used - job.remaining;
580
match self
581
.codec
582
.decode(job.timestamp, &bitstream_map.as_ref()[slice_start..])
583
{
584
Ok(processed) => {
585
job.remaining = job.remaining.saturating_sub(processed);
586
// We have completed the buffer.
587
if job.remaining == 0 {
588
// We are always done with the input buffer after decode returns.
589
self.event_queue
590
.queue_event(DecoderEvent::NotifyEndOfBitstreamBuffer(job.resource_id))
591
.map_err(|e| {
592
VideoError::BackendFailure(anyhow!(
593
"Can't queue the NotifyEndOfBitstream event {}",
594
e
595
))
596
})?;
597
self.submit_queue.pop_front();
598
}
599
}
600
Err(DecodeError::CheckEvents) => {
601
self.process_decoder_events()?;
602
break;
603
}
604
// We will succeed once buffers are returned by the client. This could be optimized
605
// to only retry decoding once buffers are effectively returned.
606
Err(DecodeError::NotEnoughOutputBuffers(_)) => break,
607
// TODO add an InvalidInput error to cros-codecs so we can detect these cases and
608
// just throw a warning instead of a fatal error?
609
Err(e) => {
610
self.event_queue
611
.queue_event(DecoderEvent::NotifyError(VideoError::BackendFailure(
612
anyhow!("Decoding buffer {} failed", job.resource_id),
613
)))
614
.map_err(|e| {
615
VideoError::BackendFailure(anyhow!(
616
"Can't queue the NotifyError event {}",
617
e
618
))
619
})?;
620
return Err(VideoError::BackendFailure(e.into()));
621
}
622
}
623
}
624
625
Ok(())
626
}
627
628
fn process_decoder_events(&mut self) -> VideoResult<()> {
629
while let Some(event) = self.codec.next_event() {
630
match event {
631
cros_codecs::decoder::DecoderEvent::FrameReady(frame) => {
632
Self::output_picture(frame.as_ref(), &mut self.event_queue)
633
.map_err(VideoError::BackendFailure)?;
634
let picture_id = frame.resource().picture_buffer_id;
635
self.held_frames
636
.insert(picture_id, BorrowedFrame::Decoded(frame));
637
}
638
cros_codecs::decoder::DecoderEvent::FormatChanged(mut format) => {
639
let coded_resolution = format.stream_info().coded_resolution;
640
let display_resolution = format.stream_info().display_resolution;
641
642
// Ask the client for new buffers.
643
self.event_queue
644
.queue_event(DecoderEvent::ProvidePictureBuffers {
645
min_num_buffers: format.stream_info().min_num_frames as u32,
646
width: coded_resolution.width as i32,
647
height: coded_resolution.height as i32,
648
visible_rect: Rect {
649
left: 0,
650
top: 0,
651
right: display_resolution.width as i32,
652
bottom: display_resolution.height as i32,
653
},
654
})
655
.map_err(|e| VideoError::BackendFailure(e.into()))?;
656
657
format.frame_pool().clear();
658
659
// Drop our output queue and wait for the new number of output buffers.
660
self.output_queue_state = match &self.output_queue_state {
661
// If this is part of the initialization step, then do not switch states.
662
OutputQueueState::AwaitingBufferCount => {
663
OutputQueueState::AwaitingBufferCount
664
}
665
OutputQueueState::Decoding => OutputQueueState::Drc,
666
OutputQueueState::Drc => {
667
return Err(VideoError::BackendFailure(anyhow!(
668
"Invalid state during DRC."
669
)))
670
}
671
};
672
}
673
}
674
}
675
676
Ok(())
677
}
678
679
fn try_make_progress(&mut self) -> VideoResult<()> {
680
self.process_decoder_events()?;
681
self.drain_submit_queue()?;
682
683
Ok(())
684
}
685
}
686
687
impl DecoderSession for VaapiDecoderSession {
    /// Accepts the client's output parameters and moves the output queue into the `Decoding`
    /// state. The requested count and format are currently ignored; see the comments below.
    fn set_output_parameters(&mut self, _: usize, _: Format) -> VideoResult<()> {
        let output_queue_state = &mut self.output_queue_state;

        // This logic can still be improved, in particular it needs better
        // support at the virtio-video protocol level.
        //
        // We must ensure that set_output_parameters is only called after we are
        // sure that we have processed some stream metadata, which currently is
        // not the case. In particular, the {SET|GET}_PARAMS logic currently
        // takes place *before* we had a chance to parse any stream metadata at
        // all.
        //
        // This can lead to a situation where we accept a format (say, NV12),
        // but then discover we are unable to decode it after processing some
        // buffers (because the stream indicates that the bit depth is 10, for
        // example). Note that there is no way to reject said stream as of right
        // now unless we hardcode NV12 in cros-codecs itself.
        //
        // Nevertheless, the support is already in place in cros-codecs: the
        // decoders will queue buffers until they read some metadata. At this
        // point, it will allow for the negotiation of the decoded format until
        // a new call to decode() is made. At the crosvm level, we can use this
        // window of time to try different decoded formats with .try_format().
        //
        // For now, we accept the default format chosen by cros-codecs instead.
        // In practice, this means NV12 if it the stream can be decoded into
        // NV12 and if the hardware can do so.

        match output_queue_state {
            OutputQueueState::AwaitingBufferCount | OutputQueueState::Drc => {
                // Accept the default format chosen by cros-codecs instead.
                //
                // if let Some(backend_format) = self.backend.backend().format() {
                //     let backend_format = Format::try_from(backend_format);

                //     let format_matches = match backend_format {
                //         Ok(backend_format) => backend_format != format,
                //         Err(_) => false,
                //     };

                //     if !format_matches {
                //         let format =
                //             DecodedFormat::try_from(format).map_err(VideoError::BackendFailure)?;

                //         self.backend.backend().try_format(format).map_err(|e| {
                //             VideoError::BackendFailure(anyhow!(
                //                 "Failed to set the codec backend format: {}",
                //                 e
                //             ))
                //         })?;
                //     }
                // }

                *output_queue_state = OutputQueueState::Decoding;

                Ok(())
            }
            OutputQueueState::Decoding => {
                // Covers the slightly awkward ffmpeg v4l2 stateful
                // implementation for the capture queue setup.
                //
                // ffmpeg will queue a single OUTPUT buffer and immediately
                // follow up with a VIDIOC_G_FMT call on the CAPTURE queue.
                // This leads to a race condition, because it takes some
                // appreciable time for the real resolution to propagate back to
                // the guest as the virtio machinery processes and delivers the
                // event.
                //
                // In the event that VIDIOC_G_FMT(capture) returns the default
                // format, ffmpeg allocates buffers of the default resolution
                // (640x480) only to immediately reallocate as soon as it
                // processes the SRC_CH v4l2 event. Otherwise (if the resolution
                // has propagated in time), this path will not be taken during
                // the initialization.
                //
                // This leads to the following workflow in the virtio video
                // worker:
                // RESOURCE_QUEUE -> QUEUE_CLEAR -> RESOURCE_QUEUE
                //
                // Failing to accept this (as we previously did), leaves us
                // with bad state and completely breaks the decoding process. We
                // should replace the queue even if this is not 100% according
                // to spec.
                //
                // On the other hand, this branch still exists to highlight the
                // fact that we should assert that we have emitted a buffer with
                // the LAST flag when support for buffer flags is implemented in
                // a future CL. If a buffer with the LAST flag hasn't been
                // emitted, it's technically a mistake to be here because we
                // still have buffers of the old resolution to deliver.
                *output_queue_state = OutputQueueState::Decoding;

                // TODO: check whether we have emitted a buffer with the LAST
                // flag before returning.
                Ok(())
            }
        }
    }

    /// Queues an input bitstream buffer for decoding and tries to make progress immediately.
    fn decode(
        &mut self,
        resource_id: u32,
        timestamp: u64,
        resource: GuestResourceHandle,
        offset: u32,
        bytes_used: u32,
    ) -> VideoResult<()> {
        let job = PendingJob {
            resource_id,
            timestamp,
            resource,
            offset: offset as usize,
            bytes_used: bytes_used as usize,
            // Nothing consumed yet: the whole buffer remains to be decoded.
            remaining: bytes_used as usize,
        };

        self.submit_queue.push_back(job);
        self.try_make_progress()?;

        Ok(())
    }

    /// Starts (or continues) a flush. `FlushCompleted` is only emitted once the submit queue has
    /// fully drained; until then this returns successfully and the flush stays pending.
    fn flush(&mut self) -> VideoResult<()> {
        self.flushing = true;

        self.try_make_progress()?;

        if !self.submit_queue.is_empty() {
            return Ok(());
        }

        // Retrieve ready frames from the codec, if any.
        self.codec
            .flush()
            .map_err(|e| VideoError::BackendFailure(e.into()))?;
        self.process_decoder_events()?;

        self.try_emit_flush_completed()
            .map_err(VideoError::BackendFailure)
    }

    /// Drops all pending input, quiesces the codec, reclaims the output buffers, and signals
    /// `ResetCompleted`.
    fn reset(&mut self) -> VideoResult<()> {
        self.submit_queue.clear();

        // Make sure the codec is not active.
        self.codec
            .flush()
            .map_err(|e| VideoError::BackendFailure(e.into()))?;

        self.process_decoder_events()?;

        // Drop the queued output buffers.
        self.clear_output_buffers()?;

        self.event_queue
            .queue_event(DecoderEvent::ResetCompleted(Ok(())))
            .map_err(|e| {
                VideoError::BackendFailure(anyhow!("Can't queue the ResetCompleted event {}", e))
            })?;

        Ok(())
    }

    /// Removes all output buffers from circulation: cancels pending flushes, drops already-queued
    /// picture events, and holds the codec's free frames until the client re-registers them.
    fn clear_output_buffers(&mut self) -> VideoResult<()> {
        // Cancel any ongoing flush.
        self.flushing = false;

        // Drop all decoded frames signaled as ready and cancel any reported flush.
        self.event_queue.retain(|event| {
            !matches!(
                event,
                DecoderEvent::PictureReady { .. } | DecoderEvent::FlushCompleted(_)
            )
        });

        // Now hold all the imported frames until reuse_output_buffer is called on them.
        let frame_pool = self.codec.frame_pool();
        while let Some(frame) = frame_pool.take_free_frame() {
            let picture_id = (*frame).as_ref().picture_buffer_id;
            self.held_frames
                .insert(picture_id, BorrowedFrame::Held(frame));
        }

        Ok(())
    }

    /// Returns the descriptor the client polls to learn about new decoder events.
    fn event_pipe(&self) -> &dyn base::AsRawDescriptor {
        &self.event_queue
    }

    /// Imports a client output buffer (guest pages or virtio object/dmabuf) into the codec's
    /// frame pool under `picture_buffer_id`, then tries to make progress.
    fn use_output_buffer(
        &mut self,
        picture_buffer_id: i32,
        resource: GuestResource,
    ) -> VideoResult<()> {
        let output_queue_state = &mut self.output_queue_state;
        if let OutputQueueState::Drc = output_queue_state {
            // Reusing buffers during DRC is valid, but we won't use them and can just drop them.
            return Ok(());
        }

        let desc = match resource.handle {
            GuestResourceHandle::GuestPages(handle) => {
                BufferDescriptor::GuestMem(GuestMemDescriptor(handle))
            }
            GuestResourceHandle::VirtioObject(handle) => {
                // SAFETY: descriptor is expected to be valid
                let fd = unsafe { OwnedFd::from_raw_fd(handle.desc.into_raw_descriptor()) };
                let modifier = handle.modifier;

                let frame = DmabufFrame {
                    fds: vec![fd],
                    layout: FrameLayout {
                        format: (cros_codecs::Fourcc::from(b"NV12"), modifier),
                        size: cros_codecs::Resolution::from((resource.width, resource.height)),
                        planes: resource
                            .planes
                            .iter()
                            .map(|p| PlaneLayout {
                                buffer_index: 0,
                                offset: p.offset,
                                stride: p.stride,
                            })
                            .collect(),
                    },
                };

                BufferDescriptor::Dmabuf(frame)
            }
        };

        let desc_with_pic_id = BufferDescWithPicId {
            desc,
            picture_buffer_id,
        };

        self.codec
            .frame_pool()
            .add_frames(vec![desc_with_pic_id])
            .map_err(VideoError::BackendFailure)?;

        self.try_make_progress()
    }

    /// Releases the frame held under `picture_buffer_id` back to the codec and tries to make
    /// progress (including retrying a pending flush).
    fn reuse_output_buffer(&mut self, picture_buffer_id: i32) -> VideoResult<()> {
        let output_queue_state = &mut self.output_queue_state;
        if let OutputQueueState::Drc = output_queue_state {
            // Reusing buffers during DRC is valid, but we won't use them and can just drop them.
            return Ok(());
        }

        // Dropping the `BorrowedFrame` returns the buffer to the codec's pool.
        self.held_frames.remove(&picture_buffer_id);

        self.try_make_progress()?;

        if self.flushing {
            // Try flushing again now that we have a new buffer. This might let
            // us progress further in the flush operation.
            self.flush()?;
        }
        Ok(())
    }

    /// Dequeues the next pending decoder event, if any.
    fn read_event(&mut self) -> VideoResult<DecoderEvent> {
        self.event_queue
            .dequeue_event()
            .map_err(|e| VideoError::BackendFailure(anyhow!("Can't read event {}", e)))
    }
}
957
958
impl DecoderBackend for VaapiDecoder {
959
type Session = VaapiDecoderSession;
960
961
fn get_capabilities(&self) -> Capability {
962
self.caps.clone()
963
}
964
965
fn new_session(&mut self, format: Format) -> VideoResult<Self::Session> {
966
let display = Display::open()
967
.ok_or_else(|| VideoError::BackendFailure(anyhow!("failed to open VA display")))?;
968
969
let codec: Box<dyn StatelessVideoDecoder<BufferDescWithPicId>> = match format {
970
Format::VP8 => Box::new(
971
cros_codecs::decoder::stateless::StatelessDecoder::<Vp8, _>::new_vaapi(
972
display,
973
cros_codecs::decoder::BlockingMode::NonBlocking,
974
),
975
),
976
Format::VP9 => Box::new(
977
cros_codecs::decoder::stateless::StatelessDecoder::<Vp9, _>::new_vaapi(
978
display,
979
cros_codecs::decoder::BlockingMode::NonBlocking,
980
),
981
),
982
Format::H264 => Box::new(
983
cros_codecs::decoder::stateless::StatelessDecoder::<H264, _>::new_vaapi(
984
display,
985
cros_codecs::decoder::BlockingMode::NonBlocking,
986
),
987
),
988
Format::Hevc => Box::new(
989
cros_codecs::decoder::stateless::StatelessDecoder::<H265, _>::new_vaapi(
990
display,
991
cros_codecs::decoder::BlockingMode::NonBlocking,
992
),
993
),
994
_ => return Err(VideoError::InvalidFormat),
995
};
996
997
Ok(VaapiDecoderSession {
998
codec,
999
output_queue_state: OutputQueueState::AwaitingBufferCount,
1000
held_frames: Default::default(),
1001
submit_queue: Default::default(),
1002
event_queue: EventQueue::new().map_err(|e| VideoError::BackendFailure(anyhow!(e)))?,
1003
flushing: Default::default(),
1004
})
1005
}
1006
}
1007
1008
#[cfg(test)]
mod tests {
    use super::super::tests::*;
    use super::*;

    #[test]
    // Ignore this test by default as it requires libva-compatible hardware.
    #[ignore]
    fn test_get_capabilities() {
        let decoder = VaapiDecoder::new().unwrap();
        let caps = decoder.get_capabilities();
        // A working VA driver must expose at least one coded and one raw format.
        assert!(!caps.input_formats().is_empty());
        assert!(!caps.output_formats().is_empty());
    }

    // Decode using guest memory input and output buffers.
    #[test]
    // Ignore this test by default as it requires libva-compatible hardware.
    #[ignore]
    fn test_decode_h264_guestmem_to_guestmem() {
        decode_h264_generic(
            &mut VaapiDecoder::new().unwrap(),
            build_guest_mem_handle,
            build_guest_mem_handle,
        );
    }
}
1035
1036