// devices/src/virtio/video/decoder/backend/ffmpeg.rs
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A ffmpeg-based software decoder backend for crosvm. Since it does not require any particular
//! hardware, it can provide fake hardware acceleration decoding to any guest and is mostly useful
//! in order to work on the virtio-video specification, or to implement guest decoder code from the
//! comfort of a workstation.
//!
//! This backend is supposed to serve as the reference implementation for decoding backends in
//! crosvm. As such it is fairly complete and exposes all the features and memory types that crosvm
//! supports.
//!
//! The code in this main module provides the actual implementation and is free of unsafe code. Safe
//! abstractions over the ffmpeg libraries are provided in sub-modules, one per ffmpeg library we
//! want to support.

use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::Weak;

use ::ffmpeg::avcodec::*;
use ::ffmpeg::swscale::*;
use ::ffmpeg::*;
use anyhow::anyhow;
use anyhow::Context;
use base::error;
use base::info;
use base::warn;
use base::MappedRegion;
use base::MemoryMappingArena;
use thiserror::Error as ThisError;

use crate::virtio::video::decoder::backend::*;
use crate::virtio::video::ffmpeg::GuestResourceToAvFrameError;
use crate::virtio::video::ffmpeg::MemoryMappingAvBufferSource;
use crate::virtio::video::ffmpeg::TryAsAvFrameExt;
use crate::virtio::video::format::FormatDesc;
use crate::virtio::video::format::FormatRange;
use crate::virtio::video::format::FrameFormat;
use crate::virtio::video::format::Level;
use crate::virtio::video::format::Profile;
use crate::virtio::video::resource::BufferHandle;
use crate::virtio::video::resource::GuestResource;
use crate::virtio::video::resource::GuestResourceHandle;
use crate::virtio::video::utils::EventQueue;
use crate::virtio::video::utils::OutputQueue;
use crate::virtio::video::utils::SyncEventQueue;

/// Structure maintaining a mapping for an encoded input buffer that can be used as a libavcodec
/// buffer source. It also sends a `NotifyEndOfBitstreamBuffer` event when dropped.
struct InputBuffer {
    /// Memory mapping to the encoded input data.
    mapping: MemoryMappingArena,
    /// Resource ID that we will signal using `NotifyEndOfBitstreamBuffer` upon destruction.
    resource_id: u32,
    /// Pointer to the event queue to send the `NotifyEndOfBitstreamBuffer` event to. The event
    /// will not be sent if the pointer becomes invalid.
    event_queue: Weak<SyncEventQueue<DecoderEvent>>,
}

impl Drop for InputBuffer {
    fn drop(&mut self) {
        match self.event_queue.upgrade() {
            // The session (and thus its event queue) is gone; nothing to notify.
            None => (),
            // If the event queue is still valid, send the event signaling we can be reused.
            Some(event_queue) => event_queue
                .queue_event(DecoderEvent::NotifyEndOfBitstreamBuffer(self.resource_id))
                .unwrap_or_else(|e| {
                    error!("cannot send end of input buffer notification: {:#}", e)
                }),
        }
    }
}

// Expose the mapped guest memory as a zero-copy buffer source for libavcodec.
impl AvBufferSource for InputBuffer {
    fn as_ptr(&self) -> *const u8 {
        self.mapping.as_ptr()
    }

    fn len(&self) -> usize {
        self.mapping.size()
    }

    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

/// Types of input job we can receive from the crosvm decoder code.
enum CodecJob {
    /// An encoded input packet to submit to the codec.
    Packet(AvPacket<'static>),
    /// A request to drain all pending frames out of the codec.
    Flush,
}

/// A crosvm decoder needs to go through a number of setup stages before being able to decode, and
/// can require some setup to be redone when a dynamic resolution change occurs. This enum ensures
/// that the data associated with a given state only exists when we actually are in this state.
enum SessionState {
    /// Waiting for libavcodec to tell us the resolution of the stream.
    AwaitingInitialResolution,
    /// Waiting for the client to call `set_output_buffer_count`.
    AwaitingBufferCount,
    /// Decoding and producing frames.
    Decoding {
        // Queue of client-provided output buffers to decode into.
        output_queue: OutputQueue,
        // Converts decoded frames from the codec's pixel format to the client's.
        format_converter: SwConverter,
    },
    /// Dynamic Resolution Change - we can still accept buffers in the old
    /// format, but are waiting for new parameters before doing any decoding.
    Drc,
}

/// A decoder session for the ffmpeg backend.
pub struct FfmpegDecoderSession {
    /// Queue of events waiting to be read by the client.
    event_queue: Arc<SyncEventQueue<DecoderEvent>>,

    /// FIFO of jobs submitted by the client and waiting to be performed.
    codec_jobs: VecDeque<CodecJob>,
    /// Whether we are currently flushing.
    is_flushing: bool,

    /// Current state of the session.
    state: SessionState,
    /// Visible size of the decoded frames (width, height).
    current_visible_res: (usize, usize),

    /// The libav context for this session.
    context: AvCodecContext,
    /// The last frame to have been decoded, waiting to be copied into an output buffer and sent
    /// to the client.
    avframe: Option<AvFrame>,
}

#[derive(Debug, ThisError)]
enum TrySendFrameError {
    #[error("error while converting frame: {0}")]
    CannotConvertFrame(#[from] ConversionError),
    #[error("error while constructing AvFrame: {0}")]
    IntoAvFrame(#[from] GuestResourceToAvFrameError),
    #[error("error while sending picture ready event: {0}")]
    BrokenPipe(#[from] base::Error),
}

#[derive(Debug, ThisError)]
enum TryReceiveFrameError {
    #[error("error creating AvFrame: {0}")]
    CreateAvFrame(#[from] AvFrameError),
    #[error("error queueing flush completed event: {0}")]
    CannotQueueFlushEvent(#[from] base::Error),
    #[error("error while changing resolution: {0}")]
    ChangeResolutionError(#[from] ChangeResolutionError),
}

#[derive(Debug, ThisError)]
enum TrySendPacketError {
    #[error("error while sending input packet to libavcodec: {0}")]
    AvError(#[from] AvError),
}

#[derive(Debug, ThisError)]
enum TryDecodeError {
    #[error("error while sending packet: {0}")]
    SendPacket(#[from] TrySendPacketError),
    #[error("error while trying to send decoded frame: {0}")]
    SendFrameError(#[from] TrySendFrameError),
    #[error("error while receiving frame: {0}")]
    ReceiveFrame(#[from] TryReceiveFrameError),
}

#[derive(Debug, ThisError)]
enum ChangeResolutionError {
    #[error("error queueing event: {0}")]
    QueueEventFailed(#[from] base::Error),
    #[error("unexpected state during resolution change")]
    UnexpectedState,
}

impl FfmpegDecoderSession {
    /// Queue an event for the client to receive.
    fn queue_event(&mut self, event: DecoderEvent) -> base::Result<()> {
        self.event_queue.queue_event(event)
    }

    /// Start the resolution change process, buffers will now be of size `new_visible_res`.
    fn change_resolution(
        &mut self,
        new_visible_res: (usize, usize),
    ) -> Result<(), ChangeResolutionError> {
        info!("resolution changed to {:?}", new_visible_res);

        // Ask the client for new buffers.
        self.queue_event(DecoderEvent::ProvidePictureBuffers {
            // `refs` can be negative in libavcodec (meaning "unknown"), hence the clamp to 0
            // before adding one extra buffer for the frame being decoded.
            min_num_buffers: std::cmp::max(self.context.as_ref().refs, 0) as u32 + 1,
            width: new_visible_res.0 as i32,
            height: new_visible_res.1 as i32,
            visible_rect: Rect {
                left: 0,
                top: 0,
                right: new_visible_res.0 as i32,
                bottom: new_visible_res.1 as i32,
            },
        })?;

        self.current_visible_res = new_visible_res;

        // Drop our output queue and wait for the new number of output buffers.
        self.state = match self.state {
            SessionState::AwaitingInitialResolution => SessionState::AwaitingBufferCount,
            SessionState::Decoding { .. } => SessionState::Drc,
            _ => return Err(ChangeResolutionError::UnexpectedState),
        };

        Ok(())
    }

    /// Try to send one input packet to the codec.
    ///
    /// Returns `true` if a packet has successfully been queued, `false` if it could not be, either
    /// because all pending work has already been queued or because the codec could not accept more
    /// input at the moment.
    fn try_send_packet(
        &mut self,
        input_packet: &AvPacket<'static>,
    ) -> Result<bool, TrySendPacketError> {
        match self.context.try_send_packet(input_packet) {
            Ok(true) => Ok(true),
            // The codec cannot take more input at the moment, we'll try again after we receive some
            // frames.
            Ok(false) => Ok(false),
            // This should happen only if we attempt to submit data while flushing.
            Err(AvError(AVERROR_EOF)) => Ok(false),
            // If we got invalid data, keep going in hope that we will catch a valid state later.
            Err(AvError(AVERROR_INVALIDDATA)) => {
                warn!("Invalid data in stream, ignoring...");
                // Report the packet as consumed so it is not retried.
                Ok(true)
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Try to run the next input job, if any.
    ///
    /// Returns `true` if the next job has been submitted, `false` if it could not be, either
    /// because all pending work has already been queued or because the codec could not accept more
    /// input at the moment.
    fn try_send_input_job(&mut self) -> Result<bool, TrySendPacketError> {
        // Do not process any more input while we are flushing.
        if self.is_flushing {
            return Ok(false);
        }

        let mut next_job = match self.codec_jobs.pop_front() {
            // No work to do at the moment.
            None => return Ok(false),
            Some(job) => job,
        };

        match &mut next_job {
            CodecJob::Packet(input_packet) => {
                let res = self.try_send_packet(input_packet)?;
                match res {
                    // The input buffer has been processed so we can drop it.
                    true => drop(next_job),
                    // The codec cannot accept new input for now, put the job back into the queue.
                    false => self.codec_jobs.push_front(next_job),
                }

                Ok(res)
            }
            CodecJob::Flush => {
                // Just set the is_flushing flag for now. We will send the actual flush command when
                // `try_receive_frame` returns `TryAgain`. This should probably not be necessary but
                // we sometimes miss the last frame if we send the flush command to libavcodec
                // earlier (which looks like a bug with libavcodec but needs to be confirmed).
                self.is_flushing = true;

                Ok(true)
            }
        }
    }

    /// Try to receive a frame from the codec and store it until we emit the corresponding
    /// `PictureReady` decoder event.
    ///
    /// Returns `true` if a frame was successfully retrieved, or false if no frame was available at
    /// the time, a decoded frame is already waiting to be returned to the client, or the decoder
    /// needs more input data to proceed further.
    fn try_receive_frame(&mut self) -> Result<bool, TryReceiveFrameError> {
        let mut avframe = match self.avframe {
            // We already have a frame waiting. Wait until it is sent to process the next one.
            Some(_) => return Ok(false),
            None => AvFrame::new()?,
        };

        match self.context.try_receive_frame(&mut avframe) {
            Ok(TryReceiveResult::Received) => {
                // Now check whether the resolution of the stream has changed.
                let new_visible_res = (avframe.width as usize, avframe.height as usize);
                if new_visible_res != self.current_visible_res {
                    self.change_resolution(new_visible_res)?;
                }

                self.avframe = Some(avframe);

                Ok(true)
            }
            Ok(TryReceiveResult::TryAgain) => {
                if self.is_flushing {
                    // Start flushing. `try_receive_frame` will return `FlushCompleted` when the
                    // flush is completed. `TryAgain` will not be returned again until the flush is
                    // completed.
                    match self.context.flush_decoder() {
                        // Call ourselves again so we can process the flush.
                        Ok(()) => self.try_receive_frame(),
                        Err(err) => {
                            // The flush failed: report it to the client and abandon flushing.
                            self.is_flushing = false;
                            self.queue_event(DecoderEvent::FlushCompleted(Err(
                                VideoError::BackendFailure(err.into()),
                            )))?;
                            Ok(false)
                        }
                    }
                } else {
                    // The codec is not ready to output a frame yet.
                    Ok(false)
                }
            }
            Ok(TryReceiveResult::FlushCompleted) => {
                // The drain is done: notify the client and reset the codec so decoding can resume.
                self.is_flushing = false;
                self.queue_event(DecoderEvent::FlushCompleted(Ok(())))?;
                self.context.reset();
                Ok(false)
            }
            // If we got invalid data, keep going in hope that we will catch a valid state later.
            Err(AvError(AVERROR_INVALIDDATA)) => {
                warn!("Invalid data in stream, ignoring...");
                Ok(false)
            }
            Err(av_err) => {
                // This is a decoding error, so signal it using a `NotifyError` event to reflect the
                // same asynchronous flow as a hardware decoder would.
                if let Err(e) = self.event_queue.queue_event(DecoderEvent::NotifyError(
                    VideoError::BackendFailure(av_err.into()),
                )) {
                    error!("failed to notify error: {}", e);
                }
                Ok(false)
            }
        }
    }

    /// Try to send a pending decoded frame to the client by copying its content into an output
    /// buffer.
    ///
    /// This can only be done if `self.avframe` contains a decoded frame, and an output buffer is
    /// ready to be written into.
    ///
    /// Returns `true` if a frame has been emitted, `false` if the conditions were not met for it to
    /// happen yet.
    fn try_send_frame(&mut self) -> Result<bool, TrySendFrameError> {
        let (output_queue, format_converter) = match &mut self.state {
            SessionState::Decoding {
                output_queue,
                format_converter,
            } => (output_queue, format_converter),
            // Frames can only be emitted if we are actively decoding.
            _ => return Ok(false),
        };

        let avframe = match self.avframe.take() {
            // No decoded frame available at the moment.
            None => return Ok(false),
            Some(avframe) => avframe,
        };

        let (picture_buffer_id, target_buffer) = match output_queue.try_get_ready_buffer() {
            None => {
                // Keep the decoded frame since we don't have a destination buffer to process it.
                self.avframe = Some(avframe);
                return Ok(false);
            }
            Some(buffer) => buffer,
        };

        // Prepare the picture ready event that we will emit once the frame is written into the
        // target buffer.
        let picture_ready_event = DecoderEvent::PictureReady {
            picture_buffer_id: picture_buffer_id as i32,
            timestamp: avframe.pts as u64,
        };

        // Convert the frame into the target buffer and emit the picture ready event.
        format_converter.convert(
            &avframe,
            &mut target_buffer.try_as_av_frame(MemoryMappingAvBufferSource::from)?,
        )?;
        self.event_queue.queue_event(picture_ready_event)?;

        Ok(true)
    }

    /// Try to progress as much as possible with decoding.
    ///
    /// Our pipeline has three stages: send encoded input to libavcodec, receive decoded frames from
    /// libavcodec, and copy decoded frames into output buffers sent to the client. This method
    /// calls these three stages in a loop for as long as at least one makes progress.
    fn try_decode(&mut self) -> Result<(), TryDecodeError> {
        // Try to make the pipeline progress as long as one of the stages can move forward
        while self.try_send_frame()? || self.try_receive_frame()? || self.try_send_input_job()? {}

        Ok(())
    }
}

impl DecoderSession for FfmpegDecoderSession {
    /// Install the client's output buffer count and format, moving the session into the
    /// `Decoding` state with a fresh output queue and format converter.
    fn set_output_parameters(&mut self, buffer_count: usize, format: Format) -> VideoResult<()> {
        match self.state {
            // It is valid to set an output format before the initial DRC, but we won't do
            // anything with it.
            SessionState::AwaitingInitialResolution => Ok(()),
            SessionState::AwaitingBufferCount | SessionState::Drc => {
                let avcontext = self.context.as_ref();

                let dst_pix_format: AvPixelFormat =
                    format.try_into().map_err(|_| VideoError::InvalidFormat)?;

                self.state = SessionState::Decoding {
                    output_queue: OutputQueue::new(buffer_count),
                    // Convert from the codec's pixel format to the one requested by the client.
                    format_converter: SwConverter::new(
                        avcontext.width as usize,
                        avcontext.height as usize,
                        avcontext.pix_fmt,
                        dst_pix_format.pix_fmt(),
                    )
                    .context("while setting output parameters")
                    .map_err(VideoError::BackendFailure)?,
                };
                Ok(())
            }
            _ => Err(VideoError::BackendFailure(anyhow!(
                "invalid state while calling set_output_parameters"
            ))),
        }
    }

    /// Queue the encoded data in `resource` as an input packet and attempt to make progress on
    /// the decoding pipeline.
    fn decode(
        &mut self,
        resource_id: u32,
        timestamp: u64,
        resource: GuestResourceHandle,
        offset: u32,
        bytes_used: u32,
    ) -> VideoResult<()> {
        // Map the guest resource; the `InputBuffer` will emit `NotifyEndOfBitstreamBuffer`
        // when libavcodec is done with it and the packet is dropped.
        let input_buffer = InputBuffer {
            mapping: resource
                .get_mapping(offset as usize, bytes_used as usize)
                .context("while mapping input buffer")
                .map_err(VideoError::BackendFailure)?,
            resource_id,
            event_queue: Arc::downgrade(&self.event_queue),
        };

        let avbuffer = AvBuffer::new(input_buffer)
            .context("while creating AvPacket")
            .map_err(VideoError::BackendFailure)?;

        let avpacket = AvPacket::new_owned(timestamp as i64, avbuffer);

        self.codec_jobs.push_back(CodecJob::Packet(avpacket));

        self.try_decode()
            .context("while decoding")
            .map_err(VideoError::BackendFailure)
    }

    /// Queue a flush job that will drain all pending frames out of the codec.
    fn flush(&mut self) -> VideoResult<()> {
        if self.is_flushing {
            Err(VideoError::BackendFailure(anyhow!(
                "flush is already in progress"
            )))
        } else {
            self.codec_jobs.push_back(CodecJob::Flush);
            self.try_decode()
                .context("while flushing")
                .map_err(VideoError::BackendFailure)
        }
    }

    /// Reset the codec and drop all pending jobs, buffers, and events, then signal
    /// `ResetCompleted` to the client.
    fn reset(&mut self) -> VideoResult<()> {
        // Reset the codec.
        self.context.reset();

        // Drop all currently pending jobs.
        self.codec_jobs.clear();

        // Drop the queued output buffers.
        self.clear_output_buffers()?;

        self.queue_event(DecoderEvent::ResetCompleted(Ok(())))
            .context("while resetting")
            .map_err(VideoError::BackendFailure)
    }

    /// Drop all output buffers we hold as well as any in-flight decoded frame or pending
    /// frame/flush events, cancelling any ongoing flush.
    fn clear_output_buffers(&mut self) -> VideoResult<()> {
        // Cancel any ongoing flush.
        self.is_flushing = false;

        // Drop all output buffers we currently hold.
        if let SessionState::Decoding { output_queue, .. } = &mut self.state {
            output_queue.clear_ready_buffers();
        }

        // Drop the currently decoded frame.
        self.avframe = None;

        // Drop all decoded frames signaled as ready and cancel any reported flush.
        self.event_queue.retain(|event| {
            !matches!(
                event,
                DecoderEvent::PictureReady { .. } | DecoderEvent::FlushCompleted(_)
            )
        });

        Ok(())
    }

    /// Return the descriptor the client polls to know when events are pending.
    fn event_pipe(&self) -> &dyn AsRawDescriptor {
        self.event_queue.as_ref()
    }

    /// Import a client output buffer into the output queue and attempt to make progress on the
    /// decoding pipeline.
    fn use_output_buffer(
        &mut self,
        picture_buffer_id: i32,
        resource: GuestResource,
    ) -> VideoResult<()> {
        let output_queue = match &mut self.state {
            // It is valid to receive buffers before the initial DRC, but we won't decode
            // anything into them.
            SessionState::AwaitingInitialResolution => return Ok(()),
            SessionState::Decoding { output_queue, .. } => output_queue,
            // Receiving buffers during DRC is valid, but we won't use them and can just drop them.
            SessionState::Drc => return Ok(()),
            _ => {
                error!("use_output_buffer: invalid state");
                return Ok(());
            }
        };

        output_queue
            .import_buffer(picture_buffer_id as u32, resource)
            .context("while importing output buffer")
            .map_err(VideoError::BackendFailure)?;
        self.try_decode()
            .context("while decoding output buffer")
            .map_err(VideoError::BackendFailure)
    }

    /// Return a previously-imported output buffer to the queue and attempt to make progress on
    /// the decoding pipeline.
    fn reuse_output_buffer(&mut self, picture_buffer_id: i32) -> VideoResult<()> {
        let output_queue = match &mut self.state {
            // It is valid to receive buffers before the initial DRC, but we won't decode
            // anything into them.
            SessionState::AwaitingInitialResolution => return Ok(()),
            SessionState::Decoding { output_queue, .. } => output_queue,
            // Reusing buffers during DRC is valid, but we won't use them and can just drop them.
            SessionState::Drc => return Ok(()),
            _ => {
                return Err(VideoError::BackendFailure(anyhow!(
                    "invalid state while calling reuse_output_buffer"
                )))
            }
        };

        output_queue
            .reuse_buffer(picture_buffer_id as u32)
            .context("while reusing output buffer")
            .map_err(VideoError::BackendFailure)?;
        self.try_decode()
            .context("while reusing output buffer")
            .map_err(VideoError::BackendFailure)
    }

    /// Dequeue the next pending event for the client.
    fn read_event(&mut self) -> VideoResult<DecoderEvent> {
        self.event_queue
            .dequeue_event()
            .context("while reading decoder event")
            .map_err(VideoError::BackendFailure)
    }
}

/// Backend managing the set of supported codecs and creating decoder sessions.
pub struct FfmpegDecoder {
    /// Map of supported virtio-video formats to the libavcodec decoder implementing them.
    codecs: BTreeMap<Format, AvCodec>,
}

impl FfmpegDecoder {
    /// Create a new ffmpeg decoder backend instance.
    pub fn new() -> Self {
        // Find all the decoders supported by libav and store them.
        let codecs = AvCodecIterator::new()
            .filter_map(|codec| {
                if !codec.is_decoder() {
                    return None;
                }

                let codec_name = codec.name();

                // Only keep processing the decoders we are interested in. These are all software
                // decoders, but nothing prevents us from supporting hardware-accelerated ones
                // (e.g. *_qsv for VAAPI-based acceleration) in the future!
                let format = match codec_name {
                    "h264" => Format::H264,
                    "vp8" => Format::VP8,
                    "vp9" => Format::VP9,
                    "hevc" => Format::Hevc,
                    _ => return None,
                };

                // We require custom buffer allocators, so ignore codecs that are not capable of
                // using them.
                if codec.capabilities() & AV_CODEC_CAP_DR1 == 0 {
                    warn!(
                        "Skipping codec {} due to lack of DR1 capability.",
                        codec_name
                    );
                    return None;
                }

                Some((format, codec))
            })
            .collect();

        Self { codecs }
    }
}

impl DecoderBackend for FfmpegDecoder {
    type Session = FfmpegDecoderSession;

    /// Report the input formats/profiles/levels supported by the detected codecs, and the
    /// output formats supported through swscale conversion.
    fn get_capabilities(&self) -> Capability {
        // The virtio device only supports NV12 for now it seems...
        const SUPPORTED_OUTPUT_FORMATS: [Format; 1] = [Format::NV12];

        let mut in_formats = vec![];
        let mut profiles_map: BTreeMap<Format, Vec<Profile>> = Default::default();
        let mut levels: BTreeMap<Format, Vec<Level>> = Default::default();
        for (&format, codec) in &self.codecs {
            let profile_iter = codec.profile_iter();
            let profiles = match format {
                Format::H264 => {
                    // We only support Level 1.0 for H.264.
                    // TODO Do we? Why?
                    levels.insert(format, vec![Level::H264_1_0]);

                    profile_iter
                        .filter_map(|p| {
                            match p.profile() {
                                AV_PROFILE_H264_BASELINE => Some(Profile::H264Baseline),
                                AV_PROFILE_H264_MAIN => Some(Profile::H264Main),
                                AV_PROFILE_H264_EXTENDED => Some(Profile::H264Extended),
                                AV_PROFILE_H264_HIGH => Some(Profile::H264High),
                                AV_PROFILE_H264_HIGH_10 => Some(Profile::H264High10),
                                AV_PROFILE_H264_HIGH_422 => Some(Profile::H264High422),
                                AV_PROFILE_H264_HIGH_444_PREDICTIVE => {
                                    Some(Profile::H264High444PredictiveProfile)
                                }
                                AV_PROFILE_H264_STEREO_HIGH => Some(Profile::H264StereoHigh),
                                AV_PROFILE_H264_MULTIVIEW_HIGH => Some(Profile::H264MultiviewHigh),
                                // TODO H264ScalableBaseline and H264ScalableHigh have no libav
                                // equivalents?
                                _ => None,
                            }
                        })
                        .collect()
                }
                Format::VP8 => {
                    // FFmpeg has no VP8 profiles, for some reason...
                    vec![
                        Profile::VP8Profile0,
                        Profile::VP8Profile1,
                        Profile::VP8Profile2,
                        Profile::VP8Profile3,
                    ]
                }
                Format::VP9 => profile_iter
                    .filter_map(|p| match p.profile() {
                        AV_PROFILE_VP9_0 => Some(Profile::VP9Profile0),
                        AV_PROFILE_VP9_1 => Some(Profile::VP9Profile1),
                        AV_PROFILE_VP9_2 => Some(Profile::VP9Profile2),
                        AV_PROFILE_VP9_3 => Some(Profile::VP9Profile3),
                        _ => None,
                    })
                    .collect(),
                Format::Hevc => profile_iter
                    .filter_map(|p| match p.profile() {
                        AV_PROFILE_HEVC_MAIN => Some(Profile::HevcMain),
                        AV_PROFILE_HEVC_MAIN_10 => Some(Profile::HevcMain10),
                        AV_PROFILE_HEVC_MAIN_STILL_PICTURE => Some(Profile::HevcMainStillPicture),
                        _ => None,
                    })
                    .collect(),
                // `self.codecs` is only populated with the four formats above in `new()`.
                _ => unreachable!("Unhandled format {:?}", format),
            };

            profiles_map.insert(format, profiles);

            in_formats.push(FormatDesc {
                mask: !(u64::MAX << SUPPORTED_OUTPUT_FORMATS.len()),
                format,
                frame_formats: vec![FrameFormat {
                    // These frame sizes are arbitrary, but avcodec does not seem to have any
                    // specific restriction in that regard (or any way to query the supported
                    // resolutions).
                    width: FormatRange {
                        min: 64,
                        max: 16384,
                        step: 1,
                    },
                    height: FormatRange {
                        min: 64,
                        max: 16384,
                        step: 1,
                    },
                    bitrates: Default::default(),
                }],
                plane_align: max_buffer_alignment() as u32,
            });
        }

        // We support all output formats through the use of swscale().
        let out_formats = SUPPORTED_OUTPUT_FORMATS
            .iter()
            .map(|&format| FormatDesc {
                mask: !(u64::MAX << in_formats.len()),
                format,
                frame_formats: vec![FrameFormat {
                    // These frame sizes are arbitrary, but avcodec does not seem to have any
                    // specific restriction in that regard (or any way to query the supported
                    // resolutions).
                    width: FormatRange {
                        min: 64,
                        max: 16384,
                        step: 1,
                    },
                    height: FormatRange {
                        min: 64,
                        max: 16384,
                        step: 1,
                    },
                    bitrates: Default::default(),
                }],
                plane_align: max_buffer_alignment() as u32,
            })
            .collect::<Vec<_>>();

        Capability::new(in_formats, out_formats, profiles_map, levels)
    }

    /// Create a new decoder session for `format`, in the `AwaitingInitialResolution` state.
    fn new_session(&mut self, format: Format) -> VideoResult<Self::Session> {
        let codec = self.codecs.get(&format).ok_or(VideoError::InvalidFormat)?;
        let context = codec
            // TODO we should use a custom `get_buffer` function that renders directly into the
            // target buffer if the output format is directly supported by libavcodec. Right now
            // libavcodec is allocating its own frame buffers, which forces us to perform a copy.
            .build_decoder()
            .and_then(|b| b.build())
            .context("while creating new session")
            .map_err(VideoError::BackendFailure)?;
        Ok(FfmpegDecoderSession {
            codec_jobs: Default::default(),
            is_flushing: false,
            state: SessionState::AwaitingInitialResolution,
            event_queue: Arc::new(
                EventQueue::new()
                    .context("while creating decoder session")
                    .map_err(VideoError::BackendFailure)?
                    .into(),
            ),
            context,
            current_visible_res: (0, 0),
            avframe: None,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::super::tests::*;
    use super::*;

    #[test]
    fn test_get_capabilities() {
        let decoder = FfmpegDecoder::new();
        let caps = decoder.get_capabilities();
        assert!(!caps.input_formats().is_empty());
        assert!(!caps.output_formats().is_empty());
    }

    #[test]
    fn test_decode_h264_guestmem_to_guestmem() {
        decode_h264_generic(
            &mut FfmpegDecoder::new(),
            build_guest_mem_handle,
            build_guest_mem_handle,
        );
    }

    // Decode using guest memory input and virtio object output buffers.
    #[test]
    fn test_decode_h264_guestmem_to_object() {
        decode_h264_generic(
            &mut FfmpegDecoder::new(),
            build_guest_mem_handle,
            build_object_handle,
        );
    }

    // Decode using virtio object input and guest memory output buffers.
    #[test]
    fn test_decode_h264_object_to_guestmem() {
        decode_h264_generic(
            &mut FfmpegDecoder::new(),
            build_object_handle,
            build_guest_mem_handle,
        );
    }

    // Decode using virtio object input and output buffers.
    #[test]
    fn test_decode_h264_object_to_object() {
        decode_h264_generic(
            &mut FfmpegDecoder::new(),
            build_object_handle,
            build_object_handle,
        );
    }
}