// Path: devices/src/virtio/video/decoder/backend/vda.rs
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::convert::TryFrom;

use anyhow::anyhow;
use base::error;
use base::warn;
use base::AsRawDescriptor;
use base::IntoRawDescriptor;
use libvda::decode::Event as LibvdaEvent;

use crate::virtio::video::decoder::backend::*;
use crate::virtio::video::decoder::Capability;
use crate::virtio::video::error::VideoError;
use crate::virtio::video::error::VideoResult;
use crate::virtio::video::format::*;

/// Since libvda only accepts 32-bit timestamps, we are going to truncate the frame 64-bit timestamp
/// (of nanosecond granularity) to only keep seconds granularity. This would result in information
/// being lost on a regular client, but the Android C2 decoder only sends timestamps with second
/// granularity, so this approach is going to work there. However, this means that this backend is
/// very unlikely to work with any other guest software. We accept this fact because it is
/// impossible to use outside of ChromeOS anyway.
const TIMESTAMP_TRUNCATE_FACTOR: u64 = 1_000_000_000;

// Map a virtio-video coded format onto the libvda profile used to open a decode session.
// Formats without a VDA equivalent are rejected with `InvalidParameter`.
impl TryFrom<Format> for libvda::Profile {
    type Error = VideoError;

    fn try_from(format: Format) -> Result<Self, Self::Error> {
        Ok(match format {
            Format::VP8 => libvda::Profile::VP8,
            Format::VP9 => libvda::Profile::VP9Profile0,
            Format::H264 => libvda::Profile::H264ProfileBaseline,
            Format::Hevc => libvda::Profile::HevcProfileMain,
            _ => {
                error!("specified format {} is not supported by VDA", format);
                return Err(VideoError::InvalidParameter);
            }
        })
    }
}

// Map a virtio-video raw pixel format onto the libvda one. Only NV12 output is supported;
// anything else is rejected with `InvalidParameter`.
impl TryFrom<Format> for libvda::PixelFormat {
    type Error = VideoError;

    fn try_from(format: Format) -> Result<Self, Self::Error> {
        Ok(match format {
            Format::NV12 => libvda::PixelFormat::NV12,
            _ => {
                error!("specified format {} is not supported by VDA", format);
                return Err(VideoError::InvalidParameter);
            }
        })
    }
}

// Convert a plane description to libvda's representation. Note the narrowing `as i32` casts:
// offsets/strides larger than i32::MAX would wrap, but such values do not occur for the frame
// sizes VDA handles.
impl From<&FramePlane> for libvda::FramePlane {
    fn from(plane: &FramePlane) -> Self {
        libvda::FramePlane {
            offset: plane.offset as i32,
            stride: plane.stride as i32,
        }
    }
}

// Translate libvda session events into backend-agnostic `DecoderEvent`s.
impl From<libvda::decode::Event> for DecoderEvent {
    fn from(event: libvda::decode::Event) -> Self {
        // We cannot use the From trait here since neither libvda::decode::Response
        // nor std::result::Result are defined in the current crate.
        fn vda_response_to_result(resp: libvda::decode::Response) -> VideoResult<()> {
            match resp {
                libvda::decode::Response::Success => Ok(()),
                resp => Err(VideoError::BackendFailure(anyhow!("VDA failure: {}", resp))),
            }
        }

        match event {
            LibvdaEvent::ProvidePictureBuffers {
                min_num_buffers,
                width,
                height,
                visible_rect_left,
                visible_rect_top,
                visible_rect_right,
                visible_rect_bottom,
            } => DecoderEvent::ProvidePictureBuffers {
                min_num_buffers,
                width,
                height,
                visible_rect: Rect {
                    left: visible_rect_left,
                    top: visible_rect_top,
                    right: visible_rect_right,
                    bottom: visible_rect_bottom,
                },
            },
            LibvdaEvent::PictureReady {
                buffer_id,
                bitstream_id,
                ..
            } => DecoderEvent::PictureReady {
                picture_buffer_id: buffer_id,
                // Restore the truncated timestamp to its original value (hopefully).
                // `bitstream_id` is the seconds-granularity value we handed to `decode()`, so
                // multiplying by the truncate factor recovers the original nanosecond timestamp
                // only if the client sent second-granularity timestamps (see
                // TIMESTAMP_TRUNCATE_FACTOR above).
                timestamp: TIMESTAMP_TRUNCATE_FACTOR.wrapping_mul(bitstream_id as u64),
            },
            LibvdaEvent::NotifyEndOfBitstreamBuffer { bitstream_id } => {
                // We will patch the timestamp to the actual bitstream ID in `read_event`.
                DecoderEvent::NotifyEndOfBitstreamBuffer(bitstream_id as u32)
            }
            LibvdaEvent::NotifyError(resp) => DecoderEvent::NotifyError(
                VideoError::BackendFailure(anyhow!("VDA failure: {}", resp)),
            ),
            LibvdaEvent::ResetResponse(resp) => {
                DecoderEvent::ResetCompleted(vda_response_to_result(resp))
            }
            LibvdaEvent::FlushResponse(resp) => {
                DecoderEvent::FlushCompleted(vda_response_to_result(resp))
            }
        }
    }
}

// Used by DecoderSession::get_capabilities().
// Builds a `FormatDesc` for one raw output format, advertising the given coded-format
// compatibility `mask` and the frame-size ranges computed by the caller.
fn from_pixel_format(
    fmt: &libvda::PixelFormat,
    mask: u64,
    width_range: FormatRange,
    height_range: FormatRange,
) -> FormatDesc {
    let format = match fmt {
        libvda::PixelFormat::NV12 => Format::NV12,
        libvda::PixelFormat::YV12 => Format::YUV420,
    };

    let frame_formats = vec![FrameFormat {
        width: width_range,
        height: height_range,
        bitrates: Vec::new(),
    }];

    FormatDesc {
        mask,
        format,
        frame_formats,
        plane_align: 1,
    }
}

/// A single libvda decode session together with the state needed to adapt it to the
/// `DecoderSession` trait.
pub struct VdaDecoderSession {
    // Underlying libvda session; all trait methods delegate to it.
    vda_session: libvda::decode::Session,
    // Output pixel format, set by `set_output_parameters()` and required before
    // `use_output_buffer()` can be called.
    format: Option<libvda::PixelFormat>,
    /// libvda can only handle 32-bit timestamps, so we will give it the buffer ID as a timestamp
    /// and map it back to the actual timestamp using this table when a decoded frame is produced.
    timestamp_to_resource_id: BTreeMap<u32, u32>,
}

impl DecoderSession for VdaDecoderSession {
    // Record the desired output pixel format and forward the buffer count to libvda.
    fn set_output_parameters(&mut self, buffer_count: usize, format: Format) -> VideoResult<()> {
        self.format = Some(libvda::PixelFormat::try_from(format)?);
        Ok(self.vda_session.set_output_buffer_count(buffer_count)?)
    }

    // Submit one bitstream buffer for decoding. Only virtio-object resources are accepted;
    // the 64-bit `timestamp` is truncated to seconds to fit libvda's 32-bit bitstream ID
    // (see TIMESTAMP_TRUNCATE_FACTOR).
    fn decode(
        &mut self,
        resource_id: u32,
        timestamp: u64,
        resource: GuestResourceHandle,
        offset: u32,
        bytes_used: u32,
    ) -> VideoResult<()> {
        let handle = match resource {
            GuestResourceHandle::VirtioObject(handle) => handle,
            _ => {
                return Err(VideoError::BackendFailure(anyhow!(
                    "VDA backend only supports virtio object resources"
                )))
            }
        };

        // While the virtio-video driver handles timestamps as nanoseconds, Chrome assumes
        // per-second timestamps coming. So, we need a conversion from nsec to sec. Note that this
        // value should not be an unix time stamp but a frame number that the Android V4L2 C2
        // decoder passes to the driver as a 32-bit integer in our implementation. So, overflow must
        // not happen in this conversion.
        let truncated_timestamp = (timestamp / TIMESTAMP_TRUNCATE_FACTOR) as u32;
        // NOTE(review): if two in-flight buffers truncate to the same second, this insert
        // overwrites the earlier resource ID — acceptable only under the second-granularity
        // assumption documented above.
        self.timestamp_to_resource_id
            .insert(truncated_timestamp, resource_id);

        if truncated_timestamp as u64 * TIMESTAMP_TRUNCATE_FACTOR != timestamp {
            warn!("truncation of timestamp {} resulted in precision loss. Only send timestamps with second granularity to this backend.", timestamp);
        }

        Ok(self.vda_session.decode(
            truncated_timestamp as i32, // bitstream_id
            // Steal the descriptor of the resource, as libvda will close it.
            handle.desc.into_raw_descriptor(),
            offset,
            bytes_used,
        )?)
    }

    // Ask libvda to emit all pending frames; completion arrives as a `FlushCompleted` event.
    fn flush(&mut self) -> VideoResult<()> {
        Ok(self.vda_session.flush()?)
    }

    // Ask libvda to drop all pending work; completion arrives as a `ResetCompleted` event.
    fn reset(&mut self) -> VideoResult<()> {
        Ok(self.vda_session.reset()?)
    }

    // Nothing to do: libvda keeps ownership of output buffers, so there is no backend-side
    // state to clear here.
    fn clear_output_buffers(&mut self) -> VideoResult<()> {
        Ok(())
    }

    // Descriptor that becomes readable whenever a libvda event is available via `read_event()`.
    fn event_pipe(&self) -> &dyn AsRawDescriptor {
        self.vda_session.pipe()
    }

    // Hand an output buffer to libvda. Requires `set_output_parameters()` to have been called
    // first so the pixel format is known; only virtio-object resources are accepted.
    fn use_output_buffer(
        &mut self,
        picture_buffer_id: i32,
        resource: GuestResource,
    ) -> VideoResult<()> {
        let handle = match resource.handle {
            GuestResourceHandle::VirtioObject(handle) => handle,
            _ => {
                return Err(VideoError::BackendFailure(anyhow!(
                    "VDA backend only supports virtio object resources"
                )))
            }
        };
        let vda_planes: Vec<libvda::FramePlane> = resource.planes.iter().map(Into::into).collect();

        Ok(self.vda_session.use_output_buffer(
            picture_buffer_id,
            self.format.ok_or(VideoError::BackendFailure(anyhow!(
                "set_output_parameters() must be called before use_output_buffer()"
            )))?,
            // Steal the descriptor of the resource, as libvda will close it.
            handle.desc.into_raw_descriptor(),
            &vda_planes,
            handle.modifier,
        )?)
    }

    // Return a previously-consumed output buffer to libvda for reuse.
    fn reuse_output_buffer(&mut self, picture_buffer_id: i32) -> VideoResult<()> {
        Ok(self.vda_session.reuse_output_buffer(picture_buffer_id)?)
    }

    // Read the next libvda event, converting it to a `DecoderEvent` and restoring the
    // resource ID recorded by `decode()` for end-of-bitstream notifications.
    fn read_event(&mut self) -> VideoResult<DecoderEvent> {
        self.vda_session
            .read_event()
            .map(Into::into)
            // Libvda returned the truncated timestamp that we gave it as the timestamp of this
            // buffer. Replace it with the bitstream ID that was passed to `decode` for this
            // resource (i.e. the resource ID stored in `timestamp_to_resource_id`).
            .map(|mut e| {
                if let DecoderEvent::NotifyEndOfBitstreamBuffer(timestamp) = &mut e {
                    let bitstream_id = self
                        .timestamp_to_resource_id
                        .remove(timestamp)
                        .unwrap_or_else(|| {
                            error!("timestamp {} not registered!", *timestamp);
                            0
                        });
                    *timestamp = bitstream_id;
                }
                e
            })
            .map_err(Into::into)
    }
}

/// A VDA decoder backend that can be passed to `Decoder::new` in order to create a working decoder.
pub struct LibvdaDecoder(libvda::decode::VdaInstance);

/// SAFETY: safe because the Rcs in `VdaInstance` are always used from the same thread.
unsafe impl Send for LibvdaDecoder {}

impl LibvdaDecoder {
    /// Create a decoder backend instance that can be used to instantiate a decoder.
    pub fn new(backend_type: libvda::decode::VdaImplType) -> VideoResult<Self> {
        Ok(Self(libvda::decode::VdaInstance::new(backend_type)?))
    }
}

impl DecoderBackend for LibvdaDecoder {
    type Session = VdaDecoderSession;

    // Open a libvda session for the given coded format. Fails with `InvalidOperation` if
    // libvda cannot open a session for the corresponding profile.
    fn new_session(&mut self, format: Format) -> VideoResult<Self::Session> {
        let profile = libvda::Profile::try_from(format)?;

        Ok(VdaDecoderSession {
            vda_session: self.0.open_session(profile).map_err(|e| {
                error!("failed to open a session for {:?}: {}", format, e);
                VideoError::InvalidOperation
            })?,
            format: None,
            timestamp_to_resource_id: Default::default(),
        })
    }

    // Query libvda's capabilities and translate them into the virtio-video representation:
    // one input `FormatDesc` per supported coded profile, one output `FormatDesc` per raw
    // format, plus per-format profile and level tables.
    fn get_capabilities(&self) -> Capability {
        let caps = libvda::decode::VdaInstance::get_capabilities(&self.0);

        // Raise the first |# of supported raw formats|-th bits because we can assume that any
        // combination of (a coded format, a raw format) is valid in Chrome.
        let mask = !(u64::MAX << caps.output_formats.len());

        let mut in_fmts = vec![];
        let mut profiles: BTreeMap<Format, Vec<Profile>> = Default::default();
        for fmt in caps.input_formats.iter() {
            match Profile::from_libvda_profile(fmt.profile) {
                Some(profile) => {
                    let format = profile.to_format();
                    in_fmts.push(FormatDesc {
                        mask,
                        format,
                        frame_formats: vec![FrameFormat {
                            width: FormatRange {
                                min: fmt.min_width,
                                max: fmt.max_width,
                                step: 1,
                            },
                            height: FormatRange {
                                min: fmt.min_height,
                                max: fmt.max_height,
                                step: 1,
                            },
                            bitrates: Vec::new(),
                        }],
                        plane_align: 1,
                    });
                    // Group all supported profiles under their coded format.
                    match profiles.entry(format) {
                        Entry::Occupied(mut e) => e.get_mut().push(profile),
                        Entry::Vacant(e) => {
                            e.insert(vec![profile]);
                        }
                    }
                }
                None => {
                    warn!(
                        "No virtio-video equivalent for libvda profile, skipping: {:?}",
                        fmt.profile
                    );
                }
            }
        }

        let levels: BTreeMap<Format, Vec<Level>> = if profiles.contains_key(&Format::H264) {
            // We only support Level 1.0 for H.264.
            vec![(Format::H264, vec![Level::H264_1_0])]
                .into_iter()
                .collect()
        } else {
            Default::default()
        };

        // Prepare {min, max} of {width, height}.
        // While these values are associated with each input format in libvda,
        // they are associated with each output format in virtio-video protocol.
        // Thus, we compute max of min values and min of max values here.
        let min_width = caps.input_formats.iter().map(|fmt| fmt.min_width).max();
        let max_width = caps.input_formats.iter().map(|fmt| fmt.max_width).min();
        let min_height = caps.input_formats.iter().map(|fmt| fmt.min_height).max();
        let max_height = caps.input_formats.iter().map(|fmt| fmt.max_height).min();
        let width_range = FormatRange {
            min: min_width.unwrap_or(0),
            max: max_width.unwrap_or(0),
            step: 1,
        };
        let height_range = FormatRange {
            min: min_height.unwrap_or(0),
            max: max_height.unwrap_or(0),
            step: 1,
        };

        // Raise the first |# of supported coded formats|-th bits because we can assume that any
        // combination of (a coded format, a raw format) is valid in Chrome.
        let mask = !(u64::MAX << caps.input_formats.len());
        let out_fmts = caps
            .output_formats
            .iter()
            .map(|fmt| from_pixel_format(fmt, mask, width_range, height_range))
            .collect();

        Capability::new(in_fmts, out_fmts, profiles, levels)
    }
}