// Path: crates/polars-parquet/src/parquet/parquet_bridge.rs
// Bridges structs from thrift-generated code to rust enums.12#[cfg(feature = "serde")]3use serde::{Deserialize, Serialize};45use super::thrift_format::{6BoundaryOrder as ParquetBoundaryOrder, CompressionCodec, DataPageHeader, DataPageHeaderV2,7DecimalType, Encoding as ParquetEncoding, FieldRepetitionType, IntType,8LogicalType as ParquetLogicalType, PageType as ParquetPageType, TimeType,9TimeUnit as ParquetTimeUnit, TimestampType,10};11use crate::parquet::error::ParquetError;1213/// The repetition of a parquet field14#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]15#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]16pub enum Repetition {17/// When the field has no null values18Required,19/// When the field may have null values20Optional,21/// When the field may be repeated (list field)22Repeated,23}2425impl TryFrom<FieldRepetitionType> for Repetition {26type Error = ParquetError;2728fn try_from(repetition: FieldRepetitionType) -> Result<Self, Self::Error> {29Ok(match repetition {30FieldRepetitionType::REQUIRED => Repetition::Required,31FieldRepetitionType::OPTIONAL => Repetition::Optional,32FieldRepetitionType::REPEATED => Repetition::Repeated,33_ => return Err(ParquetError::oos("Thrift out of range")),34})35}36}3738impl From<Repetition> for FieldRepetitionType {39fn from(repetition: Repetition) -> Self {40match repetition {41Repetition::Required => FieldRepetitionType::REQUIRED,42Repetition::Optional => FieldRepetitionType::OPTIONAL,43Repetition::Repeated => FieldRepetitionType::REPEATED,44}45}46}4748#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]49#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]50pub enum Compression {51Uncompressed,52Snappy,53Gzip,54Lzo,55Brotli,56Lz4,57Zstd,58Lz4Raw,59}6061impl TryFrom<CompressionCodec> for Compression {62type Error = ParquetError;6364fn try_from(codec: CompressionCodec) -> Result<Self, Self::Error> {65Ok(match codec {66CompressionCodec::UNCOMPRESSED => 
Compression::Uncompressed,67CompressionCodec::SNAPPY => Compression::Snappy,68CompressionCodec::GZIP => Compression::Gzip,69CompressionCodec::LZO => Compression::Lzo,70CompressionCodec::BROTLI => Compression::Brotli,71CompressionCodec::LZ4 => Compression::Lz4,72CompressionCodec::ZSTD => Compression::Zstd,73CompressionCodec::LZ4_RAW => Compression::Lz4Raw,74_ => return Err(ParquetError::oos("Thrift out of range")),75})76}77}7879impl From<Compression> for CompressionCodec {80fn from(codec: Compression) -> Self {81match codec {82Compression::Uncompressed => CompressionCodec::UNCOMPRESSED,83Compression::Snappy => CompressionCodec::SNAPPY,84Compression::Gzip => CompressionCodec::GZIP,85Compression::Lzo => CompressionCodec::LZO,86Compression::Brotli => CompressionCodec::BROTLI,87Compression::Lz4 => CompressionCodec::LZ4,88Compression::Zstd => CompressionCodec::ZSTD,89Compression::Lz4Raw => CompressionCodec::LZ4_RAW,90}91}92}9394/// Defines the compression settings for writing a parquet file.95///96/// If None is provided as a compression setting, then the default compression level is used.97#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]98pub enum CompressionOptions {99Uncompressed,100Snappy,101Gzip(Option<GzipLevel>),102Lzo,103Brotli(Option<BrotliLevel>),104Lz4,105Zstd(Option<ZstdLevel>),106Lz4Raw,107}108109impl From<CompressionOptions> for Compression {110fn from(value: CompressionOptions) -> Self {111match value {112CompressionOptions::Uncompressed => Compression::Uncompressed,113CompressionOptions::Snappy => Compression::Snappy,114CompressionOptions::Gzip(_) => Compression::Gzip,115CompressionOptions::Lzo => Compression::Lzo,116CompressionOptions::Brotli(_) => Compression::Brotli,117CompressionOptions::Lz4 => Compression::Lz4,118CompressionOptions::Zstd(_) => Compression::Zstd,119CompressionOptions::Lz4Raw => Compression::Lz4Raw,120}121}122}123124impl From<CompressionOptions> for CompressionCodec {125fn from(codec: CompressionOptions) -> Self {126match codec 
{127CompressionOptions::Uncompressed => CompressionCodec::UNCOMPRESSED,128CompressionOptions::Snappy => CompressionCodec::SNAPPY,129CompressionOptions::Gzip(_) => CompressionCodec::GZIP,130CompressionOptions::Lzo => CompressionCodec::LZO,131CompressionOptions::Brotli(_) => CompressionCodec::BROTLI,132CompressionOptions::Lz4 => CompressionCodec::LZ4,133CompressionOptions::Zstd(_) => CompressionCodec::ZSTD,134CompressionOptions::Lz4Raw => CompressionCodec::LZ4_RAW,135}136}137}138139/// Defines valid compression levels.140pub(crate) trait CompressionLevel<T: std::fmt::Display + std::cmp::PartialOrd> {141const MINIMUM_LEVEL: T;142const MAXIMUM_LEVEL: T;143144/// Tests if the provided compression level is valid.145fn is_valid_level(level: T) -> Result<(), ParquetError> {146let compression_range = Self::MINIMUM_LEVEL..=Self::MAXIMUM_LEVEL;147if compression_range.contains(&level) {148Ok(())149} else {150Err(ParquetError::InvalidParameter(format!(151"valid compression range {}..={} exceeded.",152compression_range.start(),153compression_range.end()154)))155}156}157}158159/// Represents a valid brotli compression level.160#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]161pub struct BrotliLevel(u32);162163impl Default for BrotliLevel {164fn default() -> Self {165Self(1)166}167}168169impl CompressionLevel<u32> for BrotliLevel {170const MINIMUM_LEVEL: u32 = 0;171const MAXIMUM_LEVEL: u32 = 11;172}173174impl BrotliLevel {175/// Attempts to create a brotli compression level.176///177/// Compression levels must be valid.178pub fn try_new(level: u32) -> Result<Self, ParquetError> {179Self::is_valid_level(level).map(|_| Self(level))180}181182/// Returns the compression level.183pub fn compression_level(&self) -> u32 {184self.0185}186}187188/// Represents a valid gzip compression level.189#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]190pub struct GzipLevel(u8);191192impl Default for GzipLevel {193fn default() -> Self {194// The default as of miniz_oxide 0.5.1 is 6 for 
compression level195// (miniz_oxide::deflate::CompressionLevel::DefaultLevel)196Self(6)197}198}199200impl CompressionLevel<u8> for GzipLevel {201const MINIMUM_LEVEL: u8 = 0;202const MAXIMUM_LEVEL: u8 = 9;203}204205impl GzipLevel {206/// Attempts to create a gzip compression level.207///208/// Compression levels must be valid (i.e. be acceptable for [`flate2::Compression`]).209pub fn try_new(level: u8) -> Result<Self, ParquetError> {210Self::is_valid_level(level).map(|_| Self(level))211}212213/// Returns the compression level.214pub fn compression_level(&self) -> u8 {215self.0216}217}218219#[cfg(feature = "gzip")]220impl From<GzipLevel> for flate2::Compression {221fn from(level: GzipLevel) -> Self {222Self::new(level.compression_level() as u32)223}224}225226/// Represents a valid zstd compression level.227#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]228pub struct ZstdLevel(i32);229230impl CompressionLevel<i32> for ZstdLevel {231// zstd binds to C, and hence zstd::compression_level_range() is not const as this calls the232// underlying C library.233const MINIMUM_LEVEL: i32 = 1;234const MAXIMUM_LEVEL: i32 = 22;235}236237impl ZstdLevel {238/// Attempts to create a zstd compression level from a given compression level.239///240/// Compression levels must be valid (i.e. 
be acceptable for [`zstd::compression_level_range`]).241pub fn try_new(level: i32) -> Result<Self, ParquetError> {242Self::is_valid_level(level).map(|_| Self(level))243}244245/// Returns the compression level.246pub fn compression_level(&self) -> i32 {247self.0248}249}250251#[cfg(feature = "zstd")]252impl Default for ZstdLevel {253fn default() -> Self {254Self(zstd::DEFAULT_COMPRESSION_LEVEL)255}256}257258#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]259pub enum PageType {260DataPage,261DataPageV2,262DictionaryPage,263}264265impl TryFrom<ParquetPageType> for PageType {266type Error = ParquetError;267268fn try_from(type_: ParquetPageType) -> Result<Self, Self::Error> {269Ok(match type_ {270ParquetPageType::DATA_PAGE => PageType::DataPage,271ParquetPageType::DATA_PAGE_V2 => PageType::DataPageV2,272ParquetPageType::DICTIONARY_PAGE => PageType::DictionaryPage,273_ => return Err(ParquetError::oos("Thrift out of range")),274})275}276}277278impl From<PageType> for ParquetPageType {279fn from(type_: PageType) -> Self {280match type_ {281PageType::DataPage => ParquetPageType::DATA_PAGE,282PageType::DataPageV2 => ParquetPageType::DATA_PAGE_V2,283PageType::DictionaryPage => ParquetPageType::DICTIONARY_PAGE,284}285}286}287288#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]289pub enum Encoding {290/// Default encoding.291/// BOOLEAN - 1 bit per value. 0 is false; 1 is true.292/// INT32 - 4 bytes per value. Stored as little-endian.293/// INT64 - 8 bytes per value. Stored as little-endian.294/// FLOAT - 4 bytes per value. IEEE. Stored as little-endian.295/// DOUBLE - 8 bytes per value. IEEE. Stored as little-endian.296/// BYTE_ARRAY - 4 byte length stored as little endian, followed by bytes.297/// FIXED_LEN_BYTE_ARRAY - Just the bytes.298Plain,299/// Deprecated: Dictionary encoding. 
The values in the dictionary are encoded in the300/// plain type.301/// in a data page use RLE_DICTIONARY instead.302/// in a Dictionary page use PLAIN instead303PlainDictionary,304/// Group packed run length encoding. Usable for definition/repetition levels305/// encoding and Booleans (on one bit: 0 is false; 1 is true.)306Rle,307/// Bit packed encoding. This can only be used if the data has a known max308/// width. Usable for definition/repetition levels encoding.309BitPacked,310/// Delta encoding for integers. This can be used for int columns and works best311/// on sorted data312DeltaBinaryPacked,313/// Encoding for byte arrays to separate the length values and the data. The lengths314/// are encoded using DELTA_BINARY_PACKED315DeltaLengthByteArray,316/// Incremental-encoded byte array. Prefix lengths are encoded using DELTA_BINARY_PACKED.317/// Suffixes are stored as delta length byte arrays.318DeltaByteArray,319/// Dictionary encoding: the ids are encoded using the RLE encoding320RleDictionary,321/// Encoding for floating-point data.322/// K byte-streams are created where K is the size in bytes of the data type.323/// The individual bytes of an FP value are scattered to the corresponding stream and324/// the streams are concatenated.325/// This itself does not reduce the size of the data but can lead to better compression326/// afterwards.327ByteStreamSplit,328}329330impl TryFrom<ParquetEncoding> for Encoding {331type Error = ParquetError;332333fn try_from(encoding: ParquetEncoding) -> Result<Self, Self::Error> {334Ok(match encoding {335ParquetEncoding::PLAIN => Encoding::Plain,336ParquetEncoding::PLAIN_DICTIONARY => Encoding::PlainDictionary,337ParquetEncoding::RLE => Encoding::Rle,338ParquetEncoding::BIT_PACKED => Encoding::BitPacked,339ParquetEncoding::DELTA_BINARY_PACKED => Encoding::DeltaBinaryPacked,340ParquetEncoding::DELTA_LENGTH_BYTE_ARRAY => Encoding::DeltaLengthByteArray,341ParquetEncoding::DELTA_BYTE_ARRAY => 
Encoding::DeltaByteArray,342ParquetEncoding::RLE_DICTIONARY => Encoding::RleDictionary,343ParquetEncoding::BYTE_STREAM_SPLIT => Encoding::ByteStreamSplit,344_ => return Err(ParquetError::oos("Thrift out of range")),345})346}347}348349impl From<Encoding> for ParquetEncoding {350fn from(encoding: Encoding) -> Self {351match encoding {352Encoding::Plain => ParquetEncoding::PLAIN,353Encoding::PlainDictionary => ParquetEncoding::PLAIN_DICTIONARY,354Encoding::Rle => ParquetEncoding::RLE,355Encoding::BitPacked => ParquetEncoding::BIT_PACKED,356Encoding::DeltaBinaryPacked => ParquetEncoding::DELTA_BINARY_PACKED,357Encoding::DeltaLengthByteArray => ParquetEncoding::DELTA_LENGTH_BYTE_ARRAY,358Encoding::DeltaByteArray => ParquetEncoding::DELTA_BYTE_ARRAY,359Encoding::RleDictionary => ParquetEncoding::RLE_DICTIONARY,360Encoding::ByteStreamSplit => ParquetEncoding::BYTE_STREAM_SPLIT,361}362}363}364365/// Enum to annotate whether lists of min/max elements inside ColumnIndex366/// are ordered and if so, in which direction.367#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]368pub enum BoundaryOrder {369Unordered,370Ascending,371Descending,372}373374impl Default for BoundaryOrder {375fn default() -> Self {376Self::Unordered377}378}379380impl TryFrom<ParquetBoundaryOrder> for BoundaryOrder {381type Error = ParquetError;382383fn try_from(encoding: ParquetBoundaryOrder) -> Result<Self, Self::Error> {384Ok(match encoding {385ParquetBoundaryOrder::UNORDERED => BoundaryOrder::Unordered,386ParquetBoundaryOrder::ASCENDING => BoundaryOrder::Ascending,387ParquetBoundaryOrder::DESCENDING => BoundaryOrder::Descending,388_ => return Err(ParquetError::oos("BoundaryOrder Thrift value out of range")),389})390}391}392393impl From<BoundaryOrder> for ParquetBoundaryOrder {394fn from(encoding: BoundaryOrder) -> Self {395match encoding {396BoundaryOrder::Unordered => ParquetBoundaryOrder::UNORDERED,397BoundaryOrder::Ascending => ParquetBoundaryOrder::ASCENDING,398BoundaryOrder::Descending => 
ParquetBoundaryOrder::DESCENDING,399}400}401}402403pub trait DataPageHeaderExt {404fn encoding(&self) -> Encoding;405fn repetition_level_encoding(&self) -> Encoding;406fn definition_level_encoding(&self) -> Encoding;407}408409impl DataPageHeaderExt for DataPageHeader {410fn encoding(&self) -> Encoding {411self.encoding.try_into().unwrap()412}413414fn repetition_level_encoding(&self) -> Encoding {415self.repetition_level_encoding.try_into().unwrap()416}417418fn definition_level_encoding(&self) -> Encoding {419self.definition_level_encoding.try_into().unwrap()420}421}422423impl DataPageHeaderExt for DataPageHeaderV2 {424fn encoding(&self) -> Encoding {425self.encoding.try_into().unwrap()426}427428fn repetition_level_encoding(&self) -> Encoding {429Encoding::Rle430}431432fn definition_level_encoding(&self) -> Encoding {433Encoding::Rle434}435}436437#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]438#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]439pub enum TimeUnit {440Milliseconds,441Microseconds,442Nanoseconds,443}444445impl From<ParquetTimeUnit> for TimeUnit {446fn from(encoding: ParquetTimeUnit) -> Self {447match encoding {448ParquetTimeUnit::MILLIS(_) => TimeUnit::Milliseconds,449ParquetTimeUnit::MICROS(_) => TimeUnit::Microseconds,450ParquetTimeUnit::NANOS(_) => TimeUnit::Nanoseconds,451}452}453}454455impl From<TimeUnit> for ParquetTimeUnit {456fn from(unit: TimeUnit) -> Self {457match unit {458TimeUnit::Milliseconds => ParquetTimeUnit::MILLIS(Default::default()),459TimeUnit::Microseconds => ParquetTimeUnit::MICROS(Default::default()),460TimeUnit::Nanoseconds => ParquetTimeUnit::NANOS(Default::default()),461}462}463}464465/// Enum of all valid logical integer types466#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]467#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]468pub enum IntegerType {469Int8,470Int16,471Int32,472Int64,473UInt8,474UInt16,475UInt32,476UInt64,477}478479#[derive(Debug, Clone, Copy, PartialEq, Eq, 
Hash)]480#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]481pub enum PrimitiveLogicalType {482String,483Enum,484Decimal(usize, usize),485Date,486Time {487unit: TimeUnit,488is_adjusted_to_utc: bool,489},490Timestamp {491unit: TimeUnit,492is_adjusted_to_utc: bool,493},494Integer(IntegerType),495Unknown,496Json,497Bson,498Uuid,499Float16,500}501502#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]503#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]504pub enum GroupLogicalType {505Map,506List,507}508509impl From<GroupLogicalType> for ParquetLogicalType {510fn from(type_: GroupLogicalType) -> Self {511match type_ {512GroupLogicalType::Map => ParquetLogicalType::MAP(Default::default()),513GroupLogicalType::List => ParquetLogicalType::LIST(Default::default()),514}515}516}517518impl From<(i32, bool)> for IntegerType {519fn from((bit_width, is_signed): (i32, bool)) -> Self {520match (bit_width, is_signed) {521(8, true) => IntegerType::Int8,522(16, true) => IntegerType::Int16,523(32, true) => IntegerType::Int32,524(64, true) => IntegerType::Int64,525(8, false) => IntegerType::UInt8,526(16, false) => IntegerType::UInt16,527(32, false) => IntegerType::UInt32,528(64, false) => IntegerType::UInt64,529// The above are the only possible annotations for parquet's int32. 
Anything else530// is a deviation to the parquet specification and we ignore531_ => IntegerType::Int32,532}533}534}535536impl From<IntegerType> for (usize, bool) {537fn from(type_: IntegerType) -> (usize, bool) {538match type_ {539IntegerType::Int8 => (8, true),540IntegerType::Int16 => (16, true),541IntegerType::Int32 => (32, true),542IntegerType::Int64 => (64, true),543IntegerType::UInt8 => (8, false),544IntegerType::UInt16 => (16, false),545IntegerType::UInt32 => (32, false),546IntegerType::UInt64 => (64, false),547}548}549}550551impl TryFrom<ParquetLogicalType> for PrimitiveLogicalType {552type Error = ParquetError;553554fn try_from(type_: ParquetLogicalType) -> Result<Self, Self::Error> {555Ok(match type_ {556ParquetLogicalType::STRING(_) => PrimitiveLogicalType::String,557ParquetLogicalType::ENUM(_) => PrimitiveLogicalType::Enum,558ParquetLogicalType::DECIMAL(decimal) => PrimitiveLogicalType::Decimal(559decimal.precision.try_into()?,560decimal.scale.try_into()?,561),562ParquetLogicalType::DATE(_) => PrimitiveLogicalType::Date,563ParquetLogicalType::TIME(time) => PrimitiveLogicalType::Time {564unit: time.unit.into(),565is_adjusted_to_utc: time.is_adjusted_to_u_t_c,566},567ParquetLogicalType::TIMESTAMP(time) => PrimitiveLogicalType::Timestamp {568unit: time.unit.into(),569is_adjusted_to_utc: time.is_adjusted_to_u_t_c,570},571ParquetLogicalType::INTEGER(int) => {572PrimitiveLogicalType::Integer((int.bit_width as i32, int.is_signed).into())573},574ParquetLogicalType::UNKNOWN(_) => PrimitiveLogicalType::Unknown,575ParquetLogicalType::JSON(_) => PrimitiveLogicalType::Json,576ParquetLogicalType::BSON(_) => PrimitiveLogicalType::Bson,577ParquetLogicalType::UUID(_) => PrimitiveLogicalType::Uuid,578ParquetLogicalType::FLOAT16(_) => PrimitiveLogicalType::Float16,579_ => return Err(ParquetError::oos("LogicalType value out of range")),580})581}582}583584impl TryFrom<ParquetLogicalType> for GroupLogicalType {585type Error = ParquetError;586587fn try_from(type_: 
ParquetLogicalType) -> Result<Self, Self::Error> {588Ok(match type_ {589ParquetLogicalType::LIST(_) => GroupLogicalType::List,590ParquetLogicalType::MAP(_) => GroupLogicalType::Map,591_ => return Err(ParquetError::oos("LogicalType value out of range")),592})593}594}595596impl From<PrimitiveLogicalType> for ParquetLogicalType {597fn from(type_: PrimitiveLogicalType) -> Self {598match type_ {599PrimitiveLogicalType::String => ParquetLogicalType::STRING(Default::default()),600PrimitiveLogicalType::Enum => ParquetLogicalType::ENUM(Default::default()),601PrimitiveLogicalType::Decimal(precision, scale) => {602ParquetLogicalType::DECIMAL(DecimalType {603precision: precision as i32,604scale: scale as i32,605})606},607PrimitiveLogicalType::Date => ParquetLogicalType::DATE(Default::default()),608PrimitiveLogicalType::Time {609unit,610is_adjusted_to_utc,611} => ParquetLogicalType::TIME(TimeType {612unit: unit.into(),613is_adjusted_to_u_t_c: is_adjusted_to_utc,614}),615PrimitiveLogicalType::Timestamp {616unit,617is_adjusted_to_utc,618} => ParquetLogicalType::TIMESTAMP(TimestampType {619unit: unit.into(),620is_adjusted_to_u_t_c: is_adjusted_to_utc,621}),622PrimitiveLogicalType::Integer(integer) => {623let (bit_width, is_signed) = integer.into();624ParquetLogicalType::INTEGER(IntType {625bit_width: bit_width as i8,626is_signed,627})628},629PrimitiveLogicalType::Unknown => ParquetLogicalType::UNKNOWN(Default::default()),630PrimitiveLogicalType::Json => ParquetLogicalType::JSON(Default::default()),631PrimitiveLogicalType::Bson => ParquetLogicalType::BSON(Default::default()),632PrimitiveLogicalType::Uuid => ParquetLogicalType::UUID(Default::default()),633PrimitiveLogicalType::Float16 => ParquetLogicalType::FLOAT16(Default::default()),634}635}636}637638#[cfg(test)]639mod tests {640use super::*;641642#[test]643fn round_trip_primitive() -> Result<(), ParquetError> {644use PrimitiveLogicalType::*;645let a = vec![646String,647Enum,648Decimal(3, 1),649Date,650Time {651unit: 
TimeUnit::Milliseconds,652is_adjusted_to_utc: true,653},654Timestamp {655unit: TimeUnit::Milliseconds,656is_adjusted_to_utc: true,657},658Integer(IntegerType::Int16),659Unknown,660Json,661Bson,662Uuid,663];664for a in a {665let c: ParquetLogicalType = a.into();666let e: PrimitiveLogicalType = c.try_into()?;667assert_eq!(e, a);668}669Ok(())670}671672#[test]673fn round_trip_encoding() -> Result<(), ParquetError> {674use Encoding::*;675let a = vec![676Plain,677PlainDictionary,678Rle,679BitPacked,680DeltaBinaryPacked,681DeltaLengthByteArray,682DeltaByteArray,683RleDictionary,684ByteStreamSplit,685];686for a in a {687let c: ParquetEncoding = a.into();688let e: Encoding = c.try_into()?;689assert_eq!(e, a);690}691Ok(())692}693694#[test]695fn round_compression() -> Result<(), ParquetError> {696use Compression::*;697let a = vec![Uncompressed, Snappy, Gzip, Lzo, Brotli, Lz4, Zstd, Lz4Raw];698for a in a {699let c: CompressionCodec = a.into();700let e: Compression = c.try_into()?;701assert_eq!(e, a);702}703Ok(())704}705}706707708