// Path: cranelift/codegen/src/isle_prelude.rs
//! Shared ISLE prelude implementation for optimization (mid-end) and
//! lowering (backend) ISLE environments.

/// Helper macro to define methods in `prelude.isle` within `impl Context for
/// ...` for each backend. These methods are shared amongst all backends.
#[macro_export]
#[doc(hidden)]
macro_rules! isle_common_prelude_methods {
    () => {
        isle_numerics_methods!();

        /// We don't have a way of making a `()` value in isle directly.
        #[inline]
        fn unit(&mut self) -> Unit {
            ()
        }

        /// Add `a + b`, returning `None` if the sum does not fit in `ty`'s
        /// bit width.
        #[inline]
        fn checked_add_with_type(&mut self, ty: Type, a: u64, b: u64) -> Option<u64> {
            let c = a.checked_add(b)?;
            let ty_mask = self.ty_mask(ty);
            // Any bits set above `ty`'s width mean the `ty`-width addition
            // overflowed.
            if (c & !ty_mask) == 0 { Some(c) } else { None }
        }

        /// Does `a + b` overflow when computed at `ty`'s bit width?
        #[inline]
        fn add_overflows_with_type(&mut self, ty: Type, a: u64, b: u64) -> bool {
            self.checked_add_with_type(ty, a, b).is_none()
        }

        /// Constant-fold a signed division at `ty`'s width. Returns `None`
        /// for division by zero and for `ty::MIN / -1` (both of which trap
        /// at runtime and so must not be folded).
        #[inline]
        fn imm64_sdiv(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;
            let y = (y.bits() << shift) >> shift;

            // NB: We can't rely on `checked_div` to detect `ty::MIN / -1`
            // (which overflows and should trap) because we are working with
            // `i64` values here, and `i32::MIN != i64::MIN`, for
            // example. Therefore, we have to explicitly check for this case
            // ourselves.
            let min = ((self.ty_smin(ty) as i64) << shift) >> shift;
            if x == min && y == -1 {
                return None;
            }

            let ty_mask = self.ty_mask(ty) as i64;
            let result = x.checked_div(y)? & ty_mask;
            Some(Imm64::new(result))
        }

        /// Constant-fold a left shift at `ty`'s width; the shift amount is
        /// masked to `ty.bits() - 1`, matching CLIF `ishl` semantics.
        #[inline]
        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // Mask the result to `ty` bits.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x.bits() << y) & ty_mask)
        }

        /// Constant-fold a logical (unsigned) right shift at `ty`'s width.
        #[inline]
        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let ty_mask = self.ty_mask(ty);
            let x = (x.bits() as u64) & ty_mask;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // NB: No need to mask off high bits because they are already zero.
            Imm64::new((x >> y) as i64)
        }

        /// Constant-fold an arithmetic (signed) right shift at `ty`'s width.
        #[inline]
        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Sign extend `x` from `ty.bits()`-width to the full 64 bits.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as i64;
            let y = y.bits() & shift_mask;

            // Mask off sign bits that aren't part of `ty`.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x >> y) & ty_mask)
        }

        /// Sign-extend the low `ty.bits()` bits of `x` to a full `i64`.
        ///
        /// NOTE(review): unlike `imm64_sdiv`/`imm64_sshr` above, this uses a
        /// plain `64 - ty.bits()` (the `max(0, ..)` is a no-op for an
        /// unsigned subtraction), so `ty.bits() > 64` would underflow —
        /// presumably callers only pass scalar types of at most 64 bits;
        /// confirm before widening its use.
        #[inline]
        fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
            let shift_amt = std::cmp::max(0, 64 - ty.bits());
            ((x as i64) << shift_amt) >> shift_amt
        }

        /// Sign-extend an `Imm64` from `ty.bits()` width to `i64`.
        #[inline]
        fn i64_sextend_imm64(&mut self, ty: Type, x: Imm64) -> i64 {
            x.sign_extend_from_width(ty.bits()).bits()
        }

        /// Zero-extend an `Imm64` from `ty.bits()` width to `u64`.
        #[inline]
        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
            (x.bits() as u64) & self.ty_mask(ty)
        }

        /// Constant-fold an integer comparison at `ty`'s width, using the
        /// signed or unsigned extensions of the operands as `cc` demands.
        /// Returns 1 for true and 0 for false.
        #[inline]
        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
            let ux = self.u64_uextend_imm64(ty, x);
            let uy = self.u64_uextend_imm64(ty, y);
            let sx = self.i64_sextend_imm64(ty, x);
            let sy = self.i64_sextend_imm64(ty, y);
            let result = match cc {
                IntCC::Equal => ux == uy,
                IntCC::NotEqual => ux != uy,
                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
                IntCC::UnsignedGreaterThan => ux > uy,
                IntCC::UnsignedLessThanOrEqual => ux <= uy,
                IntCC::UnsignedLessThan => ux < uy,
                IntCC::SignedGreaterThanOrEqual => sx >= sy,
                IntCC::SignedGreaterThan => sx > sy,
                IntCC::SignedLessThanOrEqual => sx <= sy,
                IntCC::SignedLessThan => sx < sy,
            };
            Imm64::new(result.into())
        }

        /// `ty`'s width in bits as a `u8`; panics if it does not fit.
        #[inline]
        fn ty_bits(&mut self, ty: Type) -> u8 {
            use std::convert::TryInto;
            ty.bits().try_into().unwrap()
        }

        /// `ty`'s width in bits as a `u16`.
        #[inline]
        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
            ty.bits() as u16
        }

        /// `ty`'s width in bits as a `u64`.
        #[inline]
        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
            ty.bits() as u64
        }

        /// `ty`'s width in bytes as a `u16`; panics if it does not fit.
        #[inline]
        fn ty_bytes(&mut self, ty: Type) -> u16 {
            u16::try_from(ty.bytes()).unwrap()
        }

        /// A mask with the low `ty.bits()` bits set (e.g. `0xff` for `i8`).
        /// Panics for types wider than 64 bits.
        #[inline]
        fn ty_mask(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        /// A mask with one bit set per lane of `ty` (the low
        /// `ty.lane_count()` bits). Panics for more than 64 lanes.
        #[inline]
        fn ty_lane_mask(&mut self, ty: Type) -> u64 {
            let ty_lane_count = ty.lane_count();
            debug_assert_ne!(ty_lane_count, 0);
            let shift = 64_u64
                .checked_sub(ty_lane_count.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        /// Number of lanes in `ty` as a `u64` (1 for scalars).
        #[inline]
        fn ty_lane_count(&mut self, ty: Type) -> u64 {
            ty.lane_count() as u64
        }

        /// Minimum unsigned value representable in any integer type: zero.
        #[inline]
        fn ty_umin(&mut self, _ty: Type) -> u64 {
            0
        }

        /// Maximum unsigned value representable in `ty` (all `ty.bits()` set).
        #[inline]
        fn ty_umax(&mut self, ty: Type) -> u64 {
            self.ty_mask(ty)
        }

        /// Minimum signed value of `ty`, zero-extended into a `u64`
        /// (e.g. `0x8000_0000` for `i32`).
        #[inline]
        fn ty_smin(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MIN as u64) >> shift
        }

        /// Maximum signed value of `ty` as a `u64`
        /// (e.g. `0x7fff_ffff` for `i32`).
        #[inline]
        fn ty_smax(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MAX as u64) >> shift
        }

        // The `fits_in_*` / `ty_*` families below are ISLE partial
        // extractors: each returns `Some(ty)` when `ty` matches the
        // predicate named by the method, and `None` otherwise.

        /// Matches types of at most 16 bits (excluding dynamic vectors).
        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches types of at most 32 bits (excluding dynamic vectors).
        #[inline]
        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches (fixed or dynamic) vector types whose lane type is at
        /// most 32 bits.
        #[inline]
        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if !ty.is_vector() && !ty.is_dynamic_vector() {
                None
            } else if ty.lane_type().bits() <= 32 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches types of at most 64 bits (excluding dynamic vectors).
        #[inline]
        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches non-float, non-vector types of at most 64 bits.
        #[inline]
        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Extractor form of `ty_int_ref_scalar_64` (same predicate).
        #[inline]
        fn ty_int_ref_scalar_64_extract(&mut self, ty: Type) -> Option<Type> {
            self.ty_int_ref_scalar_64(ty)
        }

        /// Matches exactly-16-bit types.
        #[inline]
        fn ty_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 { Some(ty) } else { None }
        }

        /// Matches exactly-32-bit types.
        #[inline]
        fn ty_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 { Some(ty) } else { None }
        }

        /// Matches exactly-64-bit types.
        #[inline]
        fn ty_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 64 { Some(ty) } else { None }
        }

        /// Matches exactly-128-bit types.
        #[inline]
        fn ty_128(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 128 { Some(ty) } else { None }
        }

        /// Matches 32- or 64-bit types.
        #[inline]
        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 || ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 8- or 16-bit types.
        #[inline]
        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 8 || ty.bits() == 16 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 16- or 32-bit types.
        #[inline]
        fn ty_16_or_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 || ty.bits() == 32 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches the scalar integer types of at most 32 bits.
        #[inline]
        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I8 | I16 | I32 => Some(ty),
                _ => None,
            }
        }

        /// Matches `I64` only.
        #[inline]
        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        /// Matches `I16`, `I32`, or `I64`.
        #[inline]
        fn ty_int_ref_16_to_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I16 | I32 | I64 => Some(ty),
                _ => None,
            }
        }

        /// Matches any integer type.
        #[inline]
        fn ty_int(&mut self, ty: Type) -> Option<Type> {
            ty.is_int().then(|| ty)
        }

        /// Matches scalar (single-lane) types.
        #[inline]
        fn ty_scalar(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 { Some(ty) } else { None }
        }

        /// Matches scalar floating-point types.
        #[inline]
        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() { Some(ty) } else { None }
        }

        /// Matches scalar floats and (fixed) vectors.
        #[inline]
        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() || ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches vectors with floating-point lanes.
        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches vectors with non-floating-point lanes.
        #[inline]
        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && !ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        /// Constructor form of `ty_vec64` (same predicate: 64-bit vectors).
        #[inline]
        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 64-bit vector types.
        #[inline]
        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 128-bit vector types.
        #[inline]
        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches dynamic vectors whose fixed equivalent is 64 bits wide.
        #[inline]
        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches dynamic vectors whose fixed equivalent is 128 bits wide.
        #[inline]
        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 64-bit vectors with integer lanes.
        #[inline]
        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches 128-bit vectors with integer lanes.
        #[inline]
        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches `I64`, the type used for 64-bit addresses.
        #[inline]
        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        /// Reinterpret an `Imm64`'s bits as a `u64` (no masking).
        #[inline]
        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
            imm.bits() as u64
        }

        /// If `x` is a non-negative power of two, return its log2
        /// (the shift amount); otherwise `None`.
        #[inline]
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }

        /// Convert a bool to an all-ones / all-zeros `u64` mask.
        #[inline]
        fn u64_from_bool(&mut self, b: bool) -> u64 {
            if b { u64::MAX } else { 0 }
        }

        /// For multi-lane types, extract `(lane_bits, lane_count)`.
        #[inline]
        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.lane_count() > 1 {
                Some((ty.lane_bits(), ty.lane_count()))
            } else {
                None
            }
        }

        /// For dynamic vectors, extract `(lane_bits, min_lane_count)`.
        #[inline]
        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.is_dynamic_vector() {
                Some((ty.lane_bits(), ty.min_lane_count()))
            } else {
                None
            }
        }

        /// Matches dynamic vectors with integer lanes and a 64-bit minimum
        /// width.
        #[inline]
        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// Matches dynamic vectors with integer lanes and a 128-bit minimum
        /// width.
        #[inline]
        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// Raw bit pattern of an `Ieee16`.
        fn u16_from_ieee16(&mut self, val: Ieee16) -> u16 {
            val.bits()
        }

        /// Raw bit pattern of an `Ieee32`.
        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
            val.bits()
        }

        /// Raw bit pattern of an `Ieee64`.
        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
            val.bits()
        }

        /// `Uimm8` is already a plain `u8`; pass it through.
        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
            val
        }

        /// Matches any type EXCEPT 32x2 vectors (inverted predicate).
        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
                None
            } else {
                Some(ty)
            }
        }

        /// Matches any type except `I64X2` (inverted predicate).
        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
            if ty == I64X2 { None } else { Some(()) }
        }

        /// The trap code for integer division by zero.
        fn trap_code_division_by_zero(&mut self) -> TrapCode {
            TrapCode::INTEGER_DIVISION_BY_ZERO
        }

        /// The trap code for integer overflow.
        fn trap_code_integer_overflow(&mut self) -> TrapCode {
            TrapCode::INTEGER_OVERFLOW
        }

        /// The trap code for a bad float-to-integer conversion.
        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
            TrapCode::BAD_CONVERSION_TO_INTEGER
        }

        /// Matches a non-zero immediate, yielding its raw bits as `u64`.
        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
            match val.bits() {
                0 => None,
                n => Some(n as u64),
            }
        }

        /// Matches a `u32` whose top bit is clear (non-negative as `i32`).
        #[inline]
        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
            if (x as i32) >= 0 { Some(x) } else { None }
        }

        /// Wrap a `u64` bit pattern into an `Imm64`.
        #[inline]
        fn imm64(&mut self, x: u64) -> Imm64 {
            Imm64::new(x as i64)
        }

        /// Wrap a `u64` into an `Imm64`, first masking it to `ty`'s width.
        #[inline]
        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
            Imm64::new((x & self.ty_mask(ty)) as i64)
        }

        /// Convert an `Offset32` to a plain `i32`.
        #[inline]
        fn offset32(&mut self, x: Offset32) -> i32 {
            x.into()
        }

        /// The lane type of `ty` (`ty` itself for scalars).
        #[inline]
        fn lane_type(&mut self, ty: Type) -> Type {
            ty.lane_type()
        }

        /// The vector type with the same lanes but half as many of them;
        /// `None` for scalars or when no such type exists.
        #[inline]
        fn ty_half_lanes(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 {
                None
            } else {
                ty.lane_type().by(ty.lane_count() / 2)
            }
        }

        /// The type with half the bit width of `ty`, if one exists.
        #[inline]
        fn ty_half_width(&mut self, ty: Type) -> Option<Type> {
            ty.half_width()
        }

        /// Are the two types equal?
        #[inline]
        fn ty_equal(&mut self, lhs: Type, rhs: Type) -> bool {
            lhs == rhs
        }

        /// Convert an `Offset32` to `i32` (extractor direction).
        #[inline]
        fn offset32_to_i32(&mut self, offset: Offset32) -> i32 {
            offset.into()
        }

        /// Convert an `i32` to `Offset32` (constructor direction).
        #[inline]
        fn i32_to_offset32(&mut self, offset: i32) -> Offset32 {
            Offset32::new(offset)
        }

        /// Memory flags for accesses known not to trap.
        #[inline]
        fn mem_flags_trusted(&mut self) -> MemFlags {
            MemFlags::trusted()
        }

        /// Matches flags whose explicit endianness is little or unspecified
        /// (i.e. native); rejects explicitly big-endian flags.
        #[inline]
        fn little_or_native_endian(&mut self, flags: MemFlags) -> Option<MemFlags> {
            match flags.explicit_endianness() {
                Some(crate::ir::Endianness::Little) | None => Some(flags),
                Some(crate::ir::Endianness::Big) => None,
            }
        }

        /// The unsigned counterpart of a condition code.
        #[inline]
        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
            x.unsigned()
        }

        /// Matches only the signed condition codes.
        #[inline]
        fn signed_cond_code(&mut self, cc: &IntCC) -> Option<IntCC> {
            match cc {
                IntCC::Equal
                | IntCC::UnsignedGreaterThanOrEqual
                | IntCC::UnsignedGreaterThan
                | IntCC::UnsignedLessThanOrEqual
                | IntCC::UnsignedLessThan
                | IntCC::NotEqual => None,
                IntCC::SignedGreaterThanOrEqual
                | IntCC::SignedGreaterThan
                | IntCC::SignedLessThanOrEqual
                | IntCC::SignedLessThan => Some(*cc),
            }
        }

        /// The condition code with its arguments swapped.
        #[inline]
        fn intcc_swap_args(&mut self, cc: &IntCC) -> IntCC {
            cc.swap_args()
        }

        /// The logical complement of a condition code.
        #[inline]
        fn intcc_complement(&mut self, cc: &IntCC) -> IntCC {
            cc.complement()
        }

        /// The condition code with its "or equal" component removed.
        #[inline]
        fn intcc_without_eq(&mut self, x: &IntCC) -> IntCC {
            x.without_equal()
        }

        /// The float condition code with its arguments swapped.
        #[inline]
        fn floatcc_swap_args(&mut self, cc: &FloatCC) -> FloatCC {
            cc.swap_args()
        }

        /// The logical complement of a float condition code.
        #[inline]
        fn floatcc_complement(&mut self, cc: &FloatCC) -> FloatCC {
            cc.complement()
        }

        /// Is this one of the unordered float condition codes (true when
        /// either operand is NaN)?
        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
            match *cc {
                FloatCC::Unordered
                | FloatCC::UnorderedOrEqual
                | FloatCC::UnorderedOrLessThan
                | FloatCC::UnorderedOrLessThanOrEqual
                | FloatCC::UnorderedOrGreaterThan
                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
                _ => false,
            }
        }

        /// Destructure a two-value array into a tuple.
        #[inline]
        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
            let [a, b] = *arr;
            (a, b)
        }

        /// Build a two-value array from a pair of values.
        #[inline]
        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
            [a, b]
        }

        /// Destructure a three-value array into a tuple.
        #[inline]
        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
            let [a, b, c] = *arr;
            (a, b, c)
        }

        /// Build a three-value array from three values.
        #[inline]
        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
            [a, b, c]
        }

        /// Destructure a two-block-call array into a tuple.
        #[inline]
        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
            let [a, b] = *arr;
            (a, b)
        }

        /// Build a two-block-call array from a pair of block calls.
        #[inline]
        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
            [a, b]
        }

        // The `*_replicated_*` extractors below match a wide constant that
        // is the same narrower constant repeated in every half, yielding
        // that narrower constant (useful for vector splat immediates).

        /// Matches a `u128` whose high and low 64-bit halves are equal.
        fn u128_replicated_u64(&mut self, val: u128) -> Option<u64> {
            let low64 = val as u64 as u128;
            if (low64 | (low64 << 64)) == val {
                Some(low64 as u64)
            } else {
                None
            }
        }

        /// Matches a `u64` whose high and low 32-bit halves are equal.
        fn u64_replicated_u32(&mut self, val: u64) -> Option<u64> {
            let low32 = val as u32 as u64;
            if (low32 | (low32 << 32)) == val {
                Some(low32)
            } else {
                None
            }
        }

        /// Matches a `u32` (passed widened) whose 16-bit halves are equal.
        fn u32_replicated_u16(&mut self, val: u64) -> Option<u64> {
            let val = val as u32;
            let low16 = val as u16 as u32;
            if (low16 | (low16 << 16)) == val {
                Some(low16.into())
            } else {
                None
            }
        }

        /// Matches a `u16` (passed widened) whose 8-bit halves are equal.
        fn u16_replicated_u8(&mut self, val: u64) -> Option<u8> {
            let val = val as u16;
            let low8 = val as u8 as u16;
            if (low8 | (low8 << 8)) == val {
                Some(low8 as u8)
            } else {
                None
            }
        }

        /// Low 64 bits of a `u128`.
        fn u128_low_bits(&mut self, val: u128) -> u64 {
            val as u64
        }

        /// High 64 bits of a `u128`.
        fn u128_high_bits(&mut self, val: u128) -> u64 {
            (val >> 64) as u64
        }

        // Constant-folding helpers for float ops. Those returning `Option`
        // yield `None` when the result is NaN (via `non_nan()`), so rules
        // only fold when the result is a well-defined non-NaN value.

        fn f16_min(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.minimum(b).non_nan()
        }

        fn f16_max(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.maximum(b).non_nan()
        }

        fn f16_neg(&mut self, n: Ieee16) -> Ieee16 {
            -n
        }

        fn f16_abs(&mut self, n: Ieee16) -> Ieee16 {
            n.abs()
        }

        fn f16_copysign(&mut self, a: Ieee16, b: Ieee16) -> Ieee16 {
            a.copysign(b)
        }

        fn f32_add(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs + rhs).non_nan()
        }

        fn f32_sub(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs - rhs).non_nan()
        }

        fn f32_mul(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs * rhs).non_nan()
        }

        fn f32_div(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs / rhs).non_nan()
        }

        fn f32_sqrt(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.sqrt().non_nan()
        }

        fn f32_ceil(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.ceil().non_nan()
        }

        fn f32_floor(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.floor().non_nan()
        }

        fn f32_trunc(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.trunc().non_nan()
        }

        fn f32_nearest(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.round_ties_even().non_nan()
        }

        fn f32_min(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.minimum(b).non_nan()
        }

        fn f32_max(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.maximum(b).non_nan()
        }

        fn f32_neg(&mut self, n: Ieee32) -> Ieee32 {
            -n
        }

        fn f32_abs(&mut self, n: Ieee32) -> Ieee32 {
            n.abs()
        }

        fn f32_copysign(&mut self, a: Ieee32, b: Ieee32) -> Ieee32 {
            a.copysign(b)
        }

        fn f64_add(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs + rhs).non_nan()
        }

        fn f64_sub(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs - rhs).non_nan()
        }

        fn f64_mul(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs * rhs).non_nan()
        }

        fn f64_div(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs / rhs).non_nan()
        }

        fn f64_sqrt(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.sqrt().non_nan()
        }

        fn f64_ceil(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.ceil().non_nan()
        }

        fn f64_floor(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.floor().non_nan()
        }

        fn f64_trunc(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.trunc().non_nan()
        }

        fn f64_nearest(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.round_ties_even().non_nan()
        }

        fn f64_min(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.minimum(b).non_nan()
        }

        fn f64_max(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.maximum(b).non_nan()
        }

        fn f64_neg(&mut self, n: Ieee64) -> Ieee64 {
            -n
        }

        fn f64_abs(&mut self, n: Ieee64) -> Ieee64 {
            n.abs()
        }

        fn f64_copysign(&mut self, a: Ieee64, b: Ieee64) -> Ieee64 {
            a.copysign(b)
        }

        fn f128_min(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.minimum(b).non_nan()
        }

        fn f128_max(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.maximum(b).non_nan()
        }

        fn f128_neg(&mut self, n: Ieee128) -> Ieee128 {
            -n
        }

        fn f128_abs(&mut self, n: Ieee128) -> Ieee128 {
            n.abs()
        }

        fn f128_copysign(&mut self, a: Ieee128, b: Ieee128) -> Ieee128 {
            a.copysign(b)
        }

        /// The instruction that defines `val`, if it is an instruction
        /// result (as opposed to, e.g., a block parameter).
        #[inline]
        fn def_inst(&mut self, val: Value) -> Option<Inst> {
            self.dfg().value_def(val).inst()
        }
    };
}