// Path: cranelift/codegen/src/isle_prelude.rs
//! Shared ISLE prelude implementation for optimization (mid-end) and
//! lowering (backend) ISLE environments.

/// Helper macro to define methods in `prelude.isle` within `impl Context for
/// ...` for each backend. These methods are shared amongst all backends.
#[macro_export]
#[doc(hidden)]
macro_rules! isle_common_prelude_methods {
    () => {
        isle_numerics_methods!();

        /// We don't have a way of making a `()` value in isle directly.
        #[inline]
        fn unit(&mut self) -> Unit {
            ()
        }

        /// `a + b`, but `None` if the sum doesn't fit in `ty`'s bit width.
        #[inline]
        fn checked_add_with_type(&mut self, ty: Type, a: u64, b: u64) -> Option<u64> {
            let c = a.checked_add(b)?;
            let ty_mask = self.ty_mask(ty);
            if (c & !ty_mask) == 0 { Some(c) } else { None }
        }

        #[inline]
        fn add_overflows_with_type(&mut self, ty: Type, a: u64, b: u64) -> bool {
            self.checked_add_with_type(ty, a, b).is_none()
        }

        /// Count leading zeros of `a` within `ty`'s bit width.
        /// NOTE(review): assumes `a` has no bits set above `ty.bits()`,
        /// otherwise the subtraction below would underflow — confirm callers
        /// pass masked immediates.
        #[inline]
        fn imm64_clz(&mut self, ty: Type, a: Imm64) -> Imm64 {
            let bits = ty.bits();
            assert!(bits <= 64);
            // `leading_zeros` counts in 64 bits; discount the bits above `ty`.
            let clz_offset = 64 - bits;
            let a_v: u64 = a.bits().cast_unsigned();
            let lz = a_v.leading_zeros() - clz_offset;
            Imm64::new(i64::from(lz))
        }

        /// Count trailing zeros of `a` within `ty`'s bit width.
        #[inline]
        fn imm64_ctz(&mut self, ty: Type, a: Imm64) -> Imm64 {
            let bits = ty.bits();
            assert!(bits <= 64);
            let a_v: u64 = a.bits().cast_unsigned();
            if a_v == 0 {
                // ctz(0) is defined to be the number of bits in the type.
                Imm64::new(i64::from(bits))
            } else {
                let lz = a_v.trailing_zeros();
                Imm64::new(i64::from(lz))
            }
        }

        /// Signed division of two `ty`-width immediates; `None` on division
        /// by zero or on the overflowing `ty::MIN / -1` case.
        #[inline]
        fn imm64_sdiv(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let type_width = ty.bits();
            assert!(type_width <= 64);
            let x = x.sign_extend_from_width(type_width).bits();
            let y = y.sign_extend_from_width(type_width).bits();
            let shift = 64 - type_width;

            // NB: We can't rely on `checked_div` to detect `ty::MIN / -1`
            // (which overflows and should trap) because we are working with
            // `i64` values here, and `i32::MIN != i64::MIN`, for
            // example. Therefore, we have to explicitly check for this case
            // ourselves.
            let min = ((self.ty_smin(ty) as i64) << shift) >> shift;
            if x == min && y == -1 {
                return None;
            }

            let result = x.checked_div(y)?;
            Some(Imm64::new(result).mask_to_width(type_width))
        }

        /// Signed remainder of two `ty`-width immediates; `None` on division
        /// by zero.
        #[inline]
        fn imm64_srem(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let type_width = ty.bits();
            assert!(type_width <= 64);
            let x = x.sign_extend_from_width(type_width).bits();
            let y = y.sign_extend_from_width(type_width).bits();

            // iN::min % -1 is defined as 0 in wasm so no need
            // to check for it

            let result = x.checked_rem(y)?;
            Some(Imm64::new(result).mask_to_width(type_width))
        }

        /// Left shift of a `ty`-width immediate; the shift amount is taken
        /// modulo `ty.bits()` and the result is truncated to `ty`.
        #[inline]
        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // Mask the result to `ty` bits.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x.bits() << y) & ty_mask)
        }

        /// Logical (zero-filling) right shift of a `ty`-width immediate.
        #[inline]
        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let ty_mask = self.ty_mask(ty);
            let x = (x.bits() as u64) & ty_mask;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // NB: No need to mask off high bits because they are already zero.
            Imm64::new((x >> y) as i64)
        }

        /// Arithmetic (sign-filling) right shift of a `ty`-width immediate.
        #[inline]
        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Sign extend `x` from `ty.bits()`-width to the full 64 bits.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as i64;
            let y = y.bits() & shift_mask;

            // Mask off sign bits that aren't part of `ty`.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x >> y) & ty_mask)
        }

        /// Sign-extend the low `ty.bits()` of `x` to a full `i64`.
        #[inline]
        fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
            let shift_amt = core::cmp::max(0, 64 - ty.bits());
            ((x as i64) << shift_amt) >> shift_amt
        }

        #[inline]
        fn i64_sextend_imm64(&mut self, ty: Type, x: Imm64) -> i64 {
            x.sign_extend_from_width(ty.bits()).bits()
        }

        #[inline]
        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
            (x.bits() as u64) & self.ty_mask(ty)
        }

        /// Evaluate an integer comparison on two `ty`-width immediates,
        /// returning 1 for true and 0 for false.
        #[inline]
        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
            let ux = self.u64_uextend_imm64(ty, x);
            let uy = self.u64_uextend_imm64(ty, y);
            let sx = self.i64_sextend_imm64(ty, x);
            let sy = self.i64_sextend_imm64(ty, y);
            let result = match cc {
                IntCC::Equal => ux == uy,
                IntCC::NotEqual => ux != uy,
                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
                IntCC::UnsignedGreaterThan => ux > uy,
                IntCC::UnsignedLessThanOrEqual => ux <= uy,
                IntCC::UnsignedLessThan => ux < uy,
                IntCC::SignedGreaterThanOrEqual => sx >= sy,
                IntCC::SignedGreaterThan => sx > sy,
                IntCC::SignedLessThanOrEqual => sx <= sy,
                IntCC::SignedLessThan => sx < sy,
            };
            Imm64::new(result.into())
        }

        #[inline]
        fn ty_bits(&mut self, ty: Type) -> u8 {
            use core::convert::TryInto;
            ty.bits().try_into().unwrap()
        }

        #[inline]
        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
            ty.bits() as u16
        }

        #[inline]
        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
            ty.bits() as u64
        }

        #[inline]
        fn ty_bytes(&mut self, ty: Type) -> u16 {
            u16::try_from(ty.bytes()).unwrap()
        }

        /// An all-ones mask that is exactly `ty.bits()` wide (e.g. `0xff`
        /// for `I8`). Panics for types wider than 64 bits.
        #[inline]
        fn ty_mask(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        /// An all-ones mask with one bit per lane of `ty`.
        #[inline]
        fn ty_lane_mask(&mut self, ty: Type) -> u64 {
            let ty_lane_count = ty.lane_count();
            debug_assert_ne!(ty_lane_count, 0);
            let shift = 64_u64
                .checked_sub(ty_lane_count.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        #[inline]
        fn ty_lane_count(&mut self, ty: Type) -> u64 {
            ty.lane_count() as u64
        }

        /// Minimum unsigned value representable in any integer type: zero.
        #[inline]
        fn ty_umin(&mut self, _ty: Type) -> u64 {
            0
        }

        /// Maximum unsigned value representable in `ty` (all bits set).
        #[inline]
        fn ty_umax(&mut self, ty: Type) -> u64 {
            self.ty_mask(ty)
        }

        /// Minimum signed value of `ty`, as its unsigned bit pattern
        /// (just the sign bit set).
        #[inline]
        fn ty_smin(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MIN as u64) >> shift
        }

        /// Maximum signed value of `ty`, as its unsigned bit pattern
        /// (all bits below the sign bit set).
        #[inline]
        fn ty_smax(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MAX as u64) >> shift
        }

        // The `fits_in_*` / `ty_*` extractors below all follow the same
        // pattern: return `Some(ty)` when `ty` satisfies the predicate in
        // the method name, `None` otherwise, so ISLE rules can match on them.

        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Vector types (fixed or dynamic) whose lanes are at most 32 bits.
        #[inline]
        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if !ty.is_vector() && !ty.is_dynamic_vector() {
                None
            } else if ty.lane_type().bits() <= 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Scalar (non-float, non-vector) types of at most 64 bits.
        #[inline]
        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_int_ref_scalar_64_extract(&mut self, ty: Type) -> Option<Type> {
            self.ty_int_ref_scalar_64(ty)
        }

        #[inline]
        fn ty_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 64 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_128(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 128 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 || ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 8 || ty.bits() == 16 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_16_or_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 || ty.bits() == 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I8 | I16 | I32 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int_ref_16_to_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I16 | I32 | I64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int(&mut self, ty: Type) -> Option<Type> {
            ty.is_int().then(|| ty)
        }

        #[inline]
        fn ty_scalar(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() { Some(ty) } else { None }
        }

        #[inline]
        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() || ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && !ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
            imm.bits() as u64
        }

        /// If `x` is a non-negative power of two, return log2(x).
        #[inline]
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }

        /// `true` maps to all-ones, `false` to zero.
        #[inline]
        fn u64_from_bool(&mut self, b: bool) -> u64 {
            if b { u64::MAX } else { 0 }
        }

        /// For multi-lane types, return `(lane_bits, lane_count)`.
        #[inline]
        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.lane_count() > 1 {
                Some((ty.lane_bits(), ty.lane_count()))
            } else {
                None
            }
        }

        /// For dynamic vectors, return `(lane_bits, min_lane_count)`.
        #[inline]
        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.is_dynamic_vector() {
                Some((ty.lane_bits(), ty.min_lane_count()))
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        fn u16_from_ieee16(&mut self, val: Ieee16) -> u16 {
            val.bits()
        }

        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
            val.bits()
        }

        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
            val.bits()
        }

        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
            val
        }

        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
                None
            } else {
                Some(ty)
            }
        }

        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
            if ty == I64X2 { None } else { Some(()) }
        }

        fn trap_code_division_by_zero(&mut self) -> TrapCode {
            TrapCode::INTEGER_DIVISION_BY_ZERO
        }

        fn trap_code_integer_overflow(&mut self) -> TrapCode {
            TrapCode::INTEGER_OVERFLOW
        }

        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
            TrapCode::BAD_CONVERSION_TO_INTEGER
        }

        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
            match val.bits() {
                0 => None,
                n => Some(n as u64),
            }
        }

        #[inline]
        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
            if (x as i32) >= 0 { Some(x) } else { None }
        }

        #[inline]
        fn imm64(&mut self, x: u64) -> Imm64 {
            Imm64::new(x as i64)
        }

        /// Construct an `Imm64` from `x` truncated to `ty`'s bit width.
        #[inline]
        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
            Imm64::new((x & self.ty_mask(ty)) as i64)
        }

        #[inline]
        fn offset32(&mut self, x: Offset32) -> i32 {
            x.into()
        }

        #[inline]
        fn lane_type(&mut self, ty: Type) -> Type {
            ty.lane_type()
        }

        /// A vector type with the same lanes as `ty` but half as many.
        #[inline]
        fn ty_half_lanes(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 {
                None
            } else {
                ty.lane_type().by(ty.lane_count() / 2)
            }
        }

        #[inline]
        fn ty_half_width(&mut self, ty: Type) -> Option<Type> {
            ty.half_width()
        }

        #[inline]
        fn ty_equal(&mut self, lhs: Type, rhs: Type) -> bool {
            lhs == rhs
        }

        #[inline]
        fn offset32_to_i32(&mut self, offset: Offset32) -> i32 {
            offset.into()
        }

        #[inline]
        fn i32_to_offset32(&mut self, offset: i32) -> Offset32 {
            Offset32::new(offset)
        }

        #[inline]
        fn mem_flags_trusted(&mut self) -> MemFlags {
            MemFlags::trusted()
        }

        /// Pass `flags` through unless they explicitly request big-endian.
        #[inline]
        fn little_or_native_endian(&mut self, flags: MemFlags) -> Option<MemFlags> {
            match flags.explicit_endianness() {
                Some(crate::ir::Endianness::Little) | None => Some(flags),
                Some(crate::ir::Endianness::Big) => None,
            }
        }

        #[inline]
        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
            x.unsigned()
        }

        /// Match only the signed comparison condition codes.
        #[inline]
        fn signed_cond_code(&mut self, cc: &IntCC) -> Option<IntCC> {
            match cc {
                IntCC::Equal
                | IntCC::UnsignedGreaterThanOrEqual
                | IntCC::UnsignedGreaterThan
                | IntCC::UnsignedLessThanOrEqual
                | IntCC::UnsignedLessThan
                | IntCC::NotEqual => None,
                IntCC::SignedGreaterThanOrEqual
                | IntCC::SignedGreaterThan
                | IntCC::SignedLessThanOrEqual
                | IntCC::SignedLessThan => Some(*cc),
            }
        }

        #[inline]
        fn intcc_swap_args(&mut self, cc: &IntCC) -> IntCC {
            cc.swap_args()
        }

        #[inline]
        fn intcc_complement(&mut self, cc: &IntCC) -> IntCC {
            cc.complement()
        }

        #[inline]
        fn intcc_without_eq(&mut self, x: &IntCC) -> IntCC {
            x.without_equal()
        }

        #[inline]
        fn floatcc_swap_args(&mut self, cc: &FloatCC) -> FloatCC {
            cc.swap_args()
        }

        #[inline]
        fn floatcc_complement(&mut self, cc: &FloatCC) -> FloatCC {
            cc.complement()
        }

        /// True for the condition codes that hold when operands are
        /// unordered (i.e. at least one is NaN).
        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
            match *cc {
                FloatCC::Unordered
                | FloatCC::UnorderedOrEqual
                | FloatCC::UnorderedOrLessThan
                | FloatCC::UnorderedOrLessThanOrEqual
                | FloatCC::UnorderedOrGreaterThan
                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
                _ => false,
            }
        }

        #[inline]
        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
            [a, b]
        }

        #[inline]
        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
            let [a, b, c] = *arr;
            (a, b, c)
        }

        #[inline]
        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
            [a, b, c]
        }

        #[inline]
        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
            [a, b]
        }

        /// If `val` is the same 64-bit pattern repeated twice, return it.
        fn u128_replicated_u64(&mut self, val: u128) -> Option<u64> {
            let low64 = val as u64 as u128;
            if (low64 | (low64 << 64)) == val {
                Some(low64 as u64)
            } else {
                None
            }
        }

        /// If `val` is the same 32-bit pattern repeated twice, return it.
        fn u64_replicated_u32(&mut self, val: u64) -> Option<u64> {
            let low32 = val as u32 as u64;
            if (low32 | (low32 << 32)) == val {
                Some(low32)
            } else {
                None
            }
        }

        /// If the low 32 bits of `val` are the same 16-bit pattern repeated
        /// twice, return it.
        fn u32_replicated_u16(&mut self, val: u64) -> Option<u64> {
            let val = val as u32;
            let low16 = val as u16 as u32;
            if (low16 | (low16 << 16)) == val {
                Some(low16.into())
            } else {
                None
            }
        }

        /// If the low 16 bits of `val` are the same 8-bit pattern repeated
        /// twice, return it.
        fn u16_replicated_u8(&mut self, val: u64) -> Option<u8> {
            let val = val as u16;
            let low8 = val as u8 as u16;
            if (low8 | (low8 << 8)) == val {
                Some(low8 as u8)
            } else {
                None
            }
        }

        fn u128_low_bits(&mut self, val: u128) -> u64 {
            val as u64
        }

        fn u128_high_bits(&mut self, val: u128) -> u64 {
            (val >> 64) as u64
        }

        // Constant-folding helpers for float ops. The fallible ones return
        // `None` when the result is NaN, so ISLE rules don't fold away
        // NaN-producing operations (NaN bit patterns are handled elsewhere).

        fn f16_min(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.minimum(b).non_nan()
        }

        fn f16_max(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.maximum(b).non_nan()
        }

        fn f16_neg(&mut self, n: Ieee16) -> Ieee16 {
            -n
        }

        fn f16_abs(&mut self, n: Ieee16) -> Ieee16 {
            n.abs()
        }

        fn f16_copysign(&mut self, a: Ieee16, b: Ieee16) -> Ieee16 {
            a.copysign(b)
        }

        fn f32_add(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs + rhs).non_nan()
        }

        fn f32_sub(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs - rhs).non_nan()
        }

        fn f32_mul(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs * rhs).non_nan()
        }

        fn f32_div(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs / rhs).non_nan()
        }

        fn f32_sqrt(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.sqrt().non_nan()
        }

        fn f32_ceil(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.ceil().non_nan()
        }

        fn f32_floor(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.floor().non_nan()
        }

        fn f32_trunc(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.trunc().non_nan()
        }

        fn f32_nearest(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.round_ties_even().non_nan()
        }

        fn f32_min(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.minimum(b).non_nan()
        }

        fn f32_max(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.maximum(b).non_nan()
        }

        fn f32_neg(&mut self, n: Ieee32) -> Ieee32 {
            -n
        }

        fn f32_abs(&mut self, n: Ieee32) -> Ieee32 {
            n.abs()
        }

        fn f32_copysign(&mut self, a: Ieee32, b: Ieee32) -> Ieee32 {
            a.copysign(b)
        }

        fn f64_add(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs + rhs).non_nan()
        }

        fn f64_sub(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs - rhs).non_nan()
        }

        fn f64_mul(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs * rhs).non_nan()
        }

        fn f64_div(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs / rhs).non_nan()
        }

        fn f64_sqrt(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.sqrt().non_nan()
        }

        fn f64_ceil(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.ceil().non_nan()
        }

        fn f64_floor(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.floor().non_nan()
        }

        fn f64_trunc(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.trunc().non_nan()
        }

        fn f64_nearest(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.round_ties_even().non_nan()
        }

        fn f64_min(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.minimum(b).non_nan()
        }

        fn f64_max(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.maximum(b).non_nan()
        }

        fn f64_neg(&mut self, n: Ieee64) -> Ieee64 {
            -n
        }

        fn f64_abs(&mut self, n: Ieee64) -> Ieee64 {
            n.abs()
        }

        fn f64_copysign(&mut self, a: Ieee64, b: Ieee64) -> Ieee64 {
            a.copysign(b)
        }

        fn f128_min(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.minimum(b).non_nan()
        }

        fn f128_max(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.maximum(b).non_nan()
        }

        fn f128_neg(&mut self, n: Ieee128) -> Ieee128 {
            -n
        }

        fn f128_abs(&mut self, n: Ieee128) -> Ieee128 {
            n.abs()
        }

        fn f128_copysign(&mut self, a: Ieee128, b: Ieee128) -> Ieee128 {
            a.copysign(b)
        }

        /// The instruction that defines `val`, if it is an instruction
        /// result (as opposed to, e.g., a block parameter).
        #[inline]
        fn def_inst(&mut self, val: Value) -> Option<Inst> {
            self.dfg().value_def(val).inst()
        }
    };
}