Path: blob/main/contrib/arm-optimized-routines/math/aarch64/sve/pow.c
/*
 * Double-precision SVE pow(x, y) function.
 *
 * Copyright (c) 2022-2025, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "test_sig.h"
#include "test_defs.h"

/* This version shares a similar algorithm with the AOR scalar pow.

   The core computation consists in computing pow(x, y) as

     exp (y * log (x)).

   The algorithms for exp and log are very similar to scalar exp and log.
   The log relies on table lookup for 3 variables and an order-8 polynomial.
   It returns a high and a low contribution that are then passed to the exp,
   to minimise the loss of accuracy in both routines.
   The exp is based on 8-bit table lookup for scale and an order-4 polynomial.
   The SVE algorithm drops the tail in the exp computation at the price of
   a lower accuracy, slightly above 1ULP.
   The SVE algorithm also drops the special treatment of small (< 2^-65) and
   large (> 2^63) finite values of |y|, as they only affect non-round-to-
   nearest modes.

   Maximum measured error is 1.04 ULPs:
   SV_NAME_D2 (pow) (0x1.3d2d45bc848acp+63, -0x1.a48a38b40cd43p-12)
     got 0x1.f7116284221fcp-1
    want 0x1.f7116284221fdp-1. */

/* Data is defined in v_pow_log_data.c. */
#define N_LOG (1 << V_POW_LOG_TABLE_BITS)
#define Off 0x3fe6955500000000

/* Data is defined in v_pow_exp_data.c. */
#define N_EXP (1 << V_POW_EXP_TABLE_BITS)
#define SignBias (0x800 << V_POW_EXP_TABLE_BITS)
#define SmallExp 0x3c9 /* top12(0x1p-54). */
#define BigExp 0x408   /* top12(512.). */
#define ThresExp 0x03f /* BigExp - SmallExp. */
#define HugeExp 0x409  /* top12(1024.). */

/* Constants associated with pow. */
#define SmallBoundX 0x1p-126
#define SmallPowX 0x001 /* top12(0x1p-126). */
#define BigPowX 0x7ff   /* top12(INFINITY). */
#define ThresPowX 0x7fe /* BigPowX - SmallPowX. */
#define SmallPowY 0x3be /* top12(0x1.e7b6p-65). */
#define BigPowY 0x43e   /* top12(0x1.749p62). */
#define ThresPowY 0x080 /* BigPowY - SmallPowY. */

static const struct data
{
  double log_c0, log_c2, log_c4, log_c6, ln2_hi, ln2_lo;
  double log_c1, log_c3, log_c5, off;
  double n_over_ln2, exp_c2, ln2_over_n_hi, ln2_over_n_lo;
  double exp_c0, exp_c1;
} data = {
  .log_c0 = -0x1p-1,
  .log_c1 = -0x1.555555555556p-1,
  .log_c2 = 0x1.0000000000006p-1,
  .log_c3 = 0x1.999999959554ep-1,
  .log_c4 = -0x1.555555529a47ap-1,
  .log_c5 = -0x1.2495b9b4845e9p0,
  .log_c6 = 0x1.0002b8b263fc3p0,
  .off = Off,
  .exp_c0 = 0x1.fffffffffffd4p-2,
  .exp_c1 = 0x1.5555571d6ef9p-3,
  .exp_c2 = 0x1.5555576a5adcep-5,
  .ln2_hi = 0x1.62e42fefa3800p-1,
  .ln2_lo = 0x1.ef35793c76730p-45,
  .n_over_ln2 = 0x1.71547652b82fep0 * N_EXP,
  .ln2_over_n_hi = 0x1.62e42fefc0000p-9,
  .ln2_over_n_lo = -0x1.c610ca86c3899p-45,
};

/* Check if x is an integer. */
static inline svbool_t
sv_isint (svbool_t pg, svfloat64_t x)
{
  return svcmpeq (pg, svrintz_z (pg, x), x);
}

/* Check if x is real, not integer valued. */
static inline svbool_t
sv_isnotint (svbool_t pg, svfloat64_t x)
{
  return svcmpne (pg, svrintz_z (pg, x), x);
}

/* Check if x is an odd integer. */
static inline svbool_t
sv_isodd (svbool_t pg, svfloat64_t x)
{
  svfloat64_t y = svmul_x (svptrue_b64 (), x, 0.5);
  return sv_isnotint (pg, y);
}

/* Returns 0 if not int, 1 if odd int, 2 if even int. The argument is
   the bit representation of a non-zero finite floating-point value. */
static inline int
checkint (uint64_t iy)
{
  int e = iy >> 52 & 0x7ff;
  if (e < 0x3ff)
    return 0;
  if (e > 0x3ff + 52)
    return 2;
  if (iy & ((1ULL << (0x3ff + 52 - e)) - 1))
    return 0;
  if (iy & (1ULL << (0x3ff + 52 - e)))
    return 1;
  return 2;
}
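/* Worked examples for checkint (editorial note, illustrative only): 3.0 is
   0x4008000000000000 with e = 0x400, so the lowest integer-part bit is bit
   0x3ff + 52 - e = 51; the fraction below it is zero and bit 51 is set,
   hence 1 (odd). 4.0 is 0x4010000000000000 with an all-zero fraction below
   bit 50, hence 2 (even). 2.5 is 0x4004000000000000 and keeps bit 50 set
   inside the fraction mask, hence 0 (not an integer). The vector helpers
   above reach the same classification via rintz: 3.0 * 0.5 = 1.5 is not
   its own truncation (odd), while 4.0 * 0.5 = 2.0 is (even). */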
/* Top 12 bits (sign and exponent of each double float lane). */
static inline svuint64_t
sv_top12 (svfloat64_t x)
{
  return svlsr_x (svptrue_b64 (), svreinterpret_u64 (x), 52);
}

/* Returns 1 if input is the bit representation of 0, infinity or nan. */
static inline int
zeroinfnan (uint64_t i)
{
  return 2 * i - 1 >= 2 * asuint64 (INFINITY) - 1;
}

/* Returns 1 if input is the bit representation of 0, infinity or nan. */
static inline svbool_t
sv_zeroinfnan (svbool_t pg, svuint64_t i)
{
  return svcmpge (pg, svsub_x (pg, svadd_x (pg, i, i), 1),
		  2 * asuint64 (INFINITY) - 1);
}
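/* How the 2*i - 1 trick works (editorial note, illustrative only): doubling
   i (here via i + i) shifts out the sign bit, so +x and -x compare alike;
   the unsigned wrap-around of 2*0 - 1 then sends the encodings of +/-0 to
   UINT64_MAX. A single unsigned comparison against
   2 * asuint64 (INFINITY) - 1 therefore catches 0, inf and nan together,
   while every finite non-zero encoding falls below the threshold. */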
/* Handle cases that may overflow or underflow when computing the result that
   is scale*(1+TMP) without intermediate rounding. The bit representation of
   scale is in SBITS, however it has a computed exponent that may have
   overflown into the sign bit so that needs to be adjusted before using it as
   a double. (int32_t)KI is the k used in the argument reduction and exponent
   adjustment of scale, positive k here means the result may overflow and
   negative k means the result may underflow. */
static inline double
specialcase (double tmp, uint64_t sbits, uint64_t ki)
{
  double scale;
  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by <= 460. */
      sbits -= 1009ull << 52;
      scale = asdouble (sbits);
      return 0x1p1009 * (scale + scale * tmp);
    }
  /* k < 0, need special care in the subnormal range. */
  sbits += 1022ull << 52;
  /* Note: sbits is signed scale. */
  scale = asdouble (sbits);
  double y = scale + scale * tmp;
  return 0x1p-1022 * y;
}

/* Scalar fallback for special cases of SVE pow's exp. */
static inline svfloat64_t
sv_call_specialcase (svfloat64_t x1, svuint64_t u1, svuint64_t u2,
		     svfloat64_t y, svbool_t cmp)
{
  svbool_t p = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, p))
    {
      double sx1 = svclastb (p, 0, x1);
      uint64_t su1 = svclastb (p, 0, u1);
      uint64_t su2 = svclastb (p, 0, u2);
      double elem = specialcase (sx1, su1, su2);
      svfloat64_t y2 = sv_f64 (elem);
      y = svsel (p, y2, y);
      p = svpnext_b64 (cmp, p);
    }
  return y;
}

/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
   additional 15 bits precision. IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent. */
static inline svfloat64_t
sv_log_inline (svbool_t pg, svuint64_t ix, svfloat64_t *tail,
	       const struct data *d)
{
  /* x = 2^k z; where z is in range [Off,2*Off) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center. */
  svuint64_t tmp = svsub_x (pg, ix, d->off);
  svuint64_t i = svand_x (pg, svlsr_x (pg, tmp, 52 - V_POW_LOG_TABLE_BITS),
			  sv_u64 (N_LOG - 1));
  svint64_t k = svasr_x (pg, svreinterpret_s64 (tmp), 52);
  svuint64_t iz = svsub_x (pg, ix, svlsl_x (pg, svreinterpret_u64 (k), 52));
  svfloat64_t z = svreinterpret_f64 (iz);
  svfloat64_t kd = svcvt_f64_x (pg, k);

  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1). */
  /* SVE lookup requires 3 separate lookup tables, as opposed to the scalar
     version that uses an array of structures. We also do the lookup earlier
     in the code to make sure it finishes as early as possible. */
  svfloat64_t invc = svld1_gather_index (pg, __v_pow_log_data.invc, i);
  svfloat64_t logc = svld1_gather_index (pg, __v_pow_log_data.logc, i);
  svfloat64_t logctail = svld1_gather_index (pg, __v_pow_log_data.logctail, i);

  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable. */
  svfloat64_t r = svmad_x (pg, z, invc, -1.0);
  /* k*Ln2 + log(c) + r. */

  svfloat64_t ln2_hilo = svld1rq_f64 (svptrue_b64 (), &d->ln2_hi);
  svfloat64_t t1 = svmla_lane_f64 (logc, kd, ln2_hilo, 0);
  svfloat64_t t2 = svadd_x (pg, t1, r);
  svfloat64_t lo1 = svmla_lane_f64 (logctail, kd, ln2_hilo, 1);
  svfloat64_t lo2 = svadd_x (pg, svsub_x (pg, t1, t2), r);

  /* Evaluation is optimized assuming superscalar pipelined execution. */

  svfloat64_t log_c02 = svld1rq_f64 (svptrue_b64 (), &d->log_c0);
  svfloat64_t ar = svmul_lane_f64 (r, log_c02, 0);
  svfloat64_t ar2 = svmul_x (svptrue_b64 (), r, ar);
  svfloat64_t ar3 = svmul_x (svptrue_b64 (), r, ar2);
  /* k*Ln2 + log(c) + r + A[0]*r*r. */
  svfloat64_t hi = svadd_x (pg, t2, ar2);
  svfloat64_t lo3 = svmls_x (pg, ar2, ar, r);
  svfloat64_t lo4 = svadd_x (pg, svsub_x (pg, t2, hi), ar2);
  /* p = log1p(r) - r - A[0]*r*r. */
  /* p = (ar3 * (A[1] + r * A[2] + ar2 * (A[3] + r * A[4] + ar2 * (A[5] + r *
     A[6])))). */

  svfloat64_t log_c46 = svld1rq_f64 (svptrue_b64 (), &d->log_c4);
  svfloat64_t a56 = svmla_lane_f64 (sv_f64 (d->log_c5), r, log_c46, 1);
  svfloat64_t a34 = svmla_lane_f64 (sv_f64 (d->log_c3), r, log_c46, 0);
  svfloat64_t a12 = svmla_lane_f64 (sv_f64 (d->log_c1), r, log_c02, 1);
  svfloat64_t p = svmla_x (pg, a34, ar2, a56);
  p = svmla_x (pg, a12, ar2, p);
  p = svmul_x (svptrue_b64 (), ar3, p);
  svfloat64_t lo = svadd_x (
      pg, svadd_x (pg, svsub_x (pg, svadd_x (pg, lo1, lo2), lo3), lo4), p);
  svfloat64_t y = svadd_x (pg, hi, lo);
  *tail = svadd_x (pg, svsub_x (pg, hi, y), lo);
  return y;
}
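/* The hi/lo pairs above follow the usual fast two-sum pattern (editorial
   sketch, assuming round-to-nearest and |a| >= |b|): if hi = a + b rounds,
   then lo = (a - hi) + b recovers the rounding error exactly, e.g.
   lo2 = (t1 - t2) + r for t2 = t1 + r, and lo4 = (t2 - hi) + ar2 for
   hi = t2 + ar2. Accumulating these error terms into lo is what gives the
   returned couple (y, *tail) its roughly 15 extra bits of precision. */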
static inline svfloat64_t
sv_exp_core (svbool_t pg, svfloat64_t x, svfloat64_t xtail,
	     svuint64_t sign_bias, svfloat64_t *tmp, svuint64_t *sbits,
	     svuint64_t *ki, const struct data *d)
{
  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N]. */
  svfloat64_t n_over_ln2_and_c2 = svld1rq_f64 (svptrue_b64 (), &d->n_over_ln2);
  svfloat64_t z = svmul_lane_f64 (x, n_over_ln2_and_c2, 0);
  /* z - kd is in [-1, 1] in non-nearest rounding modes. */
  svfloat64_t kd = svrinta_x (pg, z);
  *ki = svreinterpret_u64 (svcvt_s64_x (pg, kd));

  svfloat64_t ln2_over_n_hilo
      = svld1rq_f64 (svptrue_b64 (), &d->ln2_over_n_hi);
  svfloat64_t r = x;
  r = svmls_lane_f64 (r, kd, ln2_over_n_hilo, 0);
  r = svmls_lane_f64 (r, kd, ln2_over_n_hilo, 1);
  /* The code assumes 2^-200 < |xtail| < 2^-8/N. */
  r = svadd_x (pg, r, xtail);
  /* 2^(k/N) ~= scale. */
  svuint64_t idx = svand_x (pg, *ki, N_EXP - 1);
  svuint64_t top
      = svlsl_x (pg, svadd_x (pg, *ki, sign_bias), 52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N. */
  *sbits = svld1_gather_index (pg, __v_pow_exp_data.sbits, idx);
  *sbits = svadd_x (pg, *sbits, top);
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1). */
  svfloat64_t r2 = svmul_x (svptrue_b64 (), r, r);
  *tmp = svmla_lane_f64 (sv_f64 (d->exp_c1), r, n_over_ln2_and_c2, 1);
  *tmp = svmla_x (pg, sv_f64 (d->exp_c0), r, *tmp);
  *tmp = svmla_x (pg, r, r2, *tmp);
  svfloat64_t scale = svreinterpret_f64 (*sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma. */
  z = svmla_x (pg, scale, scale, *tmp);
  return z;
}

/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
   The sign_bias argument is SignBias or 0 and sets the sign to -1 or 1. */
static inline svfloat64_t
sv_exp_inline (svbool_t pg, svfloat64_t x, svfloat64_t xtail,
	       svuint64_t sign_bias, const struct data *d)
{
  /* 3 types of special cases: tiny (uflow and spurious uflow), huge (oflow)
     and other cases of large values of x (scale * (1 + TMP) oflow). */
  svuint64_t abstop = svand_x (pg, sv_top12 (x), 0x7ff);
  /* |x| is large (|x| >= 512) or tiny (|x| <= 0x1p-54). */
  svbool_t uoflow = svcmpge (pg, svsub_x (pg, abstop, SmallExp), ThresExp);

  svfloat64_t tmp;
  svuint64_t sbits, ki;
  if (unlikely (svptest_any (pg, uoflow)))
    {
      svfloat64_t z
	  = sv_exp_core (pg, x, xtail, sign_bias, &tmp, &sbits, &ki, d);

      /* |x| is tiny (|x| <= 0x1p-54). */
      svbool_t uflow
	  = svcmpge (pg, svsub_x (pg, abstop, SmallExp), 0x80000000);
      uflow = svand_z (pg, uoflow, uflow);
      /* |x| is huge (|x| >= 1024). */
      svbool_t oflow = svcmpge (pg, abstop, HugeExp);
      oflow = svand_z (pg, uoflow, svbic_z (pg, oflow, uflow));

      /* For large |x| values (512 < |x| < 1024) scale * (1 + TMP) can overflow
	 or underflow. */
      svbool_t special = svbic_z (pg, uoflow, svorr_z (pg, uflow, oflow));

      /* Update result with special and large cases. */
      z = sv_call_specialcase (tmp, sbits, ki, z, special);

      /* Handle underflow and overflow. */
      svbool_t x_is_neg = svcmplt (pg, x, 0);
      svuint64_t sign_mask
	  = svlsl_x (pg, sign_bias, 52 - V_POW_EXP_TABLE_BITS);
      svfloat64_t res_uoflow
	  = svsel (x_is_neg, sv_f64 (0.0), sv_f64 (INFINITY));
      res_uoflow = svreinterpret_f64 (
	  svorr_x (pg, svreinterpret_u64 (res_uoflow), sign_mask));
      /* Avoid spurious underflow for tiny x. */
      svfloat64_t res_spurious_uflow
	  = svreinterpret_f64 (svorr_x (pg, sign_mask, 0x3ff0000000000000));

      z = svsel (oflow, res_uoflow, z);
      z = svsel (uflow, res_spurious_uflow, z);
      return z;
    }

  return sv_exp_core (pg, x, xtail, sign_bias, &tmp, &sbits, &ki, d);
}

static inline double
pow_sc (double x, double y)
{
  uint64_t ix = asuint64 (x);
  uint64_t iy = asuint64 (y);
  /* Special cases: |x| or |y| is 0, inf or nan. */
  if (unlikely (zeroinfnan (iy)))
    {
      if (2 * iy == 0)
	return issignaling_inline (x) ? x + y : 1.0;
      if (ix == asuint64 (1.0))
	return issignaling_inline (y) ? x + y : 1.0;
      if (2 * ix > 2 * asuint64 (INFINITY) || 2 * iy > 2 * asuint64 (INFINITY))
	return x + y;
      if (2 * ix == 2 * asuint64 (1.0))
	return 1.0;
      if ((2 * ix < 2 * asuint64 (1.0)) == !(iy >> 63))
	return 0.0; /* |x|<1 && y==inf or |x|>1 && y==-inf. */
      return y * y;
    }
  if (unlikely (zeroinfnan (ix)))
    {
      double_t x2 = x * x;
      if (ix >> 63 && checkint (iy) == 1)
	x2 = -x2;
      return (iy >> 63) ? 1 / x2 : x2;
    }
  return x;
}
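/* Examples of the scalar fallback (editorial note, illustrative only):
   pow (-0.0, 3.0) reaches the zeroinfnan (ix) branch with ix >> 63 set and
   checkint (iy) == 1, so x2 = 0.0 is negated and -0.0 is returned;
   pow (0.0, -2.0) takes the same branch and returns 1 / x2 = +inf. The
   trailing return x is only a fallthrough for lanes that are not actually
   special. */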
svfloat64_t SV_NAME_D2 (pow) (svfloat64_t x, svfloat64_t y, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  /* This preamble handles special case conditions used in the final scalar
     fallbacks. It also updates ix and sign_bias, which are used in the core
     computation too, i.e., exp (y * log (x)). */
  svuint64_t vix0 = svreinterpret_u64 (x);
  svuint64_t viy0 = svreinterpret_u64 (y);

  /* Negative x cases. */
  svbool_t xisneg = svcmplt (pg, x, 0);

  /* Set sign_bias and ix depending on sign of x and nature of y. */
  svbool_t yint_or_xpos = pg;
  svuint64_t sign_bias = sv_u64 (0);
  svuint64_t vix = vix0;
  if (unlikely (svptest_any (pg, xisneg)))
    {
      /* Determine nature of y. */
      yint_or_xpos = sv_isint (xisneg, y);
      svbool_t yisodd_xisneg = sv_isodd (xisneg, y);
      /* ix set to abs(ix) if y is integer. */
      vix = svand_m (yint_or_xpos, vix0, 0x7fffffffffffffff);
      /* Set to SignBias if x is negative and y is odd. */
      sign_bias = svsel (yisodd_xisneg, sv_u64 (SignBias), sv_u64 (0));
    }

  /* Small cases of x: |x| < 0x1p-126. */
  svbool_t xsmall = svaclt (yint_or_xpos, x, SmallBoundX);
  if (unlikely (svptest_any (yint_or_xpos, xsmall)))
    {
      /* Normalize subnormal x so exponent becomes negative. */
      svuint64_t vtopx = svlsr_x (svptrue_b64 (), vix, 52);
      svbool_t topx_is_null = svcmpeq (xsmall, vtopx, 0);

      svuint64_t vix_norm = svreinterpret_u64 (svmul_m (xsmall, x, 0x1p52));
      vix_norm = svand_m (xsmall, vix_norm, 0x7fffffffffffffff);
      vix_norm = svsub_m (xsmall, vix_norm, 52ULL << 52);
      vix = svsel (topx_is_null, vix_norm, vix);
    }

  /* y_hi = log(ix, &y_lo). */
  svfloat64_t vlo;
  svfloat64_t vhi = sv_log_inline (yint_or_xpos, vix, &vlo, d);

  /* z = exp(y_hi, y_lo, sign_bias). */
  svfloat64_t vehi = svmul_x (svptrue_b64 (), y, vhi);
  svfloat64_t vemi = svmls_x (yint_or_xpos, vehi, y, vhi);
  svfloat64_t velo = svnmls_x (yint_or_xpos, vemi, y, vlo);
  svfloat64_t vz = sv_exp_inline (yint_or_xpos, vehi, velo, sign_bias, d);

  /* Cases of finite y and finite negative x. */
  vz = svsel (yint_or_xpos, vz, sv_f64 (__builtin_nan ("")));

  /* Special cases of x or y: zero, inf and nan. */
  svbool_t xspecial = sv_zeroinfnan (svptrue_b64 (), vix0);
  svbool_t yspecial = sv_zeroinfnan (svptrue_b64 (), viy0);
  svbool_t special = svorr_z (svptrue_b64 (), xspecial, yspecial);

  /* Cases of zero/inf/nan x or y. */
  if (unlikely (svptest_any (svptrue_b64 (), special)))
    vz = sv_call2_f64 (pow_sc, x, y, vz, special);

  return vz;
}
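/* Minimal usage sketch (editorial note; assumes the usual AOR convention in
   which SV_NAME_D2 (pow) expands to the SVE vector ABI symbol
   _ZGVsMxvv_pow):

     svfloat64_t vx = svdup_f64 (2.0);
     svfloat64_t vy = svdup_f64 (10.0);
     svfloat64_t vz = _ZGVsMxvv_pow (vx, vy, svptrue_b64 ());
     (every active lane now holds 1024.0)  */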
TEST_SIG (SV, D, 2, pow)
TEST_ULP (SV_NAME_D2 (pow), 0.55)
TEST_DISABLE_FENV (SV_NAME_D2 (pow))
/* Wide intervals spanning the whole domain but shared between x and y. */
#define SV_POW_INTERVAL2(xlo, xhi, ylo, yhi, n)                               \
  TEST_INTERVAL2 (SV_NAME_D2 (pow), xlo, xhi, ylo, yhi, n)                    \
  TEST_INTERVAL2 (SV_NAME_D2 (pow), xlo, xhi, -ylo, -yhi, n)                  \
  TEST_INTERVAL2 (SV_NAME_D2 (pow), -xlo, -xhi, ylo, yhi, n)                  \
  TEST_INTERVAL2 (SV_NAME_D2 (pow), -xlo, -xhi, -ylo, -yhi, n)
#define EXPAND(str) str##000000000
#define SHL52(str) EXPAND (str)
SV_POW_INTERVAL2 (0, SHL52 (SmallPowX), 0, inf, 40000)
SV_POW_INTERVAL2 (SHL52 (SmallPowX), SHL52 (BigPowX), 0, inf, 40000)
SV_POW_INTERVAL2 (SHL52 (BigPowX), inf, 0, inf, 40000)
SV_POW_INTERVAL2 (0, inf, 0, SHL52 (SmallPowY), 40000)
SV_POW_INTERVAL2 (0, inf, SHL52 (SmallPowY), SHL52 (BigPowY), 40000)
SV_POW_INTERVAL2 (0, inf, SHL52 (BigPowY), inf, 40000)
SV_POW_INTERVAL2 (0, inf, 0, inf, 1000)
/* x~1 or y~1. */
SV_POW_INTERVAL2 (0x1p-1, 0x1p1, 0x1p-10, 0x1p10, 10000)
SV_POW_INTERVAL2 (0x1.ep-1, 0x1.1p0, 0x1p8, 0x1p16, 10000)
SV_POW_INTERVAL2 (0x1p-500, 0x1p500, 0x1p-1, 0x1p1, 10000)
/* around estimated argmaxs of ULP error. */
SV_POW_INTERVAL2 (0x1p-300, 0x1p-200, 0x1p-20, 0x1p-10, 10000)
SV_POW_INTERVAL2 (0x1p50, 0x1p100, 0x1p-20, 0x1p-10, 10000)
/* x is negative, y is odd or even integer, or y is real not integer. */
TEST_INTERVAL2 (SV_NAME_D2 (pow), -0.0, -10.0, 3.0, 3.0, 10000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), -0.0, -10.0, 4.0, 4.0, 10000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), -0.0, -10.0, 0.0, 10.0, 10000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), 0.0, 10.0, -0.0, -10.0, 10000)
/* |x| is inf, y is odd or even integer, or y is real not integer. */
SV_POW_INTERVAL2 (inf, inf, 0.5, 0.5, 1)
SV_POW_INTERVAL2 (inf, inf, 1.0, 1.0, 1)
SV_POW_INTERVAL2 (inf, inf, 2.0, 2.0, 1)
SV_POW_INTERVAL2 (inf, inf, 3.0, 3.0, 1)
/* 0.0^y. */
SV_POW_INTERVAL2 (0.0, 0.0, 0.0, 0x1p120, 1000)
/* 1.0^y. */
TEST_INTERVAL2 (SV_NAME_D2 (pow), 1.0, 1.0, 0.0, 0x1p-50, 1000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), 1.0, 1.0, 0x1p-50, 1.0, 1000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), 1.0, 1.0, 1.0, 0x1p100, 1000)
TEST_INTERVAL2 (SV_NAME_D2 (pow), 1.0, 1.0, -1.0, -0x1p120, 1000)
CLOSE_SVE_ATTR
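/* Note on the harness above (editorial): each SV_POW_INTERVAL2 expands into
   four TEST_INTERVAL2 entries covering all four sign combinations of the x
   and y ranges, and SHL52 token-pastes trailing zeros onto a top12 constant
   (e.g. SmallPowX) to widen it into the hex bound consumed by the ULP
   test runner. */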