Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/erfcf.c
/*
 * Single-precision vector erfc(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  uint32x4_t offset, table_scale;
  float32x4_t max, shift;
  float coeffs[4];
  float32x4_t third, two_over_five, tenth;
#if WANT_SIMD_EXCEPT
  float32x4_t uflow_bound;
#endif

} data = {
  /* Set an offset so the range of the index used for lookup is 644, and it can
     be clamped using a saturated add.  */
  .offset = V4 (0xb7fffd7b),	       /* 0xffffffff - asuint(shift) - 644.  */
  .table_scale = V4 (0x28000000 << 1), /* asuint (2^-47) << 1.  */
  .max = V4 (10.0625f),		       /* 10 + 1/16 = 644/64.  */
  .shift = V4 (0x1p17f),
  /* Store 1/3, 2/3 and 2/15 in a single register for use with indexed muls and
     fmas.  */
  .coeffs = { 0x1.555556p-2f, 0x1.555556p-1f, 0x1.111112p-3f, 0 },
  .third = V4 (0x1.555556p-2f),
  .two_over_five = V4 (-0x1.99999ap-2f),
  .tenth = V4 (-0x1.99999ap-4f),
#if WANT_SIMD_EXCEPT
  .uflow_bound = V4 (0x1.2639cp+3f),
#endif
};

#define TinyBound 0x41000000 /* 0x1p-62f << 1.  */
#define Thres 0xbe000000     /* asuint(infinity) << 1 - TinyBound.  */
#define Off 0xfffffd7b	     /* 0xffffffff - 644.  */

struct entry
{
  float32x4_t erfc;
  float32x4_t scale;
};

static inline struct entry
lookup (uint32x4_t i)
{
  struct entry e;
  float32x2_t t0
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 0) - Off].erfc);
  float32x2_t t1
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 1) - Off].erfc);
  float32x2_t t2
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 2) - Off].erfc);
  float32x2_t t3
      = vld1_f32 (&__v_erfcf_data.tab[vgetq_lane_u32 (i, 3) - Off].erfc);
  float32x4_t e1 = vcombine_f32 (t0, t1);
  float32x4_t e2 = vcombine_f32 (t2, t3);
  e.erfc = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}

#if WANT_SIMD_EXCEPT
static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
{
  return v_call_f32 (erfcf, x, y, cmp);
}
#endif

/* Optimized single-precision vector erfcf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/64.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erfc(x) ~ erfc(r) - scale * d * poly(r, d), with

   poly(r, d) = 1 - r d + (2/3 r^2 - 1/3) d^2 - r (1/3 r^2 - 1/2) d^3
	      + (2/15 r^4 - 2/5 r^2 + 1/10) d^4

   Values of erfc(r) and scale are read from lookup tables. Stored values
   are scaled to avoid hitting the subnormal range.

   Note that for x < 0, erfc(x) = 2.0 - erfc(-x).
   Maximum error: 1.63 ULP (~1.0 ULP for x < 0.0).
   _ZGVnN4v_erfcf(0x1.1dbf7ap+3) got 0x1.f51212p-120
				 want 0x1.f51216p-120.  */
NOINLINE VPCS_ATTR float32x4_t V_NAME_F1 (erfc) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62. Avoid fabs by left-shifting by 1.  */
  uint32x4_t ix = vreinterpretq_u32_f32 (x);
  uint32x4_t cmp = vcltq_u32 (vaddq_u32 (ix, ix), v_u32 (TinyBound));
  /* x >= ~9.19 (into subnormal case and uflow case). Comparison is done in
     integer domain to avoid raising exceptions in presence of nans.  */
  uint32x4_t uflow = vcgeq_s32 (vreinterpretq_s32_f32 (x),
				vreinterpretq_s32_f32 (dat->uflow_bound));
  cmp = vorrq_u32 (cmp, uflow);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 0 and retain a copy of x to allow
     special case handler to fix special lanes later. This is only necessary if
     fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = v_zerofy_f32 (x, cmp);
#endif

  float32x4_t a = vabsq_f32 (x);
  a = vminq_f32 (a, dat->max);

  /* Lookup erfc(r) and scale(r) in tables, e.g. set erfc(r) to 0 and scale to
     2/sqrt(pi), when x reduced to r = 0.  */
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  /* Clamp index to a range of 644. A naive approach would use a subtract and
     min. Instead we offset the table address and the index, then use a
     saturating add.  */
  uint32x4_t i = vqaddq_u32 (vreinterpretq_u32_f32 (z), dat->offset);

  struct entry e = lookup (i);

  /* erfc(x) ~ erfc(r) - scale * d * poly(r, d).  */
  float32x4_t r = vsubq_f32 (z, shift);
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
  float32x4_t r2 = vmulq_f32 (r, r);

  float32x4_t p1 = r;
  float32x4_t coeffs = vld1q_f32 (dat->coeffs);
  float32x4_t p2 = vfmsq_laneq_f32 (dat->third, r2, coeffs, 1);
  float32x4_t p3
      = vmulq_f32 (r, vfmaq_laneq_f32 (v_f32 (-0.5), r2, coeffs, 0));
  float32x4_t p4 = vfmaq_laneq_f32 (dat->two_over_five, r2, coeffs, 2);
  p4 = vfmsq_f32 (dat->tenth, r2, p4);

  float32x4_t y = vfmaq_f32 (p3, d, p4);
  y = vfmaq_f32 (p2, d, y);
  y = vfmaq_f32 (p1, d, y);
  y = vfmsq_f32 (e.erfc, e.scale, vfmsq_f32 (d, d2, y));

  /* Offset equals 2.0f if sign, else 0.0f.  */
  uint32x4_t sign = vshrq_n_u32 (vreinterpretq_u32_f32 (x), 31);
  float32x4_t off = vreinterpretq_f32_u32 (vshlq_n_u32 (sign, 30));
  /* Copy sign and scale back in a single fma. Since the bit patterns do not
     overlap, then logical or and addition are equivalent here.  */
  float32x4_t fac = vreinterpretq_f32_u32 (
      vsraq_n_u32 (vshlq_n_u32 (sign, 31), dat->table_scale, 1));

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u32 (cmp)))
    return special_case (xm, vfmaq_f32 (off, fac, y), cmp);
#endif

  return vfmaq_f32 (off, fac, y);
}

HALF_WIDTH_ALIAS_F1 (erfc)

TEST_SIG (V, F, 1, erfc, -4.0, 10.0)
TEST_DISABLE_FENV_IF_NOT (V_NAME_F1 (erfc), WANT_SIMD_EXCEPT)
TEST_ULP (V_NAME_F1 (erfc), 1.14)
TEST_SYM_INTERVAL (V_NAME_F1 (erfc), 0, 0x1p-26, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), 0x1p-26, 10.0625, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), -0x1p-26, -4.0, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), 10.0625, inf, 40000)
TEST_INTERVAL (V_NAME_F1 (erfc), -4.0, -inf, 40000)
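
/* Illustrative usage sketch (not part of the upstream file): a minimal
   standalone caller invoking the vector routine through its AArch64
   vector-PCS symbol, whose mangled name (_ZGVnN4v_erfcf) appears in the
   worst-case error report above. The declaration below is an assumption:
   it presumes a GCC/Clang toolchain supporting the aarch64_vector_pcs
   attribute and a build of this library to link against. Guarded with
   #if 0 so it has no effect on this translation unit.  */
#if 0
#include <arm_neon.h>
#include <stdio.h>

/* Hypothetical external declaration for the 4-lane single-precision
   variant; the real prototype is generated via V_NAME_F1 (erfc).  */
__attribute__ ((aarch64_vector_pcs)) float32x4_t
_ZGVnN4v_erfcf (float32x4_t);

int
main (void)
{
  /* One lane each of negative, zero, moderate and large input.  */
  const float in[4] = { -1.0f, 0.0f, 1.0f, 4.0f };
  float out[4];
  vst1q_f32 (out, _ZGVnN4v_erfcf (vld1q_f32 (in)));
  for (int i = 0; i < 4; i++)
    /* Expect erfc(0) == 1 and erfc(-x) == 2 - erfc(x), per the comment
       in the implementation above.  */
    printf ("erfc(% .1f) ~ %a\n", (double) in[i], (double) out[i]);
  return 0;
}
#endif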