Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/erff.c
/*
 * Single-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float32x4_t max, shift, third;
#if WANT_SIMD_EXCEPT
  float32x4_t tiny_bound, scale_minus_one;
#endif
} data = {
  .max = V4 (3.9375), /* 4 - 8/128.  */
  .shift = V4 (0x1p16f),
  .third = V4 (0x1.555556p-2f), /* 1/3.  */
#if WANT_SIMD_EXCEPT
  .tiny_bound = V4 (0x1p-62f),
  .scale_minus_one = V4 (0x1.06eba8p-3f), /* scale - 1.0.  */
#endif
};

#define AbsMask 0x7fffffff

struct entry
{
  float32x4_t erf;
  float32x4_t scale;
};

static inline struct entry
lookup (uint32x4_t i)
{
  struct entry e;
  float32x2_t t0 = vld1_f32 (&__v_erff_data.tab[vgetq_lane_u32 (i, 0)].erf);
  float32x2_t t1 = vld1_f32 (&__v_erff_data.tab[vgetq_lane_u32 (i, 1)].erf);
  float32x2_t t2 = vld1_f32 (&__v_erff_data.tab[vgetq_lane_u32 (i, 2)].erf);
  float32x2_t t3 = vld1_f32 (&__v_erff_data.tab[vgetq_lane_u32 (i, 3)].erf);
  float32x4_t e1 = vcombine_f32 (t0, t1);
  float32x4_t e2 = vcombine_f32 (t2, t3);
  e.erf = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}

/* Single-precision implementation of vector erf(x).
   Approximation based on a series expansion of erf(x) near r, where r is
   x rounded to the nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

     erf(x) ~ erf(r) + scale * d * [1 - r * d - 1/3 * d^2]

   Values of erf(r) and scale are read from lookup tables.
   For |x| > 3.9375, erf(|x|) rounds to 1.0f.

   Maximum error: 1.93 ULP
     _ZGVnN4v_erff(0x1.c373e6p-9) got 0x1.fd686cp-9
                                 want 0x1.fd6868p-9.  */
float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (erf) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62.  */
  uint32x4_t cmp = vcaltq_f32 (x, dat->tiny_bound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to
     allow the special-case handler to fix them later. This is only necessary
     if fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  float32x4_t a = vabsq_f32 (x);
  uint32x4_t a_gt_max = vcgtq_f32 (a, dat->max);

  /* Lookup erf(r) and scale(r) in tables, e.g. erf(r) is set to 0 and scale
     to 2/sqrt(pi) when x is reduced to r = 0.  */
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  uint32x4_t i
      = vsubq_u32 (vreinterpretq_u32_f32 (z), vreinterpretq_u32_f32 (shift));
  /* Clamp the index for lanes where a > max; those lanes are overwritten
     below.  */
  i = vminq_u32 (i, v_u32 (512));
  struct entry e = lookup (i);

  float32x4_t r = vsubq_f32 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * (1 - r * d - 1/3 * d^2).  */
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
  float32x4_t y = vfmaq_f32 (r, dat->third, d);
  y = vfmaq_f32 (e.erf, e.scale, vfmsq_f32 (d, d2, y));

  /* For |x| > max (including |x| = inf), erf(|x|) rounds to 1.0f.  */
  y = vbslq_f32 (a_gt_max, v_f32 (1.0f), y);

  /* Copy sign.  */
  y = vbslq_f32 (v_u32 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u32 (cmp)))
    return vbslq_f32 (cmp, vfmaq_f32 (xm, dat->scale_minus_one, xm), y);
#endif
  return y;
}

HALF_WIDTH_ALIAS_F1 (erf)

TEST_SIG (V, F, 1, erf, -4.0, 4.0)
TEST_ULP (V_NAME_F1 (erf), 1.43)
TEST_DISABLE_FENV_IF_NOT (V_NAME_F1 (erf), WANT_SIMD_EXCEPT)
TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, 3.9375, 40000)
TEST_SYM_INTERVAL (V_NAME_F1 (erf), 3.9375, inf, 40000)
TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, inf, 40000)
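
/* Editor's sketch (standalone program, not part of the routine above):
   demonstrates the shift trick used to derive the table index. Adding
   shift = 0x1p16f to a in [0, 3.9375] forces the sum to be rounded to a
   multiple of 1/128 (the ulp at 2^16), so subtracting the bit patterns of
   z and shift yields i = round (128 * a), and subtracting the shift back
   yields r = i / 128. The helper asuint and the loop bounds are
   illustrative choices, not taken from the original file.  */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t
asuint (float x)
{
  uint32_t u;
  memcpy (&u, &x, sizeof u);
  return u;
}

int
main (void)
{
  const float shift = 0x1p16f;
  for (float a = 0.0f; a < 4.0f; a += 0.3f)
    {
      float z = a + shift;
      /* Mirrors the vsubq_u32 of the reinterpreted z and shift above.  */
      uint32_t i = asuint (z) - asuint (shift);
      /* Mirrors r = vsubq_f32 (z, shift): a rounded to 1/128.  */
      float r = z - shift;
      printf ("a = %.6f  i = %3u  r = %.6f\n", a, i, r);
    }
  return 0;
}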
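
/* Editor's sketch (standalone program): the same series expansion evaluated
   in scalar code, with erf(r) and scale = 2/sqrt(pi) * exp(-r^2) computed
   directly instead of read from the __v_erff_data tables. erf_near_r is a
   hypothetical name used only for this illustration; link with -lm.  */
#include <math.h>
#include <stdio.h>

static float
erf_near_r (float x)
{
  /* Reduce x to the nearest multiple of 1/128, as the shift trick does.  */
  float r = roundf (x * 128.0f) / 128.0f;
  float d = x - r;
  /* 0x1.20dd76p+0f is 2/sqrt(pi) in single precision.  */
  float scale = 0x1.20dd76p+0f * expf (-r * r);
  /* erf(x) ~ erf(r) + scale * d * (1 - r * d - 1/3 * d^2).  */
  return erff (r) + scale * d * (1.0f - r * d - d * d / 3.0f);
}

int
main (void)
{
  for (float x = 0.05f; x < 3.9375f; x += 0.75f)
    printf ("x = %.4f  series = %.8f  erff = %.8f\n", x, erf_near_r (x),
            erff (x));
  return 0;
}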