Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/erf.c
/*
 * Double-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float64x2_t third;
  float64x2_t tenth, two_over_five, two_over_nine;
  double two_over_fifteen, two_over_fortyfive;
  float64x2_t max, shift;
  uint64x2_t max_idx;
#if WANT_SIMD_EXCEPT
  float64x2_t tiny_bound, huge_bound, scale_minus_one;
#endif
} data = {
  .max_idx = V2 (768),
  .third = V2 (0x1.5555555555556p-2), /* used to compute 2/3 and 1/6 too.  */
  .two_over_fifteen = 0x1.1111111111111p-3,
  .tenth = V2 (-0x1.999999999999ap-4),
  .two_over_five = V2 (-0x1.999999999999ap-2),
  .two_over_nine = V2 (-0x1.c71c71c71c71cp-3),
  .two_over_fortyfive = 0x1.6c16c16c16c17p-5,
  .max = V2 (5.9921875), /* 6 - 1/128.  */
  .shift = V2 (0x1p45),
#if WANT_SIMD_EXCEPT
  .huge_bound = V2 (0x1p205),
  .tiny_bound = V2 (0x1p-226),
  .scale_minus_one = V2 (0x1.06eba8214db69p-3), /* 2/sqrt(pi) - 1.0.  */
#endif
};

#define AbsMask 0x7fffffffffffffff

struct entry
{
  float64x2_t erf;
  float64x2_t scale;
};

static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  float64x2_t e1 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 0)].erf),
              e2 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 1)].erf);
  e.erf = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}

/* Double-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [
       + 1
       - r d
       + 1/3 (2 r^2 - 1) d^2
       - 1/6 (r (2 r^2 - 3)) d^3
       + 1/30 (4 r^4 - 12 r^2 + 3) d^4
       - 1/90 (4 r^4 - 20 r^2 + 15) d^5
     ]

   Maximum measured error: 2.29 ULP
   V_NAME_D1 (erf)(-0x1.00003c924e5d1p-8) got -0x1.20dd59132ebadp-8
                                          want -0x1.20dd59132ebafp-8.  */
float64x2_t VPCS_ATTR V_NAME_D1 (erf) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

  float64x2_t a = vabsq_f64 (x);
  /* Reciprocal conditions that do not catch NaNs so they can be used in BSLs
     to return expected results.  */
  uint64x2_t a_le_max = vcaleq_f64 (x, dat->max);
  uint64x2_t a_gt_max = vcagtq_f64 (x, dat->max);

#if WANT_SIMD_EXCEPT
  /* |x| huge or tiny.  */
  uint64x2_t cmp1 = vcgtq_f64 (a, dat->huge_bound);
  uint64x2_t cmp2 = vcltq_f64 (a, dat->tiny_bound);
  uint64x2_t cmp = vorrq_u64 (cmp1, cmp2);
  /* If any lanes are special, mask them with 1 for small x or 8 for large
     values and retain a copy of a to allow special case handler to fix
     special lanes later. This is only necessary if fenv exceptions are to be
     triggered correctly.  */
  if (unlikely (v_any_u64 (cmp)))
    {
      a = vbslq_f64 (cmp1, v_f64 (8.0), a);
      a = vbslq_f64 (cmp2, v_f64 (1.0), a);
    }
#endif

  /* Set r to multiple of 1/128 nearest to |x|.  */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);
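  /* Why the shift works (explanatory note, not from the original source):
     with shift = 0x1p45, the unit in the last place of z is
     2^45 * 2^-52 = 1/128, so the addition above rounds |x| to the nearest
     multiple of 1/128 for in-range lanes (|x| <= max; out-of-range indices
     are saturated below). The low mantissa bits of z then hold the integer
     round (128 * |x|), which the bit-pattern subtraction below reuses
     directly as the table index.  */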
  /* Lookup erf(r) and scale(r) in table, without shortcut for small values,
     but with saturated indices for large values and NaNs in order to avoid
     segfault.  */
  uint64x2_t i
      = vsubq_u64 (vreinterpretq_u64_f64 (z), vreinterpretq_u64_f64 (shift));
  i = vbslq_u64 (a_le_max, i, dat->max_idx);
  struct entry e = lookup (i);

  float64x2_t r = vsubq_f64 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * poly (r, d).  */
  float64x2_t d = vsubq_f64 (a, r);
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

  float64x2_t two_over_fifteen_and_fortyfive
      = vld1q_f64 (&dat->two_over_fifteen);

  /* poly (d, r) = 1 + p1(r) * d + p2(r) * d^2 + ... + p5(r) * d^5.  */
  float64x2_t p1 = r;
  float64x2_t p2
      = vfmsq_f64 (dat->third, r2, vaddq_f64 (dat->third, dat->third));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));
  float64x2_t p4 = vfmaq_laneq_f64 (dat->two_over_five, r2,
                                    two_over_fifteen_and_fortyfive, 0);
  p4 = vfmsq_f64 (dat->tenth, r2, p4);
  float64x2_t p5 = vfmaq_laneq_f64 (dat->two_over_nine, r2,
                                    two_over_fifteen_and_fortyfive, 1);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));

  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
  float64x2_t y = vfmaq_f64 (p34, d2, p5);
  y = vfmaq_f64 (p12, d2, y);

  y = vfmaq_f64 (e.erf, e.scale, vfmsq_f64 (d, d2, y));

  /* Solves the |x| = inf and NaN cases.  */
  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);

  /* Copy sign.  */
  y = vbslq_f64 (v_u64 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp2)))
    {
      /* Neutralise huge values of x before fixing small values.  */
      x = vbslq_f64 (cmp1, v_f64 (1.0), x);
      /* Fix tiny values that trigger spurious underflow.  */
      return vbslq_f64 (cmp2, vfmaq_f64 (x, dat->scale_minus_one, x), y);
    }
#endif
  return y;
}

TEST_SIG (V, D, 1, erf, -6.0, 6.0)
TEST_ULP (V_NAME_D1 (erf), 1.79)
/* WANT_SIMD_EXCEPT blocks miss some cases.  */
TEST_DISABLE_FENV (V_NAME_D1 (erf))
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, 5.9921875, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 5.9921875, inf, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, inf, 40000)
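
/* Usage sketch (editorial note, not part of the original file).
   V_NAME_D1 (erf) expands to the vector-PCS symbol for this routine,
   _ZGVnN2v_erf in the AArch64 vector function ABI naming this library
   follows, so a direct caller might look something like:

     #include <arm_neon.h>

     __attribute__ ((aarch64_vector_pcs))
     float64x2_t _ZGVnN2v_erf (float64x2_t);

     float64x2_t x = { 0.5, -1.25 };
     float64x2_t y = _ZGVnN2v_erf (x); // erf applied per lane

   Note also that the 1.79 ULP limit passed to TEST_ULP, plus the 0.5 ULP
   rounding allowance the test harness grants, appears to account for the
   2.29 ULP maximum measured error quoted in the header comment.  */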