Path: blob/main/contrib/arm-optimized-routines/math/aarch64/experimental/advsimd/erfinvf_5u.c
/*
 * Single-precision inverse error function (AdvSIMD variant).
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"
#include "v_poly_f32.h"
#include "v_logf_inline.h"

const static struct data
{
  /* We use P_N and Q_N to refer to arrays of coefficients, where P_N is the
     coeffs of the numerator in table N of Blair et al, and Q_N is the coeffs
     of the denominator. Coefficients are stored in various interleaved
     formats to allow for table-based (vector-to-vector) lookup.

     Plo is first two coefficients of P_10 and P_29 interleaved.
     PQ is third coeff of P_10 and first of Q_29 interleaved.
     Qhi is second and third coeffs of Q_29 interleaved.
     P29_3 is a homogenous vector with fourth coeff of P_29.

     P_10 and Q_10 are also stored in homogenous vectors to allow better
     memory access when no lanes are in a tail region.  */
  float Plo[4], PQ[4], Qhi[4];
  float32x4_t P29_3, tailshift;
  float32x4_t P_50[6], Q_50[2];
  float32x4_t P_10[3], Q_10[3];
  uint8_t idxhi[16], idxlo[16];
  struct v_logf_data logf_tbl;
} data = {
  .idxlo = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 },
  .idxhi = { 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11 },
  .P29_3 = V4 (0x1.b13626p-2),
  .tailshift = V4 (-0.87890625),
  .Plo = { -0x1.a31268p+3, -0x1.fc0252p-4, 0x1.ac9048p+4, 0x1.119d44p+0 },
  .PQ = { -0x1.293ff6p+3, -0x1.f59ee2p+0, -0x1.8265eep+3, -0x1.69952p-4 },
  .Qhi = { 0x1.ef5eaep+4, 0x1.c7b7d2p-1, -0x1.12665p+4, -0x1.167d7p+1 },
  .P_50 = { V4 (0x1.3d8948p-3), V4 (0x1.61f9eap+0), V4 (0x1.61c6bcp-1),
            V4 (-0x1.20c9f2p+0), V4 (0x1.5c704cp-1), V4 (-0x1.50c6bep-3) },
  .Q_50 = { V4 (0x1.3d7dacp-3), V4 (0x1.629e5p+0) },
  .P_10 = { V4 (-0x1.a31268p+3), V4 (0x1.ac9048p+4), V4 (-0x1.293ff6p+3) },
  .Q_10 = { V4 (-0x1.8265eep+3), V4 (0x1.ef5eaep+4), V4 (-0x1.12665p+4) },
  .logf_tbl = V_LOGF_CONSTANTS
};

static inline float32x4_t
special (float32x4_t x, const struct data *d)
{
  /* Note erfinvf(inf) should return NaN, and erfinvf(1) should return Inf.
     By using log here, instead of log1p, we return finite values for both
     these inputs, and values outside [-1, 1]. This is non-compliant, but is
     an acceptable optimisation at Ofast. To get correct behaviour for all
     finite values use the log1pf_inline helper on -abs(x) - note that
     erfinvf(inf) will still be finite.  */
  float32x4_t t = vdivq_f32 (
      v_f32 (1), vsqrtq_f32 (vnegq_f32 (v_logf_inline (
                     vsubq_f32 (v_f32 (1), vabsq_f32 (x)), &d->logf_tbl))));
  float32x4_t ts = vbslq_f32 (v_u32 (0x7fffffff), t, x);
  float32x4_t q = vfmaq_f32 (d->Q_50[0], vaddq_f32 (t, d->Q_50[1]), t);
  return vdivq_f32 (v_horner_5_f32 (t, d->P_50), vmulq_f32 (ts, q));
}

static inline float32x4_t
notails (float32x4_t x, const struct data *d)
{
  /* Shortcut when no input is in a tail region - no need to gather shift or
     coefficients.  */
  float32x4_t t = vfmaq_f32 (v_f32 (-0.5625), x, x);
  float32x4_t q = vaddq_f32 (t, d->Q_10[2]);
  q = vfmaq_f32 (d->Q_10[1], t, q);
  q = vfmaq_f32 (d->Q_10[0], t, q);

  return vdivq_f32 (vmulq_f32 (x, v_horner_2_f32 (t, d->P_10)), q);
}

static inline float32x4_t
lookup (float32x4_t tbl, uint8x16_t idx)
{
  return vreinterpretq_f32_u8 (vqtbl1q_u8 (vreinterpretq_u8_f32 (tbl), idx));
}
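/* Editorial note (not from the upstream source): how lookup and the
   interleaved tables cooperate. Plo holds { P10_0, P29_0, P10_1, P29_1 },
   and idxlo = { 0, 1, 2, 3, ... } makes every 32-bit lane select table
   bytes 0-3, i.e. P10_0. Because is_tail is all-ones in tail lanes, ANDing
   its bytes with 4 produces a per-byte offset of 4 in exactly those lanes,
   redirecting them to bytes 4-7, i.e. P29_0. A scalar sketch of what one
   lane computes (lane_is_tail is illustrative, not a name in this file):

     unsigned off = lane_is_tail ? 4 : 0;      // is_tail & 4, per byte
     float c0 = d->Plo[(d->idxlo[0] + off) / 4];  // P10_0 or P29_0

   A single TBL (vqtbl1q_u8) performs this byte select for all 16 bytes at
   once, which is why no per-lane gather load is needed.  */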
/* Vector implementation of Blair et al's rational approximation to inverse
   error function in single-precision. Worst-case error is 4.98 ULP, in the
   tail region:
   _ZGVnN4v_erfinvf(0x1.f7dbeep-1) got 0x1.b4793p+0
                                  want 0x1.b4793ap+0.  */
float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (erfinv) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* Calculate inverse error using the algorithm described in
     J. M. Blair, C. A. Edwards, and J. H. Johnson,
     "Rational Chebyshev approximations for the inverse of the error
     function", Math. Comp. 30, pp. 827--830 (1976).
     https://doi.org/10.1090/S0025-5718-1976-0421040-7.

     Algorithm has 3 intervals:
     - 'Normal' region [-0.75, 0.75]
     - Tail region [0.75, 0.9375] U [-0.9375, -0.75]
     - Extreme tail [-1, -0.9375] U [0.9375, 1]
     Normal and tail are both rational approximations of similar order on
     shifted input - these are typically performed in parallel using gather
     loads to obtain correct coefficients depending on interval.  */
  uint32x4_t is_tail = vcageq_f32 (x, v_f32 (0.75));
  uint32x4_t extreme_tail = vcageq_f32 (x, v_f32 (0.9375));

  if (unlikely (!v_any_u32 (is_tail)))
    /* Shortcut for when all lanes are in [-0.75, 0.75] - can avoid having to
       gather coefficients. If input is uniform in [-1, 1] then likelihood of
       this is 0.75^4 ~= 0.31.  */
    return notails (x, d);

  /* Select requisite shift depending on interval: polynomial is evaluated on
     x * x - shift.
     Normal shift = 0.5625
     Tail shift = 0.87890625.  */
  float32x4_t t
      = vfmaq_f32 (vbslq_f32 (is_tail, d->tailshift, v_f32 (-0.5625)), x, x);

  /* Calculate indexes for tbl: tbl is byte-wise, so:
     [0, 1, 2, 3, 4, 5, 6, ...] copies the vector.
     Add 4 * i to a group of 4 lanes to copy 32-bit lane i. Each vector stores
     two pairs of coeffs, so we need two idx vectors - one for each pair.  */
  uint8x16_t off = vandq_u8 (vreinterpretq_u8_u32 (is_tail), vdupq_n_u8 (4));
  uint8x16_t idx_lo = vaddq_u8 (vld1q_u8 (d->idxlo), off);
  uint8x16_t idx_hi = vaddq_u8 (vld1q_u8 (d->idxhi), off);

  /* Load the tables.  */
  float32x4_t plo = vld1q_f32 (d->Plo);
  float32x4_t pq = vld1q_f32 (d->PQ);
  float32x4_t qhi = vld1q_f32 (d->Qhi);

  /* Do the lookup (and calculate p3 by masking non-tail lanes).  */
  float32x4_t p3 = vreinterpretq_f32_u32 (
      vandq_u32 (is_tail, vreinterpretq_u32_f32 (d->P29_3)));
  float32x4_t p0 = lookup (plo, idx_lo), p1 = lookup (plo, idx_hi),
              p2 = lookup (pq, idx_lo), q0 = lookup (pq, idx_hi),
              q1 = lookup (qhi, idx_lo), q2 = lookup (qhi, idx_hi);

  float32x4_t p = vfmaq_f32 (p2, p3, t);
  p = vfmaq_f32 (p1, p, t);
  p = vfmaq_f32 (p0, p, t);
  p = vmulq_f32 (x, p);

  float32x4_t q = vfmaq_f32 (q1, vaddq_f32 (q2, t), t);
  q = vfmaq_f32 (q0, q, t);

  if (unlikely (v_any_u32 (extreme_tail)))
    /* At least one lane is in the extreme tail - if input is uniform in
       [-1, 1] the likelihood of this is ~0.23.  */
    return vbslq_f32 (extreme_tail, special (x, d), vdivq_f32 (p, q));

  return vdivq_f32 (p, q);
}

HALF_WIDTH_ALIAS_F1 (erfinv)

#if USE_MPFR
# warning Not generating tests for _ZGVnN4v_erfinvf, as MPFR has no suitable reference
#else
TEST_SIG (V, F, 1, erfinv, -0.99, 0.99)
TEST_DISABLE_FENV (V_NAME_F1 (erfinv))
TEST_ULP (V_NAME_F1 (erfinv), 4.49)
TEST_SYM_INTERVAL (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000)
/* Test with control lane in each interval.  */
TEST_CONTROL_VALUE (V_NAME_F1 (erfinv), 0.5)
TEST_CONTROL_VALUE (V_NAME_F1 (erfinv), 0.8)
TEST_CONTROL_VALUE (V_NAME_F1 (erfinv), 0.95)
#endif
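/* Editorial usage sketch (not part of the upstream file; the exported name
   _ZGVnN4v_erfinvf matches the #warning above but is build-dependent). The
   routine follows the AdvSIMD vector ABI, mapping erfinvf over four lanes:

     float32x4_t x = vdupq_n_f32 (0.5f);
     float32x4_t y = _ZGVnN4v_erfinvf (x);  // each lane ~ erfinv(0.5)
                                            //           ~ 0.476936

   As the comment in special notes, |x| >= 1 is out of domain, and this
   variant may return finite values there rather than Inf/NaN.  */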