GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/erf.c
/*
 * Double-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float64x2_t third;
  float64x2_t tenth, two_over_five, two_over_nine;
  double two_over_fifteen, two_over_fortyfive;
  float64x2_t max, shift;
  uint64x2_t max_idx;
#if WANT_SIMD_EXCEPT
  float64x2_t tiny_bound, huge_bound, scale_minus_one;
#endif
} data = {
  .max_idx = V2 (768),
  .third = V2 (0x1.5555555555556p-2), /* used to compute 2/3 and 1/6 too. */
  .two_over_fifteen = 0x1.1111111111111p-3,
  .tenth = V2 (-0x1.999999999999ap-4),
  .two_over_five = V2 (-0x1.999999999999ap-2),
  .two_over_nine = V2 (-0x1.c71c71c71c71cp-3),
  .two_over_fortyfive = 0x1.6c16c16c16c17p-5,
  .max = V2 (5.9921875), /* 6 - 1/128. */
  .shift = V2 (0x1p45),
#if WANT_SIMD_EXCEPT
  .huge_bound = V2 (0x1p205),
  .tiny_bound = V2 (0x1p-226),
  .scale_minus_one = V2 (0x1.06eba8214db69p-3), /* 2/sqrt(pi) - 1.0. */
#endif
};
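
/* two_over_fifteen and two_over_fortyfive are kept as adjacent scalar doubles
   so that a single vld1q_f64 can load both into one vector, whose lanes are
   then consumed by vfmaq_laneq_f64 in the polynomial below. */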

#define AbsMask 0x7fffffffffffffff

struct entry
{
  float64x2_t erf;
  float64x2_t scale;
};
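
/* Each table entry stores the pair { erf(r), scale(r) }: two entries are
   loaded whole and unzipped, so e.erf collects the erf values and e.scale the
   scale values for both lanes. */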
static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  float64x2_t e1 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 0)].erf),
              e2 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 1)].erf);
  e.erf = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}

/* Double-precision implementation of vector erf(x).
   Approximation based on series expansion about r, the multiple of 1/128
   nearest to x.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [
       + 1
       - r d
       + 1/3 (2 r^2 - 1) d^2
       - 1/6 (r (2 r^2 - 3)) d^3
       + 1/30 (4 r^4 - 12 r^2 + 3) d^4
       - 1/90 (r (4 r^4 - 20 r^2 + 15)) d^5
     ]

   Maximum measured error: 2.29 ULP
   V_NAME_D1 (erf)(-0x1.00003c924e5d1p-8) got -0x1.20dd59132ebadp-8
                                          want -0x1.20dd59132ebafp-8. */
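
/* The bracketed terms are the Taylor coefficients of erf about r: since
   erf'(x) = (2/sqrt(pi)) exp(-x^2), each higher derivative is a polynomial
   multiple of erf' (erf'' = -2x erf', erf''' = (4x^2 - 2) erf', ...), and
   dividing by n! gives the d^n coefficients listed above, including the
   factor r in the d^3 and d^5 terms. */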
float64x2_t VPCS_ATTR V_NAME_D1 (erf) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

  float64x2_t a = vabsq_f64 (x);
  /* Reciprocal conditions that do not catch NaNs, so they can be used in BSLs
     to return expected results. */
  uint64x2_t a_le_max = vcaleq_f64 (x, dat->max);
  uint64x2_t a_gt_max = vcagtq_f64 (x, dat->max);

#if WANT_SIMD_EXCEPT
  /* |x| huge or tiny. */
  uint64x2_t cmp1 = vcgtq_f64 (a, dat->huge_bound);
  uint64x2_t cmp2 = vcltq_f64 (a, dat->tiny_bound);
  uint64x2_t cmp = vorrq_u64 (cmp1, cmp2);
  /* If any lanes are special, mask them with 1 for small x or 8 for large
     values, and retain a copy of x to allow the special-case handler to fix
     the special lanes later. This is only necessary if fenv exceptions are to
     be triggered correctly. */
  if (unlikely (v_any_u64 (cmp)))
    {
      a = vbslq_f64 (cmp1, v_f64 (8.0), a);
      a = vbslq_f64 (cmp2, v_f64 (1.0), a);
    }
#endif

  /* Set r to multiple of 1/128 nearest to |x|. */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);
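  /* Adding shift = 0x1p45 places a in a binade whose ulp is 2^-7, so the
     rounding in this addition rounds a to the nearest multiple of 1/128. The
     low significand bits of z then hold the integer round (128 * a), which
     the bit-pattern subtraction below extracts as the table index. */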

  /* Lookup erf(r) and scale(r) in table, without shortcut for small values,
     but with saturated indices for large values and NaNs in order to avoid
     segfault. */
  uint64x2_t i
      = vsubq_u64 (vreinterpretq_u64_f64 (z), vreinterpretq_u64_f64 (shift));
  i = vbslq_u64 (a_le_max, i, dat->max_idx);
  struct entry e = lookup (i);

  float64x2_t r = vsubq_f64 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * poly (r, d). */
  float64x2_t d = vsubq_f64 (a, r);
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

  float64x2_t two_over_fifteen_and_fortyfive
      = vld1q_f64 (&dat->two_over_fifteen);

  /* poly (d, r) = 1 + p1(r) * d + p2(r) * d^2 + ... + p5(r) * d^5. */
  float64x2_t p1 = r;
  float64x2_t p2
      = vfmsq_f64 (dat->third, r2, vaddq_f64 (dat->third, dat->third));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));
  float64x2_t p4 = vfmaq_laneq_f64 (dat->two_over_five, r2,
                                    two_over_fifteen_and_fortyfive, 0);
  p4 = vfmsq_f64 (dat->tenth, r2, p4);
  float64x2_t p5 = vfmaq_laneq_f64 (dat->two_over_nine, r2,
                                    two_over_fifteen_and_fortyfive, 1);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));
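  /* Written out, the FMAs above give
	 p2 = (1 - 2 r^2) / 3,
	 p3 = r (2 r^2 - 3) / 6,
	 p4 = -(4 r^4 - 12 r^2 + 3) / 30,
	 p5 = r (4 r^4 - 20 r^2 + 15) / 90,
     i.e. the series coefficients with their signs flipped; the final
     d - d2 * poly step below flips them back. */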

  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
  float64x2_t y = vfmaq_f64 (p34, d2, p5);
  y = vfmaq_f64 (p12, d2, y);

  y = vfmaq_f64 (e.erf, e.scale, vfmsq_f64 (d, d2, y));
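  /* The pairwise FMAs evaluate poly = p1 + p2 d + p3 d^2 + p4 d^3 + p5 d^4
     Estrin-style in d, and the last line assembles
     erf(r) + scale * (d - d2 * poly). */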

  /* For |x| > max (including infinity), erf(|x|) rounds to 1 in double
     precision; NaN lanes fail the comparison and keep the NaN computed
     above. */
  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);

  /* Copy sign. */
  y = vbslq_f64 (v_u64 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp2)))
    {
      /* Neutralise huge values of x before fixing small values. */
      x = vbslq_f64 (cmp1, v_f64 (1.0), x);
      /* Fix tiny values that trigger spurious underflow. */
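      /* For tiny x, erf(x) ~ (2/sqrt(pi)) x = x + (2/sqrt(pi) - 1) x, so one
	 FMA with scale_minus_one produces the result directly; the polynomial
	 path would raise spurious underflow on the higher powers of d. */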
      return vbslq_f64 (cmp2, vfmaq_f64 (x, dat->scale_minus_one, x), y);
    }
#endif
  return y;
}

TEST_SIG (V, D, 1, erf, -6.0, 6.0)
TEST_ULP (V_NAME_D1 (erf), 1.79)
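/* The 1.79 bound plus the 0.5 ULP allowance added by the test harness matches
   the 2.29 ULP maximum quoted above (assuming the harness's usual
   convention). */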
/* WANT_SIMD_EXCEPT blocks miss some cases. */
TEST_DISABLE_FENV (V_NAME_D1 (erf))
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, 5.9921875, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 5.9921875, inf, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, inf, 40000)