Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/arm-optimized-routines/math/test/mathbench_wrappers.h
48254 views
1
/*
2
* Function wrappers for mathbench.
3
*
4
* Copyright (c) 2022-2024, Arm Limited.
5
* SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6
*/
7
8
#if WANT_EXPERIMENTAL_MATH
9
static double
10
atan2_wrap (double x)
11
{
12
return atan2 (5.0, x);
13
}
14
15
static float
16
atan2f_wrap (float x)
17
{
18
return atan2f (5.0f, x);
19
}
20
21
static double
22
powi_wrap (double x)
23
{
24
return __builtin_powi (x, (int) round (x));
25
}
26
#endif /* WANT_EXPERIMENTAL_MATH. */
27
28
#if __aarch64__ && __linux__
29
30
/* Sum both outputs of the Neon sincospif so neither store is dead.  */
__vpcs static float32x4_t
_Z_sincospif_wrap (float32x4_t x)
{
  float sinpi_res[4], cospi_res[4];
  _ZGVnN4vl4l4_sincospif (x, sinpi_res, cospi_res);
  return vaddq_f32 (vld1q_f32 (sinpi_res), vld1q_f32 (cospi_res));
}

/* Double-precision counterpart of _Z_sincospif_wrap.  */
__vpcs static float64x2_t
_Z_sincospi_wrap (float64x2_t x)
{
  double sinpi_res[2], cospi_res[2];
  _ZGVnN2vl8l8_sincospi (x, sinpi_res, cospi_res);
  return vaddq_f64 (vld1q_f64 (sinpi_res), vld1q_f64 (cospi_res));
}
45
46
/* Neon atan2 with the first operand pinned to 5.0 in every lane.  */
__vpcs static float64x2_t
_Z_atan2_wrap (float64x2_t x)
{
  float64x2_t fixed_y = vdupq_n_f64 (5.0);
  return _ZGVnN2vv_atan2 (fixed_y, x);
}

/* Single-precision counterpart of _Z_atan2_wrap.  */
__vpcs static float32x4_t
_Z_atan2f_wrap (float32x4_t x)
{
  float32x4_t fixed_y = vdupq_n_f32 (5.0f);
  return _ZGVnN4vv_atan2f (fixed_y, x);
}

/* Neon hypotf with the first operand pinned to 5.0f in every lane.  */
__vpcs static float32x4_t
_Z_hypotf_wrap (float32x4_t x)
{
  float32x4_t fixed_y = vdupq_n_f32 (5.0f);
  return _ZGVnN4vv_hypotf (fixed_y, x);
}

/* Double-precision counterpart of _Z_hypotf_wrap.  */
__vpcs static float64x2_t
_Z_hypot_wrap (float64x2_t x)
{
  float64x2_t fixed_y = vdupq_n_f64 (5.0);
  return _ZGVnN2vv_hypot (fixed_y, x);
}
69
70
/* Neon powf: each lane raised to the power of itself.  */
__vpcs static float32x4_t
xy_Z_powf (float32x4_t x)
{
  return _ZGVnN4vv_powf (x, x);
}

/* Neon powf with a fixed exponent.  Use a float literal (was 23.4, a
   double) so vdupq_n_f32 takes no implicit double->float narrowing,
   matching the SVE wrappers below.  */
__vpcs static float32x4_t
x_Z_powf (float32x4_t x)
{
  return _ZGVnN4vv_powf (x, vdupq_n_f32 (23.4f));
}

/* Neon powf with a fixed base; float literal as above.  */
__vpcs static float32x4_t
y_Z_powf (float32x4_t x)
{
  return _ZGVnN4vv_powf (vdupq_n_f32 (2.34f), x);
}

/* Neon pow: each lane raised to the power of itself.  */
__vpcs static float64x2_t
xy_Z_pow (float64x2_t x)
{
  return _ZGVnN2vv_pow (x, x);
}

/* Neon pow with a fixed exponent.  */
__vpcs static float64x2_t
x_Z_pow (float64x2_t x)
{
  return _ZGVnN2vv_pow (x, vdupq_n_f64 (23.4));
}

/* Neon pow with a fixed base.  */
__vpcs static float64x2_t
y_Z_pow (float64x2_t x)
{
  return _ZGVnN2vv_pow (vdupq_n_f64 (2.34), x);
}
105
106
/* Neon modff: add the fractional return value to the stored integral
   parts so both outputs are consumed.  */
__vpcs static float32x4_t
_Z_modff_wrap (float32x4_t x)
{
  float int_part[4];
  float32x4_t frac = _ZGVnN4vl4_modff (x, int_part);
  return vaddq_f32 (frac, vld1q_f32 (int_part));
}

/* Double-precision counterpart of _Z_modff_wrap.  */
__vpcs static float64x2_t
_Z_modf_wrap (float64x2_t x)
{
  double int_part[2];
  float64x2_t frac = _ZGVnN2vl8_modf (x, int_part);
  return vaddq_f64 (frac, vld1q_f64 (int_part));
}

/* Neon sincosf: return the sum of both outputs.  */
__vpcs static float32x4_t
_Z_sincosf_wrap (float32x4_t x)
{
  float sin_res[4], cos_res[4];
  _ZGVnN4vl4l4_sincosf (x, sin_res, cos_res);
  return vaddq_f32 (vld1q_f32 (sin_res), vld1q_f32 (cos_res));
}

/* Neon cexpif: sum the two halves of the returned pair.  */
__vpcs static float32x4_t
_Z_cexpif_wrap (float32x4_t x)
{
  float32x4x2_t sc = _ZGVnN4v_cexpif (x);
  return vaddq_f32 (sc.val[0], sc.val[1]);
}

/* Double-precision counterpart of _Z_sincosf_wrap.  */
__vpcs static float64x2_t
_Z_sincos_wrap (float64x2_t x)
{
  double sin_res[2], cos_res[2];
  _ZGVnN2vl8l8_sincos (x, sin_res, cos_res);
  return vaddq_f64 (vld1q_f64 (sin_res), vld1q_f64 (cos_res));
}

/* Double-precision counterpart of _Z_cexpif_wrap.  */
__vpcs static float64x2_t
_Z_cexpi_wrap (float64x2_t x)
{
  float64x2x2_t sc = _ZGVnN2v_cexpi (x);
  return vaddq_f64 (sc.val[0], sc.val[1]);
}
151
152
#endif
153
154
#if WANT_SVE_TESTS
155
156
/* SVE atan2f with the second operand fixed at 5.0f.  */
static svfloat32_t
_Z_sv_atan2f_wrap (svfloat32_t x, svbool_t pg)
{
  return _ZGVsMxvv_atan2f (x, svdup_f32 (5.0f), pg);
}

/* Double-precision counterpart of _Z_sv_atan2f_wrap.  */
static svfloat64_t
_Z_sv_atan2_wrap (svfloat64_t x, svbool_t pg)
{
  return _ZGVsMxvv_atan2 (x, svdup_f64 (5.0), pg);
}

/* SVE hypotf with the second operand fixed.  Use a 5.0f literal (was
   5.0, a double): svdup_f32 takes a float, so the double literal forced
   an implicit narrowing and was inconsistent with _Z_sv_atan2f_wrap.  */
static svfloat32_t
_Z_sv_hypotf_wrap (svfloat32_t x, svbool_t pg)
{
  return _ZGVsMxvv_hypotf (x, svdup_f32 (5.0f), pg);
}

/* Double-precision counterpart of _Z_sv_hypotf_wrap.  */
static svfloat64_t
_Z_sv_hypot_wrap (svfloat64_t x, svbool_t pg)
{
  return _ZGVsMxvv_hypot (x, svdup_f64 (5.0), pg);
}
179
180
/* SVE powf: each lane raised to the power of itself.  */
static svfloat32_t
xy_Z_sv_powf (svfloat32_t x, svbool_t pg)
{
  return _ZGVsMxvv_powf (x, x, pg);
}

/* SVE powf with a fixed exponent in every lane.  */
static svfloat32_t
x_Z_sv_powf (svfloat32_t x, svbool_t pg)
{
  svfloat32_t expo = svdup_f32 (23.4f);
  return _ZGVsMxvv_powf (x, expo, pg);
}

/* SVE powf with a fixed base in every lane.  */
static svfloat32_t
y_Z_sv_powf (svfloat32_t x, svbool_t pg)
{
  svfloat32_t base = svdup_f32 (2.34f);
  return _ZGVsMxvv_powf (base, x, pg);
}

/* SVE pow: each lane raised to the power of itself.  */
static svfloat64_t
xy_Z_sv_pow (svfloat64_t x, svbool_t pg)
{
  return _ZGVsMxvv_pow (x, x, pg);
}

/* SVE pow with a fixed exponent in every lane.  */
static svfloat64_t
x_Z_sv_pow (svfloat64_t x, svbool_t pg)
{
  svfloat64_t expo = svdup_f64 (23.4);
  return _ZGVsMxvv_pow (x, expo, pg);
}

/* SVE pow with a fixed base in every lane.  */
static svfloat64_t
y_Z_sv_pow (svfloat64_t x, svbool_t pg)
{
  svfloat64_t base = svdup_f64 (2.34);
  return _ZGVsMxvv_pow (base, x, pg);
}
215
216
/* SVE sincospif: sum both output buffers (VLA sized by the runtime
   vector length) so neither store is dead.  */
static svfloat32_t
_Z_sv_sincospif_wrap (svfloat32_t x, svbool_t pg)
{
  float sinpi_res[svcntw ()], cospi_res[svcntw ()];
  _ZGVsMxvl4l4_sincospif (x, sinpi_res, cospi_res, pg);
  return svadd_x (pg, svld1 (pg, sinpi_res), svld1 (pg, cospi_res));
}

/* Double-precision counterpart of _Z_sv_sincospif_wrap.  */
static svfloat64_t
_Z_sv_sincospi_wrap (svfloat64_t x, svbool_t pg)
{
  double sinpi_res[svcntd ()], cospi_res[svcntd ()];
  _ZGVsMxvl8l8_sincospi (x, sinpi_res, cospi_res, pg);
  return svadd_x (pg, svld1 (pg, sinpi_res), svld1 (pg, cospi_res));
}

/* SVE modff: add the fractional return value to the stored integral
   parts.  */
static svfloat32_t
_Z_sv_modff_wrap (svfloat32_t x, svbool_t pg)
{
  float int_part[svcntw ()];
  svfloat32_t frac = _ZGVsMxvl4_modff (x, int_part, pg);
  return svadd_x (pg, frac, svld1 (pg, int_part));
}

/* Double-precision counterpart of _Z_sv_modff_wrap.  */
static svfloat64_t
_Z_sv_modf_wrap (svfloat64_t x, svbool_t pg)
{
  double int_part[svcntd ()];
  svfloat64_t frac = _ZGVsMxvl8_modf (x, int_part, pg);
  return svadd_x (pg, frac, svld1 (pg, int_part));
}
247
248
/* SVE sincosf: return sin + cos.  Previously this loaded the sine
   buffer twice (s + s), leaving the cosine results written by
   _ZGVsMxvl4l4_sincosf unused; every sibling wrapper sums s and c.  */
static svfloat32_t
_Z_sv_sincosf_wrap (svfloat32_t x, svbool_t pg)
{
  float s[svcntw ()], c[svcntw ()];
  _ZGVsMxvl4l4_sincosf (x, s, c, pg);
  return svadd_x (pg, svld1 (pg, s), svld1 (pg, c));
}
255
256
/* SVE cexpif: sum the two halves of the returned tuple.  */
static svfloat32_t
_Z_sv_cexpif_wrap (svfloat32_t x, svbool_t pg)
{
  svfloat32x2_t sc = _ZGVsMxv_cexpif (x, pg);
  svfloat32_t lo = svget2 (sc, 0);
  svfloat32_t hi = svget2 (sc, 1);
  return svadd_x (pg, lo, hi);
}
262
263
/* SVE sincos: return sin + cos.  Previously this loaded the sine
   buffer twice (s + s), leaving the cosine results written by
   _ZGVsMxvl8l8_sincos unused; every sibling wrapper sums s and c.  */
static svfloat64_t
_Z_sv_sincos_wrap (svfloat64_t x, svbool_t pg)
{
  double s[svcntd ()], c[svcntd ()];
  _ZGVsMxvl8l8_sincos (x, s, c, pg);
  return svadd_x (pg, svld1 (pg, s), svld1 (pg, c));
}
270
271
/* Double-precision counterpart of _Z_sv_cexpif_wrap.  */
static svfloat64_t
_Z_sv_cexpi_wrap (svfloat64_t x, svbool_t pg)
{
  svfloat64x2_t sc = _ZGVsMxv_cexpi (x, pg);
  svfloat64_t lo = svget2 (sc, 0);
  svfloat64_t hi = svget2 (sc, 1);
  return svadd_x (pg, lo, hi);
}
277
278
# if WANT_EXPERIMENTAL_MATH
279
280
/* SVE powi: exponent is the input converted lane-wise to int32.  */
static svfloat32_t
_Z_sv_powi_wrap (svfloat32_t x, svbool_t pg)
{
  svint32_t expo = svcvt_s32_f32_x (pg, x);
  return _ZGVsMxvv_powi (x, expo, pg);
}

/* Double-precision counterpart: exponent converted to int64.  */
static svfloat64_t
_Z_sv_powk_wrap (svfloat64_t x, svbool_t pg)
{
  svint64_t expo = svcvt_s64_f64_x (pg, x);
  return _ZGVsMxvv_powk (x, expo, pg);
}
291
292
# endif
293
294
#endif
295
296
#if __aarch64__
297
/* Scalar sincospif: return the sum of both outputs so neither store
   is optimised away.  */
static float
sincospif_wrap (float x)
{
  float sinpi_res, cospi_res;
  arm_math_sincospif (x, &sinpi_res, &cospi_res);
  return sinpi_res + cospi_res;
}

/* Double-precision counterpart of sincospif_wrap.  */
static double
sincospi_wrap (double x)
{
  double sinpi_res, cospi_res;
  arm_math_sincospi (x, &sinpi_res, &cospi_res);
  return sinpi_res + cospi_res;
}
312
#endif
313
314
static double
315
xypow (double x)
316
{
317
return pow (x, x);
318
}
319
320
static float
321
xypowf (float x)
322
{
323
return powf (x, x);
324
}
325
326
static double
327
xpow (double x)
328
{
329
return pow (x, 23.4);
330
}
331
332
static float
333
xpowf (float x)
334
{
335
return powf (x, 23.4f);
336
}
337
338
static double
339
ypow (double x)
340
{
341
return pow (2.34, x);
342
}
343
344
static float
345
ypowf (float x)
346
{
347
return powf (2.34f, x);
348
}
349
350
/* Scalar sincosf: return sin + cos so both outputs are consumed.  */
static float
sincosf_wrap (float x)
{
  float sin_res, cos_res;
  sincosf (x, &sin_res, &cos_res);
  return sin_res + cos_res;
}
357
358