Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/cospi.c
/*
 * Double-precision vector cospi function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"
#include "v_poly_f64.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float64x2_t poly[10];
  float64x2_t range_val;
} data = {
  /* Polynomial coefficients generated using Remez algorithm,
     see sinpi.sollya for details.  */
  .poly = { V2 (0x1.921fb54442d184p1), V2 (-0x1.4abbce625be53p2),
            V2 (0x1.466bc6775ab16p1), V2 (-0x1.32d2cce62dc33p-1),
            V2 (0x1.507834891188ep-4), V2 (-0x1.e30750a28c88ep-8),
            V2 (0x1.e8f48308acda4p-12), V2 (-0x1.6fc0032b3c29fp-16),
            V2 (0x1.af86ae521260bp-21), V2 (-0x1.012a9870eeb7dp-25) },
  .range_val = V2 (0x1p63),
};

static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
{
  /* Fall back to scalar code.  */
  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
  return v_call_f64 (arm_math_cospi, x, y, cmp);
}

/* Approximation for vector double-precision cospi(x).
   Maximum Error 3.06 ULP:
   _ZGVnN2v_cospi(0x1.7dd4c0b03cc66p-5) got 0x1.fa854babfb6bep-1
                                       want 0x1.fa854babfb6c1p-1.  */
float64x2_t VPCS_ATTR V_NAME_D1 (cospi) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  float64x2_t r = vabsq_f64 (x);
  uint64x2_t cmp = vcaleq_f64 (v_f64 (0x1p64), x);

  /* When WANT_SIMD_EXCEPT = 1, special lanes should be zero'd
     to avoid them overflowing and throwing exceptions.  */
  r = v_zerofy_f64 (r, cmp);
  uint64x2_t odd = vshlq_n_u64 (vcvtnq_u64_f64 (r), 63);

#else
  float64x2_t r = x;
  uint64x2_t cmp = vcageq_f64 (r, d->range_val);
  uint64x2_t odd
      = vshlq_n_u64 (vreinterpretq_u64_s64 (vcvtaq_s64_f64 (r)), 63);

#endif

  r = vsubq_f64 (r, vrndaq_f64 (r));

  /* cospi(x) = sinpi(0.5 - abs(x)) for values -1/2 .. 1/2.  */
  r = vsubq_f64 (v_f64 (0.5), vabsq_f64 (r));

  /* y = sin(r).  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t r4 = vmulq_f64 (r2, r2);
  float64x2_t y = vmulq_f64 (v_pw_horner_9_f64 (r2, r4, d->poly), r);

  /* Fallback to scalar.  */
  if (unlikely (v_any_u64 (cmp)))
    return special_case (x, y, odd, cmp);

  /* Reintroduce the sign bit for inputs which round to odd.  */
  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
}

#if WANT_TRIGPI_TESTS
TEST_ULP (V_NAME_D1 (cospi), 2.56)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D1 (cospi), WANT_SIMD_EXCEPT)
TEST_SYM_INTERVAL (V_NAME_D1 (cospi), 0, 0x1p-63, 5000)
TEST_SYM_INTERVAL (V_NAME_D1 (cospi), 0x1p-63, 0.5, 10000)
TEST_SYM_INTERVAL (V_NAME_D1 (cospi), 0.5, 0x1p51, 10000)
TEST_SYM_INTERVAL (V_NAME_D1 (cospi), 0x1p51, inf, 10000)
#endif
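The routine's vector-ABI name, _ZGVnN2v_cospi, appears in the worst-case error report in the header comment above. Below is a minimal caller sketch, not part of cospi.c: it assumes an AArch64 toolchain with <arm_neon.h>, this library linked in, and a compiler that accepts the aarch64_vector_pcs attribute (matching VPCS_ATTR on the definition).

/* Illustrative caller sketch -- not part of cospi.c.  Assumes the vector-ABI
   symbol _ZGVnN2v_cospi (named in the error report above) is provided by the
   library at link time.  */
#include <arm_neon.h>
#include <stdio.h>

/* Declared with the vector PCS, matching VPCS_ATTR on the definition.  */
__attribute__ ((aarch64_vector_pcs)) float64x2_t _ZGVnN2v_cospi (float64x2_t);

int
main (void)
{
  /* cospi(0.25) = sqrt(2)/2, cospi(1) = -1.  */
  double in[2] = { 0.25, 1.0 };
  float64x2_t y = _ZGVnN2v_cospi (vld1q_f64 (in));
  printf ("%a %a\n", vgetq_lane_f64 (y, 0), vgetq_lane_f64 (y, 1));
  return 0;
}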