Path: blob/main/contrib/arm-optimized-routines/math/aarch64/advsimd/log10f.c
/*
 * Single-precision vector log10 function.
 *
 * Copyright (c) 2020-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float32x4_t c0, c2, c4, c6, inv_ln10, ln2;
  uint32x4_t off, offset_lower_bound;
  uint16x8_t special_bound;
  uint32x4_t mantissa_mask;
  float c1, c3, c5, c7;
} data = {
  /* Use order 9 for log10(1+x), i.e. order 8 for log10(1+x)/x, with x in
     [-1/3, 1/3] (offset=2/3). Max. relative error: 0x1.068ee468p-25.  */
  .c0 = V4 (-0x1.bcb79cp-3f),
  .c1 = 0x1.2879c8p-3f,
  .c2 = V4 (-0x1.bcd472p-4f),
  .c3 = 0x1.6408f8p-4f,
  .c4 = V4 (-0x1.246f8p-4f),
  .c5 = 0x1.f0e514p-5f,
  .c6 = V4 (-0x1.0fc92cp-4f),
  .c7 = 0x1.f5f76ap-5f,
  .ln2 = V4 (0x1.62e43p-1f),
  .inv_ln10 = V4 (0x1.bcb7b2p-2f),
  /* Lower bound is the smallest positive normal float 0x00800000. For
     optimised register use, subnormals are detected after the offset has
     been subtracted, so the lower bound is 0x00800000 - offset (which wraps
     around).  */
  .offset_lower_bound = V4 (0x00800000 - 0x3f2aaaab),
  .special_bound = V8 (0x7f00), /* top16(asuint32(inf) - 0x00800000).  */
  .off = V4 (0x3f2aaaab),       /* 0.666667.  */
  .mantissa_mask = V4 (0x007fffff),
};
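/* Illustrative only (editorial addition, not part of the library): a scalar
   sketch of the bit-level reduction performed by the vector routine below.
   Subtracting asuint32 (2/3) in the integer domain means an arithmetic shift
   by 23 recovers n such that x = 2^n * (1+r) with 1+r roughly in [2/3, 4/3),
   i.e. r in [-1/3, 1/3). The helper name scalar_log10_reduce is
   hypothetical.  */
#include <stdint.h>
#include <string.h>

static inline float
scalar_log10_reduce (float x, int *n)
{
  const uint32_t off = 0x3f2aaaab; /* asuint32 (2/3).  */
  uint32_t u;
  memcpy (&u, &x, sizeof u); /* Bit-cast x to its IEEE-754 encoding.  */
  uint32_t u_off = u - off;
  /* Arithmetic shift sign-extends, recovering the unbiased exponent n.  */
  *n = (int32_t) u_off >> 23;
  /* Keep the mantissa bits and add the offset back to form 1+r.  */
  uint32_t m = (u_off & 0x007fffff) + off;
  float one_plus_r;
  memcpy (&one_plus_r, &m, sizeof one_plus_r);
  return one_plus_r - 1.0f; /* r in [-1/3, 1/3).  */
}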
static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t y, uint32x4_t u_off, float32x4_t p, float32x4_t r2,
              uint16x4_t cmp, const struct data *d)
{
  /* Fall back to scalar code.  */
  return v_call_f32 (log10f, vreinterpretq_f32_u32 (vaddq_u32 (u_off, d->off)),
                     vfmaq_f32 (y, p, r2), vmovl_u16 (cmp));
}

/* Fast implementation of AdvSIMD log10f,
   using a similar approach to AdvSIMD logf with the same offset (i.e. 2/3)
   and an order-9 polynomial.
   Maximum error: 3.305 ULP (nearest rounding).
   _ZGVnN4v_log10f(0x1.555c16p+0) got 0x1.ffe2fap-4
                                 want 0x1.ffe2f4p-4.  */
float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (log10) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  float32x4_t c1357 = vld1q_f32 (&d->c1);
  /* To avoid having to mov x out of the way, keep u after the offset has
     been applied, and recover x by adding the offset back in the
     special-case handler.  */
  uint32x4_t u_off = vreinterpretq_u32_f32 (x);

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
  u_off = vsubq_u32 (u_off, d->off);
  float32x4_t n = vcvtq_f32_s32 (
      vshrq_n_s32 (vreinterpretq_s32_u32 (u_off), 23)); /* signextend.  */

  uint16x4_t special = vcge_u16 (vsubhn_u32 (u_off, d->offset_lower_bound),
                                 vget_low_u16 (d->special_bound));

  uint32x4_t u = vaddq_u32 (vandq_u32 (u_off, d->mantissa_mask), d->off);
  float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));

  /* y = log10(1+r) + n * log10(2).  */
  float32x4_t r2 = vmulq_f32 (r, r);

  float32x4_t c01 = vfmaq_laneq_f32 (d->c0, r, c1357, 0);
  float32x4_t c23 = vfmaq_laneq_f32 (d->c2, r, c1357, 1);
  float32x4_t c45 = vfmaq_laneq_f32 (d->c4, r, c1357, 2);
  float32x4_t c67 = vfmaq_laneq_f32 (d->c6, r, c1357, 3);

  float32x4_t p47 = vfmaq_f32 (c45, r2, c67);
  float32x4_t p27 = vfmaq_f32 (c23, r2, p47);
  float32x4_t poly = vfmaq_f32 (c01, r2, p27);

  /* y = n * log10(2) + r / ln(10); 1/ln(10) is already folded into the
     polynomial coefficients, so log10(x) = y + poly * r2.  */
  float32x4_t y = vfmaq_f32 (r, d->ln2, n);
  y = vmulq_f32 (y, d->inv_ln10);

  if (unlikely (v_any_u16h (special)))
    return special_case (y, u_off, poly, r2, special, d);
  return vfmaq_f32 (y, poly, r2);
}

HALF_WIDTH_ALIAS_F1 (log10)

TEST_SIG (V, F, 1, log10, 0.01, 11.1)
TEST_ULP (V_NAME_F1 (log10), 2.81)
TEST_INTERVAL (V_NAME_F1 (log10), -0.0, -inf, 100)
TEST_INTERVAL (V_NAME_F1 (log10), 0, 0x1p-126, 100)
TEST_INTERVAL (V_NAME_F1 (log10), 0x1p-126, 0x1p-23, 50000)
TEST_INTERVAL (V_NAME_F1 (log10), 0x1p-23, 1.0, 50000)
TEST_INTERVAL (V_NAME_F1 (log10), 1.0, 100, 50000)
TEST_INTERVAL (V_NAME_F1 (log10), 100, inf, 50000)
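/* Illustrative only (editorial addition, not part of the library): a minimal
   standalone sanity check of the reduction sketch above, reconstructing the
   result the same way the vector code does, log10(x) = (n * ln2 + ln(1+r)) /
   ln10, with the ln2 and 1/ln10 constants from the data struct. Guarded with
   #if 0 so it does not affect the library build; extract and compile
   separately (e.g. `cc demo.c -lm`). main() and the printed comparison are
   hypothetical.  */
#if 0
#include <math.h>
#include <stdio.h>

int
main (void)
{
  for (float x = 0x1p-3f; x < 0x1p3f; x *= 1.25f)
    {
      int n;
      float r = scalar_log10_reduce (x, &n);
      /* Reconstruct log10(x) from n and r; compare against libm.  */
      float y = (n * 0x1.62e43p-1f + log1pf (r)) * 0x1.bcb7b2p-2f;
      printf ("log10(%a): %a vs libm %a\n", x, y, log10f (x));
    }
  return 0;
}
#endif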