Path: blob/main/contrib/arm-optimized-routines/math/aarch64/experimental/advsimd/v_logf_inline.h
/*
 * Single-precision vector log function - inline version
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"

struct v_logf_data
{
  float32x4_t poly[7];
  float32x4_t ln2;
  uint32x4_t off, mantissa_mask;
};

#define V_LOGF_CONSTANTS                                                     \
  {                                                                          \
    .poly                                                                    \
	= { V4 (-0x1.3e737cp-3f), V4 (0x1.5a9aa2p-3f), V4 (-0x1.4f9934p-3f), \
	    V4 (0x1.961348p-3f), V4 (-0x1.00187cp-2f), V4 (0x1.555d7cp-2f),  \
	    V4 (-0x1.ffffc8p-2f) },                                          \
    .ln2 = V4 (0x1.62e43p-1f), .off = V4 (0x3f2aaaab),                       \
    .mantissa_mask = V4 (0x007fffff)                                         \
  }

#define P(i) d->poly[7 - i]

static inline float32x4_t
v_logf_inline (float32x4_t x, const struct v_logf_data *d)
{
  float32x4_t n, p, q, r, r2, y;
  uint32x4_t u;

  u = vreinterpretq_u32_f32 (x);

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
  u = vsubq_u32 (u, d->off);
  n = vcvtq_f32_s32 (
      vshrq_n_s32 (vreinterpretq_s32_u32 (u), 23)); /* signextend.  */
  u = vandq_u32 (u, d->mantissa_mask);
  u = vaddq_u32 (u, d->off);
  r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));

  /* y = log(1+r) + n*ln2.  */
  r2 = vmulq_f32 (r, r);
  /* n*ln2 + r + r2*(P1 + r*P2 + r2*(P3 + r*P4 + r2*(P5 + r*P6 + r2*P7))).  */
  p = vfmaq_f32 (P (5), P (6), r);
  q = vfmaq_f32 (P (3), P (4), r);
  y = vfmaq_f32 (P (1), P (2), r);
  p = vfmaq_f32 (p, P (7), r2);
  q = vfmaq_f32 (q, p, r2);
  y = vfmaq_f32 (y, q, r2);
  p = vfmaq_f32 (r, d->ln2, n);

  return vfmaq_f32 (p, y, r2);
}

#undef P
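For context, a minimal usage sketch follows. It is not part of the upstream header: the file name example.c, the sample inputs, and the build line are illustrative assumptions, and it presumes an AArch64 toolchain with NEON plus the sibling v_math.h (which supplies the V4 and v_f32 helpers) on the include path.

/* example.c - hypothetical caller of v_logf_inline, not upstream code.
   Assumed build: cc -O2 example.c on an AArch64 host with v_math.h
   reachable on the include path.  */
#include <stdio.h>
#include <arm_neon.h>
#include "v_logf_inline.h"

int
main (void)
{
  /* The header only defines the initializer macro; the caller owns the
     constant table and passes it in by pointer.  */
  const struct v_logf_data data = V_LOGF_CONSTANTS;

  /* One call evaluates four lanes: log(0.5), log(1), log(2), log(4).  */
  float in[4] = { 0.5f, 1.0f, 2.0f, 4.0f };
  float out[4];
  vst1q_f32 (out, v_logf_inline (vld1q_f32 (in), &data));

  for (int i = 0; i < 4; i++)
    printf ("log(%g) ~= %g\n", (double) in[i], (double) out[i]);
  return 0;
}

One design point worth noting: rather than a straight Horner evaluation, the polynomial is split across the three accumulators p, q, and y, each seeded by an independent degree-1 FMA in r and then merged with multiples of r2, so the FMA chains can issue in parallel and the overall latency chain is shorter.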