Path: blob/master/thirdparty/embree/common/simd/vfloat4_sse2.h
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
  /* 4-wide SSE float type */
  template<>
  struct vfloat<4>
  {
    ALIGNED_STRUCT_(16);

    typedef vboolf4 Bool;
    typedef vint4   Int;
    typedef vfloat4 Float;

    enum  { size = 4 };                        // number of SIMD elements
    union { __m128 v; float f[4]; int i[4]; }; // data

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vfloat() {}
    __forceinline vfloat(const vfloat4& other) { v = other.v; }
    //__forceinline vfloat(const vfloat4& other) = default;

    __forceinline vfloat4& operator =(const vfloat4& other) { v = other.v; return *this; }

    __forceinline vfloat(__m128 a) : v(a) {}
    __forceinline operator const __m128&() const { return v; }
    __forceinline operator       __m128&()       { return v; }

    __forceinline vfloat(float a) : v(_mm_set1_ps(a)) {}
    __forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {}

    __forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {}
#if defined(__aarch64__)
    __forceinline explicit vfloat(const vuint4& x) {
      v = vcvtq_f32_u32(vreinterpretq_u32_s32(x.v));
    }
#else
    // Unsigned conversion: convert the low 31 bits as a signed int, then add
    // 2^31 back in where the top bit was set (0x4F000000 is the bit pattern
    // of 2^31 as a float).
    __forceinline explicit vfloat(const vuint4& x) {
      const __m128i a  = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF));
      const __m128i b  = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000));
      const __m128  af = _mm_cvtepi32_ps(a);
      const __m128  bf = _mm_castsi128_ps(b);
      v = _mm_add_ps(af,bf);
    }
#endif

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vfloat(ZeroTy)      : v(_mm_setzero_ps()) {}
    __forceinline vfloat(OneTy)       : v(_mm_set1_ps(1.0f)) {}
    __forceinline vfloat(PosInfTy)    : v(_mm_set1_ps(pos_inf)) {}
    __forceinline vfloat(NegInfTy)    : v(_mm_set1_ps(neg_inf)) {}
    __forceinline vfloat(StepTy)      : v(_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f)) {}
    __forceinline vfloat(NaNTy)       : v(_mm_set1_ps(nan)) {}
    __forceinline vfloat(UndefinedTy) : v(_mm_undefined_ps()) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vfloat4 load (const void* a) { return _mm_load_ps((float*)a); }
    static __forceinline vfloat4 loadu(const void* a) { return _mm_loadu_ps((float*)a); }

    static __forceinline void store (void* ptr, const vfloat4& v) { _mm_store_ps((float*)ptr,v); }
    static __forceinline void storeu(void* ptr, const vfloat4& v) { _mm_storeu_ps((float*)ptr,v); }
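    // The masked loads/stores below use native masking on AVX-512VL, AVX
    // maskload/maskstore on AVX, and an emulation on plain SSE2. Note that the
    // emulated masked store is a read-modify-write (load, select, store), so
    // it accesses all 16 bytes even for inactive lanes, unlike the
    // hardware-masked paths. Illustrative use (hypothetical buffer):
    //   float buf[4] = { -1.0f, 2.0f, -3.0f, 4.0f };
    //   vfloat4 x = vfloat4::loadu(buf);
    //   vfloat4::storeu(x < 0.0f, buf, vfloat4(zero)); // zero the negative lanes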
#if defined(__AVX512VL__)
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_ps (_mm_setzero_ps(),mask,(float*)ptr); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_ps(_mm_setzero_ps(),mask,(float*)ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_store_ps ((float*)ptr,mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_storeu_ps((float*)ptr,mask,v); }
#elif defined(__AVX__)
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,mask); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); }
#else
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_load_ps ((float*)ptr),mask); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_loadu_ps((float*)ptr),mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { store (ptr,select(mask,v,load (ptr))); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { storeu(ptr,select(mask,v,loadu(ptr))); }
#endif

#if defined(__AVX__)
    static __forceinline vfloat4 broadcast(const void* a) { return _mm_broadcast_ss((float*)a); }
#else
    static __forceinline vfloat4 broadcast(const void* a) { return _mm_set1_ps(*(float*)a); }
#endif

    static __forceinline vfloat4 load_nt(const float* ptr) {
#if defined (__SSE4_1__)
      return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr));
#else
      return _mm_load_ps(ptr);
#endif
    }

#if defined(__aarch64__)
    static __forceinline vfloat4 load(const char* ptr) {
      return __m128(_mm_load4epi8_f32(((__m128i*)ptr)));
    }
#elif defined(__SSE4_1__)
    static __forceinline vfloat4 load(const char* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const char* ptr) {
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

#if defined(__aarch64__)
    static __forceinline vfloat4 load(const unsigned char* ptr) {
      return __m128(_mm_load4epu8_f32(((__m128i*)ptr)));
    }
#elif defined(__SSE4_1__)
    static __forceinline vfloat4 load(const unsigned char* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const unsigned char* ptr) {
      //return _mm_cvtpu8_ps(*(__m64*)ptr); // don't enable, will use MMX instructions
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

#if defined(__aarch64__)
    static __forceinline vfloat4 load(const short* ptr) {
      return __m128(_mm_load4epi16_f32(((__m128i*)ptr)));
    }
#elif defined(__SSE4_1__)
    static __forceinline vfloat4 load(const short* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const short* ptr) {
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

    static __forceinline vfloat4 load(const unsigned short* ptr) {
      return _mm_mul_ps(vfloat4(vint4::load(ptr)),vfloat4(1.0f/65535.0f));
    }

    static __forceinline void store_nt(void* ptr, const vfloat4& v)
    {
#if defined (__SSE4_1__)
      _mm_stream_ps((float*)ptr,v);
#else
      _mm_store_ps((float*)ptr,v);
#endif
    }

    template<int scale = 4>
    static __forceinline vfloat4 gather(const float* ptr, const vint4& index) {
#if defined(__AVX2__) && !defined(__aarch64__)
      return _mm_i32gather_ps(ptr, index, scale);
#else
      return vfloat4(
        *(float*)(((char*)ptr)+scale*index[0]),
        *(float*)(((char*)ptr)+scale*index[1]),
        *(float*)(((char*)ptr)+scale*index[2]),
        *(float*)(((char*)ptr)+scale*index[3]));
#endif
    }
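    // gather<scale>(ptr,index) reads lane i from the byte address
    // (char*)ptr + scale*index[i] -- a single vgatherdps on AVX2, four scalar
    // loads otherwise. Illustrative use (hypothetical table):
    //   float table[32]; vint4 idx(0,4,8,12);
    //   vfloat4 x = vfloat4::gather<4>(table, idx); // {table[0],table[4],table[8],table[12]}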
    template<int scale = 4>
    static __forceinline vfloat4 gather(const vboolf4& mask, const float* ptr, const vint4& index) {
      vfloat4 r = zero;
#if defined(__AVX512VL__)
      return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale);
#elif defined(__AVX2__) && !defined(__aarch64__)
      return _mm_mask_i32gather_ps(r, ptr, index, mask, scale);
#else
      if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
      if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
      if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
      if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
      return r;
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(void* ptr, const vint4& index, const vfloat4& v)
    {
#if defined(__AVX512VL__)
      _mm_i32scatter_ps((float*)ptr, index, v, scale);
#else
      *(float*)(((char*)ptr)+scale*index[0]) = v[0];
      *(float*)(((char*)ptr)+scale*index[1]) = v[1];
      *(float*)(((char*)ptr)+scale*index[2]) = v[2];
      *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(const vboolf4& mask, void* ptr, const vint4& index, const vfloat4& v)
    {
#if defined(__AVX512VL__)
      _mm_mask_i32scatter_ps((float*)ptr, mask, index, v, scale);
#else
      if (likely(mask[0])) *(float*)(((char*)ptr)+scale*index[0]) = v[0];
      if (likely(mask[1])) *(float*)(((char*)ptr)+scale*index[1]) = v[1];
      if (likely(mask[2])) *(float*)(((char*)ptr)+scale*index[2]) = v[2];
      if (likely(mask[3])) *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

    static __forceinline void store(const vboolf4& mask, char* ptr, const vint4& ofs, const vfloat4& v) {
      scatter<1>(mask,ptr,ofs,v);
    }
    static __forceinline void store(const vboolf4& mask, float* ptr, const vint4& ofs, const vfloat4& v) {
      scatter<4>(mask,ptr,ofs,v);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline const float& operator [](size_t index) const { assert(index < 4); return f[index]; }
    __forceinline       float& operator [](size_t index)       { assert(index < 4); return f[index]; }

    friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
      return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
      return _mm_blendv_ps(f, t, m);
#else
      return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
    }
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Load/Store
  ////////////////////////////////////////////////////////////////////////////////

  template<> struct mem<vfloat4>
  {
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return vfloat4::load (mask,ptr); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return vfloat4::loadu(mask,ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::store (mask,ptr,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::storeu(mask,ptr,v); }
  };
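  // select(m,t,f) picks t where a mask lane is set and f elsewhere. The
  // pre-SSE4.1 fallback is a bitwise blend, (m & t) | (~m & f), so it assumes
  // canonical masks whose lanes are all-ones or all-zeros -- which is what
  // the comparison operators below produce. Illustrative use:
  //   vfloat4 clamped = select(x > 1.0f, vfloat4(one), x); // min(x, 1)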
  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 asFloat(const vint4&   a) { return _mm_castsi128_ps(a); }
  __forceinline vint4   asInt  (const vfloat4& a) { return _mm_castps_si128(a); }
  __forceinline vuint4  asUInt (const vfloat4& a) { return _mm_castps_si128(a); }

  __forceinline vint4   toInt  (const vfloat4& a) { return vint4(a); }
  __forceinline vfloat4 toFloat(const vint4&   a) { return vfloat4(a); }

  __forceinline vfloat4 operator +(const vfloat4& a) { return a; }
#if defined(__aarch64__)
  __forceinline vfloat4 operator -(const vfloat4& a) {
    return vnegq_f32(a);
  }
#else
  __forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }
#endif

#if defined(__aarch64__)
  __forceinline vfloat4 abs(const vfloat4& a) { return _mm_abs_ps(a); }
#else
  __forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
#endif

#if defined(__AVX512VL__)
  __forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); }
#else
  __forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); }
#endif

  __forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }

  __forceinline vfloat4 rcp(const vfloat4& a)
  {
#if defined(__aarch64__)
    return vfloat4(vdivq_f32(vdupq_n_f32(1.0f),a.v));
#else

#if defined(__AVX512VL__)
    const vfloat4 r = _mm_rcp14_ps(a);
#else
    const vfloat4 r = _mm_rcp_ps(a);
#endif

#if defined(__AVX2__)
    return _mm_fmadd_ps(r, _mm_fnmadd_ps(a, r, vfloat4(1.0f)), r); // computes r + r * (1 - a * r)
#else
    return _mm_add_ps(r,_mm_mul_ps(r, _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r)))); // computes r + r * (1 - a * r)
#endif

#endif // defined(__aarch64__)
  }

  __forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); }
  __forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); }

  __forceinline vfloat4 rsqrt(const vfloat4& a)
  {
#if defined(__aarch64__)
    vfloat4 r = _mm_rsqrt_ps(a);
    r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
    r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
    r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r));
    return r;
#else

#if defined(__AVX512VL__)
    vfloat4 r = _mm_rsqrt14_ps(a);
#else
    vfloat4 r = _mm_rsqrt_ps(a);
#endif

#if defined(__AVX2__)
    r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#else
    r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#endif

#endif
    return r;
  }

  __forceinline vboolf4 isnan(const vfloat4& a) {
    const vfloat4 b = _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)));
#if defined(__AVX512VL__)
    return _mm_cmp_epi32_mask(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000), _MM_CMPINT_GT);
#else
    return _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000)));
#endif
  }
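  // rcp() and rsqrt() above refine the hardware estimate with one
  // Newton-Raphson step, roughly doubling the number of correct bits:
  //   rcp:   r' = r + r*(1 - a*r)     (fixed point at r = 1/a)
  //   rsqrt: r' = 1.5*r - 0.5*a*r^3   (fixed point at r = 1/sqrt(a))
  // isnan() relies on the IEEE-754 encoding: once the sign bit is cleared, a
  // value is NaN exactly when its bit pattern exceeds 0x7f800000 (+inf).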
  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 operator +(const vfloat4& a, const vfloat4& b) { return _mm_add_ps(a, b); }
  __forceinline vfloat4 operator +(const vfloat4& a, float          b) { return a + vfloat4(b); }
  __forceinline vfloat4 operator +(float          a, const vfloat4& b) { return vfloat4(a) + b; }

  __forceinline vfloat4 operator -(const vfloat4& a, const vfloat4& b) { return _mm_sub_ps(a, b); }
  __forceinline vfloat4 operator -(const vfloat4& a, float          b) { return a - vfloat4(b); }
  __forceinline vfloat4 operator -(float          a, const vfloat4& b) { return vfloat4(a) - b; }

  __forceinline vfloat4 operator *(const vfloat4& a, const vfloat4& b) { return _mm_mul_ps(a, b); }
  __forceinline vfloat4 operator *(const vfloat4& a, float          b) { return a * vfloat4(b); }
  __forceinline vfloat4 operator *(float          a, const vfloat4& b) { return vfloat4(a) * b; }

  __forceinline vfloat4 operator /(const vfloat4& a, const vfloat4& b) { return _mm_div_ps(a,b); }
  __forceinline vfloat4 operator /(const vfloat4& a, float          b) { return a/vfloat4(b); }
  __forceinline vfloat4 operator /(float          a, const vfloat4& b) { return vfloat4(a)/b; }

  __forceinline vfloat4 operator &(const vfloat4& a, const vfloat4& b) { return _mm_and_ps(a,b); }
  __forceinline vfloat4 operator |(const vfloat4& a, const vfloat4& b) { return _mm_or_ps(a,b); }
  __forceinline vfloat4 operator ^(const vfloat4& a, const vfloat4& b) { return _mm_xor_ps(a,b); }
  __forceinline vfloat4 operator ^(const vfloat4& a, const vint4&   b) { return _mm_xor_ps(a,_mm_castsi128_ps(b)); }

  __forceinline vfloat4 min(const vfloat4& a, const vfloat4& b) { return _mm_min_ps(a,b); }
  __forceinline vfloat4 min(const vfloat4& a, float          b) { return _mm_min_ps(a,vfloat4(b)); }
  __forceinline vfloat4 min(float          a, const vfloat4& b) { return _mm_min_ps(vfloat4(a),b); }

  __forceinline vfloat4 max(const vfloat4& a, const vfloat4& b) { return _mm_max_ps(a,b); }
  __forceinline vfloat4 max(const vfloat4& a, float          b) { return _mm_max_ps(a,vfloat4(b)); }
  __forceinline vfloat4 max(float          a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); }

#if defined(__SSE4_1__) || defined(__aarch64__)

  __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
    const vint4 ai = _mm_castps_si128(a);
    const vint4 bi = _mm_castps_si128(b);
    const vint4 ci = _mm_min_epi32(ai,bi);
    return _mm_castsi128_ps(ci);
  }

  __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) {
    const vint4 ai = _mm_castps_si128(a);
    const vint4 bi = _mm_castps_si128(b);
    const vint4 ci = _mm_max_epi32(ai,bi);
    return _mm_castsi128_ps(ci);
  }

  __forceinline vfloat4 minui(const vfloat4& a, const vfloat4& b) {
    const vint4 ai = _mm_castps_si128(a);
    const vint4 bi = _mm_castps_si128(b);
    const vint4 ci = _mm_min_epu32(ai,bi);
    return _mm_castsi128_ps(ci);
  }

  __forceinline vfloat4 maxui(const vfloat4& a, const vfloat4& b) {
    const vint4 ai = _mm_castps_si128(a);
    const vint4 bi = _mm_castps_si128(b);
    const vint4 ci = _mm_max_epu32(ai,bi);
    return _mm_castsi128_ps(ci);
  }
#else
  __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
    return min(a,b);
  }

  __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) {
    return max(a,b);
  }
#endif
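  // mini()/maxi() compare float bit patterns with integer min/max, which
  // matches the float ordering as long as both inputs are non-negative (for
  // IEEE-754, the order of non-negative floats equals the order of their bit
  // patterns). They avoid the NaN/signed-zero handling of min_ps/max_ps and
  // are intended for values known to be non-negative; minui()/maxui() are the
  // unsigned-compare variants.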
  ////////////////////////////////////////////////////////////////////////////////
  /// Ternary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX2__) || defined(__ARM_NEON)
  __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmadd_ps(a,b,c); }
  __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmsub_ps(a,b,c); }
  __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmadd_ps(a,b,c); }
  __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); }
#else
  __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return  a*b+c; }
  __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return  a*b-c; }
  __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c; }
  __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; }
#endif
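  // Naming follows the FMA intrinsics: madd = a*b+c, msub = a*b-c,
  // nmadd = -a*b+c, nmsub = -a*b-c. The fallback path rounds twice (after the
  // multiply and after the add), so it can differ from the fused versions in
  // the last ulp.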
  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4& operator +=(vfloat4& a, const vfloat4& b) { return a = a + b; }
  __forceinline vfloat4& operator +=(vfloat4& a, float          b) { return a = a + b; }

  __forceinline vfloat4& operator -=(vfloat4& a, const vfloat4& b) { return a = a - b; }
  __forceinline vfloat4& operator -=(vfloat4& a, float          b) { return a = a - b; }

  __forceinline vfloat4& operator *=(vfloat4& a, const vfloat4& b) { return a = a * b; }
  __forceinline vfloat4& operator *=(vfloat4& a, float          b) { return a = a * b; }

  __forceinline vfloat4& operator /=(vfloat4& a, const vfloat4& b) { return a = a / b; }
  __forceinline vfloat4& operator /=(vfloat4& a, float          b) { return a = a / b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); }
  __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); }
  __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); }
#if defined(__aarch64__)
  __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpge_ps (a, b); }
  __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpgt_ps (a, b); }
#else
  __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); }
  __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); }
#endif
  __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); }
#endif

  __forceinline vboolf4 operator ==(const vfloat4& a, float          b) { return a == vfloat4(b); }
  __forceinline vboolf4 operator ==(float          a, const vfloat4& b) { return vfloat4(a) == b; }

  __forceinline vboolf4 operator !=(const vfloat4& a, float          b) { return a != vfloat4(b); }
  __forceinline vboolf4 operator !=(float          a, const vfloat4& b) { return vfloat4(a) != b; }

  __forceinline vboolf4 operator < (const vfloat4& a, float          b) { return a <  vfloat4(b); }
  __forceinline vboolf4 operator < (float          a, const vfloat4& b) { return vfloat4(a) <  b; }

  __forceinline vboolf4 operator >=(const vfloat4& a, float          b) { return a >= vfloat4(b); }
  __forceinline vboolf4 operator >=(float          a, const vfloat4& b) { return vfloat4(a) >= b; }

  __forceinline vboolf4 operator > (const vfloat4& a, float          b) { return a >  vfloat4(b); }
  __forceinline vboolf4 operator > (float          a, const vfloat4& b) { return vfloat4(a) >  b; }

  __forceinline vboolf4 operator <=(const vfloat4& a, float          b) { return a <= vfloat4(b); }
  __forceinline vboolf4 operator <=(float          a, const vfloat4& b) { return vfloat4(a) <= b; }

  __forceinline vboolf4 eq(const vfloat4& a, const vfloat4& b) { return a == b; }
  __forceinline vboolf4 ne(const vfloat4& a, const vfloat4& b) { return a != b; }
  __forceinline vboolf4 lt(const vfloat4& a, const vfloat4& b) { return a <  b; }
  __forceinline vboolf4 ge(const vfloat4& a, const vfloat4& b) { return a >= b; }
  __forceinline vboolf4 gt(const vfloat4& a, const vfloat4& b) { return a >  b; }
  __forceinline vboolf4 le(const vfloat4& a, const vfloat4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a == b); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a != b); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a <  b); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a >= b); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a >  b); }
  __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a <= b); }
#endif

  template<int mask>
  __forceinline vfloat4 select(const vfloat4& t, const vfloat4& f)
  {
#if defined(__SSE4_1__)
    return _mm_blend_ps(f, t, mask);
#else
    return select(vboolf4(mask), t, f);
#endif
  }
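  // The compile-time select above uses an immediate blend: bit i of 'mask'
  // picks lane i from t, a cleared bit picks it from f. For example:
  //   select<0x5 /* 0b0101 */>(t,f) == { t[0], f[1], t[2], f[3] }
  // The sorting networks below rely on this to merge min/max results.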
  __forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) {
    return madd(t,b-a,a);
  }

  __forceinline bool isvalid(const vfloat4& v) {
    return all((v > vfloat4(-FLT_LARGE)) & (v < vfloat4(+FLT_LARGE)));
  }

  __forceinline bool is_finite(const vfloat4& a) {
    return all((a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
  }

  __forceinline bool is_finite(const vboolf4& valid, const vfloat4& a) {
    return all(valid, (a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Rounding Functions
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__)
  __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } // towards -inf
  __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } // towards +inf
  __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v);  } // towards 0
  __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } // to nearest, ties to even. NOTE(LTE): arm clang uses vrndnq, old gcc uses vrndqn?
#elif defined (__SSE4_1__)
  __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF    ); }
  __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF    ); }
  __forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO       ); }
  __forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
#else
  __forceinline vfloat4 floor(const vfloat4& a) { return vfloat4(floorf(a[0]),floorf(a[1]),floorf(a[2]),floorf(a[3])); }
  __forceinline vfloat4 ceil (const vfloat4& a) { return vfloat4(ceilf (a[0]),ceilf (a[1]),ceilf (a[2]),ceilf (a[3])); }
  __forceinline vfloat4 trunc(const vfloat4& a) { return vfloat4(truncf(a[0]),truncf(a[1]),truncf(a[2]),truncf(a[3])); }
  __forceinline vfloat4 round(const vfloat4& a) { return vfloat4(roundf(a[0]),roundf(a[1]),roundf(a[2]),roundf(a[3])); }
#endif
  __forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); }

  __forceinline vint4 floori(const vfloat4& a) {
#if defined(__aarch64__)
    return vcvtq_s32_f32(floor(a));
#elif defined(__SSE4_1__)
    return vint4(floor(a));
#else
    return vint4(a-vfloat4(0.5f));
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); }
  __forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); }

#if defined(__aarch64__)
  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& v) {
    return vreinterpretq_f32_u8(vqtbl1q_u8((uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
    return vreinterpretq_f32_u8(vqtbl2q_u8((uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
  }
#else
  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& v) {
    return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0)));
  }

  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
    return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
  }
#endif
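  // shuffle<i0,i1,i2,i3>(v) returns { v[i0], v[i1], v[i2], v[i3] }; the
  // two-operand form takes its lower two lanes from a and its upper two from
  // b, i.e. { a[i0], a[i1], b[i2], b[i3] }. The indices are passed to
  // _MM_SHUFFLE in reverse because that macro lists lanes from highest to
  // lowest. Example: shuffle<1,0,3,2>(v) swaps the elements of each pair.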
#if defined(__SSE3__) && !defined(__aarch64__)
  template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); }
  template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); }
  template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); }
#endif

  template<int i>
  __forceinline vfloat4 shuffle(const vfloat4& v) {
    return shuffle<i,i,i,i>(v);
  }

#if defined(__aarch64__)
  template<int i> __forceinline float extract(const vfloat4& a) { return a[i]; }
#else
  template<int i> __forceinline float extract   (const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); }
  template<>      __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
#endif

#if defined (__SSE4_1__) && !defined(__aarch64__)
  template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
  template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); }
  template<int dst>          __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
#else
  template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { vfloat4 c = a; c[dst&3] = b[src&3]; return c; }
  template<int dst>          __forceinline vfloat4 insert(const vfloat4& a, float b) { vfloat4 c = a; c[dst&3] = b; return c; }
#endif

  __forceinline float toScalar(const vfloat4& v) { return _mm_cvtss_f32(v); }

  __forceinline vfloat4 shift_right_1(const vfloat4& x) {
    return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
  }

#if defined (__AVX2__)
  __forceinline vfloat4 permute(const vfloat4& a, const __m128i& index) {
    return _mm_permutevar_ps(a,index);
  }

  __forceinline vfloat4 broadcast1f(const void* a) { return _mm_broadcast_ss((float*)a); }
#endif

#if defined(__AVX512VL__)
  template<int i>
  __forceinline vfloat4 align_shift_right(const vfloat4& a, const vfloat4& b) {
    return _mm_castsi128_ps(_mm_alignr_epi32(_mm_castps_si128(a), _mm_castps_si128(b), i));
  }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Sorting Network
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 sort_ascending(const vfloat4& v)
  {
    const vfloat4 a0 = v;
    const vfloat4 b0 = shuffle<1,0,3,2>(a0);
    const vfloat4 c0 = min(a0,b0);
    const vfloat4 d0 = max(a0,b0);
    const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vfloat4 b1 = shuffle<2,3,0,1>(a1);
    const vfloat4 c1 = min(a1,b1);
    const vfloat4 d1 = max(a1,b1);
    const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vfloat4 b2 = shuffle<0,2,1,3>(a2);
    const vfloat4 c2 = min(a2,b2);
    const vfloat4 d2 = max(a2,b2);
    const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }
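  // sort_ascending() is a branchless sorting network: stage 1 compare-
  // exchanges lanes (0,1) and (2,3), stage 2 lanes (0,2) and (1,3), stage 3
  // lanes (1,2) -- five comparators, optimal for four elements. Each stage is
  // a shuffle, a min, a max, and an immediate blend that routes the minimum
  // to the lower lane of each pair. sort_descending() below is the same
  // network with min and max exchanged.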
  __forceinline vfloat4 sort_descending(const vfloat4& v)
  {
    const vfloat4 a0 = v;
    const vfloat4 b0 = shuffle<1,0,3,2>(a0);
    const vfloat4 c0 = max(a0,b0);
    const vfloat4 d0 = min(a0,b0);
    const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vfloat4 b1 = shuffle<2,3,0,1>(a1);
    const vfloat4 c1 = max(a1,b1);
    const vfloat4 d1 = min(a1,b1);
    const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vfloat4 b2 = shuffle<0,2,1,3>(a2);
    const vfloat4 c2 = max(a2,b2);
    const vfloat4 d2 = min(a2,b2);
    const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Transpose
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2, vfloat4& c3)
  {
    vfloat4 l02 = unpacklo(r0,r2);
    vfloat4 h02 = unpackhi(r0,r2);
    vfloat4 l13 = unpacklo(r1,r3);
    vfloat4 h13 = unpackhi(r1,r3);
    c0 = unpacklo(l02,l13);
    c1 = unpackhi(l02,l13);
    c2 = unpacklo(h02,h13);
    c3 = unpackhi(h02,h13);
  }

  __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2)
  {
    vfloat4 l02 = unpacklo(r0,r2);
    vfloat4 h02 = unpackhi(r0,r2);
    vfloat4 l13 = unpacklo(r1,r3);
    vfloat4 h13 = unpackhi(r1,r3);
    c0 = unpacklo(l02,l13);
    c1 = unpackhi(l02,l13);
    c2 = unpacklo(h02,h13);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__)
  __forceinline vfloat4 vreduce_min(const vfloat4& v) { float h = vminvq_f32(v); return vdupq_n_f32(h); }
  __forceinline vfloat4 vreduce_max(const vfloat4& v) { float h = vmaxvq_f32(v); return vdupq_n_f32(h); }
  __forceinline vfloat4 vreduce_add(const vfloat4& v) { float h = vaddvq_f32(v); return vdupq_n_f32(h); }
#else
  __forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
  __forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
  __forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v) + v; return shuffle<2,3,0,1>(h) + h; }
#endif

#if defined(__aarch64__)
  __forceinline float reduce_min(const vfloat4& v) { return vminvq_f32(v); }
  __forceinline float reduce_max(const vfloat4& v) { return vmaxvq_f32(v); }
  __forceinline float reduce_add(const vfloat4& v) { return vaddvq_f32(v); }
#else
  __forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); }
  __forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); }
  __forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); }
#endif
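  // select_min()/select_max() below return the index (0..3) of the smallest/
  // largest element among the lanes enabled by 'valid'. Inactive lanes are
  // first forced to +inf/-inf so they cannot win the reduction; the
  // 'any(...) ? ... : valid' fallback covers the corner case where no lane
  // compares equal to the reduced value (e.g. NaN inputs), in which case the
  // first valid lane is returned.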
  __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
  {
    const vfloat4 a = select(valid,v,vfloat4(pos_inf));
    const vbool4 valid_min = valid & (a == vreduce_min(a));
    return bsf(movemask(any(valid_min) ? valid_min : valid));
  }

  __forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
  {
    const vfloat4 a = select(valid,v,vfloat4(neg_inf));
    const vbool4 valid_max = valid & (a == vreduce_max(a));
    return bsf(movemask(any(valid_max) ? valid_max : valid));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Euclidean Space Operators
  ////////////////////////////////////////////////////////////////////////////////

  // Sums over all four lanes, so for 3D vectors the w components must be zero.
  __forceinline float dot(const vfloat4& a, const vfloat4& b) {
    return reduce_add(a*b);
  }

  // 3D cross product in the xyz lanes (the w lane evaluates to a3*b3 - a3*b3 = 0).
  // Uses the identity cross(a,b) = shuffle(a*shuffle(b) - shuffle(a)*b), which
  // needs only three shuffles because the final rotation is applied to the
  // result rather than to both products.
  __forceinline vfloat4 cross(const vfloat4& a, const vfloat4& b)
  {
    const vfloat4 a0 = a;
    const vfloat4 b0 = shuffle<1,2,0,3>(b);
    const vfloat4 a1 = shuffle<1,2,0,3>(a);
    const vfloat4 b1 = b;
    return shuffle<1,2,0,3>(msub(a0,b0,a1*b1));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vfloat4& a) {
    return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
  }
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble