Path: blob/master/thirdparty/embree/common/simd/vdouble8_avx512.h
9912 views
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

// Map the generic SIMD type names onto this implementation's symbols for the
// duration of this header; the matching #undef block at the bottom restores them.
#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
  /* 8-wide AVX-512 64-bit double type */
  template<>
  struct vdouble<8>
  {
    ALIGNED_STRUCT_(64); // 64-byte alignment required for aligned __m512d loads/stores

    typedef vboold8 Bool; // corresponding 8-bit mask type

    enum { size = 8 }; // number of SIMD elements
    union { // data: SIMD register aliased with scalar lanes
      __m512d v;
      double i[8]; // named 'i' to match the integer vector types' convention
    };

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vdouble() {}
    __forceinline vdouble(const vdouble8& t) { v = t.v; }
    __forceinline vdouble8& operator =(const vdouble8& f) { v = f.v; return *this; }

    __forceinline vdouble(const __m512d& t) { v = t; }
    __forceinline operator __m512d() const { return v; }
    __forceinline operator __m256d() const { return _mm512_castpd512_pd256(v); } // lower 4 lanes

    // Broadcast a scalar into all 8 lanes.
    __forceinline vdouble(double i) {
      v = _mm512_set1_pd(i);
    }

    // Repeat the 4-element pattern (a,b,c,d) twice across the register.
    __forceinline vdouble(double a, double b, double c, double d) {
      v = _mm512_set4_pd(d,c,b,a);
    }

    // Per-lane initialization; a0 ends up in lane 0 (set_pd takes arguments
    // highest lane first, hence the reversed argument order).
    __forceinline vdouble(double a0, double a1, double a2, double a3,
                          double a4, double a5, double a6, double a7)
    {
      v = _mm512_set_pd(a7,a6,a5,a4,a3,a2,a1,a0);
    }


    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vdouble(ZeroTy) : v(_mm512_setzero_pd()) {}
    __forceinline vdouble(OneTy)  : v(_mm512_set1_pd(1)) {}
    // step = (0,1,2,...,7); reverse_step = (7,6,...,0) — note setr_pd takes lane 0 first.
    __forceinline vdouble(StepTy) : v(_mm512_set_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {}
    __forceinline vdouble(ReverseStepTy) : v(_mm512_setr_pd(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    // Non-temporal (streaming) store: bypasses the cache; ptr must be 64-byte aligned.
    static __forceinline void store_nt(void *__restrict__ ptr, const vdouble8& a) {
      _mm512_stream_pd((double*)ptr, a);
    }

    // Unaligned load.
    static __forceinline vdouble8 loadu(const void* addr) {
      return _mm512_loadu_pd((double*)addr);
    }

    // Aligned loads (addr must be 64-byte aligned).
    static __forceinline vdouble8 load(const vdouble8* addr) {
      return _mm512_load_pd((double*)addr);
    }

    static __forceinline vdouble8 load(const double* addr) {
      return _mm512_load_pd(addr);
    }

    // Aligned store.
    static __forceinline void store(void* ptr, const vdouble8& v) {
      _mm512_store_pd(ptr, v);
    }

    // Unaligned store.
    static __forceinline void storeu(void* ptr, const vdouble8& v) {
      _mm512_storeu_pd(ptr, v);
    }

    // Masked stores: only lanes with the mask bit set are written.
    static __forceinline void storeu(const vboold8& mask, double* ptr, const vdouble8& f) {
      _mm512_mask_storeu_pd(ptr, mask, f);
    }

    static __forceinline void store(const vboold8& mask, void* addr, const vdouble8& v2) {
      _mm512_mask_store_pd(addr, mask, v2);
    }

    // Pack the active (mask-selected) lanes of v contiguously into the low lanes;
    // inactive high lanes keep their original values from v.
    static __forceinline vdouble8 compact(const vboold8& mask, vdouble8& v) {
      return _mm512_mask_compress_pd(v, mask, v);
    }

    // Pack the active lanes of b into the low lanes; inactive lanes are taken from a.
    static __forceinline vdouble8 compact(const vboold8& mask, const vdouble8& a, vdouble8& b) {
      return _mm512_mask_compress_pd(a, mask, b);
    }

    // Broadcast a single double read from memory into all lanes.
    static __forceinline vdouble8 broadcast(const void* a) { return _mm512_set1_pd(*(double*)a); }

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline       double& operator [](size_t index)       { assert(index < 8); return i[index]; }
    __forceinline const double& operator [](size_t index) const { assert(index < 8); return i[index]; }

  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  // Bitwise reinterpretation between 64-bit integer and double vectors (no conversion).
  __forceinline vdouble8 asDouble(const vllong8&  a) { return _mm512_castsi512_pd(a); }
  __forceinline vllong8  asLLong (const vdouble8& a) { return _mm512_castpd_si512(a); }

  __forceinline vdouble8 operator +(const vdouble8& a) { return a; }
  // Negation via 0 - a (note: this maps -0.0 to +0.0, unlike a sign-bit flip).
  __forceinline vdouble8 operator -(const vdouble8& a) { return _mm512_sub_pd(_mm512_setzero_pd(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble8 operator +(const vdouble8& a, const vdouble8& b) { return _mm512_add_pd(a, b); }
  __forceinline vdouble8 operator +(const vdouble8& a, double          b) { return a + vdouble8(b); }
  __forceinline vdouble8 operator +(double          a, const vdouble8& b) { return vdouble8(a) + b; }

  __forceinline vdouble8 operator -(const vdouble8& a, const vdouble8& b) { return _mm512_sub_pd(a, b); }
  __forceinline vdouble8 operator -(const vdouble8& a, double          b) { return a - vdouble8(b); }
  __forceinline vdouble8 operator -(double          a, const vdouble8& b) { return vdouble8(a) - b; }

  __forceinline vdouble8 operator *(const vdouble8& a, const vdouble8& b) { return _mm512_mul_pd(a, b); }
  __forceinline vdouble8 operator *(const vdouble8& a, double          b) { return a * vdouble8(b); }
  __forceinline vdouble8 operator *(double          a, const vdouble8& b) { return vdouble8(a) * b; }

  // Bitwise operators act on the raw 64-bit patterns of the doubles.
  __forceinline vdouble8 operator &(const vdouble8& a, const vdouble8& b) { return _mm512_and_pd(a, b); }
  __forceinline vdouble8 operator &(const vdouble8& a, double          b) { return a & vdouble8(b); }
  __forceinline vdouble8 operator &(double          a, const vdouble8& b) { return vdouble8(a) & b; }

  __forceinline vdouble8 operator |(const vdouble8& a, const vdouble8& b) { return _mm512_or_pd(a, b); }
  __forceinline vdouble8 operator |(const vdouble8& a, double          b) { return a | vdouble8(b); }
  __forceinline vdouble8 operator |(double          a, const vdouble8& b) { return vdouble8(a) | b; }

  __forceinline vdouble8 operator ^(const vdouble8& a, const vdouble8& b) { return _mm512_xor_pd(a, b); }
  __forceinline vdouble8 operator ^(const vdouble8& a, double          b) { return a ^ vdouble8(b); }
  __forceinline vdouble8 operator ^(double          a, const vdouble8& b) { return vdouble8(a) ^ b; }

  // Shifts operate on the bit patterns as 64-bit integers.
  // operator>> uses an ARITHMETIC shift (srai: sign bit replicated); srl below is the logical variant.
  __forceinline vdouble8 operator <<(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), n)); }
  __forceinline vdouble8 operator >>(const vdouble8& a, const unsigned int n) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), n)); }

  // Per-lane variable shifts.
  __forceinline vdouble8 operator <<(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_sllv_epi64(_mm512_castpd_si512(a), n)); }
  __forceinline vdouble8 operator >>(const vdouble8& a, const vllong8& n) { return _mm512_castsi512_pd(_mm512_srav_epi64(_mm512_castpd_si512(a), n)); }

  // Explicit shift-left / shift-right-arithmetic / shift-right-logical.
  __forceinline vdouble8 sll (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_slli_epi64(_mm512_castpd_si512(a), b)); }
  __forceinline vdouble8 sra (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srai_epi64(_mm512_castpd_si512(a), b)); }
  __forceinline vdouble8 srl (const vdouble8& a, const unsigned int b) { return _mm512_castsi512_pd(_mm512_srli_epi64(_mm512_castpd_si512(a), b)); }

  __forceinline vdouble8 min(const vdouble8& a, const vdouble8& b) { return _mm512_min_pd(a, b); }
  __forceinline vdouble8 min(const vdouble8& a, double          b) { return min(a,vdouble8(b)); }
  __forceinline vdouble8 min(double          a, const vdouble8& b) { return min(vdouble8(a),b); }

  __forceinline vdouble8 max(const vdouble8& a, const vdouble8& b) { return _mm512_max_pd(a, b); }
  __forceinline vdouble8 max(const vdouble8& a, double          b) { return max(a,vdouble8(b)); }
  __forceinline vdouble8 max(double          a, const vdouble8& b) { return max(vdouble8(a),b); }

  // Masked arithmetic: result lane = a op b where mask is set, else c's lane.
  __forceinline vdouble8 mask_add(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_add_pd(c,mask,a,b); }
  __forceinline vdouble8 mask_sub(const vboold8& mask, vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_sub_pd(c,mask,a,b); }

  __forceinline vdouble8 mask_and(const vboold8& m,vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_and_pd(c,m,a,b); }
  __forceinline vdouble8 mask_or (const vboold8& m,vdouble8& c, const vdouble8& a, const vdouble8& b) { return _mm512_mask_or_pd(c,m,a,b); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Ternary Operators
  ////////////////////////////////////////////////////////////////////////////////

  // Fused multiply-add family: madd = a*b+c, msub = a*b-c, nmadd = -(a*b)+c, nmsub = -(a*b)-c.
  __forceinline vdouble8 madd (const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fmadd_pd(a,b,c); }
  __forceinline vdouble8 msub (const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fmsub_pd(a,b,c); }
  __forceinline vdouble8 nmadd(const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fnmadd_pd(a,b,c); }
  __forceinline vdouble8 nmsub(const vdouble8& a, const vdouble8& b, const vdouble8& c) { return _mm512_fnmsub_pd(a,b,c); }


  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vdouble8& operator +=(vdouble8& a, const vdouble8& b) { return a = a + b; }
  __forceinline vdouble8& operator +=(vdouble8& a, double          b) { return a = a + b; }

  __forceinline vdouble8& operator -=(vdouble8& a, const vdouble8& b) { return a = a - b; }
  __forceinline vdouble8& operator -=(vdouble8& a, double          b) { return a = a - b; }

  __forceinline vdouble8& operator *=(vdouble8& a, const vdouble8& b) { return a = a * b; }
  __forceinline vdouble8& operator *=(vdouble8& a, double          b) { return a = a * b; }

  __forceinline vdouble8& operator &=(vdouble8& a, const vdouble8& b) { return a = a & b; }
  __forceinline vdouble8& operator &=(vdouble8& a, double          b) { return a = a & b; }

  __forceinline vdouble8& operator |=(vdouble8& a, const vdouble8& b) { return a = a | b; }
  __forceinline vdouble8& operator |=(vdouble8& a, double          b) { return a = a | b; }

  // NOTE(review): shift-assign takes a double count (converted to unsigned int by
  // the underlying operator<</>>) — mirrors the integer vector types' signatures.
  __forceinline vdouble8& operator <<=(vdouble8& a, const double b) { return a = a << b; }
  __forceinline vdouble8& operator >>=(vdouble8& a, const double b) { return a = a >> b; }


  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

  // Comparisons yield an 8-bit lane mask (vboold8), not a vector.
  __forceinline vboold8 operator ==(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboold8 operator ==(const vdouble8& a, double          b) { return a == vdouble8(b); }
  __forceinline vboold8 operator ==(double          a, const vdouble8& b) { return vdouble8(a) == b; }

  __forceinline vboold8 operator !=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboold8 operator !=(const vdouble8& a, double          b) { return a != vdouble8(b); }
  __forceinline vboold8 operator !=(double          a, const vdouble8& b) { return vdouble8(a) != b; }

  __forceinline vboold8 operator < (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboold8 operator < (const vdouble8& a, double          b) { return a < vdouble8(b); }
  __forceinline vboold8 operator < (double          a, const vdouble8& b) { return vdouble8(a) < b; }

  __forceinline vboold8 operator >=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboold8 operator >=(const vdouble8& a, double          b) { return a >= vdouble8(b); }
  __forceinline vboold8 operator >=(double          a, const vdouble8& b) { return vdouble8(a) >= b; }

  __forceinline vboold8 operator > (const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboold8 operator > (const vdouble8& a, double          b) { return a > vdouble8(b); }
  __forceinline vboold8 operator > (double          a, const vdouble8& b) { return vdouble8(a) > b; }

  __forceinline vboold8 operator <=(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }
  __forceinline vboold8 operator <=(const vdouble8& a, double          b) { return a <= vdouble8(b); }
  __forceinline vboold8 operator <=(double          a, const vdouble8& b) { return vdouble8(a) <= b; }

  // Named comparison helpers, equivalent to the operators above.
  __forceinline vboold8 eq(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboold8 ne(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboold8 lt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboold8 ge(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboold8 gt(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboold8 le(const vdouble8& a, const vdouble8& b) { return _mm512_cmp_pd_mask(a,b,_MM_CMPINT_LE); }

  // Masked comparisons: the result mask is zero wherever the input mask is zero.
  __forceinline vboold8 eq(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_EQ); }
  __forceinline vboold8 ne(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_NE); }
  __forceinline vboold8 lt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LT); }
  __forceinline vboold8 ge(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GE); }
  __forceinline vboold8 gt(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_GT); }
  __forceinline vboold8 le(const vboold8 mask, const vdouble8& a, const vdouble8& b) { return _mm512_mask_cmp_pd_mask(mask,a,b,_MM_CMPINT_LE); }

  // Per-lane select: lane = m ? t : f (implemented as a masked or of t with itself into f).
  __forceinline vdouble8 select(const vboold8& m, const vdouble8& t, const vdouble8& f) {
    return _mm512_mask_or_pd(f,m,t,t);
  }

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  // Shuffle within each 128-bit pair: every even lane picks element i0 of its
  // pair, every odd lane picks element i1 (the imm8 holds one select bit per lane).
  template<int i0, int i1>
  __forceinline vdouble8 shuffle(const vdouble8& v) {
    return _mm512_permute_pd(v, (i1 << 7) | (i0 << 6) | (i1 << 5) | (i0 << 4) | (i1 << 3) | (i0 << 2) | (i1 << 1) | i0);
  }

  template<int i>
  __forceinline vdouble8 shuffle(const vdouble8& v) {
    return shuffle<i, i>(v);
  }

  // Shuffle within each 256-bit half using a 4-element pattern.
  template<int i0, int i1, int i2, int i3>
  __forceinline vdouble8 shuffle(const vdouble8& v) {
    return _mm512_permutex_pd(v, _MM_SHUFFLE(i3, i2, i1, i0));
  }

  // Shuffle at 128-bit-pair granularity across the two 256-bit halves.
  template<int i0, int i1>
  __forceinline vdouble8 shuffle4(const vdouble8& v) {
    return _mm512_shuffle_f64x2(v, v, _MM_SHUFFLE(i1*2+1, i1*2, i0*2+1, i0*2));
  }

  template<int i>
  __forceinline vdouble8 shuffle4(const vdouble8& v) {
    return shuffle4<i, i>(v);
  }

  // Concatenate a:b and shift right by i 64-bit elements (i in [0,8)).
  template<int i>
  __forceinline vdouble8 align_shift_right(const vdouble8& a, const vdouble8& b) {
    return _mm512_castsi512_pd(_mm512_alignr_epi64(_mm512_castpd_si512(a), _mm512_castpd_si512(b), i));
  }

  // Extract lane 0.
  __forceinline double toScalar(const vdouble8& v) {
    return _mm_cvtsd_f64(_mm512_castpd512_pd128(v));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  // Tree reductions: after vreduce_*, every lane holds the reduced value;
  // the scalar reduce_* wrappers below read it from lane 0.
  __forceinline vdouble8 vreduce_add2(vdouble8 x) {                      return x + shuffle<1,0,3,2>(x); }
  __forceinline vdouble8 vreduce_add4(vdouble8 x) { x = vreduce_add2(x); return x + shuffle<2,3,0,1>(x); }
  __forceinline vdouble8 vreduce_add (vdouble8 x) { x = vreduce_add4(x); return x + shuffle4<1,0>(x); }

  __forceinline vdouble8 vreduce_min2(vdouble8 x) {                      return min(x, shuffle<1,0,3,2>(x)); }
  __forceinline vdouble8 vreduce_min4(vdouble8 x) { x = vreduce_min2(x); return min(x, shuffle<2,3,0,1>(x)); }
  __forceinline vdouble8 vreduce_min (vdouble8 x) { x = vreduce_min4(x); return min(x, shuffle4<1,0>(x)); }

  __forceinline vdouble8 vreduce_max2(vdouble8 x) {                      return max(x, shuffle<1,0,3,2>(x)); }
  __forceinline vdouble8 vreduce_max4(vdouble8 x) { x = vreduce_max2(x); return max(x, shuffle<2,3,0,1>(x)); }
  __forceinline vdouble8 vreduce_max (vdouble8 x) { x = vreduce_max4(x); return max(x, shuffle4<1,0>(x)); }

  __forceinline double reduce_add(const vdouble8& v) { return toScalar(vreduce_add(v)); }
  __forceinline double reduce_min(const vdouble8& v) { return toScalar(vreduce_min(v)); }
  __forceinline double reduce_max(const vdouble8& v) { return toScalar(vreduce_max(v)); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Permutations
  ////////////////////////////////////////////////////////////////////////////////

  // Arbitrary lane permutation: result lane k = v[index[k]].
  __forceinline vdouble8 permute(const vdouble8& v, const vllong8& index) {
    return _mm512_permutexvar_pd(index, v);
  }

  // Reverse lane order (reverse_step is presumably the (7,...,0) index constant
  // declared alongside the other SIMD constants — defined outside this header).
  __forceinline vdouble8 reverse(const vdouble8& a) {
    return permute(a, vllong8(reverse_step));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vdouble8& v)
  {
    cout << "<" << v[0];
    for (size_t i=1; i<8; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble