Path: blob/master/thirdparty/embree/common/simd/vllong4_avx2.h
// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
  /* 4-wide AVX2 64-bit long long type */
  template<>
  struct vllong<4>
  {
    ALIGNED_STRUCT_(32);

    typedef vboold4 Bool;

    enum  { size = 4 }; // number of SIMD elements
    union {             // data
      __m256i v;
      long long i[4];
    };

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vllong() {}
    __forceinline vllong(const vllong4& t) { v = t.v; }
    __forceinline vllong4& operator =(const vllong4& f) { v = f.v; return *this; }

    __forceinline vllong(const __m256i& t) { v = t; }
    __forceinline operator __m256i() const { return v; }
    __forceinline operator __m256d() const { return _mm256_castsi256_pd(v); }

    __forceinline vllong(long long i) {
      v = _mm256_set1_epi64x(i);
    }

    __forceinline vllong(long long a, long long b, long long c, long long d) {
      v = _mm256_set_epi64x(d,c,b,a);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vllong(ZeroTy)        : v(_mm256_setzero_si256()) {}
    __forceinline vllong(OneTy)         : v(_mm256_set1_epi64x(1)) {}
    __forceinline vllong(StepTy)        : v(_mm256_set_epi64x(3,2,1,0)) {}
    __forceinline vllong(ReverseStepTy) : v(_mm256_set_epi64x(0,1,2,3)) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline void store_nt(void* __restrict__ ptr, const vllong4& a) {
      _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(a));
    }

    static __forceinline vllong4 loadu(const void* addr)
    {
      return _mm256_loadu_si256((__m256i*)addr);
    }

    static __forceinline vllong4 load(const vllong4* addr) {
      return _mm256_load_si256((__m256i*)addr);
    }

    static __forceinline vllong4 load(const long long* addr) {
      return _mm256_load_si256((__m256i*)addr);
    }

    static __forceinline void store(void* ptr, const vllong4& v) {
      _mm256_store_si256((__m256i*)ptr,v);
    }

    static __forceinline void storeu(void* ptr, const vllong4& v) {
      _mm256_storeu_si256((__m256i*)ptr,v);
    }

    static __forceinline void storeu(const vboold4& mask, long long* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
      _mm256_mask_storeu_epi64(ptr,mask,f);
#else
      _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
    }

    static __forceinline void store(const vboold4& mask, void* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
      _mm256_mask_store_epi64(ptr,mask,f);
#else
      _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline       long long& operator [](size_t index)       { assert(index < 4); return i[index]; }
    __forceinline const long long& operator [](size_t index) const { assert(index < 4); return i[index]; }

  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Select
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4 select(const vboold4& m, const vllong4& t, const vllong4& f) {
#if defined(__AVX512VL__)
    return _mm256_mask_blend_epi64(m, f, t);
#else
    return _mm256_castpd_si256(_mm256_blendv_pd(_mm256_castsi256_pd(f), _mm256_castsi256_pd(t), m));
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboold4 asBool(const vllong4& a) { return _mm256_movepi64_mask(a); }
#else
  __forceinline vboold4 asBool(const vllong4& a) { return _mm256_castsi256_pd(a); }
#endif

  __forceinline vllong4 operator +(const vllong4& a) { return a; }
  __forceinline vllong4 operator -(const vllong4& a) { return _mm256_sub_epi64(_mm256_setzero_si256(), a); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4 operator +(const vllong4& a, const vllong4& b) { return _mm256_add_epi64(a, b); }
  __forceinline vllong4 operator +(const vllong4& a, long long      b) { return a + vllong4(b); }
  __forceinline vllong4 operator +(long long      a, const vllong4& b) { return vllong4(a) + b; }

  __forceinline vllong4 operator -(const vllong4& a, const vllong4& b) { return _mm256_sub_epi64(a, b); }
  __forceinline vllong4 operator -(const vllong4& a, long long      b) { return a - vllong4(b); }
  __forceinline vllong4 operator -(long long      a, const vllong4& b) { return vllong4(a) - b; }

  /* only low 32bit part */
  __forceinline vllong4 operator *(const vllong4& a, const vllong4& b) { return _mm256_mul_epi32(a, b); }
  __forceinline vllong4 operator *(const vllong4& a, long long      b) { return a * vllong4(b); }
  __forceinline vllong4 operator *(long long      a, const vllong4& b) { return vllong4(a) * b; }

  __forceinline vllong4 operator &(const vllong4& a, const vllong4& b) { return _mm256_and_si256(a, b); }
  __forceinline vllong4 operator &(const vllong4& a, long long      b) { return a & vllong4(b); }
  __forceinline vllong4 operator &(long long      a, const vllong4& b) { return vllong4(a) & b; }

  __forceinline vllong4 operator |(const vllong4& a, const vllong4& b) { return _mm256_or_si256(a, b); }
  __forceinline vllong4 operator |(const vllong4& a, long long      b) { return a | vllong4(b); }
  __forceinline vllong4 operator |(long long      a, const vllong4& b) { return vllong4(a) | b; }

  __forceinline vllong4 operator ^(const vllong4& a, const vllong4& b) { return _mm256_xor_si256(a, b); }
  __forceinline vllong4 operator ^(const vllong4& a, long long      b) { return a ^ vllong4(b); }
  __forceinline vllong4 operator ^(long long      a, const vllong4& b) { return vllong4(a) ^ b; }

  __forceinline vllong4 operator <<(const vllong4& a, long long n) { return _mm256_slli_epi64(a, (int)n); }
  //__forceinline vllong4 operator >>(const vllong4& a, long long n) { return _mm256_srai_epi64(a, n); }

  __forceinline vllong4 operator <<(const vllong4& a, const vllong4& n) { return _mm256_sllv_epi64(a, n); }
  //__forceinline vllong4 operator >>(const vllong4& a, const vllong4& n) { return _mm256_srav_epi64(a, n); }
  //__forceinline vllong4 sra(const vllong4& a, long long b) { return _mm256_srai_epi64(a, b); }

  __forceinline vllong4 srl(const vllong4& a, long long b) { return _mm256_srli_epi64(a, (int)b); }

  //__forceinline vllong4 min(const vllong4& a, const vllong4& b) { return _mm256_min_epi64(a, b); }
  //__forceinline vllong4 min(const vllong4& a, long long      b) { return min(a,vllong4(b)); }
  //__forceinline vllong4 min(long long      a, const vllong4& b) { return min(vllong4(a),b); }

  //__forceinline vllong4 max(const vllong4& a, const vllong4& b) { return _mm256_max_epi64(a, b); }
  //__forceinline vllong4 max(const vllong4& a, long long      b) { return max(a,vllong4(b)); }
  //__forceinline vllong4 max(long long      a, const vllong4& b) { return max(vllong4(a),b); }

#if defined(__AVX512VL__)
  __forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_and_epi64(c,m,a,b); }
  __forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_or_epi64(c,m,a,b); }
#else
  __forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a & b, c); }
  __forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a | b, c); }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4& operator +=(vllong4& a, const vllong4& b) { return a = a + b; }
  __forceinline vllong4& operator +=(vllong4& a, long long      b) { return a = a + b; }

  __forceinline vllong4& operator -=(vllong4& a, const vllong4& b) { return a = a - b; }
  __forceinline vllong4& operator -=(vllong4& a, long long      b) { return a = a - b; }

  __forceinline vllong4& operator *=(vllong4& a, const vllong4& b) { return a = a * b; }
  __forceinline vllong4& operator *=(vllong4& a, long long      b) { return a = a * b; }

  __forceinline vllong4& operator &=(vllong4& a, const vllong4& b) { return a = a & b; }
  __forceinline vllong4& operator &=(vllong4& a, long long      b) { return a = a & b; }

  __forceinline vllong4& operator |=(vllong4& a, const vllong4& b) { return a = a | b; }
  __forceinline vllong4& operator |=(vllong4& a, long long      b) { return a = a | b; }

  __forceinline vllong4& operator <<=(vllong4& a, long long b) { return a = a << b; }
  //__forceinline vllong4& operator >>=(vllong4& a, long long b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LE); }
#else
  __forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmpeq_epi64(a,b); }
  __forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return !(a == b); }
  __forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(a,b); }
  __forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(b,a); }
  __forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return !(a < b); }
  __forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return !(a > b); }
#endif

  __forceinline vboold4 operator ==(const vllong4& a, long long      b) { return a == vllong4(b); }
  __forceinline vboold4 operator ==(long long      a, const vllong4& b) { return vllong4(a) == b; }

  __forceinline vboold4 operator !=(const vllong4& a, long long      b) { return a != vllong4(b); }
  __forceinline vboold4 operator !=(long long      a, const vllong4& b) { return vllong4(a) != b; }

  __forceinline vboold4 operator > (const vllong4& a, long long      b) { return a > vllong4(b); }
  __forceinline vboold4 operator > (long long      a, const vllong4& b) { return vllong4(a) > b; }

  __forceinline vboold4 operator < (const vllong4& a, long long      b) { return a < vllong4(b); }
  __forceinline vboold4 operator < (long long      a, const vllong4& b) { return vllong4(a) < b; }

  __forceinline vboold4 operator >=(const vllong4& a, long long      b) { return a >= vllong4(b); }
  __forceinline vboold4 operator >=(long long      a, const vllong4& b) { return vllong4(a) >= b; }

  __forceinline vboold4 operator <=(const vllong4& a, long long      b) { return a <= vllong4(b); }
  __forceinline vboold4 operator <=(long long      a, const vllong4& b) { return vllong4(a) <= b; }

  __forceinline vboold4 eq(const vllong4& a, const vllong4& b) { return a == b; }
  __forceinline vboold4 ne(const vllong4& a, const vllong4& b) { return a != b; }
  __forceinline vboold4 lt(const vllong4& a, const vllong4& b) { return a <  b; }
  __forceinline vboold4 ge(const vllong4& a, const vllong4& b) { return a >= b; }
  __forceinline vboold4 gt(const vllong4& a, const vllong4& b) { return a >  b; }
  __forceinline vboold4 le(const vllong4& a, const vllong4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a == b); }
  __forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a != b); }
  __forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <  b); }
  __forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >= b); }
  __forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >  b); }
  __forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  template<int i0, int i1>
  __forceinline vllong4 shuffle(const vllong4& v) {
    return _mm256_castpd_si256(_mm256_permute_pd(_mm256_castsi256_pd(v), (i1 << 3) | (i0 << 2) | (i1 << 1) | i0));
  }

  template<int i>
  __forceinline vllong4 shuffle(const vllong4& v) {
    return shuffle<i, i>(v);
  }

  template<int i0, int i1>
  __forceinline vllong4 shuffle2(const vllong4& v) {
    return _mm256_castpd_si256(_mm256_permute2f128_pd(_mm256_castsi256_pd(v), _mm256_castsi256_pd(v), (i1 << 4) | i0));
  }

  __forceinline long long toScalar(const vllong4& v) {
    return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
  }

#if defined(__AVX512VL__)
  __forceinline vllong4 permute(const vllong4& a, const __m256i& index) {
    // workaround for GCC 7.x
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
    return _mm256_permutex2var_epi64(a,index,a);
#else
    return _mm256_permutexvar_epi64(index,a);
#endif
  }

  __forceinline vllong4 permutex2var(const vllong4& index, const vllong4& a, const vllong4& b) {
    return _mm256_permutex2var_epi64(a,index,b);
  }

#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong4 vreduce_and2(const vllong4& x) { return x & shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_and (const vllong4& y) { const vllong4 x = vreduce_and2(y); return x & shuffle2<1,0>(x); }

  __forceinline vllong4 vreduce_or2(const vllong4& x) { return x | shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_or (const vllong4& y) { const vllong4 x = vreduce_or2(y); return x | shuffle2<1,0>(x); }

  __forceinline vllong4 vreduce_add2(const vllong4& x) { return x + shuffle<1,0>(x); }
  __forceinline vllong4 vreduce_add (const vllong4& y) { const vllong4 x = vreduce_add2(y); return x + shuffle2<1,0>(x); }

  __forceinline long long reduce_add(const vllong4& a) { return toScalar(vreduce_add(a)); }
  __forceinline long long reduce_or (const vllong4& a) { return toScalar(vreduce_or(a)); }
  __forceinline long long reduce_and(const vllong4& a) { return toScalar(vreduce_and(a)); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vllong4& v)
  {
    cout << "<" << v[0];
    for (size_t i=1; i<4; i++) cout << ", " << v[i];
    cout << ">";
    return cout;
  }
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble
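
A note for readers new to the pattern in the non-AVX-512VL `select` path: AVX2 has no 64-bit integer blend driven by a vector mask, so the header casts to `__m256d` and uses `_mm256_blendv_pd`, which picks each lane by the sign (most significant) bit of the corresponding mask lane; an all-ones 64-bit integer mask therefore still selects correctly after the cast. Below is a minimal standalone sketch of the same technique in plain AVX2 intrinsics, outside Embree; the `main` harness and test values are illustrative only, not part of the library.

// Standalone demo of the cast-and-blendv select used in the fallback path.
// Compile with: g++ -mavx2 select_demo.cpp
#include <immintrin.h>
#include <cstdio>

int main()
{
  __m256i t = _mm256_set_epi64x(40, 30, 20, 10); // "true" values  (lane 0 = 10)
  __m256i f = _mm256_set_epi64x( 4,  3,  2,  1); // "false" values (lane 0 = 1)
  __m256i m = _mm256_set_epi64x( 0, -1,  0, -1); // take t in lanes 0 and 2
  // blendv_pd selects per lane by the mask's sign bit, so a full-width
  // 64-bit integer mask keeps its meaning across the cast to double
  __m256i r = _mm256_castpd_si256(
      _mm256_blendv_pd(_mm256_castsi256_pd(f),
                       _mm256_castsi256_pd(t),
                       _mm256_castsi256_pd(m)));
  long long out[4];
  _mm256_storeu_si256((__m256i*)out, r);
  std::printf("%lld %lld %lld %lld\n", out[0], out[1], out[2], out[3]); // 10 2 30 4
  return 0;
}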
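
The reductions follow the usual log2-width scheme: `shuffle<1,0>` (immediate 0b0101 to `_mm256_permute_pd`) swaps the two elements within each 128-bit half, `shuffle2<1,0>` (immediate 1 to `_mm256_permute2f128_pd`) swaps the halves, and after one combine at each level every lane holds the full result, which `toScalar` extracts from lane 0. Here is a standalone re-derivation of `reduce_add` under that reading; the helper name `hadd_epi64` is mine, not Embree's.

// Standalone horizontal sum of four 64-bit lanes; compile with -mavx2.
#include <immintrin.h>
#include <cstdio>

static long long hadd_epi64(__m256i x)
{
  // step 1: swap elements within each 128-bit half (imm 0b0101), then add
  __m256i s1 = _mm256_castpd_si256(_mm256_permute_pd(_mm256_castsi256_pd(x), 0x5));
  __m256i p2 = _mm256_add_epi64(x, s1);
  // step 2: swap the 128-bit halves (imm 1), then add; every lane now holds the total
  __m256i s2 = _mm256_castpd_si256(
      _mm256_permute2f128_pd(_mm256_castsi256_pd(p2), _mm256_castsi256_pd(p2), 1));
  __m256i p4 = _mm256_add_epi64(p2, s2);
  // extract lane 0, as toScalar does
  return _mm_cvtsi128_si64(_mm256_castsi256_si128(p4));
}

int main()
{
  __m256i x = _mm256_set_epi64x(4, 3, 2, 1);
  std::printf("%lld\n", hadd_epi64(x)); // prints 10
  return 0;
}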