Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
godotengine
GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/embree/common/simd/vllong4_avx2.h
9912 views
1
// Copyright 2009-2021 Intel Corporation
2
// SPDX-License-Identifier: Apache-2.0
3
4
#pragma once
5
6
#define vboolf vboolf_impl
7
#define vboold vboold_impl
8
#define vint vint_impl
9
#define vuint vuint_impl
10
#define vllong vllong_impl
11
#define vfloat vfloat_impl
12
#define vdouble vdouble_impl
13
14
namespace embree
15
{
16
/* 4-wide AVX2 64-bit long long type */
17
template<>
struct vllong<4>
{
  ALIGNED_STRUCT_(32); // force 32-byte alignment, required by the aligned load/store ops below

  typedef vboold4 Bool; // matching 4-wide 64-bit boolean/mask type

  enum { size = 4 }; // number of SIMD elements
  union { // data
    __m256i v;      // the SIMD register
    long long i[4]; // scalar view of the same 256 bits (used by operator[])
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Constructors, Assignment & Cast Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong() {} // leaves the value uninitialized (deliberate for hot paths)
  __forceinline vllong(const vllong4& t) { v = t.v; }
  __forceinline vllong4& operator =(const vllong4& f) { v = f.v; return *this; }

  // implicit conversions from/to the raw AVX register types (bit-casts only, no value conversion)
  __forceinline vllong(const __m256i& t) { v = t; }
  __forceinline operator __m256i() const { return v; }
  __forceinline operator __m256d() const { return _mm256_castsi256_pd(v); }

  // broadcast one scalar into all 4 lanes
  __forceinline vllong(long long i) {
    v = _mm256_set1_epi64x(i);
  }

  // per-lane construction; _mm256_set_epi64x takes arguments high-to-low,
  // hence the reversed order so that lane 0 receives 'a'
  __forceinline vllong(long long a, long long b, long long c, long long d) {
    v = _mm256_set_epi64x(d,c,b,a);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Constants
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vllong(ZeroTy) : v(_mm256_setzero_si256()) {}
  __forceinline vllong(OneTy) : v(_mm256_set1_epi64x(1)) {}
  __forceinline vllong(StepTy) : v(_mm256_set_epi64x(3,2,1,0)) {}        // lanes = (0,1,2,3)
  __forceinline vllong(ReverseStepTy) : v(_mm256_set_epi64x(0,1,2,3)) {} // lanes = (3,2,1,0)

  ////////////////////////////////////////////////////////////////////////////////
  /// Loads and Stores
  ////////////////////////////////////////////////////////////////////////////////

  // non-temporal (streaming) store; the float cast is a bit-cast only
  static __forceinline void store_nt(void* __restrict__ ptr, const vllong4& a) {
    _mm256_stream_ps((float*)ptr,_mm256_castsi256_ps(a));
  }

  // unaligned load
  static __forceinline vllong4 loadu(const void* addr)
  {
    return _mm256_loadu_si256((__m256i*)addr);
  }

  // aligned loads (addr must be 32-byte aligned)
  static __forceinline vllong4 load(const vllong4* addr) {
    return _mm256_load_si256((__m256i*)addr);
  }

  static __forceinline vllong4 load(const long long* addr) {
    return _mm256_load_si256((__m256i*)addr);
  }

  // aligned store (ptr must be 32-byte aligned)
  static __forceinline void store(void* ptr, const vllong4& v) {
    _mm256_store_si256((__m256i*)ptr,v);
  }

  // unaligned store
  static __forceinline void storeu(void* ptr, const vllong4& v) {
    _mm256_storeu_si256((__m256i*)ptr,v);
  }

  // masked unaligned store: only lanes selected by 'mask' are written
  static __forceinline void storeu(const vboold4& mask, long long* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
    _mm256_mask_storeu_epi64(ptr,mask,f);
#else
    // AVX2 fallback: maskstore in the double domain (bit-casts only)
    _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
  }

  // masked aligned store: only lanes selected by 'mask' are written
  static __forceinline void store(const vboold4& mask, void* ptr, const vllong4& f) {
#if defined(__AVX512VL__)
    _mm256_mask_store_epi64(ptr,mask,f);
#else
    _mm256_maskstore_pd((double*)ptr,mask,_mm256_castsi256_pd(f));
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Array Access
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline long long& operator [](size_t index) { assert(index < 4); return i[index]; }
  __forceinline const long long& operator [](size_t index) const { assert(index < 4); return i[index]; }

};
114
115
////////////////////////////////////////////////////////////////////////////////
116
/// Select
117
////////////////////////////////////////////////////////////////////////////////
118
119
// per-lane select: result[i] = m[i] ? t[i] : f[i]
__forceinline vllong4 select(const vboold4& m, const vllong4& t, const vllong4& f) {
#if defined(__AVX512VL__)
  return _mm256_mask_blend_epi64(m, f, t);
#else
  // AVX2 fallback: blend in the double domain; blendv_pd selects by the
  // sign (top) bit of each 64-bit mask lane (the casts are bit-casts only)
  return _mm256_castpd_si256(_mm256_blendv_pd(_mm256_castsi256_pd(f), _mm256_castsi256_pd(t), m));
#endif
}
126
127
////////////////////////////////////////////////////////////////////////////////
128
/// Unary Operators
129
////////////////////////////////////////////////////////////////////////////////
130
131
// reinterpret each 64-bit lane as a boolean mask
#if defined(__AVX512VL__)
__forceinline vboold4 asBool(const vllong4& a) { return _mm256_movepi64_mask(a); } // sign bit of each lane -> k-mask bit
#else
__forceinline vboold4 asBool(const vllong4& a) { return _mm256_castsi256_pd(a); } // bit-cast; mask is the whole lane
#endif

__forceinline vllong4 operator +(const vllong4& a) { return a; }
__forceinline vllong4 operator -(const vllong4& a) { return _mm256_sub_epi64(_mm256_setzero_si256(), a); } // 0 - a
139
140
////////////////////////////////////////////////////////////////////////////////
141
/// Binary Operators
142
////////////////////////////////////////////////////////////////////////////////
143
144
__forceinline vllong4 operator +(const vllong4& a, const vllong4& b) { return _mm256_add_epi64(a, b); }
__forceinline vllong4 operator +(const vllong4& a, long long b) { return a + vllong4(b); } // broadcast rhs
__forceinline vllong4 operator +(long long a, const vllong4& b) { return vllong4(a) + b; } // broadcast lhs

__forceinline vllong4 operator -(const vllong4& a, const vllong4& b) { return _mm256_sub_epi64(a, b); }
__forceinline vllong4 operator -(const vllong4& a, long long b) { return a - vllong4(b); }
__forceinline vllong4 operator -(long long a, const vllong4& b) { return vllong4(a) - b; }

/* only low 32bit part */
// WARNING: _mm256_mul_epi32 sign-extends and multiplies only the low 32 bits
// of each 64-bit lane; the upper 32 bits of the inputs are ignored.
__forceinline vllong4 operator *(const vllong4& a, const vllong4& b) { return _mm256_mul_epi32(a, b); }
__forceinline vllong4 operator *(const vllong4& a, long long b) { return a * vllong4(b); }
__forceinline vllong4 operator *(long long a, const vllong4& b) { return vllong4(a) * b; }

__forceinline vllong4 operator &(const vllong4& a, const vllong4& b) { return _mm256_and_si256(a, b); }
__forceinline vllong4 operator &(const vllong4& a, long long b) { return a & vllong4(b); }
__forceinline vllong4 operator &(long long a, const vllong4& b) { return vllong4(a) & b; }

__forceinline vllong4 operator |(const vllong4& a, const vllong4& b) { return _mm256_or_si256(a, b); }
__forceinline vllong4 operator |(const vllong4& a, long long b) { return a | vllong4(b); }
__forceinline vllong4 operator |(long long a, const vllong4& b) { return vllong4(a) | b; }

__forceinline vllong4 operator ^(const vllong4& a, const vllong4& b) { return _mm256_xor_si256(a, b); }
__forceinline vllong4 operator ^(const vllong4& a, long long b) { return a ^ vllong4(b); }
__forceinline vllong4 operator ^(long long a, const vllong4& b) { return vllong4(a) ^ b; }

__forceinline vllong4 operator <<(const vllong4& a, long long n) { return _mm256_slli_epi64(a, (int)n); } // same count for all lanes
//__forceinline vllong4 operator >>(const vllong4& a, long long n) { return _mm256_srai_epi64(a, n); }

__forceinline vllong4 operator <<(const vllong4& a, const vllong4& n) { return _mm256_sllv_epi64(a, n); } // per-lane counts
//__forceinline vllong4 operator >>(const vllong4& a, const vllong4& n) { return _mm256_srav_epi64(a, n); }
//__forceinline vllong4 sra(const vllong4& a, long long b) { return _mm256_srai_epi64(a, b); }

// logical (zero-fill) right shift, same count for all lanes
__forceinline vllong4 srl(const vllong4& a, long long b) { return _mm256_srli_epi64(a, (int)b); }
177
178
//__forceinline vllong4 min(const vllong4& a, const vllong4& b) { return _mm256_min_epi64(a, b); }
179
//__forceinline vllong4 min(const vllong4& a, long long b) { return min(a,vllong4(b)); }
180
//__forceinline vllong4 min(long long a, const vllong4& b) { return min(vllong4(a),b); }
181
182
//__forceinline vllong4 max(const vllong4& a, const vllong4& b) { return _mm256_max_epi64(a, b); }
183
//__forceinline vllong4 max(const vllong4& a, long long b) { return max(a,vllong4(b)); }
184
//__forceinline vllong4 max(long long a, const vllong4& b) { return max(vllong4(a),b); }
185
186
// masked bitwise ops: lanes selected by 'm' get (a OP b), other lanes keep 'c'
#if defined(__AVX512VL__)
__forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_and_epi64(c,m,a,b); }
__forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return _mm256_mask_or_epi64(c,m,a,b); }
#else
// AVX2 fallback: compute unconditionally, then blend with the old value
__forceinline vllong4 mask_and(const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a & b, c); }
__forceinline vllong4 mask_or (const vboold4& m, const vllong4& c, const vllong4& a, const vllong4& b) { return select(m, a | b, c); }
#endif
193
194
////////////////////////////////////////////////////////////////////////////////
195
/// Assignment Operators
196
////////////////////////////////////////////////////////////////////////////////
197
198
// compound assignments, each implemented in terms of the matching binary operator
__forceinline vllong4& operator +=(vllong4& a, const vllong4& b) { a = a + b; return a; }
__forceinline vllong4& operator +=(vllong4& a, long long b) { a = a + b; return a; }

__forceinline vllong4& operator -=(vllong4& a, const vllong4& b) { a = a - b; return a; }
__forceinline vllong4& operator -=(vllong4& a, long long b) { a = a - b; return a; }

__forceinline vllong4& operator *=(vllong4& a, const vllong4& b) { a = a * b; return a; } // inherits operator*'s low-32-bit semantics
__forceinline vllong4& operator *=(vllong4& a, long long b) { a = a * b; return a; }

__forceinline vllong4& operator &=(vllong4& a, const vllong4& b) { a = a & b; return a; }
__forceinline vllong4& operator &=(vllong4& a, long long b) { a = a & b; return a; }

__forceinline vllong4& operator |=(vllong4& a, const vllong4& b) { a = a | b; return a; }
__forceinline vllong4& operator |=(vllong4& a, long long b) { a = a | b; return a; }

__forceinline vllong4& operator <<=(vllong4& a, long long b) { a = a << b; return a; }
// no operator >>= : the matching operator>> is not provided for this type
215
216
////////////////////////////////////////////////////////////////////////////////
217
/// Comparison Operators
218
////////////////////////////////////////////////////////////////////////////////
219
220
// vector comparisons; the result type vboold4 is a k-mask under AVX-512VL
// and a full 64-bit-per-lane mask otherwise
#if defined(__AVX512VL__)
__forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_EQ); }
__forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_NE); }
__forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LT); }
__forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GE); }
__forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_GT); }
__forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return _mm256_cmp_epi64_mask(a,b,_MM_CMPINT_LE); }
#else
// AVX2 only provides eq/gt (signed); the rest are derived by negation/swap
__forceinline vboold4 operator ==(const vllong4& a, const vllong4& b) { return _mm256_cmpeq_epi64(a,b); }
__forceinline vboold4 operator !=(const vllong4& a, const vllong4& b) { return !(a == b); }
__forceinline vboold4 operator > (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(a,b); }
__forceinline vboold4 operator < (const vllong4& a, const vllong4& b) { return _mm256_cmpgt_epi64(b,a); } // a<b  ==  b>a
__forceinline vboold4 operator >=(const vllong4& a, const vllong4& b) { return !(a < b); }
__forceinline vboold4 operator <=(const vllong4& a, const vllong4& b) { return !(a > b); }
#endif
235
236
// scalar variants: broadcast the scalar operand, then compare vector-vs-vector
__forceinline vboold4 operator ==(const vllong4& a, long long b) { const vllong4 vb(b); return a == vb; }
__forceinline vboold4 operator ==(long long a, const vllong4& b) { const vllong4 va(a); return va == b; }

__forceinline vboold4 operator !=(const vllong4& a, long long b) { const vllong4 vb(b); return a != vb; }
__forceinline vboold4 operator !=(long long a, const vllong4& b) { const vllong4 va(a); return va != b; }

__forceinline vboold4 operator > (const vllong4& a, long long b) { const vllong4 vb(b); return a > vb; }
__forceinline vboold4 operator > (long long a, const vllong4& b) { const vllong4 va(a); return va > b; }

__forceinline vboold4 operator < (const vllong4& a, long long b) { const vllong4 vb(b); return a < vb; }
__forceinline vboold4 operator < (long long a, const vllong4& b) { const vllong4 va(a); return va < b; }

__forceinline vboold4 operator >=(const vllong4& a, long long b) { const vllong4 vb(b); return a >= vb; }
__forceinline vboold4 operator >=(long long a, const vllong4& b) { const vllong4 va(a); return va >= b; }

__forceinline vboold4 operator <=(const vllong4& a, long long b) { const vllong4 vb(b); return a <= vb; }
__forceinline vboold4 operator <=(long long a, const vllong4& b) { const vllong4 va(a); return va <= b; }

// named aliases for the comparison operators
__forceinline vboold4 eq(const vllong4& a, const vllong4& b) { return a == b; }
__forceinline vboold4 ne(const vllong4& a, const vllong4& b) { return a != b; }
__forceinline vboold4 lt(const vllong4& a, const vllong4& b) { return a <  b; }
__forceinline vboold4 ge(const vllong4& a, const vllong4& b) { return a >= b; }
__forceinline vboold4 gt(const vllong4& a, const vllong4& b) { return a >  b; }
__forceinline vboold4 le(const vllong4& a, const vllong4& b) { return a <= b; }
260
261
// masked comparisons: lanes not selected by 'mask' compare as false
#if defined(__AVX512VL__)
__forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_EQ); }
__forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_NE); }
__forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LT); }
__forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GE); }
__forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_GT); }
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return _mm256_mask_cmp_epi64_mask(mask, a, b, _MM_CMPINT_LE); }
#else
// AVX2 fallback: unmasked compare, then AND with the mask
__forceinline vboold4 eq(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a == b); }
__forceinline vboold4 ne(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a != b); }
__forceinline vboold4 lt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a < b); }
__forceinline vboold4 ge(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a >= b); }
__forceinline vboold4 gt(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a > b); }
__forceinline vboold4 le(const vboold4& mask, const vllong4& a, const vllong4& b) { return mask & (a <= b); }
#endif
276
277
////////////////////////////////////////////////////////////////////////////////
278
// Movement/Shifting/Shuffling Functions
279
////////////////////////////////////////////////////////////////////////////////
280
281
// shuffle within each 128-bit half: returns (v[i0], v[i1], v[2+i0], v[2+i1])
template<int i0, int i1>
__forceinline vllong4 shuffle(const vllong4& v) {
  // permute_pd immediate: one select bit per 64-bit lane, two bits per 128-bit half
  return _mm256_castpd_si256(_mm256_permute_pd(_mm256_castsi256_pd(v), (i1 << 3) | (i0 << 2) | (i1 << 1) | i0));
}

// broadcast lane i within each 128-bit half
template<int i>
__forceinline vllong4 shuffle(const vllong4& v) {
  return shuffle<i, i>(v);
}

// shuffle whole 128-bit halves: result = (half i0 of v, half i1 of v)
template<int i0, int i1>
__forceinline vllong4 shuffle2(const vllong4& v) {
  return _mm256_castpd_si256(_mm256_permute2f128_pd(_mm256_castsi256_pd(v), _mm256_castsi256_pd(v), (i1 << 4) | i0));
}

// extract lane 0 as a scalar
__forceinline long long toScalar(const vllong4& v) {
  return _mm_cvtsi128_si64(_mm256_castsi256_si128(v));
}
299
300
#if defined(__AVX512VL__)
301
__forceinline vllong4 permute(const vllong4& a, const __m256i& index) {
302
// workaround for GCC 7.x
303
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
304
return _mm256_permutex2var_epi64(a,index,a);
305
#else
306
return _mm256_permutexvar_epi64(index,a);
307
#endif
308
}
309
310
__forceinline vllong4 permutex2var(const vllong4& index, const vllong4& a, const vllong4& b) {
311
return _mm256_permutex2var_epi64(a,index,b);
312
}
313
314
#endif
315
////////////////////////////////////////////////////////////////////////////////
316
/// Reductions
317
////////////////////////////////////////////////////////////////////////////////
318
319
320
// horizontal reductions: the *2 variants combine the two lanes inside each
// 128-bit half, the full variants then combine the two halves, so every
// lane of the full result holds the reduced value.
__forceinline vllong4 vreduce_and2(const vllong4& a) { const vllong4 swapped = shuffle<1,0>(a); return a & swapped; }
__forceinline vllong4 vreduce_and (const vllong4& a) { const vllong4 half = vreduce_and2(a); const vllong4 other = shuffle2<1,0>(half); return half & other; }

__forceinline vllong4 vreduce_or2(const vllong4& a) { const vllong4 swapped = shuffle<1,0>(a); return a | swapped; }
__forceinline vllong4 vreduce_or (const vllong4& a) { const vllong4 half = vreduce_or2(a); const vllong4 other = shuffle2<1,0>(half); return half | other; }

__forceinline vllong4 vreduce_add2(const vllong4& a) { const vllong4 swapped = shuffle<1,0>(a); return a + swapped; }
__forceinline vllong4 vreduce_add (const vllong4& a) { const vllong4 half = vreduce_add2(a); const vllong4 other = shuffle2<1,0>(half); return half + other; }

// scalar reductions: reduce across all lanes, then extract lane 0
__forceinline long long reduce_add(const vllong4& a) { return toScalar(vreduce_add(a)); }
__forceinline long long reduce_or (const vllong4& a) { return toScalar(vreduce_or(a)); }
__forceinline long long reduce_and(const vllong4& a) { return toScalar(vreduce_and(a)); }
332
333
////////////////////////////////////////////////////////////////////////////////
334
/// Output Operators
335
////////////////////////////////////////////////////////////////////////////////
336
337
// print the vector as "<v0, v1, v2, v3>"
__forceinline embree_ostream operator <<(embree_ostream cout, const vllong4& v)
{
  cout << "<";
  for (size_t k=0; k<4; k++) {
    if (k > 0) cout << ", ";
    cout << v[k];
  }
  cout << ">";
  return cout;
}
344
}
345
346
#undef vboolf
347
#undef vboold
348
#undef vint
349
#undef vuint
350
#undef vllong
351
#undef vfloat
352
#undef vdouble
353
354