GitHub Repository: emscripten-core/emscripten
Path: blob/main/system/include/compat/smmintrin.h
/*
 * Copyright 2020 The Emscripten Authors. All rights reserved.
 * Emscripten is available under two separate licenses, the MIT license and the
 * University of Illinois/NCSA Open Source License. Both these licenses can be
 * found in the LICENSE file.
 */
#ifndef __emscripten_smmintrin_h__
#define __emscripten_smmintrin_h__

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#endif

#include <tmmintrin.h>
#include <math.h> // For rint and rintf

#define _mm_blend_epi16(__a, __b, __imm8) __extension__ ({ \
  (__m128i)__builtin_shufflevector((__i16x8)(__m128i)(__a), \
                                   (__i16x8)(__m128i)(__b), \
                                   (((__imm8) & 1) ? 8 : 0), \
                                   (((__imm8) & 2) ? 9 : 1), \
                                   (((__imm8) & 4) ? 10 : 2), \
                                   (((__imm8) & 8) ? 11 : 3), \
                                   (((__imm8) & 16) ? 12 : 4), \
                                   (((__imm8) & 32) ? 13 : 5), \
                                   (((__imm8) & 64) ? 14 : 6), \
                                   (((__imm8) & 128) ? 15 : 7)); })

#define _mm_blend_pd(__a, __b, __imm8) __extension__ ({ \
  (__m128d)__builtin_shufflevector((__f64x2)(__m128d)(__a), \
                                   (__f64x2)(__m128d)(__b), \
                                   (((__imm8) & 0x01) ? 2 : 0), \
                                   (((__imm8) & 0x02) ? 3 : 1)); })

#define _mm_blend_ps(__a, __b, __imm8) __extension__ ({ \
  (__m128)__builtin_shufflevector((__f32x4)(__m128)(__a), (__f32x4)(__m128)(__b), \
                                  (((__imm8) & 0x01) ? 4 : 0), \
                                  (((__imm8) & 0x02) ? 5 : 1), \
                                  (((__imm8) & 0x04) ? 6 : 2), \
                                  (((__imm8) & 0x08) ? 7 : 3)); })

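// Usage sketch (illustrative example, not part of the original header): bit i of
// the immediate selects lane i from __b when set, and from __a when clear.
//
//   __m128i lo = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);         // lanes 0..7 = 0..7
//   __m128i hi = _mm_set_epi16(17, 16, 15, 14, 13, 12, 11, 10); // lanes 0..7 = 10..17
//   __m128i r  = _mm_blend_epi16(lo, hi, 0x0F);
//   // r lanes 0..7 = 10, 11, 12, 13, 4, 5, 6, 7
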
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8(__m128i __a, __m128i __b, __m128i __mask)
{
  v128_t __M = wasm_i8x16_shr((v128_t)__mask, 7);
  return (__m128i)wasm_v128_bitselect((v128_t)__b, (v128_t)__a, __M);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd(__m128d __a, __m128d __b, __m128d __mask)
{
  v128_t __M = wasm_i64x2_shr((v128_t)__mask, 63);
  return (__m128d)wasm_v128_bitselect((v128_t)__b, (v128_t)__a, __M);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps(__m128 __a, __m128 __b, __m128 __mask)
{
  v128_t __M = wasm_i32x4_shr((v128_t)__mask, 31);
  return (__m128)wasm_v128_bitselect((v128_t)__b, (v128_t)__a, __M);
}

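// Usage sketch (illustrative example, not part of the original header): each lane is
// taken from __b when the sign bit of the corresponding mask lane is set, else from __a.
//
//   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 b = _mm_setr_ps(10.0f, 20.0f, 30.0f, 40.0f);
//   __m128 m = _mm_cmplt_ps(a, _mm_set1_ps(2.5f)); // lanes 0,1 all-ones; lanes 2,3 zero
//   __m128 r = _mm_blendv_ps(a, b, m);             // r = {10, 20, 3, 4}
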
#define _MM_FROUND_TO_NEAREST_INT 0x00
#define _MM_FROUND_TO_NEG_INF 0x01
#define _MM_FROUND_TO_POS_INF 0x02
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04

#define _MM_FROUND_RAISE_EXC 0x00
#define _MM_FROUND_NO_EXC 0x08

#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_ceil_pd(__m128d __a)
{
  return (__m128d)wasm_f64x2_ceil((v128_t)__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_ceil_ps(__m128 __a)
{
  return (__m128)wasm_f32x4_ceil((v128_t)__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_ceil_ss(__m128 __a, __m128 __b)
{
  return _mm_move_ss(__a, _mm_ceil_ps(__b));
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_ceil_sd(__m128d __a, __m128d __b)
{
  return _mm_move_sd(__a, _mm_ceil_pd(__b));
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_floor_pd(__m128d __a)
{
  return (__m128d)wasm_f64x2_floor((v128_t)__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_floor_ps(__m128 __a)
{
  return (__m128)wasm_f32x4_floor((v128_t)__a);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_floor_ss(__m128 __a, __m128 __b)
{
  return _mm_move_ss(__a, _mm_floor_ps(__b));
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_floor_sd(__m128d __a, __m128d __b)
{
  return _mm_move_sd(__a, _mm_floor_pd(__b));
}

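// Usage sketch (illustrative example, not part of the original header): the _ss/_sd
// variants round only lane 0 of __b and copy the remaining lanes from __a.
//
//   __m128 a = _mm_setr_ps(1.5f, 2.5f, 3.5f, 4.5f);
//   __m128 b = _mm_setr_ps(0.25f, 9.0f, 9.0f, 9.0f);
//   __m128 r = _mm_ceil_ss(a, b); // r = {1.0f, 2.5f, 3.5f, 4.5f}
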
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_round_pd(__m128d __a, int __rounding)
{
  switch(__rounding & 7)
  {
    case _MM_FROUND_TO_NEG_INF: return _mm_floor_pd(__a);
    case _MM_FROUND_TO_POS_INF: return _mm_ceil_pd(__a);
    case _MM_FROUND_TO_ZERO:
      return (__m128d)wasm_f64x2_trunc((v128_t)__a);
    default:
      // _MM_FROUND_TO_NEAREST_INT and _MM_FROUND_CUR_DIRECTION (which is always
      // round-to-nearest in Wasm SIMD). Like SSE, this uses "banker's rounding"
      // (round half to even): e.g. 2.5 rounds down to 2 and 3.5 rounds up to 4.
      return (__m128d)wasm_f64x2_nearest((v128_t)__a);
  }
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_round_ps(__m128 __a, int __rounding)
{
  switch(__rounding & 7)
  {
    case _MM_FROUND_TO_NEG_INF: return _mm_floor_ps(__a);
    case _MM_FROUND_TO_POS_INF: return _mm_ceil_ps(__a);
    case _MM_FROUND_TO_ZERO:
      return (__m128)wasm_f32x4_trunc((v128_t)__a);
    default:
      // _MM_FROUND_TO_NEAREST_INT and _MM_FROUND_CUR_DIRECTION (which is always
      // round-to-nearest in Wasm SIMD). Like SSE, this uses "banker's rounding"
      // (round half to even): e.g. 2.5 rounds down to 2 and 3.5 rounds up to 4.
      return (__m128)wasm_f32x4_nearest((v128_t)__a);
  }
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_round_ss(__m128 __a, __m128 __b, int __rounding)
{
  return _mm_move_ss(__a, _mm_round_ps(__b, __rounding));
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_round_sd(__m128d __a, __m128d __b, int __rounding)
{
  return _mm_move_sd(__a, _mm_round_pd(__b, __rounding));
}

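// Usage sketch (illustrative example, not part of the original header): with
// round-to-nearest, halfway values round toward the nearest even integer.
//
//   __m128 v = _mm_setr_ps(2.5f, 3.5f, -1.5f, 1.7f);
//   _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); // {2, 4, -2, 2}
//   _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);        // {2, 3, -1, 1}
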
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i32x4_mul(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i64x2_extmul_low_i32x4(
    (v128_t)_mm_shuffle_epi32(__a, _MM_SHUFFLE(2, 0, 2, 0)),
    (v128_t)_mm_shuffle_epi32(__b, _MM_SHUFFLE(2, 0, 2, 0)));
}

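// Usage sketch (illustrative example, not part of the original header):
// _mm_mul_epi32 multiplies the even-indexed (0 and 2) signed 32-bit lanes into two
// 64-bit products, while _mm_mullo_epi32 keeps the low 32 bits of all four products.
//
//   __m128i x = _mm_setr_epi32(-3, 7, 100000, 9);
//   __m128i y = _mm_setr_epi32( 5, 7, 100000, 9);
//   _mm_mul_epi32(x, y);   // 64-bit lanes: {-15, 10000000000}
//   _mm_mullo_epi32(x, y); // 32-bit lanes: {-15, 49, 1410065408, 81}
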
#define _mm_dp_ps(__a, __b, __imm8) __extension__ ({ \
  __m128 __tmp = _mm_mul_ps(__a, __b); \
  __m128 __zero = _mm_setzero_ps(); \
  __tmp = _mm_blend_ps(__zero, __tmp, __imm8 >> 4); \
  __m128 __sum = _mm_add_ps(__tmp, _mm_shuffle_ps(__tmp, __tmp, _MM_SHUFFLE(2, 3, 0, 1))); \
  __sum = _mm_add_ps(__sum, _mm_shuffle_ps(__sum, __sum, _MM_SHUFFLE(1, 0, 3, 2))); \
  _mm_blend_ps(__zero, __sum, __imm8); })

#define _mm_dp_pd(__a, __b, __imm8) __extension__ ({ \
  __m128d __tmp = _mm_mul_pd(__a, __b); \
  __m128d __zero = _mm_setzero_pd(); \
  __tmp = _mm_blend_pd(__zero, __tmp, __imm8 >> 4); \
  __m128d __sum = _mm_add_pd(__tmp, _mm_shuffle_pd(__tmp, __tmp, _MM_SHUFFLE2(0, 1))); \
  _mm_blend_pd(__zero, __sum, __imm8); })

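// Usage sketch (illustrative example, not part of the original header): the high
// nibble of the immediate selects which element-wise products enter the sum; the low
// nibble selects which result lanes receive the sum (the rest are zeroed).
//
//   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 b = _mm_set1_ps(10.0f);
//   _mm_dp_ps(a, b, 0xFF); // full dot product broadcast: {100, 100, 100, 100}
//   _mm_dp_ps(a, b, 0x31); // sum of products from lanes 0,1 into lane 0: {30, 0, 0, 0}
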
#define _mm_stream_load_si128 _mm_load_si128

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i8x16_min(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i8x16_max(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_u16x8_min(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_u16x8_max(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i32x4_min(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_i32x4_max(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_u32x4_min(__a, __b);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_u32x4_max(__a, __b);
}

#define _mm_insert_ps(__a, __b, __imm8) __extension__ ({ \
  _Static_assert(__builtin_constant_p(__imm8), "Expected constant"); \
  __m128 __tmp = __builtin_shufflevector((__f32x4)__a, (__f32x4)__b, \
    ((((__imm8) >> 4) & 3) == 0) ? ((((__imm8) >> 6) & 3) + 4) : 0, \
    ((((__imm8) >> 4) & 3) == 1) ? ((((__imm8) >> 6) & 3) + 4) : 1, \
    ((((__imm8) >> 4) & 3) == 2) ? ((((__imm8) >> 6) & 3) + 4) : 2, \
    ((((__imm8) >> 4) & 3) == 3) ? ((((__imm8) >> 6) & 3) + 4) : 3); \
  (__m128)__builtin_shufflevector(__tmp, _mm_setzero_ps(), \
    (((__imm8) & 1) ? 4 : 0), \
    (((__imm8) & 2) ? 5 : 1), \
    (((__imm8) & 4) ? 6 : 2), \
    (((__imm8) & 8) ? 7 : 3)); })

#define _mm_extract_ps(__a, __imm8) \
  __extension__({ wasm_i32x4_extract_lane((v128_t)(__a), (__imm8)&3); })

#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __f32x4 __a = (__f32x4)(X); \
  (D) = __a[N]; }))

#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps(_mm_setzero_ps(), (X), \
  _MM_MK_INSERTPS_NDX((N), 0, 0x0e))

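// Usage sketch (illustrative example, not part of the original header): bits [7:6]
// of the immediate pick the source lane of __b, bits [5:4] the destination lane in
// __a, and bits [3:0] zero out result lanes.
//
//   __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
//   __m128 b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
//   _mm_insert_ps(a, b, _MM_MK_INSERTPS_NDX(2, 1, 0));   // b[2] into lane 1: {1, 7, 3, 4}
//   _mm_insert_ps(a, b, _MM_MK_INSERTPS_NDX(0, 0, 0x8)); // b[0] into lane 0, zero lane 3: {5, 2, 3, 0}
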
#define _mm_insert_epi8(__a, __i, __imm8) __extension__ ({ \
  (__m128i)wasm_i8x16_replace_lane((__a), (__imm8) & 15, (__i)); })

#define _mm_insert_epi32(__a, __i, __imm8) __extension__ ({ \
  (__m128i)wasm_i32x4_replace_lane((__a), (__imm8) & 3, (__i)); })

#define _mm_insert_epi64(__a, __i, __imm8) __extension__ ({ \
  (__m128i)wasm_i64x2_replace_lane((__a), (__imm8) & 1, (__i)); })

#define _mm_extract_epi8(__a, __imm8) __extension__ ({ \
  wasm_u8x16_extract_lane((__a), (__imm8) & 15); })

#define _mm_extract_epi32(__a, __imm8) __extension__ ({ \
  wasm_i32x4_extract_lane((__a), (__imm8) & 3); })

#define _mm_extract_epi64(__a, __imm8) __extension__ ({ \
  wasm_i64x2_extract_lane((__a), (__imm8) & 1); })

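// Usage sketch (illustrative example, not part of the original header):
// _mm_extract_epi8 zero-extends the selected byte, while _mm_extract_epi32 returns
// the lane as a signed int.
//
//   __m128i v = _mm_setr_epi32(-1, 2, 3, 4);
//   _mm_extract_epi8(v, 0);     // 255 (0xFF, zero-extended)
//   _mm_extract_epi32(v, 0);    // -1
//   _mm_insert_epi32(v, 42, 3); // {-1, 2, 3, 42}
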
static __inline__ int __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW))
_mm_testz_si128(__m128i __a, __m128i __b)
{
  v128_t __m = wasm_v128_and(__a, __b);
  return (wasm_i64x2_extract_lane(__m, 0) | wasm_i64x2_extract_lane(__m, 1)) == 0;
}

static __inline__ int __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW))
_mm_testc_si128(__m128i __a, __m128i __b)
{
  v128_t __m = wasm_v128_andnot(__b, __a);
  return (wasm_i64x2_extract_lane(__m, 0) | wasm_i64x2_extract_lane(__m, 1)) == 0;
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __a, __m128i __b)
{
  v128_t __m1 = wasm_v128_and(__a, __b);
  v128_t __m2 = wasm_v128_andnot(__b, __a);
  return (wasm_i64x2_extract_lane(__m1, 0) | wasm_i64x2_extract_lane(__m1, 1))
      && (wasm_i64x2_extract_lane(__m2, 0) | wasm_i64x2_extract_lane(__m2, 1));
}

static __inline__ int __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW))
_mm_test_all_ones(__m128i __a)
{
  return (wasm_i64x2_extract_lane(__a, 0) & wasm_i64x2_extract_lane(__a, 1)) == 0xFFFFFFFFFFFFFFFFull;
}

static __inline__ int __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW))
_mm_test_all_zeros(__m128i __a, __m128i __mask)
{
  v128_t __m = wasm_v128_and(__a, __mask);
  return (wasm_i64x2_extract_lane(__m, 0) | wasm_i64x2_extract_lane(__m, 1)) == 0;
}

static __inline__ int __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW))
_mm_test_mix_ones_zeros(__m128i __a, __m128i __mask)
{
  // Returns !ZF && !CF, i.e. 1 when __mask selects at least one set bit and at
  // least one clear bit of __a (same result as _mm_testnzc_si128(__a, __mask)).
  v128_t __m1 = wasm_v128_and(__a, __mask);
  v128_t __m2 = wasm_v128_andnot(__mask, __a);
  return (wasm_i64x2_extract_lane(__m1, 0) | wasm_i64x2_extract_lane(__m1, 1))
      && (wasm_i64x2_extract_lane(__m2, 0) | wasm_i64x2_extract_lane(__m2, 1));
}

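// Usage sketch (illustrative example, not part of the original header): these map to
// the ZF/CF results of the x86 PTEST instruction.
//
//   __m128i a = _mm_setr_epi32(0x0F, 0, 0, 0);
//   __m128i b = _mm_setr_epi32(0xFF, 0, 0, 0);
//   _mm_testz_si128(a, b);   // 0: a & b is non-zero
//   _mm_testc_si128(b, a);   // 1: every bit set in a is also set in b
//   _mm_testnzc_si128(a, b); // 1: a & b != 0 and b & ~a != 0
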
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __a, __m128i __b)
{
  const __m128i __mask = _mm_cmpeq_epi32(__a, __b);
  return _mm_and_si128(__mask, _mm_shuffle_epi32(__mask, _MM_SHUFFLE(2, 3, 0, 1)));
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __a)
{
  return (__m128i)wasm_i16x8_extend_low_i8x16((v128_t)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __a)
{
  return (__m128i)wasm_i32x4_extend_low_i16x8(wasm_i16x8_extend_low_i8x16((v128_t)__a));
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __a)
{
  const __m128i __exta = _mm_cvtepi8_epi32(__a);
  const __m128i __sign = _mm_cmpgt_epi32(_mm_setzero_si128(), __exta);
  return _mm_unpacklo_epi32(__exta, __sign);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __a)
{
  return (__m128i)wasm_i32x4_extend_low_i16x8((v128_t)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __a)
{
  const __m128i __exta = _mm_cvtepi16_epi32(__a);
  const __m128i __sign = _mm_cmpgt_epi32(_mm_setzero_si128(), __exta);
  return _mm_unpacklo_epi32(__exta, __sign);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __a)
{
  const __m128i __sign = _mm_cmpgt_epi32(_mm_setzero_si128(), __a);
  return _mm_unpacklo_epi32(__a, __sign);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __a)
{
  return (__m128i)wasm_u16x8_extend_low_u8x16((v128_t)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __a)
{
  return (__m128i)wasm_u32x4_extend_low_u16x8(wasm_u16x8_extend_low_u8x16((v128_t)__a));
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __a)
{
  const __m128i __zero = _mm_setzero_si128();
  return _mm_unpacklo_epi32(_mm_unpacklo_epi16(_mm_unpacklo_epi8(__a, __zero), __zero), __zero);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __a)
{
  return (__m128i)wasm_u32x4_extend_low_u16x8((v128_t)__a);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __a)
{
  const __m128i __zero = _mm_setzero_si128();
  return _mm_unpacklo_epi32(_mm_unpacklo_epi16(__a, __zero), __zero);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __a)
{
  const __m128i __zero = _mm_setzero_si128();
  return _mm_unpacklo_epi32(__a, __zero);
}

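// Usage sketch (illustrative example, not part of the original header): the _epi
// conversions sign-extend, the _epu conversions zero-extend the low lanes.
//
//   __m128i v = _mm_setr_epi8(-1, 2, -3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0);
//   _mm_cvtepi8_epi16(v); // 16-bit lanes: {-1, 2, -3, 4, 5, 6, 7, 8}
//   _mm_cvtepu8_epi16(v); // 16-bit lanes: {255, 2, 253, 4, 5, 6, 7, 8}
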
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __a, __m128i __b)
{
  return (__m128i)wasm_u16x8_narrow_i32x4(__a, __b);
}

// Helper used by _mm_mpsadbw_epu8: absolute difference of two byte values.
static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
__uabs(int __i)
{
  return (unsigned short)((__i >= 0) ? __i : -__i);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mpsadbw_epu8(__m128i __a, __m128i __b, int __imm8)
{
  int __aOffset = __imm8 & 4;
  int __bOffset = (__imm8 & 3) << 2;
  unsigned short __ret[8];
  for(int __i = 0; __i < 8; ++__i)
  {
    __ret[__i] = __uabs(((__u8x16)__a)[__i + __aOffset    ] - ((__u8x16)__b)[__bOffset    ])
               + __uabs(((__u8x16)__a)[__i + __aOffset + 1] - ((__u8x16)__b)[__bOffset + 1])
               + __uabs(((__u8x16)__a)[__i + __aOffset + 2] - ((__u8x16)__b)[__bOffset + 2])
               + __uabs(((__u8x16)__a)[__i + __aOffset + 3] - ((__u8x16)__b)[__bOffset + 3]);
  }
  return (__m128i)wasm_v128_load(__ret);
}

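// Usage sketch (illustrative example, not part of the original header): each of the
// eight 16-bit results is the sum of absolute differences between a sliding 4-byte
// window of __a and one fixed 4-byte block of __b selected by the immediate.
//
//   __m128i a = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
//   __m128i b = _mm_setr_epi8(1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
//   _mm_mpsadbw_epu8(a, b, 0); // 16-bit lanes: {0, 4, 8, 12, 16, 20, 24, 28}
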
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_minpos_epu16(__m128i __a)
{
  unsigned short __min[2] = { 0xFFFF, 0 };
  for(int __i = 0; __i < 8; ++__i)
  {
    unsigned short __v = ((__u16x8)__a)[__i];
    if (__v < __min[0])
    {
      __min[0] = __v;
      __min[1] = __i;
    }
  }
  return (__m128i)wasm_i32x4_make(*(int*)__min, 0, 0, 0);
}

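// Usage sketch (illustrative example, not part of the original header): lane 0 of the
// result holds the minimum unsigned 16-bit value, lane 1 its index; the rest are zero.
//
//   __m128i v = _mm_setr_epi16(9, 4, 7, 4, 8, 6, 5, 3);
//   _mm_minpos_epu16(v); // 16-bit lanes: {3, 7, 0, 0, 0, 0, 0, 0}
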
// Clang and GCC compatibility: Both Clang and GCC include SSE4.2 headers from SSE4.1 headers
#ifdef __SSE4_2__
#include <nmmintrin.h>
#endif

#endif /* __emscripten_smmintrin_h__ */