GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/astcenc/astcenc_vecmathlib_sse_4.h
1
// SPDX-License-Identifier: Apache-2.0
2
// ----------------------------------------------------------------------------
3
// Copyright 2019-2024 Arm Limited
4
//
5
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
6
// use this file except in compliance with the License. You may obtain a copy
7
// of the License at:
8
//
9
// http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing, software
12
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14
// License for the specific language governing permissions and limitations
15
// under the License.
16
// ----------------------------------------------------------------------------
17
18
/**
19
* @brief 4x32-bit vectors, implemented using SSE.
20
*
21
* This module implements 4-wide 32-bit float, int, and mask vectors for x86
22
* SSE. The implementation requires at least SSE2, but higher levels of SSE can
23
* be selected at compile time to improve performance.
24
*
25
* There is a baseline level of functionality provided by all vector widths and
26
* implementations. This is implemented using identical function signatures,
27
* modulo data type, so we can use them as substitutable implementations in VLA
28
* code.
29
*
30
* The 4-wide vectors are also used as a fixed-width type, and significantly
31
* extend the functionality above that available to VLA code.
32
*/
33
34
#ifndef ASTC_VECMATHLIB_SSE_4_H_INCLUDED
35
#define ASTC_VECMATHLIB_SSE_4_H_INCLUDED
36
37
#ifndef ASTCENC_SIMD_INLINE
38
#error "Include astcenc_vecmathlib.h, do not include directly"
39
#endif
40
41
#include <cstdio>
42
#include <cstring>
43
44
// ============================================================================
45
// vfloat4 data type
46
// ============================================================================
47
48
/**
49
* @brief Data type for 4-wide floats.
50
*/
51
struct vfloat4
52
{
53
/**
54
* @brief Construct from zero-initialized value.
55
*/
56
ASTCENC_SIMD_INLINE vfloat4() = default;
57
58
/**
59
* @brief Construct from 4 values loaded from an unaligned address.
60
*
61
* Consider using loada() which is better with vectors if data is aligned
62
* to vector length.
63
*/
64
ASTCENC_SIMD_INLINE explicit vfloat4(const float *p)
65
{
66
m = _mm_loadu_ps(p);
67
}
68
69
/**
70
* @brief Construct from 1 scalar value replicated across all lanes.
71
*
72
* Consider using zero() for constexpr zeros.
73
*/
74
ASTCENC_SIMD_INLINE explicit vfloat4(float a)
75
{
76
m = _mm_set1_ps(a);
77
}
78
79
/**
80
* @brief Construct from 4 scalar values.
81
*
82
* The value of @c a is stored to lane 0 (LSB) in the SIMD register.
83
*/
84
ASTCENC_SIMD_INLINE explicit vfloat4(float a, float b, float c, float d)
85
{
86
m = _mm_set_ps(d, c, b, a);
87
}
88
89
/**
90
* @brief Construct from an existing SIMD register.
91
*/
92
ASTCENC_SIMD_INLINE explicit vfloat4(__m128 a)
93
{
94
m = a;
95
}
96
97
/**
98
* @brief Get the scalar value of a single lane.
99
*/
100
template <int l> ASTCENC_SIMD_INLINE float lane() const
101
{
102
return _mm_cvtss_f32(_mm_shuffle_ps(m, m, l));
103
}
104
105
/**
106
* @brief Set the scalar value of a single lane.
107
*/
108
template <int l> ASTCENC_SIMD_INLINE void set_lane(float a)
109
{
110
#if ASTCENC_SSE >= 41
111
__m128 v = _mm_set1_ps(a);
112
m = _mm_insert_ps(m, v, l << 6 | l << 4);
113
#else
114
alignas(16) float idx[4];
115
_mm_store_ps(idx, m);
116
idx[l] = a;
117
m = _mm_load_ps(idx);
118
#endif
119
}
120
121
/**
122
* @brief Factory that returns a vector of zeros.
123
*/
124
static ASTCENC_SIMD_INLINE vfloat4 zero()
125
{
126
return vfloat4(_mm_setzero_ps());
127
}
128
129
/**
130
* @brief Factory that returns a replicated scalar loaded from memory.
131
*/
132
static ASTCENC_SIMD_INLINE vfloat4 load1(const float* p)
133
{
134
return vfloat4(_mm_load_ps1(p));
135
}
136
137
/**
138
* @brief Factory that returns a vector loaded from 16B aligned memory.
139
*/
140
static ASTCENC_SIMD_INLINE vfloat4 loada(const float* p)
141
{
142
return vfloat4(_mm_load_ps(p));
143
}
144
145
/**
146
* @brief Return a swizzled float 2.
147
*/
148
template <int l0, int l1> ASTCENC_SIMD_INLINE vfloat4 swz() const
149
{
150
vfloat4 result(_mm_shuffle_ps(m, m, l0 | l1 << 2));
151
result.set_lane<2>(0.0f);
152
result.set_lane<3>(0.0f);
153
return result;
154
}
155
156
/**
157
* @brief Return a swizzled float 3.
158
*/
159
template <int l0, int l1, int l2> ASTCENC_SIMD_INLINE vfloat4 swz() const
160
{
161
vfloat4 result(_mm_shuffle_ps(m, m, l0 | l1 << 2 | l2 << 4));
162
result.set_lane<3>(0.0f);
163
return result;
164
}
165
166
/**
167
* @brief Return a swizzled float 4.
168
*/
169
template <int l0, int l1, int l2, int l3> ASTCENC_SIMD_INLINE vfloat4 swz() const
170
{
171
return vfloat4(_mm_shuffle_ps(m, m, l0 | l1 << 2 | l2 << 4 | l3 << 6));
172
}
173
174
/**
175
 * @brief The underlying SIMD register.
176
*/
177
__m128 m;
178
};
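/**
 * Usage sketch (illustrative only; the helper name below is not part of the
 * astcenc API). It exercises the vfloat4 constructors, lane accessors, and
 * swizzles declared above, assuming nothing beyond this header.
 */
static inline float vfloat4_usage_example()
{
	// Aligned load and scalar broadcast construction
	alignas(16) float buf[4] { 1.0f, 2.0f, 3.0f, 4.0f };
	vfloat4 a = vfloat4::loada(buf);
	vfloat4 b(5.0f);

	// Replace lane 2, then read it back
	a.set_lane<2>(9.0f);
	float l2 = a.lane<2>();           // 9.0f

	// Reverse the first three lanes; the three-lane swz<> form zeroes lane 3
	vfloat4 r = a.swz<2, 1, 0>();

	return l2 + r.lane<0>() + b.lane<1>();
}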
179
180
// ============================================================================
181
// vint4 data type
182
// ============================================================================
183
184
/**
185
* @brief Data type for 4-wide ints.
186
*/
187
struct vint4
188
{
189
/**
190
* @brief Construct from zero-initialized value.
191
*/
192
ASTCENC_SIMD_INLINE vint4() = default;
193
194
/**
195
* @brief Construct from 4 values loaded from an unaligned address.
196
*
197
* Consider using loada() which is better with vectors if data is aligned
198
* to vector length.
199
*/
200
ASTCENC_SIMD_INLINE explicit vint4(const int *p)
201
{
202
m = _mm_loadu_si128(reinterpret_cast<const __m128i*>(p));
203
}
204
205
/**
206
* @brief Construct from 4 uint8_t loaded from an unaligned address.
207
*/
208
ASTCENC_SIMD_INLINE explicit vint4(const uint8_t *p)
209
{
210
// _mm_loadu_si32 would be nicer syntax, but missing on older GCC
211
__m128i t = _mm_cvtsi32_si128(*reinterpret_cast<const int*>(p));
212
213
#if ASTCENC_SSE >= 41
214
m = _mm_cvtepu8_epi32(t);
215
#else
216
t = _mm_unpacklo_epi8(t, _mm_setzero_si128());
217
m = _mm_unpacklo_epi16(t, _mm_setzero_si128());
218
#endif
219
}
220
221
/**
222
* @brief Construct from 1 scalar value replicated across all lanes.
223
*
224
* Consider using zero() for constexpr zeros.
225
*/
226
ASTCENC_SIMD_INLINE explicit vint4(int a)
227
{
228
m = _mm_set1_epi32(a);
229
}
230
231
/**
232
* @brief Construct from 4 scalar values.
233
*
234
* The value of @c a is stored to lane 0 (LSB) in the SIMD register.
235
*/
236
ASTCENC_SIMD_INLINE explicit vint4(int a, int b, int c, int d)
237
{
238
m = _mm_set_epi32(d, c, b, a);
239
}
240
241
/**
242
* @brief Construct from an existing SIMD register.
243
*/
244
ASTCENC_SIMD_INLINE explicit vint4(__m128i a)
245
{
246
m = a;
247
}
248
249
/**
250
* @brief Get the scalar from a single lane.
251
*/
252
template <int l> ASTCENC_SIMD_INLINE int lane() const
253
{
254
return _mm_cvtsi128_si32(_mm_shuffle_epi32(m, l));
255
}
256
257
/**
258
* @brief Set the scalar value of a single lane.
259
*/
260
template <int l> ASTCENC_SIMD_INLINE void set_lane(int a)
261
{
262
#if ASTCENC_SSE >= 41
263
m = _mm_insert_epi32(m, a, l);
264
#else
265
alignas(16) int idx[4];
266
_mm_store_si128(reinterpret_cast<__m128i*>(idx), m);
267
idx[l] = a;
268
m = _mm_load_si128(reinterpret_cast<const __m128i*>(idx));
269
#endif
270
}
271
272
/**
273
* @brief Factory that returns a vector of zeros.
274
*/
275
static ASTCENC_SIMD_INLINE vint4 zero()
276
{
277
return vint4(_mm_setzero_si128());
278
}
279
280
/**
281
* @brief Factory that returns a replicated scalar loaded from memory.
282
*/
283
static ASTCENC_SIMD_INLINE vint4 load1(const int* p)
284
{
285
return vint4(*p);
286
}
287
288
/**
289
* @brief Factory that returns a vector loaded from unaligned memory.
290
*/
291
static ASTCENC_SIMD_INLINE vint4 load(const uint8_t* p)
292
{
293
#if ASTCENC_SSE >= 41
294
return vint4(_mm_lddqu_si128(reinterpret_cast<const __m128i*>(p)));
295
#else
296
return vint4(_mm_loadu_si128(reinterpret_cast<const __m128i*>(p)));
297
#endif
298
}
299
300
/**
301
* @brief Factory that returns a vector loaded from 16B aligned memory.
302
*/
303
static ASTCENC_SIMD_INLINE vint4 loada(const int* p)
304
{
305
return vint4(_mm_load_si128(reinterpret_cast<const __m128i*>(p)));
306
}
307
308
/**
309
* @brief Factory that returns a vector containing the lane IDs.
310
*/
311
static ASTCENC_SIMD_INLINE vint4 lane_id()
312
{
313
return vint4(_mm_set_epi32(3, 2, 1, 0));
314
}
315
316
/**
317
 * @brief The underlying SIMD register.
318
*/
319
__m128i m;
320
};
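/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). It exercises the vint4 constructors and factories declared above,
 * including the widening load from four uint8_t values.
 */
static inline int vint4_usage_example()
{
	// Widen four bytes to four 32-bit lanes
	alignas(16) uint8_t bytes[4] { 10, 20, 30, 40 };
	vint4 widened(bytes);             // lanes = { 10, 20, 30, 40 }

	// Lane IDs are { 0, 1, 2, 3 }
	vint4 ids = vint4::lane_id();

	widened.set_lane<0>(99);
	return widened.lane<0>() + ids.lane<3>();   // 99 + 3
}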
321
322
// ============================================================================
323
// vmask4 data type
324
// ============================================================================
325
326
/**
327
* @brief Data type for 4-wide control plane masks.
328
*/
329
struct vmask4
330
{
331
/**
332
* @brief Construct from an existing SIMD register.
333
*/
334
ASTCENC_SIMD_INLINE explicit vmask4(__m128 a)
335
{
336
m = a;
337
}
338
339
/**
340
* @brief Construct from an existing SIMD register.
341
*/
342
ASTCENC_SIMD_INLINE explicit vmask4(__m128i a)
343
{
344
m = _mm_castsi128_ps(a);
345
}
346
347
/**
348
* @brief Construct from 1 scalar value.
349
*/
350
ASTCENC_SIMD_INLINE explicit vmask4(bool a)
351
{
352
vint4 mask(a == false ? 0 : -1);
353
m = _mm_castsi128_ps(mask.m);
354
}
355
356
/**
357
* @brief Construct from 4 scalar values.
358
*
359
* The value of @c a is stored to lane 0 (LSB) in the SIMD register.
360
*/
361
ASTCENC_SIMD_INLINE explicit vmask4(bool a, bool b, bool c, bool d)
362
{
363
vint4 mask(a == false ? 0 : -1,
364
b == false ? 0 : -1,
365
c == false ? 0 : -1,
366
d == false ? 0 : -1);
367
368
m = _mm_castsi128_ps(mask.m);
369
}
370
371
/**
372
* @brief Get the scalar value of a single lane.
373
*/
374
template <int l> ASTCENC_SIMD_INLINE bool lane() const
375
{
376
return _mm_cvtss_f32(_mm_shuffle_ps(m, m, l)) != 0.0f;
377
}
378
379
/**
380
 * @brief The underlying SIMD register.
381
*/
382
__m128 m;
383
};
384
385
// ============================================================================
386
// vmask4 operators and functions
387
// ============================================================================
388
389
/**
390
* @brief Overload: mask union (or).
391
*/
392
ASTCENC_SIMD_INLINE vmask4 operator|(vmask4 a, vmask4 b)
393
{
394
return vmask4(_mm_or_ps(a.m, b.m));
395
}
396
397
/**
398
* @brief Overload: mask intersect (and).
399
*/
400
ASTCENC_SIMD_INLINE vmask4 operator&(vmask4 a, vmask4 b)
401
{
402
return vmask4(_mm_and_ps(a.m, b.m));
403
}
404
405
/**
406
* @brief Overload: mask difference (xor).
407
*/
408
ASTCENC_SIMD_INLINE vmask4 operator^(vmask4 a, vmask4 b)
409
{
410
return vmask4(_mm_xor_ps(a.m, b.m));
411
}
412
413
/**
414
* @brief Overload: mask invert (not).
415
*/
416
ASTCENC_SIMD_INLINE vmask4 operator~(vmask4 a)
417
{
418
return vmask4(_mm_xor_si128(_mm_castps_si128(a.m), _mm_set1_epi32(-1)));
419
}
420
421
/**
422
* @brief Return a 4-bit mask code indicating mask status.
423
*
424
* bit0 = lane 0
425
*/
426
ASTCENC_SIMD_INLINE unsigned int mask(vmask4 a)
427
{
428
return static_cast<unsigned int>(_mm_movemask_ps(a.m));
429
}
430
431
/**
432
* @brief True if any lanes are enabled, false otherwise.
433
*/
434
ASTCENC_SIMD_INLINE bool any(vmask4 a)
435
{
436
return mask(a) != 0;
437
}
438
439
/**
440
* @brief True if all lanes are enabled, false otherwise.
441
*/
442
ASTCENC_SIMD_INLINE bool all(vmask4 a)
443
{
444
return mask(a) == 0xF;
445
}
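/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). It shows how the vmask4 operators above combine lane masks, and how
 * mask(), any() and all() reduce a mask to scalar form.
 */
static inline bool vmask4_usage_example()
{
	vmask4 lo(true, true, false, false);
	vmask4 hi(false, false, true, true);

	vmask4 either = lo | hi;          // all four lanes set
	vmask4 both = lo & hi;            // no lanes set

	unsigned int bits = mask(lo);     // 0b0011
	return all(either) && !any(both) && (bits == 0x3u);
}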
446
447
// ============================================================================
448
// vint4 operators and functions
449
// ============================================================================
450
451
/**
452
* @brief Overload: vector by vector addition.
453
*/
454
ASTCENC_SIMD_INLINE vint4 operator+(vint4 a, vint4 b)
455
{
456
return vint4(_mm_add_epi32(a.m, b.m));
457
}
458
459
/**
460
* @brief Overload: vector by vector subtraction.
461
*/
462
ASTCENC_SIMD_INLINE vint4 operator-(vint4 a, vint4 b)
463
{
464
return vint4(_mm_sub_epi32(a.m, b.m));
465
}
466
467
/**
468
* @brief Overload: vector by vector multiplication.
469
*/
470
ASTCENC_SIMD_INLINE vint4 operator*(vint4 a, vint4 b)
471
{
472
#if ASTCENC_SSE >= 41
473
return vint4(_mm_mullo_epi32 (a.m, b.m));
474
#else
475
__m128i t1 = _mm_mul_epu32(a.m, b.m);
476
__m128i t2 = _mm_mul_epu32(
477
_mm_srli_si128(a.m, 4),
478
_mm_srli_si128(b.m, 4));
479
__m128i r = _mm_unpacklo_epi32(
480
_mm_shuffle_epi32(t1, _MM_SHUFFLE (0, 0, 2, 0)),
481
_mm_shuffle_epi32(t2, _MM_SHUFFLE (0, 0, 2, 0)));
482
return vint4(r);
483
#endif
484
}
485
486
/**
487
* @brief Overload: vector bit invert.
488
*/
489
ASTCENC_SIMD_INLINE vint4 operator~(vint4 a)
490
{
491
return vint4(_mm_xor_si128(a.m, _mm_set1_epi32(-1)));
492
}
493
494
/**
495
* @brief Overload: vector by vector bitwise or.
496
*/
497
ASTCENC_SIMD_INLINE vint4 operator|(vint4 a, vint4 b)
498
{
499
return vint4(_mm_or_si128(a.m, b.m));
500
}
501
502
/**
503
* @brief Overload: vector by vector bitwise and.
504
*/
505
ASTCENC_SIMD_INLINE vint4 operator&(vint4 a, vint4 b)
506
{
507
return vint4(_mm_and_si128(a.m, b.m));
508
}
509
510
/**
511
* @brief Overload: vector by vector bitwise xor.
512
*/
513
ASTCENC_SIMD_INLINE vint4 operator^(vint4 a, vint4 b)
514
{
515
return vint4(_mm_xor_si128(a.m, b.m));
516
}
517
518
/**
519
* @brief Overload: vector by vector equality.
520
*/
521
ASTCENC_SIMD_INLINE vmask4 operator==(vint4 a, vint4 b)
522
{
523
return vmask4(_mm_cmpeq_epi32(a.m, b.m));
524
}
525
526
/**
527
* @brief Overload: vector by vector inequality.
528
*/
529
ASTCENC_SIMD_INLINE vmask4 operator!=(vint4 a, vint4 b)
530
{
531
return ~vmask4(_mm_cmpeq_epi32(a.m, b.m));
532
}
533
534
/**
535
* @brief Overload: vector by vector less than.
536
*/
537
ASTCENC_SIMD_INLINE vmask4 operator<(vint4 a, vint4 b)
538
{
539
return vmask4(_mm_cmplt_epi32(a.m, b.m));
540
}
541
542
/**
543
* @brief Overload: vector by vector greater than.
544
*/
545
ASTCENC_SIMD_INLINE vmask4 operator>(vint4 a, vint4 b)
546
{
547
return vmask4(_mm_cmpgt_epi32(a.m, b.m));
548
}
549
550
/**
551
* @brief Logical shift left.
552
*/
553
template <int s> ASTCENC_SIMD_INLINE vint4 lsl(vint4 a)
554
{
555
return vint4(_mm_slli_epi32(a.m, s));
556
}
557
558
/**
559
* @brief Logical shift right.
560
*/
561
template <int s> ASTCENC_SIMD_INLINE vint4 lsr(vint4 a)
562
{
563
return vint4(_mm_srli_epi32(a.m, s));
564
}
565
566
/**
567
* @brief Arithmetic shift right.
568
*/
569
template <int s> ASTCENC_SIMD_INLINE vint4 asr(vint4 a)
570
{
571
return vint4(_mm_srai_epi32(a.m, s));
572
}
573
574
/**
575
* @brief Return the min vector of two vectors.
576
*/
577
ASTCENC_SIMD_INLINE vint4 min(vint4 a, vint4 b)
578
{
579
#if ASTCENC_SSE >= 41
580
return vint4(_mm_min_epi32(a.m, b.m));
581
#else
582
vmask4 d = a < b;
583
__m128i ap = _mm_and_si128(_mm_castps_si128(d.m), a.m);
584
__m128i bp = _mm_andnot_si128(_mm_castps_si128(d.m), b.m);
585
return vint4(_mm_or_si128(ap,bp));
586
#endif
587
}
588
589
/**
590
* @brief Return the max vector of two vectors.
591
*/
592
ASTCENC_SIMD_INLINE vint4 max(vint4 a, vint4 b)
593
{
594
#if ASTCENC_SSE >= 41
595
return vint4(_mm_max_epi32(a.m, b.m));
596
#else
597
vmask4 d = a > b;
598
__m128i ap = _mm_and_si128(_mm_castps_si128(d.m), a.m);
599
__m128i bp = _mm_andnot_si128(_mm_castps_si128(d.m), b.m);
600
return vint4(_mm_or_si128(ap,bp));
601
#endif
602
}
603
604
/**
605
* @brief Return the horizontal minimum of a vector.
606
*/
607
ASTCENC_SIMD_INLINE vint4 hmin(vint4 a)
608
{
609
a = min(a, vint4(_mm_shuffle_epi32(a.m, _MM_SHUFFLE(2, 3, 0, 1))));
610
a = min(a, vint4(_mm_shuffle_epi32(a.m, _MM_SHUFFLE(1, 0, 3, 2))));
611
return a;
612
}
613
614
/**
615
* @brief Return the horizontal maximum of a vector.
616
*/
617
ASTCENC_SIMD_INLINE vint4 hmax(vint4 a)
618
{
619
a = max(a, vint4(_mm_shuffle_epi32(a.m, _MM_SHUFFLE(2, 3, 0, 1))));
620
a = max(a, vint4(_mm_shuffle_epi32(a.m, _MM_SHUFFLE(1, 0, 3, 2))));
621
return a;
622
}
623
624
/**
625
* @brief Store a vector to a 16B aligned memory address.
626
*/
627
ASTCENC_SIMD_INLINE void storea(vint4 a, int* p)
628
{
629
_mm_store_si128(reinterpret_cast<__m128i*>(p), a.m);
630
}
631
632
/**
633
* @brief Store a vector to an unaligned memory address.
634
*/
635
ASTCENC_SIMD_INLINE void store(vint4 a, int* p)
636
{
637
// Cast due to missing intrinsics
638
_mm_storeu_ps(reinterpret_cast<float*>(p), _mm_castsi128_ps(a.m));
639
}
640
641
/**
642
* @brief Store a vector to an unaligned memory address.
643
*/
644
ASTCENC_SIMD_INLINE void store(vint4 a, uint8_t* p)
645
{
646
std::memcpy(p, &a.m, sizeof(int) * 4);
647
}
648
649
/**
650
* @brief Store lowest N (vector width) bytes into an unaligned address.
651
*/
652
ASTCENC_SIMD_INLINE void store_nbytes(vint4 a, uint8_t* p)
653
{
654
// Cast due to missing intrinsics
655
_mm_store_ss(reinterpret_cast<float*>(p), _mm_castsi128_ps(a.m));
656
}
657
658
/**
659
 * @brief Pack the low 8 bits of N (vector width) lanes and store them to an unaligned address.
660
*/
661
ASTCENC_SIMD_INLINE void pack_and_store_low_bytes(vint4 a, uint8_t* p)
662
{
663
#if ASTCENC_SSE >= 41
664
__m128i shuf = _mm_set_epi8(0,0,0,0, 0,0,0,0, 0,0,0,0, 12,8,4,0);
665
a = vint4(_mm_shuffle_epi8(a.m, shuf));
666
store_nbytes(a, p);
667
#else
668
__m128i va = _mm_unpacklo_epi8(a.m, _mm_shuffle_epi32(a.m, _MM_SHUFFLE(1,1,1,1)));
669
__m128i vb = _mm_unpackhi_epi8(a.m, _mm_shuffle_epi32(a.m, _MM_SHUFFLE(3,3,3,3)));
670
a = vint4(_mm_unpacklo_epi16(va, vb));
671
store_nbytes(a, p);
672
#endif
673
}
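/**
 * Usage sketch (illustrative only; the helper name and output buffer are
 * hypothetical). pack_and_store_low_bytes() keeps only the low 8 bits of each
 * 32-bit lane and writes them as 4 consecutive bytes.
 */
static inline void pack_low_bytes_example(uint8_t out[4])
{
	vint4 v(0x101, 0x202, 0x303, 0x404);
	pack_and_store_low_bytes(v, out);   // out = { 0x01, 0x02, 0x03, 0x04 }
}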
674
675
/**
676
* @brief Return lanes from @c b if @c cond is set, else @c a.
677
*/
678
ASTCENC_SIMD_INLINE vint4 select(vint4 a, vint4 b, vmask4 cond)
679
{
680
__m128i condi = _mm_castps_si128(cond.m);
681
682
#if ASTCENC_SSE >= 41
683
return vint4(_mm_blendv_epi8(a.m, b.m, condi));
684
#else
685
return vint4(_mm_or_si128(_mm_and_si128(condi, b.m), _mm_andnot_si128(condi, a.m)));
686
#endif
687
}
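/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). A comparison produces a vmask4, which select() then uses to build a
 * branch-free per-lane "clamp to upper bound".
 */
static inline vint4 vint4_clamp_hi_example(vint4 v, vint4 limit)
{
	vmask4 over = v > limit;
	// Keep v where the mask is clear, take limit where v exceeded it
	return select(v, limit, over);
}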
688
689
// ============================================================================
690
// vfloat4 operators and functions
691
// ============================================================================
692
693
/**
694
* @brief Overload: vector by vector addition.
695
*/
696
ASTCENC_SIMD_INLINE vfloat4 operator+(vfloat4 a, vfloat4 b)
697
{
698
return vfloat4(_mm_add_ps(a.m, b.m));
699
}
700
701
/**
702
* @brief Overload: vector by vector subtraction.
703
*/
704
ASTCENC_SIMD_INLINE vfloat4 operator-(vfloat4 a, vfloat4 b)
705
{
706
return vfloat4(_mm_sub_ps(a.m, b.m));
707
}
708
709
/**
710
* @brief Overload: vector by vector multiplication.
711
*/
712
ASTCENC_SIMD_INLINE vfloat4 operator*(vfloat4 a, vfloat4 b)
713
{
714
return vfloat4(_mm_mul_ps(a.m, b.m));
715
}
716
717
/**
718
* @brief Overload: vector by vector division.
719
*/
720
ASTCENC_SIMD_INLINE vfloat4 operator/(vfloat4 a, vfloat4 b)
721
{
722
return vfloat4(_mm_div_ps(a.m, b.m));
723
}
724
725
/**
726
* @brief Overload: vector by vector equality.
727
*/
728
ASTCENC_SIMD_INLINE vmask4 operator==(vfloat4 a, vfloat4 b)
729
{
730
return vmask4(_mm_cmpeq_ps(a.m, b.m));
731
}
732
733
/**
734
* @brief Overload: vector by vector inequality.
735
*/
736
ASTCENC_SIMD_INLINE vmask4 operator!=(vfloat4 a, vfloat4 b)
737
{
738
return vmask4(_mm_cmpneq_ps(a.m, b.m));
739
}
740
741
/**
742
* @brief Overload: vector by vector less than.
743
*/
744
ASTCENC_SIMD_INLINE vmask4 operator<(vfloat4 a, vfloat4 b)
745
{
746
return vmask4(_mm_cmplt_ps(a.m, b.m));
747
}
748
749
/**
750
* @brief Overload: vector by vector greater than.
751
*/
752
ASTCENC_SIMD_INLINE vmask4 operator>(vfloat4 a, vfloat4 b)
753
{
754
return vmask4(_mm_cmpgt_ps(a.m, b.m));
755
}
756
757
/**
758
* @brief Overload: vector by vector less than or equal.
759
*/
760
ASTCENC_SIMD_INLINE vmask4 operator<=(vfloat4 a, vfloat4 b)
761
{
762
return vmask4(_mm_cmple_ps(a.m, b.m));
763
}
764
765
/**
766
* @brief Overload: vector by vector greater than or equal.
767
*/
768
ASTCENC_SIMD_INLINE vmask4 operator>=(vfloat4 a, vfloat4 b)
769
{
770
return vmask4(_mm_cmpge_ps(a.m, b.m));
771
}
772
773
/**
774
* @brief Return the min vector of two vectors.
775
*
776
* If either lane value is NaN, @c b will be returned for that lane.
777
*/
778
ASTCENC_SIMD_INLINE vfloat4 min(vfloat4 a, vfloat4 b)
779
{
780
// Do not reorder - second operand will return if either is NaN
781
return vfloat4(_mm_min_ps(a.m, b.m));
782
}
783
784
/**
785
* @brief Return the max vector of two vectors.
786
*
787
* If either lane value is NaN, @c b will be returned for that lane.
788
*/
789
ASTCENC_SIMD_INLINE vfloat4 max(vfloat4 a, vfloat4 b)
790
{
791
// Do not reorder - second operand will return if either is NaN
792
return vfloat4(_mm_max_ps(a.m, b.m));
793
}
794
795
/**
796
* @brief Return the absolute value of the float vector.
797
*/
798
ASTCENC_SIMD_INLINE vfloat4 abs(vfloat4 a)
799
{
800
return vfloat4(_mm_max_ps(_mm_sub_ps(_mm_setzero_ps(), a.m), a.m));
801
}
802
803
/**
804
* @brief Return a float rounded to the nearest integer value.
805
*/
806
ASTCENC_SIMD_INLINE vfloat4 round(vfloat4 a)
807
{
808
#if ASTCENC_SSE >= 41
809
constexpr int flags = _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC;
810
return vfloat4(_mm_round_ps(a.m, flags));
811
#else
812
__m128 v = a.m;
813
__m128 neg_zero = _mm_castsi128_ps(_mm_set1_epi32(static_cast<int>(0x80000000)));
814
__m128 no_fraction = _mm_set1_ps(8388608.0f);
815
__m128 abs_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));
816
__m128 sign = _mm_and_ps(v, neg_zero);
817
__m128 s_magic = _mm_or_ps(no_fraction, sign);
818
__m128 r1 = _mm_add_ps(v, s_magic);
819
r1 = _mm_sub_ps(r1, s_magic);
820
__m128 r2 = _mm_and_ps(v, abs_mask);
821
__m128 mask = _mm_cmple_ps(r2, no_fraction);
822
r2 = _mm_andnot_ps(mask, v);
823
r1 = _mm_and_ps(r1, mask);
824
return vfloat4(_mm_xor_ps(r1, r2));
825
#endif
826
}
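/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). Both the SSE4.1 path and the SSE2 fallback above round to nearest
 * with ties-to-even (assuming the default MXCSR rounding mode), so exact .5
 * values round towards the even neighbor.
 */
static inline vfloat4 round_ties_example()
{
	vfloat4 v(2.5f, 3.5f, -2.5f, 1.25f);
	return round(v);                  // { 2.0f, 4.0f, -2.0f, 1.0f }
}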
827
828
/**
829
* @brief Return the horizontal minimum of a vector.
830
*/
831
ASTCENC_SIMD_INLINE vfloat4 hmin(vfloat4 a)
832
{
833
a = min(a, vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 3, 2))));
834
a = min(a, vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 0, 1))));
835
return vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 0, 0)));
836
}
837
838
/**
839
* @brief Return the horizontal maximum of a vector.
840
*/
841
ASTCENC_SIMD_INLINE vfloat4 hmax(vfloat4 a)
842
{
843
a = max(a, vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 3, 2))));
844
a = max(a, vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 0, 1))));
845
return vfloat4(_mm_shuffle_ps(a.m, a.m, _MM_SHUFFLE(0, 0, 0, 0)));
846
}
847
848
/**
849
* @brief Return the horizontal sum of a vector as a scalar.
850
*/
851
ASTCENC_SIMD_INLINE float hadd_s(vfloat4 a)
852
{
853
// Add top and bottom halves, lane 1/0
854
__m128 t = _mm_add_ps(a.m, _mm_movehl_ps(a.m, a.m));
855
856
// Add top and bottom halves, lane 0 (_mm_hadd_ps exists but slow)
857
t = _mm_add_ss(t, _mm_shuffle_ps(t, t, 0x55));
858
859
return _mm_cvtss_f32(t);
860
}
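/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). hadd_s() reduces a vector to a scalar sum, so a 4-wide dot product
 * can be written as a lane-wise multiply followed by the horizontal add.
 */
static inline float dot4_example(vfloat4 a, vfloat4 b)
{
	return hadd_s(a * b);
}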
861
862
/**
863
* @brief Return the sqrt of the lanes in the vector.
864
*/
865
ASTCENC_SIMD_INLINE vfloat4 sqrt(vfloat4 a)
866
{
867
return vfloat4(_mm_sqrt_ps(a.m));
868
}
869
870
/**
871
* @brief Return lanes from @c b if @c cond is set, else @c a.
872
*/
873
ASTCENC_SIMD_INLINE vfloat4 select(vfloat4 a, vfloat4 b, vmask4 cond)
874
{
875
#if ASTCENC_SSE >= 41
876
return vfloat4(_mm_blendv_ps(a.m, b.m, cond.m));
877
#else
878
return vfloat4(_mm_or_ps(_mm_and_ps(cond.m, b.m), _mm_andnot_ps(cond.m, a.m)));
879
#endif
880
}
881
882
/**
883
 * @brief Load a vector of gathered results from an array.
884
*/
885
ASTCENC_SIMD_INLINE vfloat4 gatherf(const float* base, vint4 indices)
886
{
887
#if ASTCENC_AVX >= 2 && ASTCENC_X86_GATHERS != 0
888
return vfloat4(_mm_i32gather_ps(base, indices.m, 4));
889
#else
890
alignas(16) int idx[4];
891
storea(indices, idx);
892
return vfloat4(base[idx[0]], base[idx[1]], base[idx[2]], base[idx[3]]);
893
#endif
894
}
895
896
/**
897
 * @brief Load a vector of gathered results from an array using byte indices from memory.
898
*/
899
template<>
900
ASTCENC_SIMD_INLINE vfloat4 gatherf_byte_inds<vfloat4>(const float* base, const uint8_t* indices)
901
{
902
// Experimentally, in this particular use case (byte indices in memory),
903
// using 4 separate scalar loads is appreciably faster than using gathers
904
// even if they're available, on every x86 uArch tried, so always do the
905
// separate loads even when ASTCENC_X86_GATHERS is enabled.
906
//
907
// Tested on:
908
// - Intel Skylake-X, Coffee Lake, Crestmont, Redwood Cove
909
// - AMD Zen 2, Zen 4
910
return vfloat4(base[indices[0]], base[indices[1]], base[indices[2]], base[indices[3]]);
911
}
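/**
 * Usage sketch (illustrative only; the helper name and lookup table are
 * hypothetical). gatherf() fetches one float per lane from arbitrary indices;
 * depending on the ASTCENC_AVX and ASTCENC_X86_GATHERS build settings it
 * compiles to a hardware gather or to four scalar loads.
 */
static inline vfloat4 gather_example(const float* table)
{
	vint4 idx(3, 1, 4, 1);
	return gatherf(table, idx);       // { table[3], table[1], table[4], table[1] }
}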
912
913
/**
914
* @brief Store a vector to an unaligned memory address.
915
*/
916
ASTCENC_SIMD_INLINE void store(vfloat4 a, float* p)
917
{
918
_mm_storeu_ps(p, a.m);
919
}
920
921
/**
922
* @brief Store a vector to a 16B aligned memory address.
923
*/
924
ASTCENC_SIMD_INLINE void storea(vfloat4 a, float* p)
925
{
926
_mm_store_ps(p, a.m);
927
}
928
929
/**
930
 * @brief Return an integer value for a float vector, using truncation.
931
*/
932
ASTCENC_SIMD_INLINE vint4 float_to_int(vfloat4 a)
933
{
934
return vint4(_mm_cvttps_epi32(a.m));
935
}
936
937
/**
938
 * @brief Return an integer value for a float vector, using round-to-nearest.
939
*/
940
ASTCENC_SIMD_INLINE vint4 float_to_int_rtn(vfloat4 a)
941
{
942
a = a + vfloat4(0.5f);
943
return vint4(_mm_cvttps_epi32(a.m));
944
}
945
946
/**
947
* @brief Return a float value for an integer vector.
948
*/
949
ASTCENC_SIMD_INLINE vfloat4 int_to_float(vint4 a)
950
{
951
return vfloat4(_mm_cvtepi32_ps(a.m));
952
}
953
954
/**
955
* @brief Return a float16 value for a float vector, using round-to-nearest.
956
*/
957
ASTCENC_SIMD_INLINE vint4 float_to_float16(vfloat4 a)
958
{
959
#if ASTCENC_F16C >= 1
960
__m128i packedf16 = _mm_cvtps_ph(a.m, 0);
961
__m128i f16 = _mm_cvtepu16_epi32(packedf16);
962
return vint4(f16);
963
#else
964
return vint4(
965
float_to_sf16(a.lane<0>()),
966
float_to_sf16(a.lane<1>()),
967
float_to_sf16(a.lane<2>()),
968
float_to_sf16(a.lane<3>()));
969
#endif
970
}
971
972
/**
973
* @brief Return a float16 value for a float scalar, using round-to-nearest.
974
*/
975
static inline uint16_t float_to_float16(float a)
976
{
977
#if ASTCENC_F16C >= 1
978
__m128i f16 = _mm_cvtps_ph(_mm_set1_ps(a), 0);
979
return static_cast<uint16_t>(_mm_cvtsi128_si32(f16));
980
#else
981
return float_to_sf16(a);
982
#endif
983
}
984
985
/**
986
* @brief Return a float value for a float16 vector.
987
*/
988
ASTCENC_SIMD_INLINE vfloat4 float16_to_float(vint4 a)
989
{
990
#if ASTCENC_F16C >= 1
991
__m128i packed = _mm_packs_epi32(a.m, a.m);
992
__m128 f32 = _mm_cvtph_ps(packed);
993
return vfloat4(f32);
994
#else
995
return vfloat4(
996
sf16_to_float(static_cast<uint16_t>(a.lane<0>())),
997
sf16_to_float(static_cast<uint16_t>(a.lane<1>())),
998
sf16_to_float(static_cast<uint16_t>(a.lane<2>())),
999
sf16_to_float(static_cast<uint16_t>(a.lane<3>())));
1000
#endif
1001
}
1002
1003
/**
1004
* @brief Return a float value for a float16 scalar.
1005
*/
1006
ASTCENC_SIMD_INLINE float float16_to_float(uint16_t a)
1007
{
1008
#if ASTCENC_F16C >= 1
1009
__m128i packed = _mm_set1_epi16(static_cast<short>(a));
1010
__m128 f32 = _mm_cvtph_ps(packed);
1011
return _mm_cvtss_f32(f32);
1012
#else
1013
return sf16_to_float(a);
1014
#endif
1015
}
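/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). It round-trips a value through the float16 helpers above; 0.5f is
 * exactly representable in FP16, so the round trip is lossless for this input.
 */
static inline bool float16_roundtrip_example()
{
	uint16_t packed = float_to_float16(0.5f);
	return float16_to_float(packed) == 0.5f;
}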
1016
1017
/**
1018
* @brief Return a float value as an integer bit pattern (i.e. no conversion).
1019
*
1020
* It is a common trick to convert floats into integer bit patterns, perform
1021
* some bit hackery based on knowledge they are IEEE 754 layout, and then
1022
* convert them back again. This is the first half of that flip.
1023
*/
1024
ASTCENC_SIMD_INLINE vint4 float_as_int(vfloat4 a)
1025
{
1026
return vint4(_mm_castps_si128(a.m));
1027
}
1028
1029
/**
1030
 * @brief Return an integer value as a float bit pattern (i.e. no conversion).
1031
*
1032
* It is a common trick to convert floats into integer bit patterns, perform
1033
* some bit hackery based on knowledge they are IEEE 754 layout, and then
1034
* convert them back again. This is the second half of that flip.
1035
*/
1036
ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 v)
1037
{
1038
return vfloat4(_mm_castsi128_ps(v.m));
1039
}
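/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). A typical use of the bit-pattern casts: clear the IEEE 754 sign bit
 * with an integer mask to compute an absolute value without float arithmetic.
 */
static inline vfloat4 abs_via_bits_example(vfloat4 a)
{
	vint4 bits = float_as_int(a);
	vint4 cleared = bits & vint4(0x7FFFFFFF);
	return int_as_float(cleared);
}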
1040
1041
/*
1042
* Table structure for a 16x 8-bit entry table.
1043
*/
1044
struct vtable4_16x8 {
1045
#if ASTCENC_SSE >= 41
1046
vint4 t0;
1047
#else
1048
const uint8_t* data;
1049
#endif
1050
};
1051
1052
/*
1053
* Table structure for a 32x 8-bit entry table.
1054
*/
1055
struct vtable4_32x8 {
1056
#if ASTCENC_SSE >= 41
1057
vint4 t0;
1058
vint4 t1;
1059
#else
1060
const uint8_t* data;
1061
#endif
1062
};
1063
1064
/*
1065
* Table structure for a 64x 8-bit entry table.
1066
*/
1067
struct vtable4_64x8 {
1068
#if ASTCENC_SSE >= 41
1069
vint4 t0;
1070
vint4 t1;
1071
vint4 t2;
1072
vint4 t3;
1073
#else
1074
const uint8_t* data;
1075
#endif
1076
};
1077
1078
/**
1079
* @brief Prepare a vtable lookup table for 16x 8-bit entry table.
1080
*/
1081
ASTCENC_SIMD_INLINE void vtable_prepare(
1082
vtable4_16x8& table,
1083
const uint8_t* data
1084
) {
1085
#if ASTCENC_SSE >= 41
1086
table.t0 = vint4::load(data);
1087
#else
1088
table.data = data;
1089
#endif
1090
}
1091
1092
/**
1093
* @brief Prepare a vtable lookup table for 32x 8-bit entry table.
1094
*/
1095
ASTCENC_SIMD_INLINE void vtable_prepare(
1096
vtable4_32x8& table,
1097
const uint8_t* data
1098
) {
1099
#if ASTCENC_SSE >= 41
1100
table.t0 = vint4::load(data);
1101
table.t1 = vint4::load(data + 16);
1102
1103
table.t1 = table.t1 ^ table.t0;
1104
#else
1105
table.data = data;
1106
#endif
1107
}
1108
1109
/**
1110
* @brief Prepare a vtable lookup table 64x 8-bit entry table.
1111
*/
1112
ASTCENC_SIMD_INLINE void vtable_prepare(
1113
vtable4_64x8& table,
1114
const uint8_t* data
1115
) {
1116
#if ASTCENC_SSE >= 41
1117
table.t0 = vint4::load(data);
1118
table.t1 = vint4::load(data + 16);
1119
table.t2 = vint4::load(data + 32);
1120
table.t3 = vint4::load(data + 48);
1121
1122
table.t3 = table.t3 ^ table.t2;
1123
table.t2 = table.t2 ^ table.t1;
1124
table.t1 = table.t1 ^ table.t0;
1125
#else
1126
table.data = data;
1127
#endif
1128
}
1129
1130
/**
1131
* @brief Perform a vtable lookup in a 16x 8-bit table with 32-bit indices.
1132
*/
1133
ASTCENC_SIMD_INLINE vint4 vtable_lookup_32bit(
1134
const vtable4_16x8& tbl,
1135
vint4 idx
1136
) {
1137
#if ASTCENC_SSE >= 41
1138
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
1139
__m128i idxx = _mm_or_si128(idx.m, _mm_set1_epi32(static_cast<int>(0xFFFFFF00)));
1140
1141
__m128i result = _mm_shuffle_epi8(tbl.t0.m, idxx);
1142
return vint4(result);
1143
#else
1144
return vint4(tbl.data[idx.lane<0>()],
1145
tbl.data[idx.lane<1>()],
1146
tbl.data[idx.lane<2>()],
1147
tbl.data[idx.lane<3>()]);
1148
#endif
1149
}
1150
1151
/**
1152
* @brief Perform a vtable lookup in a 32x 8-bit table with 32-bit indices.
1153
*/
1154
ASTCENC_SIMD_INLINE vint4 vtable_lookup_32bit(
1155
const vtable4_32x8& tbl,
1156
vint4 idx
1157
) {
1158
#if ASTCENC_SSE >= 41
1159
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
1160
__m128i idxx = _mm_or_si128(idx.m, _mm_set1_epi32(static_cast<int>(0xFFFFFF00)));
1161
1162
__m128i result = _mm_shuffle_epi8(tbl.t0.m, idxx);
1163
idxx = _mm_sub_epi8(idxx, _mm_set1_epi8(16));
1164
1165
__m128i result2 = _mm_shuffle_epi8(tbl.t1.m, idxx);
1166
result = _mm_xor_si128(result, result2);
1167
1168
return vint4(result);
1169
#else
1170
return vint4(tbl.data[idx.lane<0>()],
1171
tbl.data[idx.lane<1>()],
1172
tbl.data[idx.lane<2>()],
1173
tbl.data[idx.lane<3>()]);
1174
#endif
1175
}
1176
1177
/**
1178
* @brief Perform a vtable lookup in a 64x 8-bit table with 32-bit indices.
1179
*/
1180
ASTCENC_SIMD_INLINE vint4 vtable_lookup_32bit(
1181
const vtable4_64x8& tbl,
1182
vint4 idx
1183
) {
1184
#if ASTCENC_SSE >= 41
1185
// Set index byte MSB to 1 for unused bytes so shuffle returns zero
1186
__m128i idxx = _mm_or_si128(idx.m, _mm_set1_epi32(static_cast<int>(0xFFFFFF00)));
1187
1188
__m128i result = _mm_shuffle_epi8(tbl.t0.m, idxx);
1189
idxx = _mm_sub_epi8(idxx, _mm_set1_epi8(16));
1190
1191
__m128i result2 = _mm_shuffle_epi8(tbl.t1.m, idxx);
1192
result = _mm_xor_si128(result, result2);
1193
idxx = _mm_sub_epi8(idxx, _mm_set1_epi8(16));
1194
1195
result2 = _mm_shuffle_epi8(tbl.t2.m, idxx);
1196
result = _mm_xor_si128(result, result2);
1197
idxx = _mm_sub_epi8(idxx, _mm_set1_epi8(16));
1198
1199
result2 = _mm_shuffle_epi8(tbl.t3.m, idxx);
1200
result = _mm_xor_si128(result, result2);
1201
1202
return vint4(result);
1203
#else
1204
return vint4(tbl.data[idx.lane<0>()],
1205
tbl.data[idx.lane<1>()],
1206
tbl.data[idx.lane<2>()],
1207
tbl.data[idx.lane<3>()]);
1208
#endif
1209
}
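/**
 * Usage sketch (illustrative only; the helper name and table contents are
 * hypothetical). A vtable is prepared once and reused for many lookups. For
 * the 32- and 64-entry variants the preparation step stores each 16-byte
 * chunk XORed with the previous one, which is what allows the SSE4.1 lookup
 * above to combine the per-chunk shuffle results with XOR.
 */
static inline vint4 vtable_lookup_example()
{
	// 16-entry table holding value = index * 3
	alignas(16) static const uint8_t table_data[16] {
		0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45
	};

	vtable4_16x8 table;
	vtable_prepare(table, table_data);

	vint4 idx(0, 5, 10, 15);
	return vtable_lookup_32bit(table, idx);   // { 0, 15, 30, 45 }
}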
1210
1211
/**
1212
* @brief Return a vector of interleaved RGBA data.
1213
*
1214
* Input vectors have the value stored in the bottom 8 bits of each lane,
1215
* with high bits set to zero.
1216
*
1217
* Output vector stores a single RGBA texel packed in each lane.
1218
*/
1219
ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)
1220
{
1221
// Workaround an Xcode compiler internal fault; note this is slower than slli_epi32
1222
// so we should revert this when we get the opportunity
1223
#if defined(__APPLE__)
1224
__m128i value = r.m;
1225
value = _mm_add_epi32(value, _mm_bslli_si128(g.m, 1));
1226
value = _mm_add_epi32(value, _mm_bslli_si128(b.m, 2));
1227
value = _mm_add_epi32(value, _mm_bslli_si128(a.m, 3));
1228
return vint4(value);
1229
#else
1230
__m128i value = r.m;
1231
value = _mm_add_epi32(value, _mm_slli_epi32(g.m, 8));
1232
value = _mm_add_epi32(value, _mm_slli_epi32(b.m, 16));
1233
value = _mm_add_epi32(value, _mm_slli_epi32(a.m, 24));
1234
return vint4(value);
1235
#endif
1236
}
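/**
 * Usage sketch (illustrative only; the helper name is not part of the astcenc
 * API). Each input vector carries one channel in the low byte of every lane;
 * the result packs one RGBA texel per 32-bit lane (R in the lowest byte),
 * ready to be written with store() or store_lanes_masked().
 */
static inline vint4 interleave_example()
{
	vint4 r(0x10, 0x11, 0x12, 0x13);
	vint4 g(0x20, 0x21, 0x22, 0x23);
	vint4 b(0x30, 0x31, 0x32, 0x33);
	vint4 a(0xFF);
	return interleave_rgba8(r, g, b, a);   // lane 0 = 0xFF302010
}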
1237
1238
/**
1239
* @brief Store a single vector lane to an unaligned address.
1240
*/
1241
ASTCENC_SIMD_INLINE void store_lane(uint8_t* base, int data)
1242
{
1243
std::memcpy(base, &data, sizeof(int));
1244
}
1245
1246
/**
1247
* @brief Store a vector, skipping masked lanes.
1248
*
1249
* All masked lanes must be at the end of vector, after all non-masked lanes.
1250
*/
1251
ASTCENC_SIMD_INLINE void store_lanes_masked(uint8_t* base, vint4 data, vmask4 mask)
1252
{
1253
#if ASTCENC_AVX >= 2
1254
_mm_maskstore_epi32(reinterpret_cast<int*>(base), _mm_castps_si128(mask.m), data.m);
1255
#else
1256
// Note - we cannot use _mm_maskmoveu_si128 as the underlying hardware doesn't guarantee
1257
// fault suppression on masked lanes so we can get page faults at the end of an image.
1258
if (mask.lane<3>() != 0.0f)
1259
{
1260
store(data, base);
1261
}
1262
else if (mask.lane<2>() != 0.0f)
1263
{
1264
store_lane(base + 0, data.lane<0>());
1265
store_lane(base + 4, data.lane<1>());
1266
store_lane(base + 8, data.lane<2>());
1267
}
1268
else if (mask.lane<1>() != 0.0f)
1269
{
1270
store_lane(base + 0, data.lane<0>());
1271
store_lane(base + 4, data.lane<1>());
1272
}
1273
else if (mask.lane<0>() != 0.0f)
1274
{
1275
store_lane(base + 0, data.lane<0>());
1276
}
1277
#endif
1278
}
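/**
 * Usage sketch (illustrative only; the helper name and output pointer are
 * hypothetical). store_lanes_masked() writes only the leading lanes selected
 * by the mask, which allows a partial row tail to be written without touching
 * memory past the end of the buffer.
 */
static inline void store_first_two_lanes_example(uint8_t* out)
{
	vint4 data(11, 22, 33, 44);
	vmask4 first_two(true, true, false, false);
	store_lanes_masked(out, data, first_two);   // writes 8 bytes: lanes 0 and 1
}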
1279
1280
#if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)
1281
1282
#define ASTCENC_USE_NATIVE_DOT_PRODUCT 1
1283
1284
/**
1285
* @brief Return the dot product for the full 4 lanes, returning scalar.
1286
*/
1287
ASTCENC_SIMD_INLINE float dot_s(vfloat4 a, vfloat4 b)
1288
{
1289
return _mm_cvtss_f32(_mm_dp_ps(a.m, b.m, 0xFF));
1290
}
1291
1292
/**
1293
* @brief Return the dot product for the full 4 lanes, returning vector.
1294
*/
1295
ASTCENC_SIMD_INLINE vfloat4 dot(vfloat4 a, vfloat4 b)
1296
{
1297
return vfloat4(_mm_dp_ps(a.m, b.m, 0xFF));
1298
}
1299
1300
/**
1301
* @brief Return the dot product for the bottom 3 lanes, returning scalar.
1302
*/
1303
ASTCENC_SIMD_INLINE float dot3_s(vfloat4 a, vfloat4 b)
1304
{
1305
return _mm_cvtss_f32(_mm_dp_ps(a.m, b.m, 0x77));
1306
}
1307
1308
/**
1309
* @brief Return the dot product for the bottom 3 lanes, returning vector.
1310
*/
1311
ASTCENC_SIMD_INLINE vfloat4 dot3(vfloat4 a, vfloat4 b)
1312
{
1313
return vfloat4(_mm_dp_ps(a.m, b.m, 0x77));
1314
}
1315
1316
#endif // #if defined(ASTCENC_NO_INVARIANCE) && (ASTCENC_SSE >= 41)
1317
1318
#if ASTCENC_POPCNT >= 1
1319
1320
#define ASTCENC_USE_NATIVE_POPCOUNT 1
1321
1322
/**
1323
* @brief Population bit count.
1324
*
1325
* @param v The value to population count.
1326
*
1327
* @return The number of 1 bits.
1328
*/
1329
ASTCENC_SIMD_INLINE int popcount(uint64_t v)
1330
{
1331
#if !defined(__x86_64__) && !defined(_M_AMD64)
1332
return static_cast<int>(__builtin_popcountll(v));
1333
#else
1334
return static_cast<int>(_mm_popcnt_u64(v));
1335
#endif
1336
}
1337
1338
#endif // ASTCENC_POPCNT >= 1
1339
1340
#endif // #ifndef ASTC_VECMATHLIB_SSE_4_H_INCLUDED
1341
1342