GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/astcenc/astcenc_vecmathlib_sve_8.h
// SPDX-License-Identifier: Apache-2.0
// ----------------------------------------------------------------------------
// Copyright 2019-2024 Arm Limited
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy
// of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// ----------------------------------------------------------------------------

/**
 * @brief 8x32-bit vectors, implemented using SVE.
 *
 * This module implements 8-wide 32-bit float, int, and mask vectors for Arm
 * SVE.
 *
 * There is a baseline level of functionality provided by all vector widths and
 * implementations. This is implemented using identical function signatures,
 * modulo data type, so we can use them as substitutable implementations in VLA
 * code.
 */

#ifndef ASTC_VECMATHLIB_SVE_8_H_INCLUDED
#define ASTC_VECMATHLIB_SVE_8_H_INCLUDED

#ifndef ASTCENC_SIMD_INLINE
	#error "Include astcenc_vecmathlib.h, do not include directly"
#endif

#include <cstdio>

typedef svbool_t svbool_8_t __attribute__((arm_sve_vector_bits(256)));
typedef svuint8_t svuint8_8_t __attribute__((arm_sve_vector_bits(256)));
typedef svuint16_t svuint16_8_t __attribute__((arm_sve_vector_bits(256)));
typedef svuint32_t svuint32_8_t __attribute__((arm_sve_vector_bits(256)));
typedef svint32_t svint32_8_t __attribute__((arm_sve_vector_bits(256)));
typedef svfloat32_t svfloat32_8_t __attribute__((arm_sve_vector_bits(256)));
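
// Note: the arm_sve_vector_bits(256) attribute pins these typedefs to a fixed
// 256-bit SVE vector length, so this header is only expected to compile for
// targets built with a fixed 256-bit vector width (typically via the
// -msve-vector-bits=256 compiler flag, which defines __ARM_FEATURE_SVE_BITS).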

// ============================================================================
// vfloat8 data type
// ============================================================================

/**
 * @brief Data type for 8-wide floats.
 */
struct vfloat8
{
	/**
	 * @brief Construct from zero-initialized value.
	 */
	ASTCENC_SIMD_INLINE vfloat8() = default;

	/**
	 * @brief Construct from 8 values loaded from an unaligned address.
	 *
	 * Consider using loada() instead if the data is aligned to the vector
	 * length.
	 */
	ASTCENC_SIMD_INLINE explicit vfloat8(const float *p)
	{
		m = svld1_f32(svptrue_b32(), p);
	}

	/**
	 * @brief Construct from 1 scalar value replicated across all lanes.
	 *
	 * Consider using zero() for constexpr zeros.
	 */
	ASTCENC_SIMD_INLINE explicit vfloat8(float a)
	{
		m = svdup_f32(a);
	}

	/**
	 * @brief Construct from an existing SIMD register.
	 */
	ASTCENC_SIMD_INLINE explicit vfloat8(svfloat32_8_t a)
	{
		m = a;
	}

	/**
	 * @brief Factory that returns a vector of zeros.
	 */
	static ASTCENC_SIMD_INLINE vfloat8 zero()
	{
		return vfloat8(0.0f);
	}

	/**
	 * @brief Factory that returns a replicated scalar loaded from memory.
	 */
	static ASTCENC_SIMD_INLINE vfloat8 load1(const float* p)
	{
		return vfloat8(*p);
	}

	/**
	 * @brief Factory that returns a vector loaded from 32B aligned memory.
	 */
	static ASTCENC_SIMD_INLINE vfloat8 loada(const float* p)
	{
		return vfloat8(p);
	}

	/**
	 * @brief The vector ...
	 */
	svfloat32_8_t m;
};
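
/*
 * Usage sketch (illustrative example, not part of the library API; uses the
 * operators and stores defined later in this header):
 *
 *     alignas(32) float data[8] { 1, 2, 3, 4, 5, 6, 7, 8 };
 *     vfloat8 v = vfloat8::loada(data);   // aligned 8-wide load
 *     v = v * 2.0f + vfloat8(1.0f);       // lane-wise arithmetic
 *     storea(v, data);                    // aligned 8-wide store
 */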

// ============================================================================
// vint8 data type
// ============================================================================

/**
 * @brief Data type for 8-wide ints.
 */
struct vint8
{
	/**
	 * @brief Construct from zero-initialized value.
	 */
	ASTCENC_SIMD_INLINE vint8() = default;

	/**
	 * @brief Construct from 8 values loaded from an unaligned address.
	 *
	 * Consider using loada() instead if the data is aligned to the vector
	 * length.
	 */
	ASTCENC_SIMD_INLINE explicit vint8(const int *p)
	{
		m = svld1_s32(svptrue_b32(), p);
	}

	/**
	 * @brief Construct from 8 uint8_t loaded from an unaligned address.
	 */
	ASTCENC_SIMD_INLINE explicit vint8(const uint8_t *p)
	{
		// Load 8-bit values and expand to 32-bits
		m = svld1ub_s32(svptrue_b32(), p);
	}

	/**
	 * @brief Construct from 1 scalar value replicated across all lanes.
	 *
	 * Consider using zero() for constexpr zeros.
	 */
	ASTCENC_SIMD_INLINE explicit vint8(int a)
	{
		m = svdup_s32(a);
	}

	/**
	 * @brief Construct from an existing SIMD register.
	 */
	ASTCENC_SIMD_INLINE explicit vint8(svint32_8_t a)
	{
		m = a;
	}

	/**
	 * @brief Factory that returns a vector of zeros.
	 */
	static ASTCENC_SIMD_INLINE vint8 zero()
	{
		return vint8(0);
	}

	/**
	 * @brief Factory that returns a replicated scalar loaded from memory.
	 */
	static ASTCENC_SIMD_INLINE vint8 load1(const int* p)
	{
		return vint8(*p);
	}

	/**
	 * @brief Factory that returns a vector loaded from unaligned memory.
	 */
	static ASTCENC_SIMD_INLINE vint8 load(const uint8_t* p)
	{
		svuint8_8_t data = svld1_u8(svptrue_b8(), p);
		return vint8(svreinterpret_s32_u8(data));
	}

	/**
	 * @brief Factory that returns a vector loaded from 32B aligned memory.
	 */
	static ASTCENC_SIMD_INLINE vint8 loada(const int* p)
	{
		return vint8(p);
	}

	/**
	 * @brief Factory that returns a vector containing the lane IDs.
	 */
	static ASTCENC_SIMD_INLINE vint8 lane_id()
	{
		return vint8(svindex_s32(0, 1));
	}

	/**
	 * @brief The vector ...
	 */
	svint32_8_t m;
};

// ============================================================================
// vmask8 data type
// ============================================================================

/**
 * @brief Data type for 8-wide control plane masks.
 */
struct vmask8
{
	/**
	 * @brief Construct from an existing SIMD register.
	 */
	ASTCENC_SIMD_INLINE explicit vmask8(svbool_8_t a)
	{
		m = a;
	}

	/**
	 * @brief Construct from 1 scalar value.
	 */
	ASTCENC_SIMD_INLINE explicit vmask8(bool a)
	{
		m = svdup_b32(a);
	}

	/**
	 * @brief The vector ...
	 */
	svbool_8_t m;
};

// ============================================================================
// vmask8 operators and functions
// ============================================================================

/**
 * @brief Overload: mask union (or).
 */
ASTCENC_SIMD_INLINE vmask8 operator|(vmask8 a, vmask8 b)
{
	return vmask8(svorr_z(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: mask intersect (and).
 */
ASTCENC_SIMD_INLINE vmask8 operator&(vmask8 a, vmask8 b)
{
	return vmask8(svand_z(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: mask difference (xor).
 */
ASTCENC_SIMD_INLINE vmask8 operator^(vmask8 a, vmask8 b)
{
	return vmask8(sveor_z(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: mask invert (not).
 */
ASTCENC_SIMD_INLINE vmask8 operator~(vmask8 a)
{
	return vmask8(svnot_z(svptrue_b32(), a.m));
}

/**
 * @brief Return an 8-bit mask code indicating mask status.
 *
 * bit0 = lane 0
 */
ASTCENC_SIMD_INLINE unsigned int mask(vmask8 a)
{
	alignas(32) const int shifta[8] { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80 };
	svint32_8_t template_vals = svld1_s32(svptrue_b32(), shifta);
	svint32_8_t active_vals = svsel_s32(a.m, template_vals, svdup_s32(0));
	return static_cast<unsigned int>(svaddv_s32(svptrue_b32(), active_vals));
}
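
/*
 * Worked example (illustrative): for a mask with lanes 0, 1, and 4 enabled,
 * the select keeps { 0x1, 0x2, 0, 0, 0x10, 0, 0, 0 } and the reduction sums
 * them, so mask() returns 0x13. A single lane can then be tested with
 * (mask(m) >> lane) & 1.
 */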

/**
 * @brief True if any lanes are enabled, false otherwise.
 */
ASTCENC_SIMD_INLINE bool any(vmask8 a)
{
	return svptest_any(svptrue_b32(), a.m);
}

/**
 * @brief True if all lanes are enabled, false otherwise.
 */
ASTCENC_SIMD_INLINE bool all(vmask8 a)
{
	return !svptest_any(svptrue_b32(), (~a).m);
}

// ============================================================================
// vint8 operators and functions
// ============================================================================

/**
 * @brief Overload: vector by vector addition.
 */
ASTCENC_SIMD_INLINE vint8 operator+(vint8 a, vint8 b)
{
	return vint8(svadd_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector incremental addition.
 */
ASTCENC_SIMD_INLINE vint8& operator+=(vint8& a, const vint8& b)
{
	a = a + b;
	return a;
}

/**
 * @brief Overload: vector by vector subtraction.
 */
ASTCENC_SIMD_INLINE vint8 operator-(vint8 a, vint8 b)
{
	return vint8(svsub_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector multiplication.
 */
ASTCENC_SIMD_INLINE vint8 operator*(vint8 a, vint8 b)
{
	return vint8(svmul_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector bit invert.
 */
ASTCENC_SIMD_INLINE vint8 operator~(vint8 a)
{
	return vint8(svnot_s32_x(svptrue_b32(), a.m));
}

/**
 * @brief Overload: vector by vector bitwise or.
 */
ASTCENC_SIMD_INLINE vint8 operator|(vint8 a, vint8 b)
{
	return vint8(svorr_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector bitwise and.
 */
ASTCENC_SIMD_INLINE vint8 operator&(vint8 a, vint8 b)
{
	return vint8(svand_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector bitwise xor.
 */
ASTCENC_SIMD_INLINE vint8 operator^(vint8 a, vint8 b)
{
	return vint8(sveor_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector equality.
 */
ASTCENC_SIMD_INLINE vmask8 operator==(vint8 a, vint8 b)
{
	return vmask8(svcmpeq_s32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector inequality.
 */
ASTCENC_SIMD_INLINE vmask8 operator!=(vint8 a, vint8 b)
{
	return vmask8(svcmpne_s32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector less than.
 */
ASTCENC_SIMD_INLINE vmask8 operator<(vint8 a, vint8 b)
{
	return vmask8(svcmplt_s32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector greater than.
 */
ASTCENC_SIMD_INLINE vmask8 operator>(vint8 a, vint8 b)
{
	return vmask8(svcmpgt_s32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Logical shift left.
 */
template <int s> ASTCENC_SIMD_INLINE vint8 lsl(vint8 a)
{
	return vint8(svlsl_n_s32_x(svptrue_b32(), a.m, s));
}

/**
 * @brief Arithmetic shift right.
 */
template <int s> ASTCENC_SIMD_INLINE vint8 asr(vint8 a)
{
	return vint8(svasr_n_s32_x(svptrue_b32(), a.m, s));
}

/**
 * @brief Logical shift right.
 */
template <int s> ASTCENC_SIMD_INLINE vint8 lsr(vint8 a)
{
	svuint32_8_t r = svreinterpret_u32_s32(a.m);
	r = svlsr_n_u32_x(svptrue_b32(), r, s);
	return vint8(svreinterpret_s32_u32(r));
}

/**
 * @brief Return the min vector of two vectors.
 */
ASTCENC_SIMD_INLINE vint8 min(vint8 a, vint8 b)
{
	return vint8(svmin_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Return the max vector of two vectors.
 */
ASTCENC_SIMD_INLINE vint8 max(vint8 a, vint8 b)
{
	return vint8(svmax_s32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Return the horizontal minimum of a vector.
 */
ASTCENC_SIMD_INLINE vint8 hmin(vint8 a)
{
	return vint8(svminv_s32(svptrue_b32(), a.m));
}

/**
 * @brief Return the horizontal minimum of a vector.
 */
ASTCENC_SIMD_INLINE int hmin_s(vint8 a)
{
	return svminv_s32(svptrue_b32(), a.m);
}

/**
 * @brief Return the horizontal maximum of a vector.
 */
ASTCENC_SIMD_INLINE vint8 hmax(vint8 a)
{
	return vint8(svmaxv_s32(svptrue_b32(), a.m));
}

/**
 * @brief Return the horizontal maximum of a vector.
 */
ASTCENC_SIMD_INLINE int hmax_s(vint8 a)
{
	return svmaxv_s32(svptrue_b32(), a.m);
}

/**
 * @brief Generate a vint8 from a size_t.
 */
ASTCENC_SIMD_INLINE vint8 vint8_from_size(size_t a)
{
	assert(a <= std::numeric_limits<int>::max());
	return vint8(static_cast<int>(a));
}

/**
 * @brief Store a vector to a 32B aligned memory address.
 */
ASTCENC_SIMD_INLINE void storea(vint8 a, int* p)
{
	svst1_s32(svptrue_b32(), p, a.m);
}

/**
 * @brief Store a vector to an unaligned memory address.
 */
ASTCENC_SIMD_INLINE void store(vint8 a, int* p)
{
	svst1_s32(svptrue_b32(), p, a.m);
}

/**
 * @brief Store lowest N (vector width) bytes into an unaligned address.
 */
ASTCENC_SIMD_INLINE void store_nbytes(vint8 a, uint8_t* p)
{
	svuint8_8_t r = svreinterpret_u8_s32(a.m);
	svst1_u8(svptrue_pat_b8(SV_VL8), p, r);
}

/**
 * @brief Pack the low 8 bits of N (vector width) lanes and store them to an
 *        unaligned address.
 */
ASTCENC_SIMD_INLINE void pack_and_store_low_bytes(vint8 v, uint8_t* p)
{
	svuint32_8_t data = svreinterpret_u32_s32(v.m);
	svst1b_u32(svptrue_b32(), p, data);
}

/**
 * @brief Return lanes from @c b if @c cond is set, else @c a.
 */
ASTCENC_SIMD_INLINE vint8 select(vint8 a, vint8 b, vmask8 cond)
{
	return vint8(svsel_s32(cond.m, b.m, a.m));
}

// ============================================================================
// vfloat8 operators and functions
// ============================================================================

/**
 * @brief Overload: vector by vector addition.
 */
ASTCENC_SIMD_INLINE vfloat8 operator+(vfloat8 a, vfloat8 b)
{
	return vfloat8(svadd_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector incremental addition.
 */
ASTCENC_SIMD_INLINE vfloat8& operator+=(vfloat8& a, const vfloat8& b)
{
	a = a + b;
	return a;
}

/**
 * @brief Overload: vector by vector subtraction.
 */
ASTCENC_SIMD_INLINE vfloat8 operator-(vfloat8 a, vfloat8 b)
{
	return vfloat8(svsub_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector multiplication.
 */
ASTCENC_SIMD_INLINE vfloat8 operator*(vfloat8 a, vfloat8 b)
{
	return vfloat8(svmul_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by scalar multiplication.
 */
ASTCENC_SIMD_INLINE vfloat8 operator*(vfloat8 a, float b)
{
	return vfloat8(svmul_f32_x(svptrue_b32(), a.m, svdup_f32(b)));
}

/**
 * @brief Overload: scalar by vector multiplication.
 */
ASTCENC_SIMD_INLINE vfloat8 operator*(float a, vfloat8 b)
{
	return vfloat8(svmul_f32_x(svptrue_b32(), svdup_f32(a), b.m));
}

/**
 * @brief Overload: vector by vector division.
 */
ASTCENC_SIMD_INLINE vfloat8 operator/(vfloat8 a, vfloat8 b)
{
	return vfloat8(svdiv_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by scalar division.
 */
ASTCENC_SIMD_INLINE vfloat8 operator/(vfloat8 a, float b)
{
	return vfloat8(svdiv_f32_x(svptrue_b32(), a.m, svdup_f32(b)));
}

/**
 * @brief Overload: scalar by vector division.
 */
ASTCENC_SIMD_INLINE vfloat8 operator/(float a, vfloat8 b)
{
	return vfloat8(svdiv_f32_x(svptrue_b32(), svdup_f32(a), b.m));
}

/**
 * @brief Overload: vector by vector equality.
 */
ASTCENC_SIMD_INLINE vmask8 operator==(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmpeq_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector inequality.
 */
ASTCENC_SIMD_INLINE vmask8 operator!=(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmpne_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector less than.
 */
ASTCENC_SIMD_INLINE vmask8 operator<(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmplt_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector greater than.
 */
ASTCENC_SIMD_INLINE vmask8 operator>(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmpgt_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector less than or equal.
 */
ASTCENC_SIMD_INLINE vmask8 operator<=(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmple_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Overload: vector by vector greater than or equal.
 */
ASTCENC_SIMD_INLINE vmask8 operator>=(vfloat8 a, vfloat8 b)
{
	return vmask8(svcmpge_f32(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Return the min vector of two vectors.
 *
 * If either lane value is NaN, the other lane will be returned.
 */
ASTCENC_SIMD_INLINE vfloat8 min(vfloat8 a, vfloat8 b)
{
	return vfloat8(svminnm_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Return the min vector of a vector and a scalar.
 *
 * If either lane value is NaN, the other lane will be returned.
 */
ASTCENC_SIMD_INLINE vfloat8 min(vfloat8 a, float b)
{
	return min(a, vfloat8(b));
}

/**
 * @brief Return the max vector of two vectors.
 *
 * If either lane value is NaN, the other lane will be returned.
 */
ASTCENC_SIMD_INLINE vfloat8 max(vfloat8 a, vfloat8 b)
{
	return vfloat8(svmaxnm_f32_x(svptrue_b32(), a.m, b.m));
}

/**
 * @brief Return the max vector of a vector and a scalar.
 *
 * If either lane value is NaN, the other lane will be returned.
 */
ASTCENC_SIMD_INLINE vfloat8 max(vfloat8 a, float b)
{
	return max(a, vfloat8(b));
}

/**
 * @brief Return the clamped value between min and max.
 *
 * It is assumed that neither @c min nor @c max are NaN values. If @c a is NaN
 * then @c min will be returned for that lane.
 */
ASTCENC_SIMD_INLINE vfloat8 clamp(float minv, float maxv, vfloat8 a)
{
	return min(max(a, minv), maxv);
}

/**
 * @brief Return a clamped value between 0.0f and 1.0f.
 *
 * If @c a is NaN then zero will be returned for that lane.
 */
ASTCENC_SIMD_INLINE vfloat8 clampzo(vfloat8 a)
{
	return clamp(0.0f, 1.0f, a);
}

/**
 * @brief Return the absolute value of the float vector.
 */
ASTCENC_SIMD_INLINE vfloat8 abs(vfloat8 a)
{
	return vfloat8(svabs_f32_x(svptrue_b32(), a.m));
}

/**
 * @brief Return a float rounded to the nearest integer value.
 */
ASTCENC_SIMD_INLINE vfloat8 round(vfloat8 a)
{
	return vfloat8(svrintn_f32_x(svptrue_b32(), a.m));
}

/**
 * @brief Return the horizontal minimum of a vector.
 */
ASTCENC_SIMD_INLINE vfloat8 hmin(vfloat8 a)
{
	return vfloat8(svminnmv_f32(svptrue_b32(), a.m));
}

/**
 * @brief Return the horizontal minimum of a vector.
 */
ASTCENC_SIMD_INLINE float hmin_s(vfloat8 a)
{
	return svminnmv_f32(svptrue_b32(), a.m);
}

/**
 * @brief Return the horizontal maximum of a vector.
 */
ASTCENC_SIMD_INLINE vfloat8 hmax(vfloat8 a)
{
	return vfloat8(svmaxnmv_f32(svptrue_b32(), a.m));
}

/**
 * @brief Return the horizontal maximum of a vector.
 */
ASTCENC_SIMD_INLINE float hmax_s(vfloat8 a)
{
	return svmaxnmv_f32(svptrue_b32(), a.m);
}

/**
 * @brief Return the horizontal sum of a vector.
 */
ASTCENC_SIMD_INLINE float hadd_s(vfloat8 a)
{
	// Can't use svaddv - it's not invariant
	vfloat4 lo(svget_neonq_f32(a.m));
	vfloat4 hi(svget_neonq_f32(svext_f32(a.m, a.m, 4)));
	return hadd_s(lo) + hadd_s(hi);
}

/**
 * @brief Return lanes from @c b if @c cond is set, else @c a.
 */
ASTCENC_SIMD_INLINE vfloat8 select(vfloat8 a, vfloat8 b, vmask8 cond)
{
	return vfloat8(svsel_f32(cond.m, b.m, a.m));
}

/**
 * @brief Accumulate lane-wise sums for a vector, folded 4-wide.
 *
 * This is invariant with 4-wide implementations.
 */
ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a)
{
	vfloat4 lo(svget_neonq_f32(a.m));
	haccumulate(accum, lo);

	vfloat4 hi(svget_neonq_f32(svext_f32(a.m, a.m, 4)));
	haccumulate(accum, hi);
}
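
// The 8-wide input above is folded as two 4-wide halves, low lanes first, so
// the accumulation order matches what a pure 4-wide build would produce and
// the result stays bit-identical across vector widths.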

/**
 * @brief Accumulate lane-wise sums for a vector.
 *
 * This is NOT invariant with 4-wide implementations.
 */
ASTCENC_SIMD_INLINE void haccumulate(vfloat8& accum, vfloat8 a)
{
	accum += a;
}

/**
 * @brief Accumulate masked lane-wise sums for a vector, folded 4-wide.
 *
 * This is invariant with 4-wide implementations.
 */
ASTCENC_SIMD_INLINE void haccumulate(vfloat4& accum, vfloat8 a, vmask8 m)
{
	a = select(vfloat8::zero(), a, m);
	haccumulate(accum, a);
}

/**
 * @brief Accumulate masked lane-wise sums for a vector.
 *
 * This is NOT invariant with 4-wide implementations.
 */
ASTCENC_SIMD_INLINE void haccumulate(vfloat8& accum, vfloat8 a, vmask8 m)
{
	accum.m = svadd_f32_m(m.m, accum.m, a.m);
}

/**
 * @brief Return the sqrt of the lanes in the vector.
 */
ASTCENC_SIMD_INLINE vfloat8 sqrt(vfloat8 a)
{
	return vfloat8(svsqrt_f32_x(svptrue_b32(), a.m));
}

/**
 * @brief Load a vector of gathered results from an array.
 */
ASTCENC_SIMD_INLINE vfloat8 gatherf(const float* base, vint8 indices)
{
	return vfloat8(svld1_gather_s32index_f32(svptrue_b32(), base, indices.m));
}

/**
 * @brief Load a vector of gathered results from an array using byte indices
 *        from memory.
 */
template<>
ASTCENC_SIMD_INLINE vfloat8 gatherf_byte_inds<vfloat8>(const float* base, const uint8_t* indices)
{
	svint32_t offsets = svld1ub_s32(svptrue_b32(), indices);
	return vfloat8(svld1_gather_s32index_f32(svptrue_b32(), base, offsets));
}

/**
 * @brief Store a vector to an unaligned memory address.
 */
ASTCENC_SIMD_INLINE void store(vfloat8 a, float* p)
{
	svst1_f32(svptrue_b32(), p, a.m);
}

/**
 * @brief Store a vector to a 32B aligned memory address.
 */
ASTCENC_SIMD_INLINE void storea(vfloat8 a, float* p)
{
	svst1_f32(svptrue_b32(), p, a.m);
}

/**
 * @brief Return an integer value for a float vector, using truncation.
 */
ASTCENC_SIMD_INLINE vint8 float_to_int(vfloat8 a)
{
	return vint8(svcvt_s32_f32_x(svptrue_b32(), a.m));
}

/**
 * @brief Return an integer value for a float vector, using round-to-nearest.
 */
ASTCENC_SIMD_INLINE vint8 float_to_int_rtn(vfloat8 a)
{
	a = a + vfloat8(0.5f);
	return vint8(svcvt_s32_f32_x(svptrue_b32(), a.m));
}
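
// Note: the round-to-nearest conversion above is implemented as add-0.5 and
// truncate towards zero, which only behaves as round-to-nearest for
// non-negative inputs; callers are assumed to pass values >= 0.0f.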

/**
 * @brief Return a float value for an integer vector.
 */
ASTCENC_SIMD_INLINE vfloat8 int_to_float(vint8 a)
{
	return vfloat8(svcvt_f32_s32_x(svptrue_b32(), a.m));
}

/**
 * @brief Return a float value as an integer bit pattern (i.e. no conversion).
 *
 * It is a common trick to convert floats into integer bit patterns, perform
 * some bit hackery based on knowledge they are IEEE 754 layout, and then
 * convert them back again. This is the first half of that flip.
 */
ASTCENC_SIMD_INLINE vint8 float_as_int(vfloat8 a)
{
	return vint8(svreinterpret_s32_f32(a.m));
}

/**
 * @brief Return an integer value as a float bit pattern (i.e. no conversion).
 *
 * It is a common trick to convert floats into integer bit patterns, perform
 * some bit hackery based on knowledge they are IEEE 754 layout, and then
 * convert them back again. This is the second half of that flip.
 */
ASTCENC_SIMD_INLINE vfloat8 int_as_float(vint8 a)
{
	return vfloat8(svreinterpret_f32_s32(a.m));
}
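
/*
 * Example of the round trip (illustrative sketch): a bitwise absolute value
 * can be built by clearing the IEEE 754 sign bit.
 *
 *     vint8 bits = float_as_int(v);
 *     bits = bits & vint8(0x7FFFFFFF);   // clear the sign bit in every lane
 *     vfloat8 absv = int_as_float(bits);
 */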

/*
 * Table structure for a 16x 8-bit entry table.
 */
struct vtable8_16x8 {
	svuint8_8_t t0;
};

/*
 * Table structure for a 32x 8-bit entry table.
 */
struct vtable8_32x8 {
	svuint8_8_t t0;
};

/*
 * Table structure for a 64x 8-bit entry table.
 */
struct vtable8_64x8 {
	svuint8_8_t t0;
	svuint8_8_t t1;
};

/**
 * @brief Prepare a vtable lookup table for 16x 8-bit entry table.
 */
ASTCENC_SIMD_INLINE void vtable_prepare(
	vtable8_16x8& table,
	const uint8_t* data
) {
	// Top half of register will be zeros
	table.t0 = svld1_u8(svptrue_pat_b8(SV_VL16), data);
}

/**
 * @brief Prepare a vtable lookup table for 32x 8-bit entry table.
 */
ASTCENC_SIMD_INLINE void vtable_prepare(
	vtable8_32x8& table,
	const uint8_t* data
) {
	table.t0 = svld1_u8(svptrue_b8(), data);
}

/**
 * @brief Prepare a vtable lookup table for 64x 8-bit entry table.
 */
ASTCENC_SIMD_INLINE void vtable_prepare(
	vtable8_64x8& table,
	const uint8_t* data
) {
	table.t0 = svld1_u8(svptrue_b8(), data);
	table.t1 = svld1_u8(svptrue_b8(), data + 32);
}

/**
 * @brief Perform a vtable lookup in a 16x 8-bit table with 32-bit indices.
 */
ASTCENC_SIMD_INLINE vint8 vtable_lookup_32bit(
	const vtable8_16x8& tbl,
	vint8 idx
) {
	// Set index byte above max index for unused bytes so table lookup returns zero
	svint32_8_t idx_masked = svorr_s32_x(svptrue_b32(), idx.m, svdup_s32(0xFFFFFF00));
	svuint8_8_t idx_bytes = svreinterpret_u8_s32(idx_masked);

	svuint8_8_t result = svtbl_u8(tbl.t0, idx_bytes);
	return vint8(svreinterpret_s32_u8(result));
}

/**
 * @brief Perform a vtable lookup in a 32x 8-bit table with 32-bit indices.
 */
ASTCENC_SIMD_INLINE vint8 vtable_lookup_32bit(
	const vtable8_32x8& tbl,
	vint8 idx
) {
	// Set index byte above max index for unused bytes so table lookup returns zero
	svint32_8_t idx_masked = svorr_s32_x(svptrue_b32(), idx.m, svdup_s32(0xFFFFFF00));
	svuint8_8_t idx_bytes = svreinterpret_u8_s32(idx_masked);

	svuint8_8_t result = svtbl_u8(tbl.t0, idx_bytes);
	return vint8(svreinterpret_s32_u8(result));
}

/**
 * @brief Perform a vtable lookup in a 64x 8-bit table with 32-bit indices.
 *
 * Future: SVE2 can directly do svtbl2_u8() for a two register table.
 */
ASTCENC_SIMD_INLINE vint8 vtable_lookup_32bit(
	const vtable8_64x8& tbl,
	vint8 idx
) {
	// Set index byte above max index for unused bytes so table lookup returns zero
	svint32_8_t idxm = svorr_s32_x(svptrue_b32(), idx.m, svdup_s32(0xFFFFFF00));

	svuint8_8_t idxm8 = svreinterpret_u8_s32(idxm);
	svuint8_8_t t0_lookup = svtbl_u8(tbl.t0, idxm8);

	idxm8 = svsub_u8_x(svptrue_b8(), idxm8, svdup_u8(32));
	svuint8_8_t t1_lookup = svtbl_u8(tbl.t1, idxm8);

	svuint8_8_t result = svorr_u8_x(svptrue_b8(), t0_lookup, t1_lookup);
	return vint8(svreinterpret_s32_u8(result));
}
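
// The OR combine above is safe because svtbl_u8 returns zero for any index
// that is out of range for its 32-byte table: indices 0-31 only hit t0,
// indices 32-63 (after the subtract) only hit t1, and the 0xFF padding bytes
// hit neither.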

/**
 * @brief Return a vector of interleaved RGBA data.
 *
 * Input vectors have the value stored in the bottom 8 bits of each lane,
 * with high bits set to zero.
 *
 * Output vector stores a single RGBA texel packed in each lane.
 */
ASTCENC_SIMD_INLINE vint8 interleave_rgba8(vint8 r, vint8 g, vint8 b, vint8 a)
{
	return r + lsl<8>(g) + lsl<16>(b) + lsl<24>(a);
}
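
/*
 * Worked example (illustrative): with r = 0x12, g = 0x34, b = 0x56, a = 0x78
 * in a lane, the packed lane value is 0x78563412, i.e. R in the lowest byte
 * and A in the highest byte of each 32-bit texel.
 */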

/**
 * @brief Store a vector, skipping masked lanes.
 *
 * All masked lanes must be at the end of vector, after all non-masked lanes.
 */
ASTCENC_SIMD_INLINE void store_lanes_masked(uint8_t* base, vint8 data, vmask8 mask)
{
	svst1_s32(mask.m, reinterpret_cast<int32_t*>(base), data.m);
}
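
/*
 * Usage sketch (illustrative; `count`, `dst`, and `texels` are hypothetical
 * caller-side names, and the mask construction assumes the usual implicit
 * conversion from svbool_t to the fixed-length svbool_8_t):
 *
 *     vmask8 m(svwhilelt_b32(0u, count));   // lanes [0, count) enabled
 *     store_lanes_masked(dst, texels, m);   // writes only the leading lanes
 */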

/**
 * @brief Debug function to print a vector of ints.
 */
ASTCENC_SIMD_INLINE void print(vint8 a)
{
	alignas(32) int v[8];
	storea(a, v);
	printf("v8_i32:\n %8d %8d %8d %8d %8d %8d %8d %8d\n",
	       v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}

/**
 * @brief Debug function to print a vector of ints, in hexadecimal.
 */
ASTCENC_SIMD_INLINE void printx(vint8 a)
{
	alignas(32) int v[8];
	storea(a, v);
	printf("v8_i32:\n %08x %08x %08x %08x %08x %08x %08x %08x\n",
	       v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}

/**
 * @brief Debug function to print a vector of floats.
 */
ASTCENC_SIMD_INLINE void print(vfloat8 a)
{
	alignas(32) float v[8];
	storea(a, v);
	printf("v8_f32:\n %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f %0.4f\n",
	       static_cast<double>(v[0]), static_cast<double>(v[1]),
	       static_cast<double>(v[2]), static_cast<double>(v[3]),
	       static_cast<double>(v[4]), static_cast<double>(v[5]),
	       static_cast<double>(v[6]), static_cast<double>(v[7]));
}

/**
 * @brief Debug function to print a vector of masks.
 */
ASTCENC_SIMD_INLINE void print(vmask8 a)
{
	print(select(vint8(0), vint8(1), a));
}

#endif // #ifndef ASTC_VECMATHLIB_SVE_8_H_INCLUDED