GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmliblzma/liblzma/check/crc_x86_clmul.h
// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       crc_x86_clmul.h
/// \brief      CRC32 and CRC64 implementations using CLMUL instructions.
///
/// The CRC32 and CRC64 implementations use 32/64-bit x86 SSSE3, SSE4.1, and
/// CLMUL instructions. This is compatible with Elbrus 2000 (E2K) too.
///
/// They were derived from
/// https://www.researchgate.net/publication/263424619_Fast_CRC_computation
/// and the public domain code from https://github.com/rawrunprotected/crc
/// (URLs were checked on 2023-10-14).
///
/// While this file has both CRC32 and CRC64 implementations, only one
/// should be built at a time to ensure that crc_simd_body() is inlined
/// even with compilers with which lzma_always_inline expands to plain inline.
/// The version to build is selected by defining BUILDING_CRC32_CLMUL or
/// BUILDING_CRC64_CLMUL before including this file.
///
/// FIXME: Builds for 32-bit x86 use the assembly .S files by default
/// unless configured with --disable-assembler. Even then the lookup table
/// isn't omitted in crc64_table.c since it doesn't know that assembly
/// code has been disabled.
//
//  Authors:    Ilya Kurdyukov
//              Hans Jansen
//              Lasse Collin
//              Jia Tan
//
///////////////////////////////////////////////////////////////////////////////
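
// Usage sketch (not from the upstream sources, shown only to illustrate the
// paragraph above): the file that builds the CRC32 version is expected to do
// roughly
//
//     #define BUILDING_CRC32_CLMUL
//     #include "crc_x86_clmul.h"
//
// and the CRC64 version is selected with BUILDING_CRC64_CLMUL instead.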

// This file must not be included more than once.
#ifdef LZMA_CRC_X86_CLMUL_H
#	error crc_x86_clmul.h was included twice.
#endif
#define LZMA_CRC_X86_CLMUL_H

#include <immintrin.h>

#if defined(_MSC_VER)
#	include <intrin.h>
#elif defined(HAVE_CPUID_H)
#	include <cpuid.h>
#endif


// EDG-based compilers (Intel's classic compiler and compiler for E2K) can
// define __GNUC__ but the attribute must not be used with them.
// The new Clang-based ICX needs the attribute.
//
// NOTE: Build systems check for this too, keep them in sync with this.
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__EDG__)
#	define crc_attr_target \
		__attribute__((__target__("ssse3,sse4.1,pclmul")))
#else
#	define crc_attr_target
#endif


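// Helper macros around _mm_shuffle_epi8: a control-mask byte with its high
// bit set makes _mm_shuffle_epi8 write zero to that position, while other
// bytes select a source byte by their low four bits. MASK_L keeps the bytes
// selected by "mask", MASK_H keeps the complementary bytes (it flips the
// high bits by xoring the mask with vsign, which is defined inside
// crc_simd_body()), and MASK_LH produces both results at once.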
#define MASK_L(in, mask, r) r = _mm_shuffle_epi8(in, mask)

#define MASK_H(in, mask, r) \
	r = _mm_shuffle_epi8(in, _mm_xor_si128(mask, vsign))

#define MASK_LH(in, mask, low, high) \
	MASK_L(in, mask, low); \
	MASK_H(in, mask, high)


crc_attr_target
crc_attr_no_sanitize_address
static lzma_always_inline void
crc_simd_body(const uint8_t *buf, const size_t size, __m128i *v0, __m128i *v1,
		const __m128i vfold16, const __m128i initial_crc)
{
	// Create a vector with 8-bit values 0 to 15. This is used to
	// construct control masks for _mm_blendv_epi8 and _mm_shuffle_epi8.
	const __m128i vramp = _mm_setr_epi32(
			0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c);

	// This is used to invert the control mask of _mm_shuffle_epi8
	// so that bytes that wouldn't be picked with the original mask
	// will be picked and vice versa.
	const __m128i vsign = _mm_set1_epi8(-0x80);

	// Memory addresses A to D and the distances between them:
	//
	//     A           B     C        D
	//     [skip_start][size][skip_end]
	//     [      size2      ]
	//
	// A and D are 16-byte aligned. B and C are 1-byte aligned.
	// skip_start and skip_end are 0-15 bytes. size is at least 1 byte.
	//
	// A = aligned_buf will initially point to this address.
	// B = The address pointed to by the caller-supplied buf.
	// C = buf + size == aligned_buf + size2
	// D = buf + size + skip_end == aligned_buf + size2 + skip_end
	const size_t skip_start = (size_t)((uintptr_t)buf & 15);
	const size_t skip_end = (size_t)((0U - (uintptr_t)(buf + size)) & 15);
	const __m128i *aligned_buf = (const __m128i *)(
			(uintptr_t)buf & ~(uintptr_t)15);

	// If size2 <= 16 then the whole input fits into a single 16-byte
	// vector. If size2 > 16 then at least two 16-byte vectors must
	// be processed. If size2 > 16 && size <= 16 then there is only
	// one 16-byte vector's worth of input but it is unaligned in memory.
	//
	// NOTE: There is no integer overflow here if the arguments
	// are valid. If this overflowed, buf + size would too.
	const size_t size2 = skip_start + size;
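	// As a worked example (the address is hypothetical): if buf == 0x1003
	// and size == 40, then skip_start == 3, aligned_buf == 0x1000,
	// size2 == 43 and skip_end == 5, so D == 0x1030 is 16-byte aligned.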

	// Masks to be used with _mm_blendv_epi8 and _mm_shuffle_epi8:
	// The first skip_start or skip_end bytes in the vectors will have
	// the high bit (0x80) set. _mm_blendv_epi8 and _mm_shuffle_epi8
	// will produce zeros for these positions. (Bitwise-xor of these
	// masks with vsign will produce the opposite behavior.)
	const __m128i mask_start
			= _mm_sub_epi8(vramp, _mm_set1_epi8((char)skip_start));
	const __m128i mask_end
			= _mm_sub_epi8(vramp, _mm_set1_epi8((char)skip_end));
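	// For example, with skip_start == 3 the bytes of mask_start are
	// { -3, -2, -1, 0, 1, ..., 12 }, so only the first three byte
	// positions have the high bit set and produce zeros.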

	// Get the first 1-16 bytes into data0. If loading less than 16
	// bytes, the bytes are loaded to the high bits of the vector and
	// the least significant positions are filled with zeros.
	const __m128i data0 = _mm_blendv_epi8(_mm_load_si128(aligned_buf),
			_mm_setzero_si128(), mask_start);
	aligned_buf++;

	__m128i v2, v3;

#ifndef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	if (size <= 16) {
		// Right-shift initial_crc by 1-16 bytes based on "size"
		// and store the result in v1 (high bytes) and v0 (low bytes).
		//
		// NOTE: The highest 8 bytes of initial_crc are zeros so
		// v1 will be filled with zeros if size >= 8. The highest
		// 8 bytes of v1 will always become zeros.
		//
		//  [      v1      ][      v0      ]
		//   [ initial_crc ]                  size == 1
		//    [ initial_crc ]                 size == 2
		//                 [ initial_crc ]    size == 15
		//                  [ initial_crc ]   size == 16 (all in v0)
		const __m128i mask_low = _mm_add_epi8(
				vramp, _mm_set1_epi8((char)(size - 16)));
		MASK_LH(initial_crc, mask_low, *v0, *v1);

		if (size2 <= 16) {
			// There are 1-16 bytes of input and they are all
			// in data0. Copy the input bytes to v3. If there
			// are fewer than 16 bytes, the low bytes in v3
			// will be filled with zeros. That is, the input
			// bytes are stored to the same position as
			// (part of) initial_crc is in v0.
			MASK_L(data0, mask_end, v3);
		} else {
			// There are 2-16 bytes of input but not all bytes
			// are in data0.
			const __m128i data1 = _mm_load_si128(aligned_buf);

			// Collect the 2-16 input bytes from data0 and data1
			// to v2 and v3, and bitwise-xor them with the
			// low bits of initial_crc in v0. Note that the
			// second xor is below this else-block as it
			// is shared with the other branch.
			MASK_H(data0, mask_end, v2);
			MASK_L(data1, mask_end, v3);
			*v0 = _mm_xor_si128(*v0, v2);
		}

		*v0 = _mm_xor_si128(*v0, v3);
		*v1 = _mm_alignr_epi8(*v1, *v0, 8);
	} else
#endif
	{
		// There are more than 16 bytes of input.
		const __m128i data1 = _mm_load_si128(aligned_buf);
		const __m128i *end = (const __m128i*)(
				(const char *)aligned_buf - 16 + size2);
		aligned_buf++;

		MASK_LH(initial_crc, mask_start, *v0, *v1);
		*v0 = _mm_xor_si128(*v0, data0);
		*v1 = _mm_xor_si128(*v1, data1);

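		// Each loop iteration folds the 128 bits accumulated in *v0
		// into the next 16 input bytes, which were loaded into *v1 on
		// the previous iteration: the two 64-bit halves of *v0 are
		// carry-less multiplied by the two constants in vfold16
		// (immediates 0x00 and 0x11) and xored in. This is the block
		// folding step of the CLMUL CRC method from the paper linked
		// in the file header.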
		while (aligned_buf < end) {
			*v1 = _mm_xor_si128(*v1, _mm_clmulepi64_si128(
					*v0, vfold16, 0x00));
			*v0 = _mm_xor_si128(*v1, _mm_clmulepi64_si128(
					*v0, vfold16, 0x11));
			*v1 = _mm_load_si128(aligned_buf++);
		}

		if (aligned_buf != end) {
			MASK_H(*v0, mask_end, v2);
			MASK_L(*v0, mask_end, *v0);
			MASK_L(*v1, mask_end, v3);
			*v1 = _mm_or_si128(v2, v3);
		}

		*v1 = _mm_xor_si128(*v1, _mm_clmulepi64_si128(
				*v0, vfold16, 0x00));
		*v0 = _mm_xor_si128(*v1, _mm_clmulepi64_si128(
				*v0, vfold16, 0x11));
		*v1 = _mm_srli_si128(*v0, 8);
	}
}


/////////////////////
// x86 CLMUL CRC32 //
/////////////////////

/*
// These functions were used to generate the constants
// at the top of crc32_arch_optimized().
static uint64_t
calc_lo(uint64_t p, uint64_t a, int n)
{
	uint64_t b = 0; int i;
	for (i = 0; i < n; i++) {
		b = b >> 1 | (a & 1) << (n - 1);
		a = (a >> 1) ^ ((0 - (a & 1)) & p);
	}
	return b;
}

// same as ~crc(&a, sizeof(a), ~0)
static uint64_t
calc_hi(uint64_t p, uint64_t a, int n)
{
	int i;
	for (i = 0; i < n; i++)
		a = (a >> 1) ^ ((0 - (a & 1)) & p);
	return a;
}
*/

#ifdef BUILDING_CRC32_CLMUL

crc_attr_target
crc_attr_no_sanitize_address
static uint32_t
crc32_arch_optimized(const uint8_t *buf, size_t size, uint32_t crc)
{
#ifndef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	// The code assumes that there is at least one byte of input.
	if (size == 0)
		return crc;
#endif

	// uint32_t poly = 0xedb88320;
	const int64_t p = 0x1db710640; // p << 1
	const int64_t mu = 0x1f7011641; // calc_lo(p, p, 32) << 1 | 1
	const int64_t k5 = 0x163cd6124; // calc_hi(p, p, 32) << 1
	const int64_t k4 = 0x0ccaa009e; // calc_hi(p, p, 64) << 1
	const int64_t k3 = 0x1751997d0; // calc_hi(p, p, 128) << 1

	const __m128i vfold4 = _mm_set_epi64x(mu, p);
	const __m128i vfold8 = _mm_set_epi64x(0, k5);
	const __m128i vfold16 = _mm_set_epi64x(k4, k3);

	__m128i v0, v1, v2;

	crc_simd_body(buf, size, &v0, &v1, vfold16,
			_mm_cvtsi32_si128((int32_t)~crc));

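	// Reduce the 128 bits in v0/v1 to the final 32-bit CRC: fold the
	// upper parts down with k4 (taken from vfold16) and k5 (vfold8),
	// then do a Barrett reduction with mu and p (vfold4) and extract
	// the 32-bit result.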
	v1 = _mm_xor_si128(
			_mm_clmulepi64_si128(v0, vfold16, 0x10), v1); // xxx0
	v2 = _mm_shuffle_epi32(v1, 0xe7); // 0xx0
	v0 = _mm_slli_epi64(v1, 32); // [0]
	v0 = _mm_clmulepi64_si128(v0, vfold8, 0x00);
	v0 = _mm_xor_si128(v0, v2); // [1] [2]
	v2 = _mm_clmulepi64_si128(v0, vfold4, 0x10);
	v2 = _mm_clmulepi64_si128(v2, vfold4, 0x00);
	v0 = _mm_xor_si128(v0, v2); // [2]
	return ~(uint32_t)_mm_extract_epi32(v0, 2);
}
#endif // BUILDING_CRC32_CLMUL


/////////////////////
// x86 CLMUL CRC64 //
/////////////////////

/*
// These functions were used to generate the constants
// at the top of crc64_arch_optimized().
static uint64_t
calc_lo(uint64_t poly)
{
	uint64_t a = poly;
	uint64_t b = 0;

	for (unsigned i = 0; i < 64; ++i) {
		b = (b >> 1) | (a << 63);
		a = (a >> 1) ^ (a & 1 ? poly : 0);
	}

	return b;
}

static uint64_t
calc_hi(uint64_t poly, uint64_t a)
{
	for (unsigned i = 0; i < 64; ++i)
		a = (a >> 1) ^ (a & 1 ? poly : 0);

	return a;
}
*/

#ifdef BUILDING_CRC64_CLMUL

// MSVC (VS2015 - VS2022) produces bad 32-bit x86 code from the CLMUL CRC
// code when optimizations are enabled (release build). According to the bug
// report, the ebx register is corrupted and the calculated result is wrong.
// Trying to work around the problem with "__asm mov ebx, ebx" didn't help.
// The following pragma works and performance is still good. x86-64 builds
// and CRC32 CLMUL aren't affected by this problem. The problem does not
// happen in crc_simd_body() either (which is shared with CRC32 CLMUL anyway).
//
// NOTE: Another pragma after crc64_arch_optimized() restores
// the optimizations. If the #if condition here is updated,
// the other one must be updated too.
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) \
		&& defined(_M_IX86)
#	pragma optimize("g", off)
#endif

crc_attr_target
crc_attr_no_sanitize_address
static uint64_t
crc64_arch_optimized(const uint8_t *buf, size_t size, uint64_t crc)
{
#ifndef CRC_USE_GENERIC_FOR_SMALL_INPUTS
	// The code assumes that there is at least one byte of input.
	if (size == 0)
		return crc;
#endif

	// const uint64_t poly = 0xc96c5795d7870f42; // CRC polynomial
	const uint64_t p = 0x92d8af2baf0e1e85; // (poly << 1) | 1
	const uint64_t mu = 0x9c3e466c172963d5; // (calc_lo(poly) << 1) | 1
	const uint64_t k2 = 0xdabe95afc7875f40; // calc_hi(poly, 1)
	const uint64_t k1 = 0xe05dd497ca393ae4; // calc_hi(poly, k2)

	const __m128i vfold8 = _mm_set_epi64x((int64_t)p, (int64_t)mu);
	const __m128i vfold16 = _mm_set_epi64x((int64_t)k2, (int64_t)k1);

	__m128i v0, v1, v2;

#if defined(__i386__) || defined(_M_IX86)
	crc_simd_body(buf, size, &v0, &v1, vfold16,
			_mm_set_epi64x(0, (int64_t)~crc));
#else
	// GCC and Clang would produce good code with _mm_set_epi64x
	// but MSVC needs _mm_cvtsi64_si128 on x86-64.
	crc_simd_body(buf, size, &v0, &v1, vfold16,
			_mm_cvtsi64_si128((int64_t)~crc));
#endif

	v1 = _mm_xor_si128(_mm_clmulepi64_si128(v0, vfold16, 0x10), v1);
	v0 = _mm_clmulepi64_si128(v1, vfold8, 0x00);
	v2 = _mm_clmulepi64_si128(v0, vfold8, 0x10);
	v0 = _mm_xor_si128(_mm_xor_si128(v1, _mm_slli_si128(v0, 8)), v2);

#if defined(__i386__) || defined(_M_IX86)
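	// 32-bit x86 has no 64-bit _mm_extract_epi64, so assemble the
	// result from two 32-bit extracts.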
	return ~(((uint64_t)(uint32_t)_mm_extract_epi32(v0, 3) << 32) |
			(uint64_t)(uint32_t)_mm_extract_epi32(v0, 2));
#else
	return ~(uint64_t)_mm_extract_epi64(v0, 1);
#endif
}

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__clang__) \
		&& defined(_M_IX86)
#	pragma optimize("", on)
#endif

#endif // BUILDING_CRC64_CLMUL


// Even though this is an inline function, compile it only when needed.
// This way it won't appear in E2K builds at all.
#if defined(CRC32_GENERIC) || defined(CRC64_GENERIC)
// Inlining this function duplicates the function body in crc32_resolve() and
// crc64_resolve(), but this is acceptable because this is a tiny function.
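//
// As an illustration only (the names below are hypothetical; the real
// crc32_resolve() and crc64_resolve() live in the files that include this
// header), such a resolver is roughly:
//
//     typedef uint32_t (*crc32_func_type)(
//                     const uint8_t *buf, size_t size, uint32_t crc);
//
//     static crc32_func_type
//     crc32_resolve(void)
//     {
//             return is_arch_extension_supported()
//                             ? &crc32_arch_optimized : &crc32_generic;
//     }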
static inline bool
is_arch_extension_supported(void)
{
	int success = 1;
	uint32_t r[4]; // eax, ebx, ecx, edx

#if defined(_MSC_VER)
	// This needs <intrin.h> with MSVC. ICC has it as a built-in
	// on all platforms.
	__cpuid(r, 1);
#elif defined(HAVE_CPUID_H)
	// Compared to just using __asm__ to run CPUID, this also checks
	// that CPUID is supported and saves and restores ebx as that is
	// needed with GCC < 5 with position-independent code (PIC).
	success = __get_cpuid(1, &r[0], &r[1], &r[2], &r[3]);
#else
	// Just a fallback that shouldn't be needed.
	__asm__("cpuid\n\t"
			: "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
			: "a"(1), "c"(0));
#endif

	// Returns true if these are supported:
	// CLMUL (bit 1 in ecx)
	// SSSE3 (bit 9 in ecx)
	// SSE4.1 (bit 19 in ecx)
	const uint32_t ecx_mask = (1 << 1) | (1 << 9) | (1 << 19);
	return success && (r[2] & ecx_mask) == ecx_mask;

	// Alternative methods that weren't used:
	// - ICC's _may_i_use_cpu_feature: the other methods should work too.
	// - GCC >= 6 / Clang / ICX __builtin_cpu_supports("pclmul")
	//
	// CPUID decoding is needed with MSVC anyway and older GCC. This keeps
	// the feature checks in the build system simpler too. The nice thing
	// about __builtin_cpu_supports would be that it generates very short
	// code as it only reads a variable set at startup, but a few bytes
	// don't matter here.
}
#endif