GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/zcommon/zfs_fletcher.c
// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (C) 2016 Gvozden Nešković. All rights reserved.
 */
/*
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * Fletcher Checksums
 * ------------------
 *
 * ZFS's 2nd and 4th order Fletcher checksums are defined by the following
 * recurrence relations:
 *
 *     a_i = a_(i-1) + f_(i-1)
 *     b_i = b_(i-1) + a_i
 *     c_i = c_(i-1) + b_i    (fletcher-4 only)
 *     d_i = d_(i-1) + c_i    (fletcher-4 only)
 *
 * Where
 *     a_0 = b_0 = c_0 = d_0 = 0
 * and
 *     f_0 .. f_(n-1) are the input data.
 *
 * Using standard techniques, these translate into the following series:
 *
 *     a_n = SUM[i=1..n] f_(n-i)
 *     b_n = SUM[i=1..n] i * f_(n-i)
 *
 *     c_n = SUM[i=1..n] (i*(i+1)/2) * f_(n-i)
 *     d_n = SUM[i=1..n] (i*(i+1)*(i+2)/6) * f_(n-i)
 *
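 * For example, with three input words f_0, f_1, f_2 the recurrences unroll to
 *
 *     a_3 = f_0 + f_1 + f_2
 *     b_3 = 3*f_0 + 2*f_1 + f_2
 *     c_3 = 6*f_0 + 3*f_1 + f_2
 *     d_3 = 10*f_0 + 4*f_1 + f_2
 *
 * matching the coefficients produced by the series above.
 *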
 * For fletcher-2, the f_is are 64-bit, and [ab]_i are 64-bit accumulators.
 * Since the additions are done mod (2^64), errors in the high bits may not
 * be noticed. For this reason, fletcher-2 is deprecated.
 *
 * For fletcher-4, the f_is are 32-bit, and [abcd]_i are 64-bit accumulators.
 * A conservative estimate of how big the buffer can get before we overflow
 * can be made using f_i = 0xffffffff for all i:
 *
 *     % bc
 *     f=2^32-1;d=0; for (i = 1; d<2^64; i++) { d += f*i*(i+1)*(i+2)/6 }; (i-1)*4
 *     2264
 *     quit
 *     %
 *
 * So blocks of up to 2k will not overflow. Our largest block size is
 * 128k, which has 32k 4-byte words, so we can compute the largest possible
 * accumulators, then divide by 2^64 to figure the max amount of overflow:
 *
 *     % bc
 *     a=b=c=d=0; f=2^32-1; for (i=1; i<=32*1024; i++) { a+=f; b+=a; c+=b; d+=c }
 *     a/2^64;b/2^64;c/2^64;d/2^64
 *     0
 *     0
 *     1365
 *     11186858
 *     quit
 *     %
 *
 * So a and b cannot overflow. To make sure each bit of input has some
 * effect on the contents of c and d, we can look at what the factors of
 * the coefficients in the equations for c_n and d_n are. The number of 2s
 * in the factors determines the lowest set bit in the multiplier. Running
 * through the cases for n*(n+1)/2 reveals that the highest power of 2 is
 * 2^14, and for n*(n+1)*(n+2)/6 it is 2^15. So while some data may overflow
 * the 64-bit accumulators, every bit of every f_i affects every accumulator,
 * even for 128k blocks.
 *
 * If we wanted to make a stronger version of fletcher4 (fletcher4c?),
 * we could do our calculations mod (2^32 - 1) by adding in the carries
 * periodically, and store the number of carries in the top 32-bits.
 *
 * --------------------
 * Checksum Performance
 * --------------------
 *
 * There are two interesting components to checksum performance: cached and
 * uncached performance. With cached data, fletcher-2 is about four times
 * faster than fletcher-4. With uncached data, the performance difference is
 * negligible, since the cost of a cache fill dominates the processing time.
 * Even though fletcher-4 is slower than fletcher-2, it is still a pretty
 * efficient pass over the data.
 *
 * In normal operation, the data which is being checksummed is in a buffer
 * which has been filled either by:
 *
 *     1. a compression step, which will be mostly cached, or
 *     2. a memcpy() or copyin(), which will be uncached
 *        (because the copy is cache-bypassing).
 *
 * For both cached and uncached data, both fletcher checksums are much faster
 * than sha-256, and slower than 'off', which doesn't touch the data at all.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/byteorder.h>
#include <sys/simd.h>
#include <sys/spa.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <zfs_fletcher.h>

#define FLETCHER_MIN_SIMD_SIZE 64

static void fletcher_4_scalar_init(fletcher_4_ctx_t *ctx);
static void fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp);
static void fletcher_4_scalar_native(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static void fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx,
    const void *buf, uint64_t size);
static boolean_t fletcher_4_scalar_valid(void);

static const fletcher_4_ops_t fletcher_4_scalar_ops = {
    .init_native = fletcher_4_scalar_init,
    .fini_native = fletcher_4_scalar_fini,
    .compute_native = fletcher_4_scalar_native,
    .init_byteswap = fletcher_4_scalar_init,
    .fini_byteswap = fletcher_4_scalar_fini,
    .compute_byteswap = fletcher_4_scalar_byteswap,
    .valid = fletcher_4_scalar_valid,
    .uses_fpu = B_FALSE,
    .name = "scalar"
};

static fletcher_4_ops_t fletcher_4_fastest_impl = {
    .name = "fastest",
    .valid = fletcher_4_scalar_valid
};

static const fletcher_4_ops_t *fletcher_4_impls[] = {
    &fletcher_4_scalar_ops,
    &fletcher_4_superscalar_ops,
    &fletcher_4_superscalar4_ops,
#if defined(HAVE_SSE2)
    &fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
    &fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
    &fletcher_4_avx2_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
    &fletcher_4_avx512f_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW)
    &fletcher_4_avx512bw_ops,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)
    &fletcher_4_aarch64_neon_ops,
#endif
};

/* Hold all supported implementations */
static uint32_t fletcher_4_supp_impls_cnt = 0;
static fletcher_4_ops_t *fletcher_4_supp_impls[ARRAY_SIZE(fletcher_4_impls)];

/* Select fletcher4 implementation */
#define IMPL_FASTEST (UINT32_MAX)
#define IMPL_CYCLE (UINT32_MAX - 1)
#define IMPL_SCALAR (0)

static uint32_t fletcher_4_impl_chosen = IMPL_FASTEST;

#define IMPL_READ(i) (*(volatile uint32_t *) &(i))

static struct fletcher_4_impl_selector {
    const char *fis_name;
    uint32_t fis_sel;
} fletcher_4_impl_selectors[] = {
    { "cycle", IMPL_CYCLE },
    { "fastest", IMPL_FASTEST },
    { "scalar", IMPL_SCALAR }
};

#if defined(_KERNEL)
static kstat_t *fletcher_4_kstat;

static struct fletcher_4_kstat {
    uint64_t native;
    uint64_t byteswap;
} fletcher_4_stat_data[ARRAY_SIZE(fletcher_4_impls) + 1];
#endif

/* Indicate that benchmark has been completed */
static boolean_t fletcher_4_initialized = B_FALSE;

void
fletcher_init(zio_cksum_t *zcp)
{
    ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}

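/*
 * Fletcher-2 below runs two independent lanes: even 64-bit words feed
 * (a0, b0) and odd words feed (a1, b1), so each loop iteration consumes two
 * words. The four running sums live in zc_word[0..3] as (a0, a1, b0, b1).
 */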
int
fletcher_2_incremental_native(void *buf, size_t size, void *data)
{
    zio_cksum_t *zcp = data;

    const uint64_t *ip = buf;
    const uint64_t *ipend = ip + (size / sizeof (uint64_t));
    uint64_t a0, b0, a1, b1;

    a0 = zcp->zc_word[0];
    a1 = zcp->zc_word[1];
    b0 = zcp->zc_word[2];
    b1 = zcp->zc_word[3];

    for (; ip < ipend; ip += 2) {
        a0 += ip[0];
        a1 += ip[1];
        b0 += a0;
        b1 += a1;
    }

    ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
    return (0);
}

void
fletcher_2_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
    (void) ctx_template;
    fletcher_init(zcp);
    (void) fletcher_2_incremental_native((void *) buf, size, zcp);
}

int
fletcher_2_incremental_byteswap(void *buf, size_t size, void *data)
{
    zio_cksum_t *zcp = data;

    const uint64_t *ip = buf;
    const uint64_t *ipend = ip + (size / sizeof (uint64_t));
    uint64_t a0, b0, a1, b1;

    a0 = zcp->zc_word[0];
    a1 = zcp->zc_word[1];
    b0 = zcp->zc_word[2];
    b1 = zcp->zc_word[3];

    for (; ip < ipend; ip += 2) {
        a0 += BSWAP_64(ip[0]);
        a1 += BSWAP_64(ip[1]);
        b0 += a0;
        b1 += a1;
    }

    ZIO_SET_CHECKSUM(zcp, a0, a1, b0, b1);
    return (0);
}

void
fletcher_2_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
    (void) ctx_template;
    fletcher_init(zcp);
    (void) fletcher_2_incremental_byteswap((void *) buf, size, zcp);
}

static void
fletcher_4_scalar_init(fletcher_4_ctx_t *ctx)
{
    ZIO_SET_CHECKSUM(&ctx->scalar, 0, 0, 0, 0);
}

static void
fletcher_4_scalar_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
    memcpy(zcp, &ctx->scalar, sizeof (zio_cksum_t));
}

static void
fletcher_4_scalar_native(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
    const uint32_t *ip = buf;
    const uint32_t *ipend = ip + (size / sizeof (uint32_t));
    uint64_t a, b, c, d;

    a = ctx->scalar.zc_word[0];
    b = ctx->scalar.zc_word[1];
    c = ctx->scalar.zc_word[2];
    d = ctx->scalar.zc_word[3];

    for (; ip < ipend; ip++) {
        a += ip[0];
        b += a;
        c += b;
        d += c;
    }

    ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}

static void
fletcher_4_scalar_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
    uint64_t size)
{
    const uint32_t *ip = buf;
    const uint32_t *ipend = ip + (size / sizeof (uint32_t));
    uint64_t a, b, c, d;

    a = ctx->scalar.zc_word[0];
    b = ctx->scalar.zc_word[1];
    c = ctx->scalar.zc_word[2];
    d = ctx->scalar.zc_word[3];

    for (; ip < ipend; ip++) {
        a += BSWAP_32(ip[0]);
        b += a;
        c += b;
        d += c;
    }

    ZIO_SET_CHECKSUM(&ctx->scalar, a, b, c, d);
}

static boolean_t
fletcher_4_scalar_valid(void)
{
    return (B_TRUE);
}

int
fletcher_4_impl_set(const char *val)
{
    int err = -EINVAL;
    uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
    size_t i, val_len;

    val_len = strlen(val);
    while ((val_len > 0) && !!isspace(val[val_len-1])) /* trim '\n' */
        val_len--;

    /* check mandatory implementations */
    for (i = 0; i < ARRAY_SIZE(fletcher_4_impl_selectors); i++) {
        const char *name = fletcher_4_impl_selectors[i].fis_name;

        if (val_len == strlen(name) &&
            strncmp(val, name, val_len) == 0) {
            impl = fletcher_4_impl_selectors[i].fis_sel;
            err = 0;
            break;
        }
    }

    if (err != 0 && fletcher_4_initialized) {
        /* check all supported implementations */
        for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
            const char *name = fletcher_4_supp_impls[i]->name;

            if (val_len == strlen(name) &&
                strncmp(val, name, val_len) == 0) {
                impl = i;
                err = 0;
                break;
            }
        }
    }

    if (err == 0) {
        atomic_swap_32(&fletcher_4_impl_chosen, impl);
        membar_producer();
    }

    return (err);
}

/*
 * Returns the Fletcher 4 operations for checksums. When a SIMD
 * implementation is not allowed in the current context, it falls back
 * to the fastest generic implementation.
 */
static inline const fletcher_4_ops_t *
fletcher_4_impl_get(void)
{
    if (!kfpu_allowed())
        return (&fletcher_4_superscalar4_ops);

    const fletcher_4_ops_t *ops = NULL;
    uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);

    switch (impl) {
    case IMPL_FASTEST:
        ASSERT(fletcher_4_initialized);
        ops = &fletcher_4_fastest_impl;
        break;
    case IMPL_CYCLE:
        /* Cycle through supported implementations */
        ASSERT(fletcher_4_initialized);
        ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);
        static uint32_t cycle_count = 0;
        uint32_t idx = (++cycle_count) % fletcher_4_supp_impls_cnt;
        ops = fletcher_4_supp_impls[idx];
        break;
    default:
        ASSERT3U(fletcher_4_supp_impls_cnt, >, 0);
        ASSERT3U(impl, <, fletcher_4_supp_impls_cnt);
        ops = fletcher_4_supp_impls[impl];
        break;
    }

    ASSERT3P(ops, !=, NULL);

    return (ops);
}

static inline void
fletcher_4_native_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
    fletcher_4_ctx_t ctx;
    const fletcher_4_ops_t *ops = fletcher_4_impl_get();

    if (ops->uses_fpu == B_TRUE) {
        kfpu_begin();
    }
    ops->init_native(&ctx);
    ops->compute_native(&ctx, buf, size);
    ops->fini_native(&ctx, zcp);
    if (ops->uses_fpu == B_TRUE) {
        kfpu_end();
    }
}

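/*
 * The SIMD implementations only run over multiples of FLETCHER_MIN_SIMD_SIZE
 * (64) bytes, so the entry points below hand the aligned prefix to the
 * selected implementation and finish any remainder with the scalar code.
 * For example (illustrative numbers), size = 100 gives p2size = 64: the
 * first 64 bytes go through fletcher_4_native_impl() and the trailing 36
 * bytes are folded in by fletcher_4_scalar_native().
 */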
void
fletcher_4_native(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
    (void) ctx_template;
    const uint64_t p2size = P2ALIGN_TYPED(size, FLETCHER_MIN_SIMD_SIZE,
        uint64_t);

    ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

    if (size == 0 || p2size == 0) {
        ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

        if (size > 0)
            fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
                buf, size);
    } else {
        fletcher_4_native_impl(buf, p2size, zcp);

        if (p2size < size)
            fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp,
                (char *)buf + p2size, size - p2size);
    }
}

void
fletcher_4_native_varsize(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
    ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
    fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
}

static inline void
fletcher_4_byteswap_impl(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
    fletcher_4_ctx_t ctx;
    const fletcher_4_ops_t *ops = fletcher_4_impl_get();

    if (ops->uses_fpu == B_TRUE) {
        kfpu_begin();
    }
    ops->init_byteswap(&ctx);
    ops->compute_byteswap(&ctx, buf, size);
    ops->fini_byteswap(&ctx, zcp);
    if (ops->uses_fpu == B_TRUE) {
        kfpu_end();
    }
}

void
fletcher_4_byteswap(const void *buf, uint64_t size,
    const void *ctx_template, zio_cksum_t *zcp)
{
    (void) ctx_template;
    const uint64_t p2size = P2ALIGN_TYPED(size, FLETCHER_MIN_SIMD_SIZE,
        uint64_t);

    ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

    if (size == 0 || p2size == 0) {
        ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);

        if (size > 0)
            fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
                buf, size);
    } else {
        fletcher_4_byteswap_impl(buf, p2size, zcp);

        if (p2size < size)
            fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
                (char *)buf + p2size, size - p2size);
    }
}

/* Incremental Fletcher 4 */

#define ZFS_FLETCHER_4_INC_MAX_SIZE (8ULL << 20)

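/*
 * Combining sketch: if (a, b, c, d) cover the first m words and
 * (a', b', c', d') cover the next n words (n = size / 4 below), then for the
 * concatenated stream
 *
 *     a_new = a + a'
 *     b_new = b + n*a + b'
 *     c_new = c + n*b + (n*(n+1)/2)*a + c'
 *     d_new = d + n*c + (n*(n+1)/2)*b + (n*(n+1)*(n+2)/6)*a + d'
 *
 * which is exactly what the function below applies with c1 = n,
 * c2 = n*(n+1)/2 and c3 = n*(n+1)*(n+2)/6.
 */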
static inline void
fletcher_4_incremental_combine(zio_cksum_t *zcp, const uint64_t size,
    const zio_cksum_t *nzcp)
{
    const uint64_t c1 = size / sizeof (uint32_t);
    const uint64_t c2 = c1 * (c1 + 1) / 2;
    const uint64_t c3 = c2 * (c1 + 2) / 3;

    /*
     * Value of 'c3' overflows on buffer sizes close to 16MiB. For that
     * reason we split incremental fletcher4 computation of large buffers
     * into steps of (ZFS_FLETCHER_4_INC_MAX_SIZE) size.
     */
    ASSERT3U(size, <=, ZFS_FLETCHER_4_INC_MAX_SIZE);

    zcp->zc_word[3] += nzcp->zc_word[3] + c1 * zcp->zc_word[2] +
        c2 * zcp->zc_word[1] + c3 * zcp->zc_word[0];
    zcp->zc_word[2] += nzcp->zc_word[2] + c1 * zcp->zc_word[1] +
        c2 * zcp->zc_word[0];
    zcp->zc_word[1] += nzcp->zc_word[1] + c1 * zcp->zc_word[0];
    zcp->zc_word[0] += nzcp->zc_word[0];
}

static inline void
fletcher_4_incremental_impl(boolean_t native, const void *buf, uint64_t size,
    zio_cksum_t *zcp)
{
    while (size > 0) {
        zio_cksum_t nzc;
        uint64_t len = MIN(size, ZFS_FLETCHER_4_INC_MAX_SIZE);

        if (native)
            fletcher_4_native(buf, len, NULL, &nzc);
        else
            fletcher_4_byteswap(buf, len, NULL, &nzc);

        fletcher_4_incremental_combine(zcp, len, &nzc);

        size -= len;
        buf += len;
    }
}

int
fletcher_4_incremental_native(void *buf, size_t size, void *data)
{
    zio_cksum_t *zcp = data;
    /* Use scalar impl to directly update cksum of small blocks */
    if (size < SPA_MINBLOCKSIZE)
        fletcher_4_scalar_native((fletcher_4_ctx_t *)zcp, buf, size);
    else
        fletcher_4_incremental_impl(B_TRUE, buf, size, zcp);
    return (0);
}

int
fletcher_4_incremental_byteswap(void *buf, size_t size, void *data)
{
    zio_cksum_t *zcp = data;
    /* Use scalar impl to directly update cksum of small blocks */
    if (size < SPA_MINBLOCKSIZE)
        fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp, buf, size);
    else
        fletcher_4_incremental_impl(B_FALSE, buf, size, zcp);
    return (0);
}

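/*
 * Typical incremental use (sketch, after fletcher_4_init() has run and
 * assuming chunk lengths that are multiples of sizeof (uint32_t)):
 *
 *     zio_cksum_t zc;
 *
 *     fletcher_init(&zc);
 *     fletcher_4_incremental_native(chunk1, len1, &zc);
 *     fletcher_4_incremental_native(chunk2, len2, &zc);
 *
 * yields the same checksum as fletcher_4_native() over the concatenated
 * buffer, since each step folds the new chunk in via
 * fletcher_4_incremental_combine().
 */
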
#if defined(_KERNEL)
/*
 * Fletcher 4 kstats
 */
static int
fletcher_4_kstat_headers(char *buf, size_t size)
{
    ssize_t off = 0;

    off += snprintf(buf + off, size, "%-17s", "implementation");
    off += snprintf(buf + off, size - off, "%-15s", "native");
    (void) snprintf(buf + off, size - off, "%-15s\n", "byteswap");

    return (0);
}

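/*
 * fletcher_4_stat_data[] has one extra slot past the supported
 * implementations; that slot backs the "fastest" row and stores the indices
 * of the winning native/byteswap implementations rather than bandwidths.
 */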
static int
fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
    struct fletcher_4_kstat *fastest_stat =
        &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
    struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
    ssize_t off = 0;

    if (curr_stat == fastest_stat) {
        off += snprintf(buf + off, size - off, "%-17s", "fastest");
        off += snprintf(buf + off, size - off, "%-15s",
            fletcher_4_supp_impls[fastest_stat->native]->name);
        (void) snprintf(buf + off, size - off, "%-15s\n",
            fletcher_4_supp_impls[fastest_stat->byteswap]->name);
    } else {
        ptrdiff_t id = curr_stat - fletcher_4_stat_data;

        off += snprintf(buf + off, size - off, "%-17s",
            fletcher_4_supp_impls[id]->name);
        off += snprintf(buf + off, size - off, "%-15llu",
            (u_longlong_t)curr_stat->native);
        (void) snprintf(buf + off, size - off, "%-15llu\n",
            (u_longlong_t)curr_stat->byteswap);
    }

    return (0);
}

static void *
fletcher_4_kstat_addr(kstat_t *ksp, loff_t n)
{
    if (n <= fletcher_4_supp_impls_cnt)
        ksp->ks_private = (void *) (fletcher_4_stat_data + n);
    else
        ksp->ks_private = NULL;

    return (ksp->ks_private);
}
#endif

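/*
 * Copy the per-byteorder entry points (init/fini/compute) and the FPU flag
 * of a benchmark winner into fletcher_4_fastest_impl, which backs the
 * "fastest" selector.
 */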
#define FLETCHER_4_FASTEST_FN_COPY(type, src) \
{ \
    fletcher_4_fastest_impl.init_ ## type = src->init_ ## type; \
    fletcher_4_fastest_impl.fini_ ## type = src->fini_ ## type; \
    fletcher_4_fastest_impl.compute_ ## type = src->compute_ ## type; \
    fletcher_4_fastest_impl.uses_fpu = src->uses_fpu; \
}

#define FLETCHER_4_BENCH_NS (MSEC2NSEC(1)) /* 1ms */

typedef void fletcher_checksum_func_t(const void *, uint64_t, const void *,
    zio_cksum_t *);

#if defined(_KERNEL)
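/*
 * Time each supported implementation over the supplied buffer for roughly
 * FLETCHER_4_BENCH_NS (re-checking the clock every 32 runs), record the
 * measured bandwidth in fletcher_4_stat_data[], and capture the fastest
 * implementation for the requested byte order in fletcher_4_fastest_impl.
 */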
static void
fletcher_4_benchmark_impl(boolean_t native, char *data, uint64_t data_size)
{

    struct fletcher_4_kstat *fastest_stat =
        &fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
    hrtime_t start;
    uint64_t run_bw, run_time_ns, best_run = 0;
    zio_cksum_t zc;
    uint32_t i, l, sel_save = IMPL_READ(fletcher_4_impl_chosen);

    fletcher_checksum_func_t *fletcher_4_test = native ?
        fletcher_4_native : fletcher_4_byteswap;

    for (i = 0; i < fletcher_4_supp_impls_cnt; i++) {
        struct fletcher_4_kstat *stat = &fletcher_4_stat_data[i];
        uint64_t run_count = 0;

        /* temporarily set an implementation */
        fletcher_4_impl_chosen = i;

        kpreempt_disable();
        start = gethrtime();
        do {
            for (l = 0; l < 32; l++, run_count++)
                fletcher_4_test(data, data_size, NULL, &zc);

            run_time_ns = gethrtime() - start;
        } while (run_time_ns < FLETCHER_4_BENCH_NS);
        kpreempt_enable();

        run_bw = data_size * run_count * NANOSEC;
        run_bw /= run_time_ns; /* B/s */

        if (native)
            stat->native = run_bw;
        else
            stat->byteswap = run_bw;

        if (run_bw > best_run) {
            best_run = run_bw;

            if (native) {
                fastest_stat->native = i;
                FLETCHER_4_FASTEST_FN_COPY(native,
                    fletcher_4_supp_impls[i]);
            } else {
                fastest_stat->byteswap = i;
                FLETCHER_4_FASTEST_FN_COPY(byteswap,
                    fletcher_4_supp_impls[i]);
            }
        }
    }

    /* restore original selection */
    atomic_swap_32(&fletcher_4_impl_chosen, sel_save);
}
#endif /* _KERNEL */

/*
 * Initialize and benchmark all supported implementations.
 */
static void
fletcher_4_benchmark(void)
{
    fletcher_4_ops_t *curr_impl;
    int i, c;

    /* Move supported implementations into fletcher_4_supp_impls */
    for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
        curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];

        if (curr_impl->valid && curr_impl->valid())
            fletcher_4_supp_impls[c++] = curr_impl;
    }
    membar_producer(); /* complete fletcher_4_supp_impls[] init */
    fletcher_4_supp_impls_cnt = c; /* number of supported impl */

#if defined(_KERNEL)
    static const size_t data_size = 1 << SPA_OLD_MAXBLOCKSHIFT; /* 128kiB */
    char *databuf = vmem_alloc(data_size, KM_SLEEP);

    for (i = 0; i < data_size / sizeof (uint64_t); i++)
        ((uint64_t *)databuf)[i] = (uintptr_t)(databuf+i); /* warm-up */

    fletcher_4_benchmark_impl(B_FALSE, databuf, data_size);
    fletcher_4_benchmark_impl(B_TRUE, databuf, data_size);

    vmem_free(databuf, data_size);
#else
    /*
     * Skip the benchmark in user space to avoid impacting libzpool
     * consumers (zdb, zhack, zinject, ztest). The last implementation
     * is assumed to be the fastest and used by default.
     */
    memcpy(&fletcher_4_fastest_impl,
        fletcher_4_supp_impls[fletcher_4_supp_impls_cnt - 1],
        sizeof (fletcher_4_fastest_impl));
    fletcher_4_fastest_impl.name = "fastest";
    membar_producer();
#endif /* _KERNEL */
}

void
fletcher_4_init(void)
{
    /* Determine the fastest available implementation. */
    fletcher_4_benchmark();

#if defined(_KERNEL)
    /* Install kstats for all implementations */
    fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
        KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
    if (fletcher_4_kstat != NULL) {
        fletcher_4_kstat->ks_data = NULL;
        fletcher_4_kstat->ks_ndata = UINT32_MAX;
        kstat_set_raw_ops(fletcher_4_kstat,
            fletcher_4_kstat_headers,
            fletcher_4_kstat_data,
            fletcher_4_kstat_addr);
        kstat_install(fletcher_4_kstat);
    }
#endif

    /* Finish initialization */
    fletcher_4_initialized = B_TRUE;
}

void
fletcher_4_fini(void)
{
#if defined(_KERNEL)
    if (fletcher_4_kstat != NULL) {
        kstat_delete(fletcher_4_kstat);
        fletcher_4_kstat = NULL;
    }
#endif
}

/* ABD adapters */

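/*
 * The zio_abd_checksum_func_t adapters below run fletcher-4 over ABD
 * scatter/gather buffers: init picks an implementation and enters FPU
 * context if needed, iter consumes each segment (dropping to the scalar
 * code for tails shorter than FLETCHER_MIN_SIMD_SIZE), and fini emits the
 * checksum and leaves FPU context.
 */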
static void
abd_fletcher_4_init(zio_abd_checksum_data_t *cdp)
{
    const fletcher_4_ops_t *ops = fletcher_4_impl_get();
    cdp->acd_private = (void *) ops;

    if (ops->uses_fpu == B_TRUE) {
        kfpu_begin();
    }
    if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE)
        ops->init_native(cdp->acd_ctx);
    else
        ops->init_byteswap(cdp->acd_ctx);

}

static void
abd_fletcher_4_fini(zio_abd_checksum_data_t *cdp)
{
    fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private;

    ASSERT(ops);

    if (cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE)
        ops->fini_native(cdp->acd_ctx, cdp->acd_zcp);
    else
        ops->fini_byteswap(cdp->acd_ctx, cdp->acd_zcp);

    if (ops->uses_fpu == B_TRUE) {
        kfpu_end();
    }
}


static void
abd_fletcher_4_simd2scalar(boolean_t native, void *data, size_t size,
    zio_abd_checksum_data_t *cdp)
{
    zio_cksum_t *zcp = cdp->acd_zcp;

    ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);

    abd_fletcher_4_fini(cdp);
    cdp->acd_private = (void *)&fletcher_4_scalar_ops;

    if (native)
        fletcher_4_incremental_native(data, size, zcp);
    else
        fletcher_4_incremental_byteswap(data, size, zcp);
}

static int
abd_fletcher_4_iter(void *data, size_t size, void *private)
{
    zio_abd_checksum_data_t *cdp = (zio_abd_checksum_data_t *)private;
    fletcher_4_ctx_t *ctx = cdp->acd_ctx;
    fletcher_4_ops_t *ops = (fletcher_4_ops_t *)cdp->acd_private;
    boolean_t native = cdp->acd_byteorder == ZIO_CHECKSUM_NATIVE;
    uint64_t asize = P2ALIGN_TYPED(size, FLETCHER_MIN_SIMD_SIZE, uint64_t);

    ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));

    if (asize > 0) {
        if (native)
            ops->compute_native(ctx, data, asize);
        else
            ops->compute_byteswap(ctx, data, asize);

        size -= asize;
        data = (char *)data + asize;
    }

    if (size > 0) {
        ASSERT3U(size, <, FLETCHER_MIN_SIMD_SIZE);
        /* At this point we have to switch to scalar impl */
        abd_fletcher_4_simd2scalar(native, data, size, cdp);
    }

    return (0);
}

zio_abd_checksum_func_t fletcher_4_abd_ops = {
    .acf_init = abd_fletcher_4_init,
    .acf_fini = abd_fletcher_4_fini,
    .acf_iter = abd_fletcher_4_iter
};

#if defined(_KERNEL)

#define IMPL_FMT(impl, i) (((impl) == (i)) ? "[%s] " : "%s ")

#if defined(__linux__)

static int
fletcher_4_param_get(char *buffer, zfs_kernel_param_t *unused)
{
    const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
    char *fmt;
    int cnt = 0;

    /* list fastest */
    fmt = IMPL_FMT(impl, IMPL_FASTEST);
    cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt, "fastest");

    /* list all supported implementations */
    for (uint32_t i = 0; i < fletcher_4_supp_impls_cnt; ++i) {
        fmt = IMPL_FMT(impl, i);
        cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
            fletcher_4_supp_impls[i]->name);
    }

    return (cnt);
}

static int
fletcher_4_param_set(const char *val, zfs_kernel_param_t *unused)
{
    return (fletcher_4_impl_set(val));
}

#else

#include <sys/sbuf.h>

static int
fletcher_4_param(ZFS_MODULE_PARAM_ARGS)
{
    int err;

    if (req->newptr == NULL) {
        const uint32_t impl = IMPL_READ(fletcher_4_impl_chosen);
        const int init_buflen = 64;
        const char *fmt;
        struct sbuf *s;

        s = sbuf_new_for_sysctl(NULL, NULL, init_buflen, req);

        /* list fastest */
        fmt = IMPL_FMT(impl, IMPL_FASTEST);
        (void) sbuf_printf(s, fmt, "fastest");

        /* list all supported implementations */
        for (uint32_t i = 0; i < fletcher_4_supp_impls_cnt; ++i) {
            fmt = IMPL_FMT(impl, i);
            (void) sbuf_printf(s, fmt,
                fletcher_4_supp_impls[i]->name);
        }

        err = sbuf_finish(s);
        sbuf_delete(s);

        return (err);
    }

    char buf[16];

    err = sysctl_handle_string(oidp, buf, sizeof (buf), req);
    if (err)
        return (err);
    return (-fletcher_4_impl_set(buf));
}

#endif

#undef IMPL_FMT

/*
 * Choose a fletcher 4 implementation in ZFS.
 * Users can choose "cycle" to exercise all implementations, but this is
 * for testing purposes and therefore can only be set in user space.
 */
ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs, zfs_, fletcher_4_impl,
    fletcher_4_param_set, fletcher_4_param_get, ZMOD_RW,
    "Select fletcher 4 implementation.");

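/*
 * The selector declared above is typically reachable at runtime as the Linux
 * module parameter /sys/module/zfs/parameters/zfs_fletcher_4_impl or the
 * FreeBSD sysctl vfs.zfs.fletcher_4_impl (paths assumed from the usual ZFS
 * module parameter conventions). Reading it lists the available
 * implementations with the active one in brackets, per IMPL_FMT above;
 * writing a name selects that implementation.
 */
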
EXPORT_SYMBOL(fletcher_init);
EXPORT_SYMBOL(fletcher_2_incremental_native);
EXPORT_SYMBOL(fletcher_2_incremental_byteswap);
EXPORT_SYMBOL(fletcher_4_init);
EXPORT_SYMBOL(fletcher_4_fini);
EXPORT_SYMBOL(fletcher_2_native);
EXPORT_SYMBOL(fletcher_2_byteswap);
EXPORT_SYMBOL(fletcher_4_native);
EXPORT_SYMBOL(fletcher_4_native_varsize);
EXPORT_SYMBOL(fletcher_4_byteswap);
EXPORT_SYMBOL(fletcher_4_incremental_native);
EXPORT_SYMBOL(fletcher_4_incremental_byteswap);
EXPORT_SYMBOL(fletcher_4_abd_ops);
#endif