Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/icp/io/sha2_mod.c
48529 views
1
// SPDX-License-Identifier: CDDL-1.0
2
/*
3
* CDDL HEADER START
4
*
5
* The contents of this file are subject to the terms of the
6
* Common Development and Distribution License (the "License").
7
* You may not use this file except in compliance with the License.
8
*
9
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10
* or https://opensource.org/licenses/CDDL-1.0.
11
* See the License for the specific language governing permissions
12
* and limitations under the License.
13
*
14
* When distributing Covered Code, include this CDDL HEADER in each
15
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16
* If applicable, add the following below this CDDL HEADER, with the
17
* fields enclosed by brackets "[]" replaced with your own identifying
18
* information: Portions Copyright [yyyy] [name of copyright owner]
19
*
20
* CDDL HEADER END
21
*/
22
23
/*
24
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
25
* Use is subject to license terms.
26
*/
27
28
#include <sys/zfs_context.h>
29
#include <sys/crypto/common.h>
30
#include <sys/crypto/spi.h>
31
#include <sys/crypto/icp.h>
32
#include <sys/sha2.h>
33
#include <sha2/sha2_impl.h>
34
35
/*
 * Macros to access the SHA2 or SHA2-HMAC contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_SHA2_CTX(ctx)	((sha2_ctx_t *)(ctx)->cc_provider_private)
#define	PROV_SHA2_HMAC_CTX(ctx)	((sha2_hmac_ctx_t *)(ctx)->cc_provider_private)

/*
 * to extract the digest length passed as mechanism parameter
 *
 * The parameter may not be naturally aligned for a ulong_t load, so the
 * unaligned case goes through memcpy into a temporary.
 */
#define	PROV_SHA2_GET_DIGEST_LEN(m, len) {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
	else {								\
		ulong_t tmp_ulong;					\
		memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
}

/*
 * One-shot digest of (key, len) into "digest" using a caller-supplied
 * SHA2 context; used to shrink HMAC keys longer than the block size.
 */
#define	PROV_SHA2_DIGEST_KEY(mech, ctx, key, len, digest) {	\
	SHA2Init(mech, ctx);					\
	SHA2Update(ctx, key, len);				\
	SHA2Final(digest, ctx);					\
}
59
60
/*
 * Mechanism info structure passed to KCF during registration.
 * Only SHA512-HMAC is registered by this (trimmed) provider, and only
 * for the MAC and atomic-MAC function groups.
 */
static const crypto_mech_info_t sha2_mech_info_tab[] = {
	/* SHA512-HMAC */
	{SUN_CKM_SHA512_HMAC, SHA512_HMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
};
68
69
/* Forward declarations of the KCF MAC entry points implemented below. */
static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t);
static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *);
static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *);
static int sha2_mac_atomic(crypto_mechanism_t *, crypto_key_t *,
    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
static int sha2_mac_verify_atomic(crypto_mechanism_t *, crypto_key_t *,
    crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);

/* MAC operations vector handed to KCF; single-part .mac is not provided. */
static const crypto_mac_ops_t sha2_mac_ops = {
	.mac_init = sha2_mac_init,
	.mac = NULL,
	.mac_update = sha2_mac_update,
	.mac_final = sha2_mac_final,
	.mac_atomic = sha2_mac_atomic,
	.mac_verify_atomic = sha2_mac_verify_atomic
};

/* Forward declarations of the KCF context-management entry points. */
static int sha2_create_ctx_template(crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t *, size_t *);
static int sha2_free_context(crypto_ctx_t *);

/* Context-management operations vector handed to KCF. */
static const crypto_ctx_ops_t sha2_ctx_ops = {
	.create_ctx_template = sha2_create_ctx_template,
	.free_context = sha2_free_context
};
95
96
/* Top-level operations vector: digest ops slot is NULL, MAC + ctx only. */
static const crypto_ops_t sha2_crypto_ops = {
	NULL,
	&sha2_mac_ops,
	&sha2_ctx_ops,
};

/* Provider description registered with KCF in sha2_mod_init(). */
static const crypto_provider_info_t sha2_prov_info = {
	"SHA2 Software Provider",
	&sha2_crypto_ops,
	sizeof (sha2_mech_info_tab) / sizeof (crypto_mech_info_t),
	sha2_mech_info_tab
};

/* Handle returned by crypto_register_provider(); 0 means not registered. */
static crypto_kcf_provider_handle_t sha2_prov_handle = 0;
110
111
int
112
sha2_mod_init(void)
113
{
114
int ret;
115
116
/*
117
* Register with KCF. If the registration fails, log an
118
* error but do not uninstall the module, since the functionality
119
* provided by misc/sha2 should still be available.
120
*/
121
if ((ret = crypto_register_provider(&sha2_prov_info,
122
&sha2_prov_handle)) != CRYPTO_SUCCESS)
123
cmn_err(CE_WARN, "sha2 _init: "
124
"crypto_register_provider() failed (0x%x)", ret);
125
126
return (0);
127
}
128
129
int
130
sha2_mod_fini(void)
131
{
132
int ret = 0;
133
134
if (sha2_prov_handle != 0) {
135
if ((ret = crypto_unregister_provider(sha2_prov_handle)) !=
136
CRYPTO_SUCCESS) {
137
cmn_err(CE_WARN,
138
"sha2 _fini: crypto_unregister_provider() "
139
"failed (0x%x)", ret);
140
return (EBUSY);
141
}
142
sha2_prov_handle = 0;
143
}
144
145
return (ret);
146
}
147
148
/*
 * Helper SHA2 digest update function for uio data.
 *
 * Feeds data->cd_length bytes, starting at data->cd_offset, from the
 * uio's iovec chain into sha2_ctx via SHA2Update().
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_ARGUMENTS_BAD for non-kernel uio
 * segments, or CRYPTO_DATA_LEN_RANGE when offset/length exceed the
 * data actually supplied.
 */
static int
sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx = 0;
	size_t cur_len;

	/* we support only kernel buffer */
	if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.  On return, offset is relative to that iovec.
	 */
	offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx);
	if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.
	 */
	while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) {
		cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) -
		    offset, length);

		SHA2Update(sha2_ctx, (uint8_t *)zfs_uio_iovbase(data->cd_uio,
		    vec_idx) + offset, cur_len);
		length -= cur_len;
		vec_idx++;
		/* only the first iovec has a non-zero starting offset */
		offset = 0;
	}

	if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed, i.e.
		 * The caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
201
202
/*
 * Helper SHA2 digest final function for uio data.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default SHA2 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least the algorithm's digest length bytes.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_ARGUMENTS_BAD for non-kernel uio
 * segments, or CRYPTO_DATA_LEN_RANGE when the iovecs cannot hold the
 * requested digest at the requested offset.
 */
static int
sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	uint_t vec_idx = 0;

	/* we support only kernel buffer */
	if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing ptr to the digest to
	 * be returned.
	 */
	offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx);
	if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) {
		/*
		 * The caller specified an offset that is
		 * larger than the total size of the buffers
		 * it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <=
	    zfs_uio_iovlen(digest->cd_uio, vec_idx)) {
		/*
		 * The computed SHA2 digest will fit in the current
		 * iovec.
		 */
		ASSERT3U(sha2_ctx->algotype, ==, SHA512_HMAC_MECH_INFO_TYPE);
		if (digest_len != SHA512_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest_scratch, sha2_ctx);

			memcpy((uchar_t *)
			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
			    digest_scratch, digest_len);
		} else {
			/* full-size digest: finalize directly in place */
			SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
			    cd_uio, vec_idx) + offset,
			    sha2_ctx);

		}
	} else {
		/*
		 * The computed digest will be crossing one or more iovec's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[SHA512_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA2Final(digest_tmp, sha2_ctx);

		while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
			cur_len =
			    MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
			    offset, length);
			memcpy(
			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
			    digest_tmp + scratch_offset,
			    cur_len);

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			/* only the first iovec has a non-zero offset */
			offset = 0;
		}

		if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
300
301
/*
302
* KCF software provider mac entry points.
303
*
304
* SHA2 HMAC is: SHA2(key XOR opad, SHA2(key XOR ipad, text))
305
*
306
* Init:
307
* The initialization routine initializes what we denote
308
* as the inner and outer contexts by doing
309
* - for inner context: SHA2(key XOR ipad)
310
* - for outer context: SHA2(key XOR opad)
311
*
312
* Update:
313
* Each subsequent SHA2 HMAC update will result in an
314
* update of the inner context with the specified data.
315
*
316
* Final:
317
* The SHA2 HMAC final will do a SHA2 final operation on the
318
* inner context, and the resulting digest will be used
319
* as the data for an update on the outer context. Last
320
* but not least, a SHA2 final on the outer context will
321
* be performed to obtain the SHA2 HMAC digest to return
322
* to the user.
323
*/
324
325
/*
326
* Initialize a SHA2-HMAC context.
327
*/
328
static void
329
sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
330
{
331
uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
332
uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
333
int i, block_size, blocks_per_int64;
334
335
/* Determine the block size */
336
ASSERT3U(ctx->hc_mech_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
337
block_size = SHA512_HMAC_BLOCK_SIZE;
338
blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
339
340
(void) memset(ipad, 0, block_size);
341
(void) memset(opad, 0, block_size);
342
343
if (keyval != NULL) {
344
(void) memcpy(ipad, keyval, length_in_bytes);
345
(void) memcpy(opad, keyval, length_in_bytes);
346
} else {
347
ASSERT0(length_in_bytes);
348
}
349
350
/* XOR key with ipad (0x36) and opad (0x5c) */
351
for (i = 0; i < blocks_per_int64; i ++) {
352
ipad[i] ^= 0x3636363636363636;
353
opad[i] ^= 0x5c5c5c5c5c5c5c5c;
354
}
355
356
/* perform SHA2 on ipad */
357
SHA2Init(ctx->hc_mech_type, &ctx->hc_icontext);
358
SHA2Update(&ctx->hc_icontext, (uint8_t *)ipad, block_size);
359
360
/* perform SHA2 on opad */
361
SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
362
SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
363
}
364
365
/*
 * KCF mac_init entry point: allocate and initialize a per-request
 * SHA512-HMAC context, either by copying a precomputed context
 * template or by deriving the inner/outer contexts from the key.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_MECHANISM_INVALID for any mechanism
 * other than SHA512-HMAC, or CRYPTO_HOST_MEMORY if allocation fails.
 */
static int
sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template)
{
	int ret = CRYPTO_SUCCESS;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
	uint_t sha_digest_len, sha_hmac_block_size;

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	ctx->cc_provider_private =
	    kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP);
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
	if (ctx_template != NULL) {
		/* reuse context template */
		memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template,
		    sizeof (sha2_hmac_ctx_t));
	} else {
		/* no context template, compute context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			uchar_t digested_key[SHA512_DIGEST_LENGTH];
			sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;

			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 *
			 * NOTE(review): cm_type / 3 presumably maps the
			 * HMAC mechanism number to the plain SHA2
			 * mechanism for SHA2Init -- depends on the KCF
			 * mechanism numbering; confirm against icp.h.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &hmac_ctx->hc_icontext,
			    key->ck_data, keylen_in_bytes, digested_key);
			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
			    digested_key, sha_digest_len);
		} else {
			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
			    key->ck_data, keylen_in_bytes);
		}
	}

	/*
	 * Nothing above sets ret to a failure value, so this cleanup is
	 * currently unreachable; it is kept as a guard for future
	 * failure paths.
	 */
	if (ret != CRYPTO_SUCCESS) {
		memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
		kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
		ctx->cc_provider_private = NULL;
	}

	return (ret);
}
428
429
static int
430
sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data)
431
{
432
int ret = CRYPTO_SUCCESS;
433
434
ASSERT(ctx->cc_provider_private != NULL);
435
436
/*
437
* Do a SHA2 update of the inner context using the specified
438
* data.
439
*/
440
switch (data->cd_format) {
441
case CRYPTO_DATA_RAW:
442
SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext,
443
(uint8_t *)data->cd_raw.iov_base + data->cd_offset,
444
data->cd_length);
445
break;
446
case CRYPTO_DATA_UIO:
447
ret = sha2_digest_update_uio(
448
&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, data);
449
break;
450
default:
451
ret = CRYPTO_ARGUMENTS_BAD;
452
}
453
454
return (ret);
455
}
456
457
/*
 * KCF mac_final entry point: finish the HMAC computation and write
 * the digest into the caller's buffer (raw or uio).
 *
 * If the output buffer is too small, returns CRYPTO_BUFFER_TOO_SMALL
 * with mac->cd_length set to the required size and leaves the context
 * intact so the caller can retry.  On any other path the context is
 * scrubbed and freed before returning.
 */
static int
sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	uint32_t digest_len, sha_digest_len;

	ASSERT(ctx->cc_provider_private != NULL);

	/* Set the digest lengths to values appropriate to the mechanism */
	switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		break;
	default:
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
		mac->cd_length = digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext);

	/*
	 * Do a SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, digest,
	    sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computing
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		/*
		 * digest_len == sha_digest_len for SHA512-HMAC (set
		 * together above), so the short-digest branch is not
		 * taken in this build; kept for symmetry with the
		 * multi-mechanism upstream code.
		 */
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
			memcpy((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest, digest_len);
		} else {
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(
		    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS)
		mac->cd_length = digest_len;
	else
		mac->cd_length = 0;

	/* scrub key-derived state before freeing the context */
	memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
	kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
538
539
/*
 * Feed "data" (raw or uio) into the inner context of the stack-local
 * HMAC context "ctx", storing any error in "ret".  Shared by the two
 * atomic entry points below.
 */
#define	SHA2_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA2Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha2_digest_update_uio(&(ctx).hc_icontext, data);	\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
553
554
/*
 * KCF mac_atomic entry point: compute a complete SHA512-HMAC of "data"
 * into "mac" in one call, using a stack-allocated context (either
 * copied from the template or derived from the key).
 *
 * On failure the stack context is scrubbed and mac->cd_length is set
 * to 0; on success mac->cd_length is set to the digest length.
 */
static int
sha2_mac_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	sha2_hmac_ctx_t sha2_hmac_ctx;
	uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	if (ctx_template != NULL) {
		/* reuse context template */
		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
	} else {
		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
		/* no context template, initialize context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &sha2_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
			    sha_digest_len);
		} else {
			sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* do a SHA2 update of the inner context using the specified data */
	SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
	SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		/*
		 * digest_len == sha_digest_len here (set together above),
		 * so the short-digest branch is effectively dead in this
		 * single-mechanism build.
		 */
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
			memcpy((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest, digest_len);
		} else {
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
		return (CRYPTO_SUCCESS);
	}
bail:
	/* scrub the stack context, which contains key-derived state */
	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
656
657
/*
 * KCF mac_verify_atomic entry point: recompute the SHA512-HMAC of
 * "data" and compare it against the expected MAC in "mac".
 *
 * Returns CRYPTO_SUCCESS when the MACs match, CRYPTO_INVALID_MAC on
 * length or content mismatch, or an argument/mechanism error code.
 */
static int
sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	sha2_hmac_ctx_t sha2_hmac_ctx;
	uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	if (ctx_template != NULL) {
		/* reuse context template */
		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
	} else {
		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
		/* no context template, initialize context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &sha2_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
			    sha_digest_len);
		} else {
			sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* a wrong-length MAC can be rejected before doing any hashing */
	if (mac->cd_length != digest_len) {
		ret = CRYPTO_INVALID_MAC;
		goto bail;
	}

	/* do a SHA2 update of the inner context using the specified data */
	SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/* do a SHA2 final on the inner context */
	SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
	SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);

	/*
	 * Compare the computed digest against the expected digest passed
	 * as argument.
	 */

	switch (mac->cd_format) {

	case CRYPTO_DATA_RAW:
		if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
		    mac->cd_offset, digest_len) != 0)
			ret = CRYPTO_INVALID_MAC;
		break;

	case CRYPTO_DATA_UIO: {
		off_t offset = mac->cd_offset;
		uint_t vec_idx = 0;
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		/*
		 * we support only kernel buffer
		 *
		 * NOTE(review): this early return skips the "bail"
		 * scrubbing of sha2_hmac_ctx and does not zero
		 * mac->cd_length, unlike the other failure paths --
		 * confirm whether this asymmetry is intentional.
		 */
		if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE)
			return (CRYPTO_ARGUMENTS_BAD);

		/* jump to the first iovec containing the expected digest */
		offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx);
		if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) {
			/*
			 * The caller specified an offset that is
			 * larger than the total size of the buffers
			 * it provided.
			 */
			ret = CRYPTO_DATA_LEN_RANGE;
			break;
		}

		/* do the comparison of computed digest vs specified one */
		while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) {
			cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
			    offset, length);

			if (memcmp(digest + scratch_offset,
			    zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
			    cur_len) != 0) {
				ret = CRYPTO_INVALID_MAC;
				break;
			}

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}
		break;
	}

	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	return (ret);
bail:
	/* scrub the stack context, which contains key-derived state */
	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
797
798
/*
 * KCF software provider context management entry points.
 */

/*
 * Precompute a reusable SHA512-HMAC context template for "key" so that
 * per-request inits can memcpy it instead of re-deriving the inner and
 * outer contexts.  On success, *ctx_template receives a kmem-allocated
 * sha2_hmac_ctx_t (ownership passes to the caller, who frees it with
 * kmem_free of *ctx_template_size bytes) and *ctx_template_size its
 * size.
 */
static int
sha2_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size)
{
	sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
	uint32_t sha_digest_len, sha_hmac_block_size;

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	/*
	 * Allocate and initialize SHA2 context.
	 */
	sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP);
	if (sha2_hmac_ctx_tmpl == NULL)
		return (CRYPTO_HOST_MEMORY);

	sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;

	if (keylen_in_bytes > sha_hmac_block_size) {
		uchar_t digested_key[SHA512_DIGEST_LENGTH];

		/*
		 * Hash the passed-in key to get a smaller key.
		 * The inner context is used since it hasn't been
		 * initialized yet.
		 */
		PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
		    &sha2_hmac_ctx_tmpl->hc_icontext,
		    key->ck_data, keylen_in_bytes, digested_key);
		sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key,
		    sha_digest_len);
	} else {
		sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data,
		    keylen_in_bytes);
	}

	*ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl;
	*ctx_template_size = sizeof (sha2_hmac_ctx_t);

	return (CRYPTO_SUCCESS);
}
855
856
static int
857
sha2_free_context(crypto_ctx_t *ctx)
858
{
859
uint_t ctx_len;
860
861
if (ctx->cc_provider_private == NULL)
862
return (CRYPTO_SUCCESS);
863
864
ASSERT3U(PROV_SHA2_CTX(ctx)->sc_mech_type, ==,
865
SHA512_HMAC_MECH_INFO_TYPE);
866
ctx_len = sizeof (sha2_hmac_ctx_t);
867
868
memset(ctx->cc_provider_private, 0, ctx_len);
869
kmem_free(ctx->cc_provider_private, ctx_len);
870
ctx->cc_provider_private = NULL;
871
872
return (CRYPTO_SUCCESS);
873
}
874
875