Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/crypto/phmac_s390.c
51383 views
1
// SPDX-License-Identifier: GPL-2.0+
2
/*
3
* Copyright IBM Corp. 2025
4
*
5
* s390 specific HMAC support for protected keys.
6
*/
7
8
#define pr_fmt(fmt) "phmac_s390: " fmt
9
10
#include <asm/cpacf.h>
11
#include <asm/pkey.h>
12
#include <crypto/engine.h>
13
#include <crypto/hash.h>
14
#include <crypto/internal/hash.h>
15
#include <crypto/sha2.h>
16
#include <linux/atomic.h>
17
#include <linux/cpufeature.h>
18
#include <linux/delay.h>
19
#include <linux/miscdevice.h>
20
#include <linux/module.h>
21
#include <linux/spinlock.h>
22
23
/* the single crypto engine used to process async phmac requests */
static struct crypto_engine *phmac_crypto_engine;
/* max queue length of the crypto engine */
#define MAX_QLEN 10

/* module parameter: if true, clear key material may be converted (default N) */
static bool pkey_clrkey_allowed;
module_param_named(clrkey, pkey_clrkey_allowed, bool, 0444);
MODULE_PARM_DESC(clrkey, "Allow clear key material (default N)");
29
30
/*
 * A simple hash walk helper
 */

/* Walk state over the scatterlist data of an ahash request. */
struct hash_walk_helper {
	struct crypto_hash_walk walk;	/* the underlying crypto hash walk */
	const u8 *walkaddr;		/* address of the current data hunk */
	int walkbytes;			/* nr of bytes left in the current hunk */
};
39
40
/*
41
* Prepare hash walk helper.
42
* Set up the base hash walk, fill walkaddr and walkbytes.
43
* Returns 0 on success or negative value on error.
44
*/
45
static inline int hwh_prepare(struct ahash_request *req,
46
struct hash_walk_helper *hwh)
47
{
48
hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
49
if (hwh->walkbytes < 0)
50
return hwh->walkbytes;
51
hwh->walkaddr = hwh->walk.data;
52
return 0;
53
}
54
55
/*
56
* Advance hash walk helper by n bytes.
57
* Progress the walkbytes and walkaddr fields by n bytes.
58
* If walkbytes is then 0, pull next hunk from hash walk
59
* and update walkbytes and walkaddr.
60
* If n is negative, unmap hash walk and return error.
61
* Returns 0 on success or negative value on error.
62
*/
63
static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
64
{
65
if (n < 0)
66
return crypto_hash_walk_done(&hwh->walk, n);
67
68
hwh->walkbytes -= n;
69
hwh->walkaddr += n;
70
if (hwh->walkbytes > 0)
71
return 0;
72
73
hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
74
if (hwh->walkbytes < 0)
75
return hwh->walkbytes;
76
77
hwh->walkaddr = hwh->walk.data;
78
return 0;
79
}
80
81
/*
82
* KMAC param block layout for sha2 function codes:
83
* The layout of the param block for the KMAC instruction depends on the
84
* blocksize of the used hashing sha2-algorithm function codes. The param block
85
* contains the hash chaining value (cv), the input message bit-length (imbl)
86
* and the hmac-secret (key). To prevent code duplication, the sizes of all
87
* these are calculated based on the blocksize.
88
*
89
* param-block:
90
* +-------+
91
* | cv |
92
* +-------+
93
* | imbl |
94
* +-------+
95
* | key |
96
* +-------+
97
*
98
* sizes:
99
* part | sh2-alg | calculation | size | type
100
* -----+---------+-------------+------+--------
101
* cv | 224/256 | blocksize/2 | 32 | u64[8]
102
* | 384/512 | | 64 | u128[8]
103
* imbl | 224/256 | blocksize/8 | 8 | u64
104
* | 384/512 | | 16 | u128
105
* key | 224/256 | blocksize | 96 | u8[96]
106
* | 384/512 | | 160 | u8[160]
107
*/
108
109
#define MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
110
#define MAX_IMBL_SIZE sizeof(u128)
111
#define MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
112
113
#define SHA2_CV_SIZE(bs) ((bs) >> 1)
114
#define SHA2_IMBL_SIZE(bs) ((bs) >> 3)
115
116
#define SHA2_IMBL_OFFSET(bs) (SHA2_CV_SIZE(bs))
117
#define SHA2_KEY_OFFSET(bs) (SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
118
119
#define PHMAC_MAX_KEYSIZE 256
120
#define PHMAC_SHA256_PK_SIZE (SHA256_BLOCK_SIZE + 32)
121
#define PHMAC_SHA512_PK_SIZE (SHA512_BLOCK_SIZE + 32)
122
#define PHMAC_MAX_PK_SIZE PHMAC_SHA512_PK_SIZE
123
124
/* phmac protected key struct */
struct phmac_protkey {
	u32 type;			/* pkey key type as filled in by pkey_key2protkey() */
	u32 len;			/* nr of valid bytes in protkey[] */
	u8 protkey[PHMAC_MAX_PK_SIZE];	/* the protected key blob */
};

/* values for phmac_tfm_ctx::pk_state; negative values hold a convert rc */
#define PK_STATE_NO_KEY 0
#define PK_STATE_CONVERT_IN_PROGRESS 1
#define PK_STATE_VALID 2

/* phmac tfm context */
struct phmac_tfm_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PHMAC_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;
	/* if state is valid, pk holds the protected key */
	struct phmac_protkey pk;
};
155
156
/* Layout of general register 0 as used by the CPACF KMAC instruction. */
union kmac_gr0 {
	unsigned long reg;
	struct {
		unsigned long : 48;
		unsigned long ikp : 1;
		unsigned long iimp : 1;	/* 1 while feeding full blocks, 0 on final */
		unsigned long ccup : 1;
		unsigned long : 6;
		unsigned long fc : 7;	/* cpacf function code */
	};
};

/* Per-request kmac hash state: param block, gr0 and partial-block buffer. */
struct kmac_sha2_ctx {
	/* cv + imbl + protected key, see the param block layout above */
	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE];
	union kmac_gr0 gr0;
	u8 buf[MAX_BLOCK_SIZE];	/* collects data until a full block is available */
	u64 buflen[2];		/* 128 bit total byte counter: [0] low, [1] high */
};

/* Which operation the crypto engine callback shall perform. */
enum async_op {
	OP_NOP = 0,
	OP_UPDATE,
	OP_FINAL,
	OP_FINUP,
};

/* phmac request context */
struct phmac_req_ctx {
	struct hash_walk_helper hwh;	/* walk state over the request data */
	struct kmac_sha2_ctx kmac_ctx;	/* the kmac hash state */
	enum async_op async_op;		/* pending op for the engine callback */
};
188
189
/*
 * Pkey 'token' struct used to derive a protected key value from a clear key.
 */
struct hmac_clrkey_token {
	u8 type;	/* 0x00, see make_clrkey_token() */
	u8 res0[3];
	u8 version;	/* 0x02, see make_clrkey_token() */
	u8 res1[3];
	u32 keytype;	/* PKEY_KEYTYPE_HMAC_512 or PKEY_KEYTYPE_HMAC_1024 */
	u32 len;	/* nr of key bytes following (the hmac block size) */
	u8 key[];	/* the clear key material */
} __packed;
201
202
/*
 * hash_key() - one-shot sha2 digest of the input via the CPACF KLMD
 * instruction. Used to shrink key material longer than the hmac block
 * size down to digestsize bytes.
 * Returns 0 on success or -EINVAL for an unsupported digest size.
 */
static int hash_key(const u8 *in, unsigned int inlen,
		    u8 *digest, unsigned int digestsize)
{
	unsigned long func;
	union {
		struct sha256_paramblock {
			u32 h[8];
			u64 mbl;
		} sha256;
		struct sha512_paramblock {
			u64 h[8];
			u128 mbl;
		} sha512;
	} __packed param;

/* fill in the sha2 initial hash values and the message bit length */
#define PARAM_INIT(x, y, z) \
	param.sha##x.h[0] = SHA##y ## _H0; \
	param.sha##x.h[1] = SHA##y ## _H1; \
	param.sha##x.h[2] = SHA##y ## _H2; \
	param.sha##x.h[3] = SHA##y ## _H3; \
	param.sha##x.h[4] = SHA##y ## _H4; \
	param.sha##x.h[5] = SHA##y ## _H5; \
	param.sha##x.h[6] = SHA##y ## _H6; \
	param.sha##x.h[7] = SHA##y ## _H7; \
	param.sha##x.mbl = (z)

	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 224, inlen * 8);
		break;
	case SHA256_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 256, inlen * 8);
		break;
	case SHA384_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 384, inlen * 8);
		break;
	case SHA512_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 512, inlen * 8);
		break;
	default:
		return -EINVAL;
	}

#undef PARAM_INIT

	cpacf_klmd(func, &param, in, inlen);

	/* the leading digestsize bytes of the param block hold the digest */
	memcpy(digest, &param, digestsize);

	return 0;
}
257
258
/*
259
* make_clrkey_token() - wrap the clear key into a pkey clearkey token.
260
*/
261
static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
262
unsigned int digestsize, u8 *dest)
263
{
264
struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
265
unsigned int blocksize;
266
int rc;
267
268
token->type = 0x00;
269
token->version = 0x02;
270
switch (digestsize) {
271
case SHA224_DIGEST_SIZE:
272
case SHA256_DIGEST_SIZE:
273
token->keytype = PKEY_KEYTYPE_HMAC_512;
274
blocksize = 64;
275
break;
276
case SHA384_DIGEST_SIZE:
277
case SHA512_DIGEST_SIZE:
278
token->keytype = PKEY_KEYTYPE_HMAC_1024;
279
blocksize = 128;
280
break;
281
default:
282
return -EINVAL;
283
}
284
token->len = blocksize;
285
286
if (clrkeylen > blocksize) {
287
rc = hash_key(clrkey, clrkeylen, token->key, digestsize);
288
if (rc)
289
return rc;
290
} else {
291
memcpy(token->key, clrkey, clrkeylen);
292
}
293
294
return 0;
295
}
296
297
/*
298
* phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct
299
* a clear key token digestible by pkey from a clear key value.
300
*/
301
static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
302
const u8 *key, unsigned int keylen)
303
{
304
if (keylen > sizeof(tfm_ctx->keybuf))
305
return -EINVAL;
306
307
memcpy(tfm_ctx->keybuf, key, keylen);
308
tfm_ctx->keylen = keylen;
309
310
return 0;
311
}
312
313
/*
 * Convert the raw key material into a protected key via PKEY api.
 * This function may sleep - don't call in non-sleeping context.
 */
static int convert_key(const u8 *key, unsigned int keylen,
		       struct phmac_protkey *pk, bool tested)
{
	u32 xflags = PKEY_XFLAG_NOMEMALLOC;
	int rc, i;

	/*
	 * Outside of the selftest, clear key material is only accepted
	 * when explicitly enabled via the 'clrkey' module parameter.
	 */
	if (tested && !pkey_clrkey_allowed)
		xflags |= PKEY_XFLAG_NOCLEARKEY;

	pk->len = sizeof(pk->protkey);

	/*
	 * In case of a busy card retry with increasing delay
	 * of 200, 400, 800 and 1600 ms - in total 3 s.
	 * Note the sleep only happens for a -EBUSY rc; the initial
	 * rc = -EIO merely drives the first loop iteration.
	 */
	for (rc = -EIO, i = 0; rc && i < 5; i++) {
		if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
			/* sleep was interrupted by a signal */
			rc = -EINTR;
			goto out;
		}
		rc = pkey_key2protkey(key, keylen,
				      pk->protkey, &pk->len, &pk->type,
				      xflags);
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
346
347
/*
 * (Re-)Convert the raw key material from the tfm ctx into a protected
 * key via convert_key() function. Update the pk_state, pk_type, pk_len
 * and the protected key in the tfm context.
 * Please note this function may be invoked concurrently with the very
 * same tfm context. The pk_lock spinlock in the context ensures an
 * atomic update of the pk and the pk state but does not guarantee any
 * order of update. So a fresh converted valid protected key may get
 * updated with an 'old' expired key value. As the cpacf instructions
 * detect this, refuse to operate with an invalid key and the calling
 * code triggers a (re-)conversion this does no harm. This may lead to
 * unnecessary additional conversion but never to invalid data on the
 * hash operation.
 */
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx, bool tested)
{
	struct phmac_protkey pk;
	int rc;

	/* announce the conversion; readers see IN_PROGRESS until done */
	spin_lock_bh(&tfm_ctx->pk_lock);
	tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
	spin_unlock_bh(&tfm_ctx->pk_lock);

	/* may sleep - must not be called under the spinlock */
	rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk, tested);

	/* update context */
	spin_lock_bh(&tfm_ctx->pk_lock);
	if (rc) {
		/* a negative pk_state carries the conversion failure rc */
		tfm_ctx->pk_state = rc;
	} else {
		tfm_ctx->pk_state = PK_STATE_VALID;
		tfm_ctx->pk = pk;
	}
	spin_unlock_bh(&tfm_ctx->pk_lock);

	/* wipe the on-stack copy of the protected key */
	memzero_explicit(&pk, sizeof(pk));
	pr_debug("rc=%d\n", rc);
	return rc;
}
386
387
/*
388
* kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
389
*/
390
static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
391
u64 buflen_hi, unsigned int blocksize)
392
{
393
u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
394
395
switch (blocksize) {
396
case SHA256_BLOCK_SIZE:
397
*(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
398
break;
399
case SHA512_BLOCK_SIZE:
400
*(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
401
break;
402
default:
403
break;
404
}
405
}
406
407
/*
 * phmac_kmac_update() - feed the request data into the CPACF KMAC
 * instruction in full-block chunks; a trailing partial block is
 * buffered in the kmac context for a later call.
 * With maysleep == false an expired protected key makes the function
 * bail out with -EKEYEXPIRED; with maysleep == true the key is
 * re-converted in place and the operation retried.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	bool tested = crypto_ahash_tested(tfm);
	unsigned int offset, k, n;
	int rc = 0;

	/*
	 * The walk is always mapped when this function is called.
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */

	while (hwh->walkbytes > 0) {
		/* check sha2 context buffer */
		offset = ctx->buflen[0] % bs;
		if (offset + hwh->walkbytes < bs)
			goto store;

		if (offset) {
			/* fill ctx buffer up to blocksize and process this block */
			n = bs - offset;
			memcpy(ctx->buf + offset, hwh->walkaddr, n);
			ctx->gr0.iimp = 1;
			for (;;) {
				k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
				if (likely(k == bs))
					break;
				if (unlikely(k > 0)) {
					/*
					 * Can't deal with hunks smaller than blocksize.
					 * And kmac should always return the nr of
					 * processed bytes as 0 or a multiple of the
					 * blocksize.
					 */
					rc = -EIO;
					goto out;
				}
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx, tested);
				if (rc)
					goto out;
				/* copy the fresh protected key into the param block */
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
			/* 128 bit total byte counter update with carry */
			ctx->buflen[0] += n;
			if (ctx->buflen[0] < n)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, n);
			if (unlikely(rc))
				goto out;
			offset = 0;
		}

		/* process as many blocks as possible from the walk */
		while (hwh->walkbytes >= bs) {
			n = (hwh->walkbytes / bs) * bs;
			ctx->gr0.iimp = 1;
			k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
			if (likely(k > 0)) {
				/* account for the bytes kmac did process */
				ctx->buflen[0] += k;
				if (ctx->buflen[0] < k)
					ctx->buflen[1]++;
				rc = hwh_advance(hwh, k);
				if (unlikely(rc))
					goto out;
			}
			if (unlikely(k < n)) {
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx, tested);
				if (rc)
					goto out;
				/* copy the fresh protected key into the param block */
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
		}

store:
		/* store incomplete block in context buffer */
		if (hwh->walkbytes) {
			memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
			ctx->buflen[0] += hwh->walkbytes;
			if (ctx->buflen[0] < hwh->walkbytes)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, hwh->walkbytes);
			if (unlikely(rc))
				goto out;
		}

	} /* end of while (hwh->walkbytes > 0) */

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
520
521
/*
 * phmac_kmac_final() - run the final CPACF KMAC invocation over the
 * buffered partial block and copy the resulting mac to req->result.
 * With maysleep == false an expired protected key makes the function
 * bail out with -EKEYEXPIRED; with maysleep == true the key is
 * re-converted in place and the operation retried.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	bool tested = crypto_ahash_tested(tfm);
	unsigned int k, n;
	int rc = 0;

	/* nr of leftover bytes in the context buffer */
	n = ctx->buflen[0] % bs;
	/* iimp = 0 marks this as the final kmac invocation */
	ctx->gr0.iimp = 0;
	kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
	for (;;) {
		k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
		if (likely(k == n))
			break;
		if (unlikely(k > 0)) {
			/* Can't deal with hunks smaller than blocksize. */
			rc = -EIO;
			goto out;
		}
		/* protected key is invalid and needs re-conversion */
		if (!maysleep) {
			rc = -EKEYEXPIRED;
			goto out;
		}
		rc = phmac_convert_key(tfm_ctx, tested);
		if (rc)
			goto out;
		/* copy the fresh protected key into the param block */
		spin_lock_bh(&tfm_ctx->pk_lock);
		memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
		       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
		spin_unlock_bh(&tfm_ctx->pk_lock);
	}

	/* the param block now starts with the final mac value */
	memcpy(req->result, ctx->param, ds);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
565
566
/*
 * phmac_init() - ahash init: reset the request context and seed the
 * kmac context with the function code and protected key from the tfm.
 * Returns 0 on success or -ENOKEY if no key has been set.
 */
static int phmac_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	int rc = 0;

	/* zero request context (includes the kmac sha2 context) */
	memset(req_ctx, 0, sizeof(*req_ctx));

	/*
	 * setkey() should have set a valid fc into the tfm context.
	 * Copy this function code into the gr0 field of the kmac context.
	 */
	if (!tfm_ctx->fc) {
		rc = -ENOKEY;
		goto out;
	}
	kmac_ctx->gr0.fc = tfm_ctx->fc;

	/*
	 * Copy the pk from tfm ctx into kmac ctx. The protected key
	 * may be outdated but update() and final() will handle this.
	 */
	spin_lock_bh(&tfm_ctx->pk_lock);
	memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
	       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
	spin_unlock_bh(&tfm_ctx->pk_lock);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
601
602
/*
 * phmac_update() - ahash update: try the update synchronously first,
 * fall back to async processing via the crypto engine when the key
 * needs (re-)conversion or the engine is already in use for this tfm.
 * Returns 0, -EINPROGRESS (request handed to the engine) or a
 * negative value on error.
 */
static int phmac_update(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_UPDATE;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* not handed over to the engine: unmap walk and wipe hash state */
	if (rc != -EINPROGRESS) {
		hwh_advance(hwh, rc);
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
645
646
/*
 * phmac_final() - ahash final: try the final kmac synchronously first,
 * fall back to async processing via the crypto engine when the key
 * needs (re-)conversion or the engine is already in use for this tfm.
 * Returns 0, -EINPROGRESS (request handed to the engine) or a
 * negative value on error.
 */
static int phmac_final(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	int rc = 0;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->async_op = OP_FINAL;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

out:
	/* wipe the hash state unless the engine now owns the request */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
680
681
/*
 * phmac_finup() - ahash finup: try update and final synchronously,
 * fall back to async processing via the crypto engine when the key
 * needs (re-)conversion or the engine is already in use for this tfm.
 * async_op tracks how far the sync path got: OP_FINUP means update
 * is still pending, OP_FINAL means only the final step remains.
 * Returns 0, -EINPROGRESS (request handed to the engine) or a
 * negative value on error.
 */
static int phmac_finup(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	req_ctx->async_op = OP_FINUP;

	/* Try synchronous operations if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			req_ctx->async_op = OP_FINAL;
	}
	if (!rc && req_ctx->async_op == OP_FINAL &&
	    !atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If sync operation failed or key expired or there are already
	 * requests enqueued via engine, fallback to async. Mark tfm as
	 * using engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		/* req->async_op has been set to either OP_FINUP or OP_FINAL */
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	/* not handed over to the engine: unmap the walk */
	if (rc != -EINPROGRESS)
		hwh_advance(hwh, rc);

out:
	/* wipe the hash state unless the engine now owns the request */
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}
732
733
/*
 * phmac_digest() - ahash digest: simply init followed by finup.
 * Returns 0, -EINPROGRESS or a negative value on error.
 */
static int phmac_digest(struct ahash_request *req)
{
	int rc = phmac_init(req);

	if (!rc)
		rc = phmac_finup(req);

	pr_debug("rc=%d\n", rc);
	return rc;
}
747
748
/*
 * phmac_setkey() - ahash setkey: store the key material in the tfm
 * context, convert it into a protected key and derive the cpacf
 * function code from the digest size. During the selftest the raw
 * hmac clear key is first wrapped into a pkey clear key token.
 * Returns 0 on success or a negative value on error.
 */
static int phmac_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	bool tested = crypto_ahash_tested(tfm);
	unsigned int tmpkeylen;
	u8 *tmpkey = NULL;
	int rc = 0;

	if (!tested) {
		/*
		 * selftest running: key is a raw hmac clear key and needs
		 * to get embedded into a 'clear key token' in order to have
		 * it correctly processed by the pkey module.
		 */
		tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
		tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
		if (!tmpkey) {
			rc = -ENOMEM;
			goto out;
		}
		rc = make_clrkey_token(key, keylen, ds, tmpkey);
		if (rc)
			goto out;
		keylen = tmpkeylen;
		key = tmpkey;
	}

	/* copy raw key into tfm context */
	rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = phmac_convert_key(tfm_ctx, tested);
	if (rc)
		goto out;

	/* set function code in tfm context, check for valid pk type */
	switch (ds) {
	case SHA224_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
		break;
	case SHA256_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
		break;
	case SHA384_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
		break;
	case SHA512_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
		break;
	default:
		tfm_ctx->fc = 0;
		rc = -EINVAL;
	}

out:
	kfree(tmpkey);
	pr_debug("rc=%d\n", rc);
	return rc;
}
824
825
static int phmac_export(struct ahash_request *req, void *out)
826
{
827
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
828
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
829
830
memcpy(out, ctx, sizeof(*ctx));
831
832
return 0;
833
}
834
835
static int phmac_import(struct ahash_request *req, const void *in)
836
{
837
struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
838
struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
839
840
memset(req_ctx, 0, sizeof(*req_ctx));
841
memcpy(ctx, in, sizeof(*ctx));
842
843
return 0;
844
}
845
846
static int phmac_init_tfm(struct crypto_ahash *tfm)
847
{
848
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
849
850
memset(tfm_ctx, 0, sizeof(*tfm_ctx));
851
spin_lock_init(&tfm_ctx->pk_lock);
852
853
crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));
854
855
return 0;
856
}
857
858
static void phmac_exit_tfm(struct crypto_ahash *tfm)
859
{
860
struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
861
862
memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
863
memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
864
}
865
866
/*
 * phmac_do_one_request() - crypto engine callback processing one
 * enqueued phmac request (update, finup or final) with sleeping
 * allowed, so an expired protected key can be re-converted inline.
 * Returns the request rc, -ENOSPC to trigger a re-schedule, or
 * -EOPNOTSUPP for an unknown async op.
 */
static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc = -EINVAL;

	/*
	 * Three kinds of requests come in here:
	 * 1. req->async_op == OP_UPDATE with req->nbytes > 0
	 * 2. req->async_op == OP_FINUP with req->nbytes > 0
	 * 3. req->async_op == OP_FINAL
	 * For update and finup the hwh walk has already been prepared
	 * by the caller. For final there is no hwh walk needed.
	 */

	switch (req_ctx->async_op) {
	case OP_UPDATE:
	case OP_FINUP:
		rc = phmac_kmac_update(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		} else if (rc) {
			/* unrecoverable failure: unmap the walk */
			hwh_advance(hwh, rc);
			goto out;
		}
		if (req_ctx->async_op == OP_UPDATE)
			break;
		/* finup: update done, proceed with the final step */
		req_ctx->async_op = OP_FINAL;
		fallthrough;
	case OP_FINAL:
		rc = phmac_kmac_final(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in process.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediately re-invocation of this callback,
			 * tell scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		}
		break;
	default:
		/* unknown/unsupported/unimplemented asynch op */
		return -EOPNOTSUPP;
	}

out:
	/* wipe the hash state once the request is complete or failed */
	if (rc || req_ctx->async_op == OP_FINAL)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&tfm_ctx->via_engine_ctr);
	crypto_finalize_hash_request(engine, req, rc);
	local_bh_enable();
	return rc;
}
938
939
/*
 * Build one ahash_engine_alg instance for the sha2 flavour x
 * (x = 224, 256, 384 or 512).
 */
#define S390_ASYNC_PHMAC_ALG(x) \
{ \
	.base = { \
		.init = phmac_init, \
		.update = phmac_update, \
		.final = phmac_final, \
		.finup = phmac_finup, \
		.digest = phmac_digest, \
		.setkey = phmac_setkey, \
		.import = phmac_import, \
		.export = phmac_export, \
		.init_tfm = phmac_init_tfm, \
		.exit_tfm = phmac_exit_tfm, \
		.halg = { \
			.digestsize = SHA##x##_DIGEST_SIZE, \
			.statesize = sizeof(struct kmac_sha2_ctx), \
			.base = { \
				.cra_name = "phmac(sha" #x ")", \
				.cra_driver_name = "phmac_s390_sha" #x, \
				.cra_blocksize = SHA##x##_BLOCK_SIZE, \
				.cra_priority = 400, \
				.cra_flags = CRYPTO_ALG_ASYNC | \
					     CRYPTO_ALG_NO_FALLBACK, \
				.cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
				.cra_module = THIS_MODULE, \
			}, \
		}, \
	}, \
	.op = { \
		.do_one_request = phmac_do_one_request, \
	}, \
}
971
972
/* Table of the phmac algorithms, one entry per supported sha2 flavour. */
static struct phmac_alg {
	unsigned int fc;		/* cpacf KMAC function code */
	struct ahash_engine_alg alg;	/* the crypto engine ahash algorithm */
	bool registered;		/* set when registration succeeded */
} phmac_algs[] = {
	{
		.fc = CPACF_KMAC_PHMAC_SHA_224,
		.alg = S390_ASYNC_PHMAC_ALG(224),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_256,
		.alg = S390_ASYNC_PHMAC_ALG(256),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_384,
		.alg = S390_ASYNC_PHMAC_ALG(384),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_512,
		.alg = S390_ASYNC_PHMAC_ALG(512),
	}
};

/* Pseudo misc device used as parent device for the crypto engine. */
static struct miscdevice phmac_dev = {
	.name = "phmac",
	.minor = MISC_DYNAMIC_MINOR,
};
996
997
static void s390_phmac_exit(void)
998
{
999
struct phmac_alg *phmac;
1000
int i;
1001
1002
if (phmac_crypto_engine) {
1003
crypto_engine_stop(phmac_crypto_engine);
1004
crypto_engine_exit(phmac_crypto_engine);
1005
}
1006
1007
for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
1008
phmac = &phmac_algs[i];
1009
if (phmac->registered)
1010
crypto_engine_unregister_ahash(&phmac->alg);
1011
}
1012
1013
misc_deregister(&phmac_dev);
1014
}
1015
1016
/*
 * s390_phmac_init() - module init: check for the required CPACF KLMD
 * facilities, register the pseudo misc device, set up the crypto
 * engine and register each phmac algorithm whose KMAC function code
 * is available on this machine.
 * Returns 0 on success or a negative value on error.
 */
static int __init s390_phmac_init(void)
{
	struct phmac_alg *phmac;
	int i, rc;

	/* for selftest cpacf klmd subfunction is needed */
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
		return -ENODEV;
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
		return -ENODEV;

	/* register a simple phmac pseudo misc device */
	rc = misc_register(&phmac_dev);
	if (rc)
		return rc;

	/* with this pseudo device alloc and start a crypto engine */
	phmac_crypto_engine =
		crypto_engine_alloc_init_and_set(phmac_dev.this_device,
						 true, false, MAX_QLEN);
	if (!phmac_crypto_engine) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crypto_engine_start(phmac_crypto_engine);
	if (rc) {
		crypto_engine_exit(phmac_crypto_engine);
		phmac_crypto_engine = NULL;
		goto out_err;
	}

	for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
		phmac = &phmac_algs[i];
		/* skip algorithms not backed by a KMAC function code */
		if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
			continue;
		rc = crypto_engine_register_ahash(&phmac->alg);
		if (rc)
			goto out_err;
		phmac->registered = true;
		pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
	}

	return 0;

out_err:
	/* s390_phmac_exit() copes with partially initialized state */
	s390_phmac_exit();
	return rc;
}
1064
1065
module_init(s390_phmac_init);
1066
module_exit(s390_phmac_exit);
1067
1068
MODULE_ALIAS_CRYPTO("phmac(sha224)");
1069
MODULE_ALIAS_CRYPTO("phmac(sha256)");
1070
MODULE_ALIAS_CRYPTO("phmac(sha384)");
1071
MODULE_ALIAS_CRYPTO("phmac(sha512)");
1072
1073
MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
1074
MODULE_LICENSE("GPL");
1075
1076