// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2025
 *
 * s390 specific HMAC support for protected keys.
 */

#define KMSG_COMPONENT "phmac_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/cpacf.h>
#include <asm/pkey.h>
#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/atomic.h>
#include <linux/cpufeature.h>
#include <linux/delay.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static struct crypto_engine *phmac_crypto_engine;
#define MAX_QLEN 10

/*
 * A simple hash walk helper
 */

struct hash_walk_helper {
	struct crypto_hash_walk walk;
	const u8 *walkaddr;
	int walkbytes;
};

/*
 * Prepare hash walk helper.
 * Set up the base hash walk, fill walkaddr and walkbytes.
 * Returns 0 on success or negative value on error.
 */
static inline int hwh_prepare(struct ahash_request *req,
			      struct hash_walk_helper *hwh)
{
	hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
	if (hwh->walkbytes < 0)
		return hwh->walkbytes;
	hwh->walkaddr = hwh->walk.data;
	return 0;
}

/*
 * Advance hash walk helper by n bytes.
 * Progress the walkbytes and walkaddr fields by n bytes.
 * If walkbytes is then 0, pull next hunk from hash walk
 * and update walkbytes and walkaddr.
 * If n is negative, unmap hash walk and return error.
 * Returns 0 on success or negative value on error.
 */
static inline int hwh_advance(struct hash_walk_helper *hwh, int n)
{
	if (n < 0)
		return crypto_hash_walk_done(&hwh->walk, n);

	hwh->walkbytes -= n;
	hwh->walkaddr += n;
	if (hwh->walkbytes > 0)
		return 0;

	hwh->walkbytes = crypto_hash_walk_done(&hwh->walk, 0);
	if (hwh->walkbytes < 0)
		return hwh->walkbytes;

	hwh->walkaddr = hwh->walk.data;
	return 0;
}
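
/*
 * Typical usage pattern of this helper (a sketch distilled from the
 * update functions further below, not an additional API):
 *
 *	rc = hwh_prepare(req, hwh);
 *	while (hwh->walkbytes > 0) {
 *		... process n bytes at hwh->walkaddr ...
 *		rc = hwh_advance(hwh, n);
 *	}
 *
 * On an unrecoverable error, hwh_advance(hwh, rc) with a negative rc
 * unmaps the walk and passes the error through.
 */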

/*
 * KMAC param block layout for sha2 function codes:
 * The layout of the param block for the KMAC instruction depends on the
 * block size of the SHA-2 algorithm selected via the function code. The
 * param block contains the hash chaining value (cv), the input message
 * bit-length (imbl) and the hmac-secret (key). To prevent code
 * duplication, the sizes of all these are calculated based on the
 * blocksize.
 *
 * param-block:
 * +-------+
 * |  cv   |
 * +-------+
 * | imbl  |
 * +-------+
 * |  key  |
 * +-------+
 *
 * sizes:
 * part | sha2-alg | calculation    | size | type
 * -----+----------+----------------+------+--------
 * cv   | 224/256  | blocksize/2    |   32 | u32[8]
 *      | 384/512  |                |   64 | u64[8]
 * imbl | 224/256  | blocksize/8    |    8 | u64
 *      | 384/512  |                |   16 | u128
 * key  | 224/256  | blocksize + 32 |   96 | u8[96]
 *      | 384/512  |                |  160 | u8[160]
 */

#define MAX_DIGEST_SIZE		SHA512_DIGEST_SIZE
#define MAX_IMBL_SIZE		sizeof(u128)
#define MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE

#define SHA2_CV_SIZE(bs)	((bs) >> 1)
#define SHA2_IMBL_SIZE(bs)	((bs) >> 3)

#define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
#define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
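
/*
 * Worked example of the offsets resulting from the macros above:
 *	sha224/256 (bs = 64):  cv at 0..31, imbl at 32..39, key at 40
 *	sha384/512 (bs = 128): cv at 0..63, imbl at 64..79, key at 80
 */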

#define PHMAC_MAX_KEYSIZE	256
#define PHMAC_SHA256_PK_SIZE	(SHA256_BLOCK_SIZE + 32)
#define PHMAC_SHA512_PK_SIZE	(SHA512_BLOCK_SIZE + 32)
#define PHMAC_MAX_PK_SIZE	PHMAC_SHA512_PK_SIZE

/* phmac protected key struct */
struct phmac_protkey {
	u32 type;
	u32 len;
	u8 protkey[PHMAC_MAX_PK_SIZE];
};

#define PK_STATE_NO_KEY			0
#define PK_STATE_CONVERT_IN_PROGRESS	1
#define PK_STATE_VALID			2

/* phmac tfm context */
struct phmac_tfm_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PHMAC_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomically read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;
	/* if state is valid, pk holds the protected key */
	struct phmac_protkey pk;
};

union kmac_gr0 {
	unsigned long reg;
	struct {
		unsigned long		: 48;
		unsigned long ikp	: 1;
		unsigned long iimp	: 1;
		unsigned long ccup	: 1;
		unsigned long		: 6;
		unsigned long fc	: 7;
	};
};
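
/*
 * In the gr0 bitfield above, ikp/iimp/ccup occupy bits 48-50 and the
 * function code bits 57-63 (s390 bit numbering). iimp is set while
 * feeding intermediate message parts to KMAC and cleared for the final
 * call. The kmac context below keeps the 128-bit message byte counter
 * as two u64s (buflen[0] low, buflen[1] high); the carry between them
 * is handled manually in the update path.
 */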

struct kmac_sha2_ctx {
	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + PHMAC_MAX_PK_SIZE];
	union kmac_gr0 gr0;
	u8 buf[MAX_BLOCK_SIZE];
	u64 buflen[2];
};

/* phmac request context */
struct phmac_req_ctx {
	struct hash_walk_helper hwh;
	struct kmac_sha2_ctx kmac_ctx;
	bool final;
};

/*
 * Pkey 'token' struct used to derive a protected key value from a clear key.
 */
struct hmac_clrkey_token {
	u8 type;
	u8 res0[3];
	u8 version;
	u8 res1[3];
	u32 keytype;
	u32 len;
	u8 key[];
} __packed;

static int hash_key(const u8 *in, unsigned int inlen,
		    u8 *digest, unsigned int digestsize)
{
	unsigned long func;
	union {
		struct sha256_paramblock {
			u32 h[8];
			u64 mbl;
		} sha256;
		struct sha512_paramblock {
			u64 h[8];
			u128 mbl;
		} sha512;
	} __packed param;

#define PARAM_INIT(x, y, z)		   \
	param.sha##x.h[0] = SHA##y ## _H0; \
	param.sha##x.h[1] = SHA##y ## _H1; \
	param.sha##x.h[2] = SHA##y ## _H2; \
	param.sha##x.h[3] = SHA##y ## _H3; \
	param.sha##x.h[4] = SHA##y ## _H4; \
	param.sha##x.h[5] = SHA##y ## _H5; \
	param.sha##x.h[6] = SHA##y ## _H6; \
	param.sha##x.h[7] = SHA##y ## _H7; \
	param.sha##x.mbl = (z)

	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 224, inlen * 8);
		break;
	case SHA256_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_256;
		PARAM_INIT(256, 256, inlen * 8);
		break;
	case SHA384_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 384, inlen * 8);
		break;
	case SHA512_DIGEST_SIZE:
		func = CPACF_KLMD_SHA_512;
		PARAM_INIT(512, 512, inlen * 8);
		break;
	default:
		return -EINVAL;
	}

#undef PARAM_INIT

	cpacf_klmd(func, &param, in, inlen);

	memcpy(digest, &param, digestsize);

	return 0;
}
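
/*
 * hash_key() serves the standard HMAC convention (RFC 2104) that a key
 * longer than the block size is first hashed down to the digest size;
 * make_clrkey_token() below applies it when building the token.
 */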

/*
 * make_clrkey_token() - wrap the clear key into a pkey clearkey token.
 */
static inline int make_clrkey_token(const u8 *clrkey, size_t clrkeylen,
				    unsigned int digestsize, u8 *dest)
{
	struct hmac_clrkey_token *token = (struct hmac_clrkey_token *)dest;
	unsigned int blocksize;
	int rc;

	token->type = 0x00;
	token->version = 0x02;
	switch (digestsize) {
	case SHA224_DIGEST_SIZE:
	case SHA256_DIGEST_SIZE:
		token->keytype = PKEY_KEYTYPE_HMAC_512;
		blocksize = 64;
		break;
	case SHA384_DIGEST_SIZE:
	case SHA512_DIGEST_SIZE:
		token->keytype = PKEY_KEYTYPE_HMAC_1024;
		blocksize = 128;
		break;
	default:
		return -EINVAL;
	}
	token->len = blocksize;

	if (clrkeylen > blocksize) {
		rc = hash_key(clrkey, clrkeylen, token->key, digestsize);
		if (rc)
			return rc;
	} else {
		memcpy(token->key, clrkey, clrkeylen);
	}

	return 0;
}

/*
 * phmac_tfm_ctx_setkey() - Set key value into tfm context, maybe construct
 * a clear key token digestible by pkey from a clear key value.
 */
static inline int phmac_tfm_ctx_setkey(struct phmac_tfm_ctx *tfm_ctx,
				       const u8 *key, unsigned int keylen)
{
	if (keylen > sizeof(tfm_ctx->keybuf))
		return -EINVAL;

	memcpy(tfm_ctx->keybuf, key, keylen);
	tfm_ctx->keylen = keylen;

	return 0;
}

/*
 * Convert the raw key material into a protected key via PKEY api.
 * This function may sleep - don't call in non-sleeping context.
 */
static inline int convert_key(const u8 *key, unsigned int keylen,
			      struct phmac_protkey *pk)
{
	int rc, i;

	pk->len = sizeof(pk->protkey);

	/*
	 * In case of a busy card retry with increasing delay
	 * of 200, 400, 800 and 1600 ms - in total 3 s.
	 */
	for (rc = -EIO, i = 0; rc && i < 5; i++) {
		if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
			rc = -EINTR;
			goto out;
		}
		rc = pkey_key2protkey(key, keylen,
				      pk->protkey, &pk->len, &pk->type,
				      PKEY_XFLAG_NOMEMALLOC);
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

/*
 * (Re-)Convert the raw key material from the tfm ctx into a protected
 * key via the convert_key() function. Update the pk_state, pk_type,
 * pk_len and the protected key in the tfm context.
 * Please note this function may be invoked concurrently with the very
 * same tfm context. The pk_lock spinlock in the context ensures an
 * atomic update of the pk and the pk state but does not guarantee any
 * order of update. So a freshly converted valid protected key may get
 * overwritten with an 'old' expired key value. This does no harm: the
 * cpacf instructions detect such a key and refuse to operate with it,
 * and the calling code then triggers a (re-)conversion. This may lead
 * to unnecessary additional conversions but never to invalid data on
 * the hash operation.
 */
static int phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
{
	struct phmac_protkey pk;
	int rc;

	spin_lock_bh(&tfm_ctx->pk_lock);
	tfm_ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
	spin_unlock_bh(&tfm_ctx->pk_lock);

	rc = convert_key(tfm_ctx->keybuf, tfm_ctx->keylen, &pk);

	/* update context */
	spin_lock_bh(&tfm_ctx->pk_lock);
	if (rc) {
		tfm_ctx->pk_state = rc;
	} else {
		tfm_ctx->pk_state = PK_STATE_VALID;
		tfm_ctx->pk = pk;
	}
	spin_unlock_bh(&tfm_ctx->pk_lock);

	memzero_explicit(&pk, sizeof(pk));
	pr_debug("rc=%d\n", rc);
	return rc;
}

/*
 * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
 */
static inline void kmac_sha2_set_imbl(u8 *param, u64 buflen_lo,
				      u64 buflen_hi, unsigned int blocksize)
{
	u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);

	switch (blocksize) {
	case SHA256_BLOCK_SIZE:
		*(u64 *)imbl = buflen_lo * BITS_PER_BYTE;
		break;
	case SHA512_BLOCK_SIZE:
		*(u128 *)imbl = (((u128)buflen_hi << 64) + buflen_lo) << 3;
		break;
	default:
		break;
	}
}
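
/*
 * phmac_kmac_update() below absorbs the message material available via
 * the request's hash walk: partial blocks are collected in the context
 * buffer, full blocks are fed to the KMAC instruction with gr0.iimp
 * set. A KMAC return of 0 processed bytes means the protected key was
 * rejected (it has expired, typically because the wrapping key
 * changed); the key is then re-derived via phmac_convert_key() if
 * sleeping is allowed, otherwise -EKEYEXPIRED is returned so the
 * caller can retry on the crypto engine.
 */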

static int phmac_kmac_update(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int offset, k, n;
	int rc = 0;

	/*
	 * The walk is always mapped when this function is called.
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow-up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */

	while (hwh->walkbytes > 0) {
		/* check sha2 context buffer */
		offset = ctx->buflen[0] % bs;
		if (offset + hwh->walkbytes < bs)
			goto store;

		if (offset) {
			/* fill ctx buffer up to blocksize and process this block */
			n = bs - offset;
			memcpy(ctx->buf + offset, hwh->walkaddr, n);
			ctx->gr0.iimp = 1;
			for (;;) {
				k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
				if (likely(k == bs))
					break;
				if (unlikely(k > 0)) {
					/*
					 * Can't deal with hunks smaller than blocksize.
					 * And kmac should always return the nr of
					 * processed bytes as 0 or a multiple of the
					 * blocksize.
					 */
					rc = -EIO;
					goto out;
				}
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
			ctx->buflen[0] += n;
			if (ctx->buflen[0] < n)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, n);
			if (unlikely(rc))
				goto out;
			offset = 0;
		}

		/* process as many blocks as possible from the walk */
		while (hwh->walkbytes >= bs) {
			n = (hwh->walkbytes / bs) * bs;
			ctx->gr0.iimp = 1;
			k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n);
			if (likely(k > 0)) {
				ctx->buflen[0] += k;
				if (ctx->buflen[0] < k)
					ctx->buflen[1]++;
				rc = hwh_advance(hwh, k);
				if (unlikely(rc))
					goto out;
			}
			if (unlikely(k < n)) {
				/* protected key is invalid and needs re-conversion */
				if (!maysleep) {
					rc = -EKEYEXPIRED;
					goto out;
				}
				rc = phmac_convert_key(tfm_ctx);
				if (rc)
					goto out;
				spin_lock_bh(&tfm_ctx->pk_lock);
				memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
				       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
				spin_unlock_bh(&tfm_ctx->pk_lock);
			}
		}

store:
		/* store incomplete block in context buffer */
		if (hwh->walkbytes) {
			memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
			ctx->buflen[0] += hwh->walkbytes;
			if (ctx->buflen[0] < hwh->walkbytes)
				ctx->buflen[1]++;
			rc = hwh_advance(hwh, hwh->walkbytes);
			if (unlikely(rc))
				goto out;
		}

	} /* end of while (hwh->walkbytes > 0) */

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
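
/*
 * Final KMAC step for phmac_kmac_final() below: gr0.iimp is cleared
 * and the total message bit-length is set in the imbl field, so the
 * instruction consumes the remaining buffered bytes and completes the
 * MAC computation (including the message padding, presumably derived
 * from imbl). The digest ends up in the cv part at the start of the
 * param block and is copied to req->result from there.
 */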

static int phmac_kmac_final(struct ahash_request *req, bool maysleep)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int k, n;
	int rc = 0;

	n = ctx->buflen[0] % bs;
	ctx->gr0.iimp = 0;
	kmac_sha2_set_imbl(ctx->param, ctx->buflen[0], ctx->buflen[1], bs);
	for (;;) {
		k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, n);
		if (likely(k == n))
			break;
		if (unlikely(k > 0)) {
			/* Can't deal with hunks smaller than blocksize. */
			rc = -EIO;
			goto out;
		}
		/* protected key is invalid and needs re-conversion */
		if (!maysleep) {
			rc = -EKEYEXPIRED;
			goto out;
		}
		rc = phmac_convert_key(tfm_ctx);
		if (rc)
			goto out;
		spin_lock_bh(&tfm_ctx->pk_lock);
		memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
		       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
		spin_unlock_bh(&tfm_ctx->pk_lock);
	}

	memcpy(req->result, ctx->param, ds);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int phmac_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	unsigned int bs = crypto_ahash_blocksize(tfm);
	int rc = 0;

	/* zero request context (includes the kmac sha2 context) */
	memset(req_ctx, 0, sizeof(*req_ctx));

	/*
	 * setkey() should have set a valid fc into the tfm context.
	 * Copy this function code into the gr0 field of the kmac context.
	 */
	if (!tfm_ctx->fc) {
		rc = -ENOKEY;
		goto out;
	}
	kmac_ctx->gr0.fc = tfm_ctx->fc;

	/*
	 * Copy the pk from tfm ctx into kmac ctx. The protected key
	 * may be outdated but update() and final() will handle this.
	 */
	spin_lock_bh(&tfm_ctx->pk_lock);
	memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
	       tfm_ctx->pk.protkey, tfm_ctx->pk.len);
	spin_unlock_bh(&tfm_ctx->pk_lock);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}
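
/*
 * The update/final/finup/digest entry points below share one strategy:
 * first try to run synchronously in the caller's context (maysleep =
 * false). If that is not possible - the protected key has expired, or
 * requests are already queued on the engine - the request is handed
 * over to the crypto engine, whose worker may sleep and thus
 * re-convert the key.
 */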

static int phmac_update(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed, the key expired, or there are
	 * already requests enqueued via the engine, fall back to async.
	 * Mark the tfm as using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS) {
		hwh_advance(hwh, rc);
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int phmac_final(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	int rc = 0;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed, the key expired, or there are
	 * already requests enqueued via the engine, fall back to async.
	 * Mark the tfm as using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req->nbytes = 0;
		req_ctx->final = true;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int phmac_finup(struct ahash_request *req)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc;

	/* prep the walk in the request context */
	rc = hwh_prepare(req, hwh);
	if (rc)
		goto out;

	/* Try synchronous operations if no active engine usage */
	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_update(req, false);
		if (rc == 0)
			req->nbytes = 0;
	}
	if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
		rc = phmac_kmac_final(req, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed, the key expired, or there are
	 * already requests enqueued via the engine, fall back to async.
	 * Mark the tfm as using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		req_ctx->final = true;
		atomic_inc(&tfm_ctx->via_engine_ctr);
		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&tfm_ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS)
		hwh_advance(hwh, rc);

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int phmac_digest(struct ahash_request *req)
{
	int rc;

	rc = phmac_init(req);
	if (rc)
		goto out;

	rc = phmac_finup(req);

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int phmac_setkey(struct crypto_ahash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_ahash_blocksize(tfm);
	unsigned int tmpkeylen;
	u8 *tmpkey = NULL;
	int rc = 0;

	if (!crypto_ahash_tested(tfm)) {
		/*
		 * selftest running: key is a raw hmac clear key and needs
		 * to get embedded into a 'clear key token' in order to have
		 * it correctly processed by the pkey module.
		 */
		tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
		tmpkey = kzalloc(tmpkeylen, GFP_KERNEL);
		if (!tmpkey) {
			rc = -ENOMEM;
			goto out;
		}
		rc = make_clrkey_token(key, keylen, ds, tmpkey);
		if (rc)
			goto out;
		keylen = tmpkeylen;
		key = tmpkey;
	}

	/* copy raw key into tfm context */
	rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = phmac_convert_key(tfm_ctx);
	if (rc)
		goto out;

	/* set function code in tfm context, check for valid pk type */
	switch (ds) {
	case SHA224_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224;
		break;
	case SHA256_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256;
		break;
	case SHA384_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384;
		break;
	case SHA512_DIGEST_SIZE:
		if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
			rc = -EINVAL;
		else
			tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512;
		break;
	default:
		tfm_ctx->fc = 0;
		rc = -EINVAL;
	}

out:
	kfree(tmpkey);
	pr_debug("rc=%d\n", rc);
	return rc;
}
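
/*
 * export/import serialize the complete kmac_sha2_ctx; the statesize in
 * the algorithm definitions below is sizeof(struct kmac_sha2_ctx) to
 * match. Note that the exported state carries the wrapped protected
 * key inside the param block, never the clear key material.
 */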

static int phmac_export(struct ahash_request *req, void *out)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;

	memcpy(out, ctx, sizeof(*ctx));

	return 0;
}

static int phmac_import(struct ahash_request *req, const void *in)
{
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *ctx = &req_ctx->kmac_ctx;

	memset(req_ctx, 0, sizeof(*req_ctx));
	memcpy(ctx, in, sizeof(*ctx));

	return 0;
}

static int phmac_init_tfm(struct crypto_ahash *tfm)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
	spin_lock_init(&tfm_ctx->pk_lock);

	crypto_ahash_set_reqsize(tfm, sizeof(struct phmac_req_ctx));

	return 0;
}

static void phmac_exit_tfm(struct crypto_ahash *tfm)
{
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);

	memzero_explicit(tfm_ctx->keybuf, sizeof(tfm_ctx->keybuf));
	memzero_explicit(&tfm_ctx->pk, sizeof(tfm_ctx->pk));
}

static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct phmac_tfm_ctx *tfm_ctx = crypto_ahash_ctx(tfm);
	struct phmac_req_ctx *req_ctx = ahash_request_ctx(req);
	struct kmac_sha2_ctx *kmac_ctx = &req_ctx->kmac_ctx;
	struct hash_walk_helper *hwh = &req_ctx->hwh;
	int rc = -EINVAL;

	/*
	 * Three kinds of requests come in here:
	 * update when req->nbytes > 0 and req_ctx->final is false
	 * final  when req->nbytes = 0 and req_ctx->final is true
	 * finup  when req->nbytes > 0 and req_ctx->final is true
	 * For update and finup the hwh walk needs to be prepared and
	 * up to date but the actual nr of bytes in req->nbytes may be
	 * any non-zero number. For final there is no hwh walk needed.
	 */

	if (req->nbytes) {
		rc = phmac_kmac_update(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in progress.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediate re-invocation of this callback,
			 * tell the scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		} else if (rc) {
			hwh_advance(hwh, rc);
			goto out;
		}
		req->nbytes = 0;
	}

	if (req_ctx->final) {
		rc = phmac_kmac_final(req, true);
		if (rc == -EKEYEXPIRED) {
			/*
			 * Protected key expired, conversion is in progress.
			 * Trigger a re-schedule of this request by returning
			 * -ENOSPC ("hardware queue full") to the crypto engine.
			 * To avoid immediate re-invocation of this callback,
			 * tell the scheduler to voluntarily give up the CPU here.
			 */
			pr_debug("rescheduling request\n");
			cond_resched();
			return -ENOSPC;
		}
	}

out:
	if (rc || req_ctx->final)
		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&tfm_ctx->via_engine_ctr);
	crypto_finalize_hash_request(engine, req, rc);
	local_bh_enable();
	return rc;
}

#define S390_ASYNC_PHMAC_ALG(x)						\
{									\
	.base = {							\
		.init	  = phmac_init,					\
		.update	  = phmac_update,				\
		.final	  = phmac_final,				\
		.finup	  = phmac_finup,				\
		.digest	  = phmac_digest,				\
		.setkey	  = phmac_setkey,				\
		.import	  = phmac_import,				\
		.export	  = phmac_export,				\
		.init_tfm = phmac_init_tfm,				\
		.exit_tfm = phmac_exit_tfm,				\
		.halg = {						\
			.digestsize = SHA##x##_DIGEST_SIZE,		\
			.statesize  = sizeof(struct kmac_sha2_ctx),	\
			.base = {					\
				.cra_name = "phmac(sha" #x ")",		\
				.cra_driver_name = "phmac_s390_sha" #x,	\
				.cra_blocksize = SHA##x##_BLOCK_SIZE,	\
				.cra_priority = 400,			\
				.cra_flags = CRYPTO_ALG_ASYNC |		\
					     CRYPTO_ALG_NO_FALLBACK,	\
				.cra_ctxsize = sizeof(struct phmac_tfm_ctx), \
				.cra_module = THIS_MODULE,		\
			},						\
		},							\
	},								\
	.op = {								\
		.do_one_request = phmac_do_one_request,			\
	},								\
}

static struct phmac_alg {
	unsigned int fc;
	struct ahash_engine_alg alg;
	bool registered;
} phmac_algs[] = {
	{
		.fc = CPACF_KMAC_PHMAC_SHA_224,
		.alg = S390_ASYNC_PHMAC_ALG(224),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_256,
		.alg = S390_ASYNC_PHMAC_ALG(256),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_384,
		.alg = S390_ASYNC_PHMAC_ALG(384),
	}, {
		.fc = CPACF_KMAC_PHMAC_SHA_512,
		.alg = S390_ASYNC_PHMAC_ALG(512),
	}
};

static struct miscdevice phmac_dev = {
	.name	= "phmac",
	.minor	= MISC_DYNAMIC_MINOR,
};
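
/*
 * The pseudo misc device registered in s390_phmac_init() appears to
 * exist only to provide a struct device for
 * crypto_engine_alloc_init_and_set(); phmac has no real device of its
 * own.
 */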

static void s390_phmac_exit(void)
{
	struct phmac_alg *phmac;
	int i;

	if (phmac_crypto_engine) {
		crypto_engine_stop(phmac_crypto_engine);
		crypto_engine_exit(phmac_crypto_engine);
	}

	for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
		phmac = &phmac_algs[i];
		if (phmac->registered)
			crypto_engine_unregister_ahash(&phmac->alg);
	}

	misc_deregister(&phmac_dev);
}

static int __init s390_phmac_init(void)
{
	struct phmac_alg *phmac;
	int i, rc;

	/* the cpacf klmd subfunctions are needed for the selftests */
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
		return -ENODEV;
	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
		return -ENODEV;

	/* register a simple phmac pseudo misc device */
	rc = misc_register(&phmac_dev);
	if (rc)
		return rc;

	/* with this pseudo device alloc and start a crypto engine */
	phmac_crypto_engine =
		crypto_engine_alloc_init_and_set(phmac_dev.this_device,
						 true, false, MAX_QLEN);
	if (!phmac_crypto_engine) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crypto_engine_start(phmac_crypto_engine);
	if (rc) {
		crypto_engine_exit(phmac_crypto_engine);
		phmac_crypto_engine = NULL;
		goto out_err;
	}

	for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
		phmac = &phmac_algs[i];
		if (!cpacf_query_func(CPACF_KMAC, phmac->fc))
			continue;
		rc = crypto_engine_register_ahash(&phmac->alg);
		if (rc)
			goto out_err;
		phmac->registered = true;
		pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
	}

	return 0;

out_err:
	s390_phmac_exit();
	return rc;
}

module_init(s390_phmac_init);
module_exit(s390_phmac_exit);

MODULE_ALIAS_CRYPTO("phmac(sha224)");
MODULE_ALIAS_CRYPTO("phmac(sha256)");
MODULE_ALIAS_CRYPTO("phmac(sha384)");
MODULE_ALIAS_CRYPTO("phmac(sha512)");

MODULE_DESCRIPTION("S390 HMAC driver for protected keys");
MODULE_LICENSE("GPL");