GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/crypto/paes_s390.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Cryptographic API.
4
*
5
* s390 implementation of the AES Cipher Algorithm with protected keys.
6
*
7
* s390 Version:
8
* Copyright IBM Corp. 2017, 2025
9
* Author(s): Martin Schwidefsky <[email protected]>
10
* Harald Freudenberger <[email protected]>
11
*/
12
13
#define KMSG_COMPONENT "paes_s390"
14
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
16
#include <linux/atomic.h>
17
#include <linux/cpufeature.h>
18
#include <linux/delay.h>
19
#include <linux/err.h>
20
#include <linux/init.h>
21
#include <linux/miscdevice.h>
22
#include <linux/module.h>
23
#include <linux/mutex.h>
24
#include <linux/spinlock.h>
25
#include <crypto/aes.h>
26
#include <crypto/algapi.h>
27
#include <crypto/engine.h>
28
#include <crypto/internal/skcipher.h>
29
#include <crypto/xts.h>
30
#include <asm/cpacf.h>
31
#include <asm/pkey.h>
32
33
/*
34
* Key blobs smaller/bigger than these defines are rejected
35
* by the common code even before the individual setkey function
36
* is called. As paes can handle different kinds of key blobs
37
* and padding is also possible, the limits need to be generous.
38
*/
39
#define PAES_MIN_KEYSIZE 16
40
#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
41
#define PAES_256_PROTKEY_SIZE (32 + 32) /* key + verification pattern */
42
#define PXTS_256_PROTKEY_SIZE (32 + 32 + 32) /* k1 + k2 + verification pattern */
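/*
* Illustrative sketch (not part of this driver): a minimal example of how
* a kernel consumer would typically drive one of the paes skciphers via
* the generic crypto API. The key handed to setkey() is either a 16/24/32
* byte clear AES key or a pkey key blob (e.g. a CCA or EP11 secure key).
* Function name, parameters and error handling granularity are
* illustrative assumptions, not code referenced elsewhere in this file.
*/
#if 0
static int paes_usage_sketch(const u8 *keyblob, unsigned int keyblob_len,
			     struct scatterlist *src, struct scatterlist *dst,
			     unsigned int nbytes, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int rc;

	/* allocate the cbc(paes) transform provided by this module */
	tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* hand over the clear key or key blob, conversion happens here */
	rc = crypto_skcipher_setkey(tfm, keyblob, keyblob_len);
	if (rc)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* wait for completion, the request may run via the crypto engine */
	rc = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return rc;
}
#endif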
43
44
static u8 *ctrblk;
45
static DEFINE_MUTEX(ctrblk_lock);
46
47
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
48
49
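/*
* Crypto engine used to process requests asynchronously whenever they
* cannot be completed synchronously, e.g. while a protected key
* (re-)conversion is pending. MAX_QLEN bounds the engine's queue length.
*/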
static struct crypto_engine *paes_crypto_engine;
50
#define MAX_QLEN 10
51
52
/*
53
* protected key specific stuff
54
*/
55
56
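/*
* A protected key as delivered by the pkey layer: type is one of the
* PKEY_KEYTYPE_* values, len gives the number of valid bytes in
* protkey[] (the wrapped key material followed by the wrapping key
* verification pattern).
*/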
struct paes_protkey {
57
u32 type;
58
u32 len;
59
u8 protkey[PXTS_256_PROTKEY_SIZE];
60
};
61
62
#define PK_STATE_NO_KEY 0
63
#define PK_STATE_CONVERT_IN_PROGRESS 1
64
#define PK_STATE_VALID 2
65
66
struct s390_paes_ctx {
67
/* source key material used to derive a protected key from */
68
u8 keybuf[PAES_MAX_KEYSIZE];
69
unsigned int keylen;
70
71
/* cpacf function code to use with this protected key type */
72
long fc;
73
74
/* nr of requests enqueued via crypto engine which use this tfm ctx */
75
atomic_t via_engine_ctr;
76
77
/* spinlock to atomic read/update all the following fields */
78
spinlock_t pk_lock;
79
80
/* see PK_STATE* defines above, < 0 holds convert failure rc */
81
int pk_state;
82
/* if state is valid, pk holds the protected key */
83
struct paes_protkey pk;
84
};
85
86
struct s390_pxts_ctx {
87
/* source key material used to derive a protected key from */
88
u8 keybuf[2 * PAES_MAX_KEYSIZE];
89
unsigned int keylen;
90
91
/* cpacf function code to use with this protected key type */
92
long fc;
93
94
/* nr of requests enqueued via crypto engine which use this tfm ctx */
95
atomic_t via_engine_ctr;
96
97
/* spinlock to atomic read/update all the following fields */
98
spinlock_t pk_lock;
99
100
/* see PK_STATE* defines above, < 0 holds convert failure rc */
101
int pk_state;
102
/* if state is valid, pk[] hold(s) the protected key(s) */
103
struct paes_protkey pk[2];
104
};
105
106
/*
107
* make_clrkey_token() - wrap the raw key ck with pkey clearkey token
108
* information.
109
* @returns the size of the clearkey token
110
*/
111
static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
112
{
113
struct clrkey_token {
114
u8 type;
115
u8 res0[3];
116
u8 version;
117
u8 res1[3];
118
u32 keytype;
119
u32 len;
120
u8 key[];
121
} __packed *token = (struct clrkey_token *)dest;
122
123
token->type = 0x00;
124
token->version = 0x02;
125
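/* keytype encodes the clear key size: (16/24/32 - 8) >> 3, i.e. 1, 2 or 3 */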
token->keytype = (cklen - 8) >> 3;
126
token->len = cklen;
127
memcpy(token->key, ck, cklen);
128
129
return sizeof(*token) + cklen;
130
}
131
132
/*
133
* paes_ctx_setkey() - Set key value into context, maybe construct
134
* a clear key token digestible by pkey from a clear key value.
135
*/
136
static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx,
137
const u8 *key, unsigned int keylen)
138
{
139
if (keylen > sizeof(ctx->keybuf))
140
return -EINVAL;
141
142
switch (keylen) {
143
case 16:
144
case 24:
145
case 32:
146
/* clear key value, prepare pkey clear key token in keybuf */
147
memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
148
ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf);
149
break;
150
default:
151
/* other key material, let pkey handle this */
152
memcpy(ctx->keybuf, key, keylen);
153
ctx->keylen = keylen;
154
break;
155
}
156
157
return 0;
158
}
159
160
/*
161
* pxts_ctx_setkey() - Set key value into context, maybe construct
162
* a clear key token digestible by pkey from a clear key value.
163
*/
164
static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx,
165
const u8 *key, unsigned int keylen)
166
{
167
size_t cklen = keylen / 2;
168
169
if (keylen > sizeof(ctx->keybuf))
170
return -EINVAL;
171
172
switch (keylen) {
173
case 32:
174
case 64:
175
/* clear key value, prepare pkey clear key tokens in keybuf */
176
memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
177
ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf);
178
ctx->keylen += make_clrkey_token(key + cklen, cklen,
179
ctx->keybuf + ctx->keylen);
180
break;
181
default:
182
/* other key material, let pkey handle this */
183
memcpy(ctx->keybuf, key, keylen);
184
ctx->keylen = keylen;
185
break;
186
}
187
188
return 0;
189
}
190
191
/*
192
* Convert the raw key material into a protected key via PKEY api.
193
* This function may sleep - don't call in non-sleeping context.
194
*/
195
static inline int convert_key(const u8 *key, unsigned int keylen,
196
struct paes_protkey *pk)
197
{
198
int rc, i;
199
200
pk->len = sizeof(pk->protkey);
201
202
/*
203
* In case of a busy card retry with increasing delay
204
* of 200, 400, 800 and 1600 ms - in total 3 s.
205
*/
206
for (rc = -EIO, i = 0; rc && i < 5; i++) {
207
if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
208
rc = -EINTR;
209
goto out;
210
}
211
rc = pkey_key2protkey(key, keylen,
212
pk->protkey, &pk->len, &pk->type,
213
PKEY_XFLAG_NOMEMALLOC);
214
}
215
216
out:
217
pr_debug("rc=%d\n", rc);
218
return rc;
219
}
220
221
/*
222
* (Re-)Convert the raw key material from the ctx into a protected key
223
* via convert_key() function. Update the pk_state, pk_type, pk_len
224
* and the protected key in the tfm context.
225
* Please note this function may be invoked concurrently with the very
226
* same tfm context. The pk_lock spinlock in the context ensures an
227
* atomic update of the pk and the pk state but does not guarantee any
228
* order of update. So a fresh converted valid protected key may get
229
* updated with an 'old' expired key value. As the cpacf instructions
230
* detect this and refuse to operate with an invalid key, and the calling
231
* code then triggers a (re-)conversion, this does no harm. This may lead to
232
* unnecessary additional conversion but never to invalid data on en-
233
* or decrypt operations.
234
*/
235
static int paes_convert_key(struct s390_paes_ctx *ctx)
236
{
237
struct paes_protkey pk;
238
int rc;
239
240
spin_lock_bh(&ctx->pk_lock);
241
ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
242
spin_unlock_bh(&ctx->pk_lock);
243
244
rc = convert_key(ctx->keybuf, ctx->keylen, &pk);
245
246
/* update context */
247
spin_lock_bh(&ctx->pk_lock);
248
if (rc) {
249
ctx->pk_state = rc;
250
} else {
251
ctx->pk_state = PK_STATE_VALID;
252
ctx->pk = pk;
253
}
254
spin_unlock_bh(&ctx->pk_lock);
255
256
memzero_explicit(&pk, sizeof(pk));
257
pr_debug("rc=%d\n", rc);
258
return rc;
259
}
260
261
/*
262
* (Re-)Convert the raw xts key material from the ctx into a
263
* protected key via convert_key() function. Update the pk_state,
264
* pk_type, pk_len and the protected key in the tfm context.
265
* See also comments on function paes_convert_key.
266
*/
267
static int pxts_convert_key(struct s390_pxts_ctx *ctx)
268
{
269
struct paes_protkey pk0, pk1;
270
size_t split_keylen;
271
int rc;
272
273
spin_lock_bh(&ctx->pk_lock);
274
ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
275
spin_unlock_bh(&ctx->pk_lock);
276
277
rc = convert_key(ctx->keybuf, ctx->keylen, &pk0);
278
if (rc)
279
goto out;
280
281
switch (pk0.type) {
282
case PKEY_KEYTYPE_AES_128:
283
case PKEY_KEYTYPE_AES_256:
284
/* second keytoken required */
285
if (ctx->keylen % 2) {
286
rc = -EINVAL;
287
goto out;
288
}
289
split_keylen = ctx->keylen / 2;
290
rc = convert_key(ctx->keybuf + split_keylen,
291
split_keylen, &pk1);
292
if (rc)
293
goto out;
294
if (pk0.type != pk1.type) {
295
rc = -EINVAL;
296
goto out;
297
}
298
break;
299
case PKEY_KEYTYPE_AES_XTS_128:
300
case PKEY_KEYTYPE_AES_XTS_256:
301
/* single key */
302
pk1.type = 0;
303
break;
304
default:
305
/* unsupported protected keytype */
306
rc = -EINVAL;
307
goto out;
308
}
309
310
out:
311
/* update context */
312
spin_lock_bh(&ctx->pk_lock);
313
if (rc) {
314
ctx->pk_state = rc;
315
} else {
316
ctx->pk_state = PK_STATE_VALID;
317
ctx->pk[0] = pk0;
318
ctx->pk[1] = pk1;
319
}
320
spin_unlock_bh(&ctx->pk_lock);
321
322
memzero_explicit(&pk0, sizeof(pk0));
323
memzero_explicit(&pk1, sizeof(pk1));
324
pr_debug("rc=%d\n", rc);
325
return rc;
326
}
327
328
/*
329
* PAES ECB implementation
330
*/
331
332
struct ecb_param {
333
u8 key[PAES_256_PROTKEY_SIZE];
334
} __packed;
335
336
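/*
* Per request context: keeps the skcipher walk and the CPACF parameter
* block across invocations, so that a request which could not be
* completed synchronously can later be resumed via the crypto engine.
*/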
struct s390_pecb_req_ctx {
337
unsigned long modifier;
338
struct skcipher_walk walk;
339
bool param_init_done;
340
struct ecb_param param;
341
};
342
343
static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
344
unsigned int key_len)
345
{
346
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
347
long fc;
348
int rc;
349
350
/* set raw key into context */
351
rc = paes_ctx_setkey(ctx, in_key, key_len);
352
if (rc)
353
goto out;
354
355
/* convert key into protected key */
356
rc = paes_convert_key(ctx);
357
if (rc)
358
goto out;
359
360
/* Pick the correct function code based on the protected key type */
361
switch (ctx->pk.type) {
362
case PKEY_KEYTYPE_AES_128:
363
fc = CPACF_KM_PAES_128;
364
break;
365
case PKEY_KEYTYPE_AES_192:
366
fc = CPACF_KM_PAES_192;
367
break;
368
case PKEY_KEYTYPE_AES_256:
369
fc = CPACF_KM_PAES_256;
370
break;
371
default:
372
fc = 0;
373
break;
374
}
375
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
376
377
rc = fc ? 0 : -EINVAL;
378
379
out:
380
pr_debug("rc=%d\n", rc);
381
return rc;
382
}
383
384
static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
385
struct s390_pecb_req_ctx *req_ctx,
386
bool maysleep)
387
{
388
struct ecb_param *param = &req_ctx->param;
389
struct skcipher_walk *walk = &req_ctx->walk;
390
unsigned int nbytes, n, k;
391
int pk_state, rc = 0;
392
393
if (!req_ctx->param_init_done) {
394
/* fetch and check protected key state */
395
spin_lock_bh(&ctx->pk_lock);
396
pk_state = ctx->pk_state;
397
switch (pk_state) {
398
case PK_STATE_NO_KEY:
399
rc = -ENOKEY;
400
break;
401
case PK_STATE_CONVERT_IN_PROGRESS:
402
rc = -EKEYEXPIRED;
403
break;
404
case PK_STATE_VALID:
405
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
406
req_ctx->param_init_done = true;
407
break;
408
default:
409
rc = pk_state < 0 ? pk_state : -EIO;
410
break;
411
}
412
spin_unlock_bh(&ctx->pk_lock);
413
}
414
if (rc)
415
goto out;
416
417
/*
418
* Note that in case of partial processing or failure the walk
419
* is NOT unmapped here. So a follow-up task may reuse the walk
420
* or in case of unrecoverable failure needs to unmap it.
421
*/
422
while ((nbytes = walk->nbytes) != 0) {
423
/* only use complete blocks */
424
n = nbytes & ~(AES_BLOCK_SIZE - 1);
425
k = cpacf_km(ctx->fc | req_ctx->modifier, param,
426
walk->dst.virt.addr, walk->src.virt.addr, n);
427
if (k)
428
rc = skcipher_walk_done(walk, nbytes - k);
429
if (k < n) {
430
if (!maysleep) {
431
rc = -EKEYEXPIRED;
432
goto out;
433
}
434
rc = paes_convert_key(ctx);
435
if (rc)
436
goto out;
437
spin_lock_bh(&ctx->pk_lock);
438
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
439
spin_unlock_bh(&ctx->pk_lock);
440
}
441
}
442
443
out:
444
pr_debug("rc=%d\n", rc);
445
return rc;
446
}
447
448
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
449
{
450
struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
451
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
452
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
453
struct skcipher_walk *walk = &req_ctx->walk;
454
int rc;
455
456
/*
457
* Attempt synchronous processing first. If it fails, schedule the request
458
* asynchronously via the crypto engine. To preserve execution order,
459
* once a request is queued to the engine, further requests using the same
460
* tfm will also be routed through the engine.
461
*/
462
463
rc = skcipher_walk_virt(walk, req, false);
464
if (rc)
465
goto out;
466
467
req_ctx->modifier = modifier;
468
req_ctx->param_init_done = false;
469
470
/* Try synchronous operation if no active engine usage */
471
if (!atomic_read(&ctx->via_engine_ctr)) {
472
rc = ecb_paes_do_crypt(ctx, req_ctx, false);
473
if (rc == 0)
474
goto out;
475
}
476
477
/*
478
* If sync operation failed or key expired or there are already
479
* requests enqueued via engine, fall back to async. Mark tfm as
480
* using engine to serialize requests.
481
*/
482
if (rc == 0 || rc == -EKEYEXPIRED) {
483
atomic_inc(&ctx->via_engine_ctr);
484
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
485
if (rc != -EINPROGRESS)
486
atomic_dec(&ctx->via_engine_ctr);
487
}
488
489
if (rc != -EINPROGRESS)
490
skcipher_walk_done(walk, rc);
491
492
out:
493
if (rc != -EINPROGRESS)
494
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
495
pr_debug("rc=%d\n", rc);
496
return rc;
497
}
498
499
static int ecb_paes_encrypt(struct skcipher_request *req)
500
{
501
return ecb_paes_crypt(req, 0);
502
}
503
504
static int ecb_paes_decrypt(struct skcipher_request *req)
505
{
506
return ecb_paes_crypt(req, CPACF_DECRYPT);
507
}
508
509
static int ecb_paes_init(struct crypto_skcipher *tfm)
510
{
511
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
512
513
memset(ctx, 0, sizeof(*ctx));
514
spin_lock_init(&ctx->pk_lock);
515
516
crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx));
517
518
return 0;
519
}
520
521
static void ecb_paes_exit(struct crypto_skcipher *tfm)
522
{
523
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
524
525
memzero_explicit(ctx, sizeof(*ctx));
526
}
527
528
static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq)
529
{
530
struct skcipher_request *req = skcipher_request_cast(areq);
531
struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
532
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
533
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
534
struct skcipher_walk *walk = &req_ctx->walk;
535
int rc;
536
537
/* walk has already been prepared */
538
539
rc = ecb_paes_do_crypt(ctx, req_ctx, true);
540
if (rc == -EKEYEXPIRED) {
541
/*
542
* Protected key expired, conversion is in process.
543
* Trigger a re-schedule of this request by returning
544
* -ENOSPC ("hardware queue is full") to the crypto engine.
545
* To avoid immediate re-invocation of this callback,
546
* tell the scheduler to voluntarily give up the CPU here.
547
*/
548
cond_resched();
549
pr_debug("rescheduling request\n");
550
return -ENOSPC;
551
} else if (rc) {
552
skcipher_walk_done(walk, rc);
553
}
554
555
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
556
pr_debug("request complete with rc=%d\n", rc);
557
local_bh_disable();
558
atomic_dec(&ctx->via_engine_ctr);
559
crypto_finalize_skcipher_request(engine, req, rc);
560
local_bh_enable();
561
return rc;
562
}
563
564
static struct skcipher_engine_alg ecb_paes_alg = {
565
.base = {
566
.base.cra_name = "ecb(paes)",
567
.base.cra_driver_name = "ecb-paes-s390",
568
.base.cra_priority = 401, /* combo: aes + ecb + 1 */
569
.base.cra_blocksize = AES_BLOCK_SIZE,
570
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
571
.base.cra_module = THIS_MODULE,
572
.base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list),
573
.init = ecb_paes_init,
574
.exit = ecb_paes_exit,
575
.min_keysize = PAES_MIN_KEYSIZE,
576
.max_keysize = PAES_MAX_KEYSIZE,
577
.setkey = ecb_paes_setkey,
578
.encrypt = ecb_paes_encrypt,
579
.decrypt = ecb_paes_decrypt,
580
},
581
.op = {
582
.do_one_request = ecb_paes_do_one_request,
583
},
584
};
585
586
/*
587
* PAES CBC implementation
588
*/
589
590
struct cbc_param {
591
u8 iv[AES_BLOCK_SIZE];
592
u8 key[PAES_256_PROTKEY_SIZE];
593
} __packed;
594
595
struct s390_pcbc_req_ctx {
596
unsigned long modifier;
597
struct skcipher_walk walk;
598
bool param_init_done;
599
struct cbc_param param;
600
};
601
602
static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
603
unsigned int key_len)
604
{
605
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
606
long fc;
607
int rc;
608
609
/* set raw key into context */
610
rc = paes_ctx_setkey(ctx, in_key, key_len);
611
if (rc)
612
goto out;
613
614
/* convert raw key into protected key */
615
rc = paes_convert_key(ctx);
616
if (rc)
617
goto out;
618
619
/* Pick the correct function code based on the protected key type */
620
switch (ctx->pk.type) {
621
case PKEY_KEYTYPE_AES_128:
622
fc = CPACF_KMC_PAES_128;
623
break;
624
case PKEY_KEYTYPE_AES_192:
625
fc = CPACF_KMC_PAES_192;
626
break;
627
case PKEY_KEYTYPE_AES_256:
628
fc = CPACF_KMC_PAES_256;
629
break;
630
default:
631
fc = 0;
632
break;
633
}
634
ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
635
636
rc = fc ? 0 : -EINVAL;
637
638
out:
639
pr_debug("rc=%d\n", rc);
640
return rc;
641
}
642
643
static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
644
struct s390_pcbc_req_ctx *req_ctx,
645
bool maysleep)
646
{
647
struct cbc_param *param = &req_ctx->param;
648
struct skcipher_walk *walk = &req_ctx->walk;
649
unsigned int nbytes, n, k;
650
int pk_state, rc = 0;
651
652
if (!req_ctx->param_init_done) {
653
/* fetch and check protected key state */
654
spin_lock_bh(&ctx->pk_lock);
655
pk_state = ctx->pk_state;
656
switch (pk_state) {
657
case PK_STATE_NO_KEY:
658
rc = -ENOKEY;
659
break;
660
case PK_STATE_CONVERT_IN_PROGRESS:
661
rc = -EKEYEXPIRED;
662
break;
663
case PK_STATE_VALID:
664
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
665
req_ctx->param_init_done = true;
666
break;
667
default:
668
rc = pk_state < 0 ? pk_state : -EIO;
669
break;
670
}
671
spin_unlock_bh(&ctx->pk_lock);
672
}
673
if (rc)
674
goto out;
675
676
memcpy(param->iv, walk->iv, AES_BLOCK_SIZE);
677
678
/*
679
* Note that in case of partial processing or failure the walk
680
* is NOT unmapped here. So a follow-up task may reuse the walk
681
* or in case of unrecoverable failure needs to unmap it.
682
*/
683
while ((nbytes = walk->nbytes) != 0) {
684
/* only use complete blocks */
685
n = nbytes & ~(AES_BLOCK_SIZE - 1);
686
k = cpacf_kmc(ctx->fc | req_ctx->modifier, param,
687
walk->dst.virt.addr, walk->src.virt.addr, n);
688
if (k) {
689
memcpy(walk->iv, param->iv, AES_BLOCK_SIZE);
690
rc = skcipher_walk_done(walk, nbytes - k);
691
}
692
if (k < n) {
693
if (!maysleep) {
694
rc = -EKEYEXPIRED;
695
goto out;
696
}
697
rc = paes_convert_key(ctx);
698
if (rc)
699
goto out;
700
spin_lock_bh(&ctx->pk_lock);
701
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
702
spin_unlock_bh(&ctx->pk_lock);
703
}
704
}
705
706
out:
707
pr_debug("rc=%d\n", rc);
708
return rc;
709
}
710
711
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
712
{
713
struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
714
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
715
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
716
struct skcipher_walk *walk = &req_ctx->walk;
717
int rc;
718
719
/*
720
* Attempt synchronous processing first. If it fails, schedule the request
721
* asynchronously via the crypto engine. To preserve execution order,
722
* once a request is queued to the engine, further requests using the same
723
* tfm will also be routed through the engine.
724
*/
725
726
rc = skcipher_walk_virt(walk, req, false);
727
if (rc)
728
goto out;
729
730
req_ctx->modifier = modifier;
731
req_ctx->param_init_done = false;
732
733
/* Try synchronous operation if no active engine usage */
734
if (!atomic_read(&ctx->via_engine_ctr)) {
735
rc = cbc_paes_do_crypt(ctx, req_ctx, false);
736
if (rc == 0)
737
goto out;
738
}
739
740
/*
741
* If sync operation failed or key expired or there are already
742
* requests enqueued via engine, fall back to async. Mark tfm as
743
* using engine to serialize requests.
744
*/
745
if (rc == 0 || rc == -EKEYEXPIRED) {
746
atomic_inc(&ctx->via_engine_ctr);
747
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
748
if (rc != -EINPROGRESS)
749
atomic_dec(&ctx->via_engine_ctr);
750
}
751
752
if (rc != -EINPROGRESS)
753
skcipher_walk_done(walk, rc);
754
755
out:
756
if (rc != -EINPROGRESS)
757
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
758
pr_debug("rc=%d\n", rc);
759
return rc;
760
}
761
762
static int cbc_paes_encrypt(struct skcipher_request *req)
763
{
764
return cbc_paes_crypt(req, 0);
765
}
766
767
static int cbc_paes_decrypt(struct skcipher_request *req)
768
{
769
return cbc_paes_crypt(req, CPACF_DECRYPT);
770
}
771
772
static int cbc_paes_init(struct crypto_skcipher *tfm)
773
{
774
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
775
776
memset(ctx, 0, sizeof(*ctx));
777
spin_lock_init(&ctx->pk_lock);
778
779
crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx));
780
781
return 0;
782
}
783
784
static void cbc_paes_exit(struct crypto_skcipher *tfm)
785
{
786
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
787
788
memzero_explicit(ctx, sizeof(*ctx));
789
}
790
791
static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq)
792
{
793
struct skcipher_request *req = skcipher_request_cast(areq);
794
struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
795
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
796
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
797
struct skcipher_walk *walk = &req_ctx->walk;
798
int rc;
799
800
/* walk has already been prepared */
801
802
rc = cbc_paes_do_crypt(ctx, req_ctx, true);
803
if (rc == -EKEYEXPIRED) {
804
/*
805
* Protected key expired, conversion is in process.
806
* Trigger a re-schedule of this request by returning
807
* -ENOSPC ("hardware queue is full") to the crypto engine.
808
* To avoid immediate re-invocation of this callback,
809
* tell the scheduler to voluntarily give up the CPU here.
810
*/
811
cond_resched();
812
pr_debug("rescheduling request\n");
813
return -ENOSPC;
814
} else if (rc) {
815
skcipher_walk_done(walk, rc);
816
}
817
818
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
819
pr_debug("request complete with rc=%d\n", rc);
820
local_bh_disable();
821
atomic_dec(&ctx->via_engine_ctr);
822
crypto_finalize_skcipher_request(engine, req, rc);
823
local_bh_enable();
824
return rc;
825
}
826
827
static struct skcipher_engine_alg cbc_paes_alg = {
828
.base = {
829
.base.cra_name = "cbc(paes)",
830
.base.cra_driver_name = "cbc-paes-s390",
831
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
832
.base.cra_blocksize = AES_BLOCK_SIZE,
833
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
834
.base.cra_module = THIS_MODULE,
835
.base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list),
836
.init = cbc_paes_init,
837
.exit = cbc_paes_exit,
838
.min_keysize = PAES_MIN_KEYSIZE,
839
.max_keysize = PAES_MAX_KEYSIZE,
840
.ivsize = AES_BLOCK_SIZE,
841
.setkey = cbc_paes_setkey,
842
.encrypt = cbc_paes_encrypt,
843
.decrypt = cbc_paes_decrypt,
844
},
845
.op = {
846
.do_one_request = cbc_paes_do_one_request,
847
},
848
};
849
850
/*
851
* PAES CTR implementation
852
*/
853
854
struct ctr_param {
855
u8 key[PAES_256_PROTKEY_SIZE];
856
} __packed;
857
858
struct s390_pctr_req_ctx {
859
unsigned long modifier;
860
struct skcipher_walk walk;
861
bool param_init_done;
862
struct ctr_param param;
863
};
864
865
static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
866
unsigned int key_len)
867
{
868
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
869
long fc;
870
int rc;
871
872
/* set raw key into context */
873
rc = paes_ctx_setkey(ctx, in_key, key_len);
874
if (rc)
875
goto out;
876
877
/* convert raw key into protected key */
878
rc = paes_convert_key(ctx);
879
if (rc)
880
goto out;
881
882
/* Pick the correct function code based on the protected key type */
883
switch (ctx->pk.type) {
884
case PKEY_KEYTYPE_AES_128:
885
fc = CPACF_KMCTR_PAES_128;
886
break;
887
case PKEY_KEYTYPE_AES_192:
888
fc = CPACF_KMCTR_PAES_192;
889
break;
890
case PKEY_KEYTYPE_AES_256:
891
fc = CPACF_KMCTR_PAES_256;
892
break;
893
default:
894
fc = 0;
895
break;
896
}
897
ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
898
899
rc = fc ? 0 : -EINVAL;
900
901
out:
902
pr_debug("rc=%d\n", rc);
903
return rc;
904
}
905
906
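/*
* Fill the shared ctrblk page with consecutive counter values derived
* from iv so that one CPACF KMCTR invocation can process several AES
* blocks in a row. Returns the number of bytes worth of counter blocks
* prepared (a multiple of AES_BLOCK_SIZE, at most PAGE_SIZE).
*/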
static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
907
{
908
unsigned int i, n;
909
910
/* only use complete blocks, max. PAGE_SIZE */
911
memcpy(ctrptr, iv, AES_BLOCK_SIZE);
912
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
913
for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
914
memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
915
crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
916
ctrptr += AES_BLOCK_SIZE;
917
}
918
return n;
919
}
920
921
static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
922
struct s390_pctr_req_ctx *req_ctx,
923
bool maysleep)
924
{
925
struct ctr_param *param = &req_ctx->param;
926
struct skcipher_walk *walk = &req_ctx->walk;
927
u8 buf[AES_BLOCK_SIZE], *ctrptr;
928
unsigned int nbytes, n, k;
929
int pk_state, locked, rc = 0;
930
931
if (!req_ctx->param_init_done) {
932
/* fetch and check protected key state */
933
spin_lock_bh(&ctx->pk_lock);
934
pk_state = ctx->pk_state;
935
switch (pk_state) {
936
case PK_STATE_NO_KEY:
937
rc = -ENOKEY;
938
break;
939
case PK_STATE_CONVERT_IN_PROGRESS:
940
rc = -EKEYEXPIRED;
941
break;
942
case PK_STATE_VALID:
943
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
944
req_ctx->param_init_done = true;
945
break;
946
default:
947
rc = pk_state < 0 ? pk_state : -EIO;
948
break;
949
}
950
spin_unlock_bh(&ctx->pk_lock);
951
}
952
if (rc)
953
goto out;
954
955
locked = mutex_trylock(&ctrblk_lock);
956
957
/*
958
* Note that in case of partial processing or failure the walk
959
* is NOT unmapped here. So a follow-up task may reuse the walk
960
* or in case of unrecoverable failure needs to unmap it.
961
*/
962
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
963
n = AES_BLOCK_SIZE;
964
if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
965
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
966
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
967
k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr,
968
walk->src.virt.addr, n, ctrptr);
969
if (k) {
970
if (ctrptr == ctrblk)
971
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
972
AES_BLOCK_SIZE);
973
crypto_inc(walk->iv, AES_BLOCK_SIZE);
974
rc = skcipher_walk_done(walk, nbytes - k);
975
}
976
if (k < n) {
977
if (!maysleep) {
978
if (locked)
979
mutex_unlock(&ctrblk_lock);
980
rc = -EKEYEXPIRED;
981
goto out;
982
}
983
rc = paes_convert_key(ctx);
984
if (rc) {
985
if (locked)
986
mutex_unlock(&ctrblk_lock);
987
goto out;
988
}
989
spin_lock_bh(&ctx->pk_lock);
990
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
991
spin_unlock_bh(&ctx->pk_lock);
992
}
993
}
994
if (locked)
995
mutex_unlock(&ctrblk_lock);
996
997
/* final block may be < AES_BLOCK_SIZE, copy only nbytes */
998
if (nbytes) {
999
memset(buf, 0, AES_BLOCK_SIZE);
1000
memcpy(buf, walk->src.virt.addr, nbytes);
1001
while (1) {
1002
if (cpacf_kmctr(ctx->fc, param, buf,
1003
buf, AES_BLOCK_SIZE,
1004
walk->iv) == AES_BLOCK_SIZE)
1005
break;
1006
if (!maysleep) {
1007
rc = -EKEYEXPIRED;
1008
goto out;
1009
}
1010
rc = paes_convert_key(ctx);
1011
if (rc)
1012
goto out;
1013
spin_lock_bh(&ctx->pk_lock);
1014
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
1015
spin_unlock_bh(&ctx->pk_lock);
1016
}
1017
memcpy(walk->dst.virt.addr, buf, nbytes);
1018
crypto_inc(walk->iv, AES_BLOCK_SIZE);
1019
rc = skcipher_walk_done(walk, 0);
1020
}
1021
1022
out:
1023
pr_debug("rc=%d\n", rc);
1024
return rc;
1025
}
1026
1027
static int ctr_paes_crypt(struct skcipher_request *req)
1028
{
1029
struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
1030
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1031
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1032
struct skcipher_walk *walk = &req_ctx->walk;
1033
int rc;
1034
1035
/*
1036
* Attempt synchronous processing first. If it fails, schedule the request
1037
* asynchronously via the crypto engine. To preserve execution order,
1038
* once a request is queued to the engine, further requests using the same
1039
* tfm will also be routed through the engine.
1040
*/
1041
1042
rc = skcipher_walk_virt(walk, req, false);
1043
if (rc)
1044
goto out;
1045
1046
req_ctx->param_init_done = false;
1047
1048
/* Try synchronous operation if no active engine usage */
1049
if (!atomic_read(&ctx->via_engine_ctr)) {
1050
rc = ctr_paes_do_crypt(ctx, req_ctx, false);
1051
if (rc == 0)
1052
goto out;
1053
}
1054
1055
/*
1056
* If sync operation failed or key expired or there are already
1057
* requests enqueued via engine, fall back to async. Mark tfm as
1058
* using engine to serialize requests.
1059
*/
1060
if (rc == 0 || rc == -EKEYEXPIRED) {
1061
atomic_inc(&ctx->via_engine_ctr);
1062
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
1063
if (rc != -EINPROGRESS)
1064
atomic_dec(&ctx->via_engine_ctr);
1065
}
1066
1067
if (rc != -EINPROGRESS)
1068
skcipher_walk_done(walk, rc);
1069
1070
out:
1071
if (rc != -EINPROGRESS)
1072
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1073
pr_debug("rc=%d\n", rc);
1074
return rc;
1075
}
1076
1077
static int ctr_paes_init(struct crypto_skcipher *tfm)
1078
{
1079
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1080
1081
memset(ctx, 0, sizeof(*ctx));
1082
spin_lock_init(&ctx->pk_lock);
1083
1084
crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx));
1085
1086
return 0;
1087
}
1088
1089
static void ctr_paes_exit(struct crypto_skcipher *tfm)
1090
{
1091
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1092
1093
memzero_explicit(ctx, sizeof(*ctx));
1094
}
1095
1096
static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq)
1097
{
1098
struct skcipher_request *req = skcipher_request_cast(areq);
1099
struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
1100
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1101
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1102
struct skcipher_walk *walk = &req_ctx->walk;
1103
int rc;
1104
1105
/* walk has already been prepared */
1106
1107
rc = ctr_paes_do_crypt(ctx, req_ctx, true);
1108
if (rc == -EKEYEXPIRED) {
1109
/*
1110
* Protected key expired, conversion is in process.
1111
* Trigger a re-schedule of this request by returning
1112
* -ENOSPC ("hardware queue is full") to the crypto engine.
1113
* To avoid immediate re-invocation of this callback,
1114
* tell the scheduler to voluntarily give up the CPU here.
1115
*/
1116
cond_resched();
1117
pr_debug("rescheduling request\n");
1118
return -ENOSPC;
1119
} else if (rc) {
1120
skcipher_walk_done(walk, rc);
1121
}
1122
1123
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1124
pr_debug("request complete with rc=%d\n", rc);
1125
local_bh_disable();
1126
atomic_dec(&ctx->via_engine_ctr);
1127
crypto_finalize_skcipher_request(engine, req, rc);
1128
local_bh_enable();
1129
return rc;
1130
}
1131
1132
static struct skcipher_engine_alg ctr_paes_alg = {
1133
.base = {
1134
.base.cra_name = "ctr(paes)",
1135
.base.cra_driver_name = "ctr-paes-s390",
1136
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
1137
.base.cra_blocksize = 1,
1138
.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
1139
.base.cra_module = THIS_MODULE,
1140
.base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list),
1141
.init = ctr_paes_init,
1142
.exit = ctr_paes_exit,
1143
.min_keysize = PAES_MIN_KEYSIZE,
1144
.max_keysize = PAES_MAX_KEYSIZE,
1145
.ivsize = AES_BLOCK_SIZE,
1146
.setkey = ctr_paes_setkey,
1147
.encrypt = ctr_paes_crypt,
1148
.decrypt = ctr_paes_crypt,
1149
.chunksize = AES_BLOCK_SIZE,
1150
},
1151
.op = {
1152
.do_one_request = ctr_paes_do_one_request,
1153
},
1154
};
1155
1156
/*
1157
* PAES XTS implementation
1158
*/
1159
1160
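/*
* CPACF parameter blocks for XTS: xts_full_km_param is the layout used
* with the full-xts protected key types (key, tweak, next alpha power
* and wrapping key verification pattern), xts_km_param is the classic
* two key layout where the initial XTS parameter is precomputed with
* the PCC instruction using xts_pcc_param below.
*/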
struct xts_full_km_param {
1161
u8 key[64];
1162
u8 tweak[16];
1163
u8 nap[16];
1164
u8 wkvp[32];
1165
} __packed;
1166
1167
struct xts_km_param {
1168
u8 key[PAES_256_PROTKEY_SIZE];
1169
u8 init[16];
1170
} __packed;
1171
1172
struct xts_pcc_param {
1173
u8 key[PAES_256_PROTKEY_SIZE];
1174
u8 tweak[16];
1175
u8 block[16];
1176
u8 bit[16];
1177
u8 xts[16];
1178
} __packed;
1179
1180
struct s390_pxts_req_ctx {
1181
unsigned long modifier;
1182
struct skcipher_walk walk;
1183
bool param_init_done;
1184
union {
1185
struct xts_full_km_param full_km_param;
1186
struct xts_km_param km_param;
1187
} param;
1188
};
1189
1190
static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
1191
unsigned int in_keylen)
1192
{
1193
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1194
u8 ckey[2 * AES_MAX_KEY_SIZE];
1195
unsigned int ckey_len;
1196
long fc;
1197
int rc;
1198
1199
if ((in_keylen == 32 || in_keylen == 64) &&
1200
xts_verify_key(tfm, in_key, in_keylen))
1201
return -EINVAL;
1202
1203
/* set raw key into context */
1204
rc = pxts_ctx_setkey(ctx, in_key, in_keylen);
1205
if (rc)
1206
goto out;
1207
1208
/* convert raw key(s) into protected key(s) */
1209
rc = pxts_convert_key(ctx);
1210
if (rc)
1211
goto out;
1212
1213
/*
1214
* xts_verify_key verifies the key length is not odd and makes
1215
* sure that the two keys are not the same. This can be done
1216
* on the two protected keys as well - but not for full xts keys.
1217
*/
1218
if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 ||
1219
ctx->pk[0].type == PKEY_KEYTYPE_AES_256) {
1220
ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
1221
AES_KEYSIZE_128 : AES_KEYSIZE_256;
1222
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
1223
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
1224
rc = xts_verify_key(tfm, ckey, 2 * ckey_len);
1225
memzero_explicit(ckey, sizeof(ckey));
1226
if (rc)
1227
goto out;
1228
}
1229
1230
/* Pick the correct function code based on the protected key type */
1231
switch (ctx->pk[0].type) {
1232
case PKEY_KEYTYPE_AES_128:
1233
fc = CPACF_KM_PXTS_128;
1234
break;
1235
case PKEY_KEYTYPE_AES_256:
1236
fc = CPACF_KM_PXTS_256;
1237
break;
1238
case PKEY_KEYTYPE_AES_XTS_128:
1239
fc = CPACF_KM_PXTS_128_FULL;
1240
break;
1241
case PKEY_KEYTYPE_AES_XTS_256:
1242
fc = CPACF_KM_PXTS_256_FULL;
1243
break;
1244
default:
1245
fc = 0;
1246
break;
1247
}
1248
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
1249
1250
rc = fc ? 0 : -EINVAL;
1251
1252
out:
1253
pr_debug("rc=%d\n", rc);
1254
return rc;
1255
}
1256
1257
static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
1258
struct s390_pxts_req_ctx *req_ctx,
1259
bool maysleep)
1260
{
1261
struct xts_full_km_param *param = &req_ctx->param.full_km_param;
1262
struct skcipher_walk *walk = &req_ctx->walk;
1263
unsigned int keylen, offset, nbytes, n, k;
1264
int rc = 0;
1265
1266
/*
1267
* The calling function xts_paes_do_crypt() ensures the
1268
* protected key state is always PK_STATE_VALID when this
1269
* function is invoked.
1270
*/
1271
1272
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
1273
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;
1274
1275
if (!req_ctx->param_init_done) {
1276
memset(param, 0, sizeof(*param));
1277
spin_lock_bh(&ctx->pk_lock);
1278
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1279
memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
1280
spin_unlock_bh(&ctx->pk_lock);
1281
memcpy(param->tweak, walk->iv, sizeof(param->tweak));
1282
param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */
1283
req_ctx->param_init_done = true;
1284
}
1285
1286
/*
1287
* Note that in case of partial processing or failure the walk
1288
* is NOT unmapped here. So a follow-up task may reuse the walk
1289
* or in case of unrecoverable failure needs to unmap it.
1290
*/
1291
while ((nbytes = walk->nbytes) != 0) {
1292
/* only use complete blocks */
1293
n = nbytes & ~(AES_BLOCK_SIZE - 1);
1294
k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
1295
walk->dst.virt.addr, walk->src.virt.addr, n);
1296
if (k)
1297
rc = skcipher_walk_done(walk, nbytes - k);
1298
if (k < n) {
1299
if (!maysleep) {
1300
rc = -EKEYEXPIRED;
1301
goto out;
1302
}
1303
rc = pxts_convert_key(ctx);
1304
if (rc)
1305
goto out;
1306
spin_lock_bh(&ctx->pk_lock);
1307
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1308
memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
1309
spin_unlock_bh(&ctx->pk_lock);
1310
}
1311
}
1312
1313
out:
1314
pr_debug("rc=%d\n", rc);
1315
return rc;
1316
}
1317
1318
static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
1319
struct xts_km_param *param,
1320
struct skcipher_walk *walk,
1321
unsigned int keylen,
1322
unsigned int offset, bool maysleep)
1323
{
1324
struct xts_pcc_param pcc_param;
1325
unsigned long cc = 1;
1326
int rc = 0;
1327
1328
while (cc) {
1329
memset(&pcc_param, 0, sizeof(pcc_param));
1330
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
1331
spin_lock_bh(&ctx->pk_lock);
1332
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
1333
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1334
spin_unlock_bh(&ctx->pk_lock);
1335
cc = cpacf_pcc(ctx->fc, pcc_param.key + offset);
1336
if (cc) {
1337
if (!maysleep) {
1338
rc = -EKEYEXPIRED;
1339
break;
1340
}
1341
rc = pxts_convert_key(ctx);
1342
if (rc)
1343
break;
1344
continue;
1345
}
1346
memcpy(param->init, pcc_param.xts, 16);
1347
}
1348
1349
memzero_explicit(pcc_param.key, sizeof(pcc_param.key));
1350
return rc;
1351
}
1352
1353
static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
1354
struct s390_pxts_req_ctx *req_ctx,
1355
bool maysleep)
1356
{
1357
struct xts_km_param *param = &req_ctx->param.km_param;
1358
struct skcipher_walk *walk = &req_ctx->walk;
1359
unsigned int keylen, offset, nbytes, n, k;
1360
int rc = 0;
1361
1362
/*
1363
* The calling function xts_paes_do_crypt() ensures the
1364
* protected key state is always PK_STATE_VALID when this
1365
* function is invoked.
1366
*/
1367
1368
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
1369
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
1370
1371
if (!req_ctx->param_init_done) {
1372
rc = __xts_2keys_prep_param(ctx, param, walk,
1373
keylen, offset, maysleep);
1374
if (rc)
1375
goto out;
1376
req_ctx->param_init_done = true;
1377
}
1378
1379
/*
1380
* Note that in case of partial processing or failure the walk
1381
* is NOT unmapped here. So a follow-up task may reuse the walk
1382
* or in case of unrecoverable failure needs to unmap it.
1383
*/
1384
while ((nbytes = walk->nbytes) != 0) {
1385
/* only use complete blocks */
1386
n = nbytes & ~(AES_BLOCK_SIZE - 1);
1387
k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
1388
walk->dst.virt.addr, walk->src.virt.addr, n);
1389
if (k)
1390
rc = skcipher_walk_done(walk, nbytes - k);
1391
if (k < n) {
1392
if (!maysleep) {
1393
rc = -EKEYEXPIRED;
1394
goto out;
1395
}
1396
rc = pxts_convert_key(ctx);
1397
if (rc)
1398
goto out;
1399
spin_lock_bh(&ctx->pk_lock);
1400
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1401
spin_unlock_bh(&ctx->pk_lock);
1402
}
1403
}
1404
1405
out:
1406
pr_debug("rc=%d\n", rc);
1407
return rc;
1408
}
1409
1410
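/*
* Check the protected key state and dispatch to the two key or full
* key xts crypt routine according to the CPACF function code in use.
*/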
static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
1411
struct s390_pxts_req_ctx *req_ctx,
1412
bool maysleep)
1413
{
1414
int pk_state, rc = 0;
1415
1416
/* fetch and check protected key state */
1417
spin_lock_bh(&ctx->pk_lock);
1418
pk_state = ctx->pk_state;
1419
switch (pk_state) {
1420
case PK_STATE_NO_KEY:
1421
rc = -ENOKEY;
1422
break;
1423
case PK_STATE_CONVERT_IN_PROGRESS:
1424
rc = -EKEYEXPIRED;
1425
break;
1426
case PK_STATE_VALID:
1427
break;
1428
default:
1429
rc = pk_state < 0 ? pk_state : -EIO;
1430
break;
1431
}
1432
spin_unlock_bh(&ctx->pk_lock);
1433
if (rc)
1434
goto out;
1435
1436
/* Call the 'real' crypt function based on the xts prot key type. */
1437
switch (ctx->fc) {
1438
case CPACF_KM_PXTS_128:
1439
case CPACF_KM_PXTS_256:
1440
rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep);
1441
break;
1442
case CPACF_KM_PXTS_128_FULL:
1443
case CPACF_KM_PXTS_256_FULL:
1444
rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep);
1445
break;
1446
default:
1447
rc = -EINVAL;
1448
}
1449
1450
out:
1451
pr_debug("rc=%d\n", rc);
1452
return rc;
1453
}
1454
1455
static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
1456
{
1457
struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
1458
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1459
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1460
struct skcipher_walk *walk = &req_ctx->walk;
1461
int rc;
1462
1463
/*
1464
* Attempt synchronous processing first. If it fails, schedule the request
1465
* asynchronously via the crypto engine. To preserve execution order,
1466
* once a request is queued to the engine, further requests using the same
1467
* tfm will also be routed through the engine.
1468
*/
1469
1470
rc = skcipher_walk_virt(walk, req, false);
1471
if (rc)
1472
goto out;
1473
1474
req_ctx->modifier = modifier;
1475
req_ctx->param_init_done = false;
1476
1477
/* Try synchronous operation if no active engine usage */
1478
if (!atomic_read(&ctx->via_engine_ctr)) {
1479
rc = xts_paes_do_crypt(ctx, req_ctx, false);
1480
if (rc == 0)
1481
goto out;
1482
}
1483
1484
/*
1485
* If sync operation failed or key expired or there are already
1486
* requests enqueued via engine, fall back to async. Mark tfm as
1487
* using engine to serialize requests.
1488
*/
1489
if (rc == 0 || rc == -EKEYEXPIRED) {
1490
atomic_inc(&ctx->via_engine_ctr);
1491
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
1492
if (rc != -EINPROGRESS)
1493
atomic_dec(&ctx->via_engine_ctr);
1494
}
1495
1496
if (rc != -EINPROGRESS)
1497
skcipher_walk_done(walk, rc);
1498
1499
out:
1500
if (rc != -EINPROGRESS)
1501
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1502
pr_debug("rc=%d\n", rc);
1503
return rc;
1504
}
1505
1506
static int xts_paes_encrypt(struct skcipher_request *req)
1507
{
1508
return xts_paes_crypt(req, 0);
1509
}
1510
1511
static int xts_paes_decrypt(struct skcipher_request *req)
1512
{
1513
return xts_paes_crypt(req, CPACF_DECRYPT);
1514
}
1515
1516
static int xts_paes_init(struct crypto_skcipher *tfm)
1517
{
1518
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1519
1520
memset(ctx, 0, sizeof(*ctx));
1521
spin_lock_init(&ctx->pk_lock);
1522
1523
crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx));
1524
1525
return 0;
1526
}
1527
1528
static void xts_paes_exit(struct crypto_skcipher *tfm)
1529
{
1530
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1531
1532
memzero_explicit(ctx, sizeof(*ctx));
1533
}
1534
1535
static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq)
1536
{
1537
struct skcipher_request *req = skcipher_request_cast(areq);
1538
struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
1539
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1540
struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1541
struct skcipher_walk *walk = &req_ctx->walk;
1542
int rc;
1543
1544
/* walk has already been prepared */
1545
1546
rc = xts_paes_do_crypt(ctx, req_ctx, true);
1547
if (rc == -EKEYEXPIRED) {
1548
/*
1549
* Protected key expired, conversion is in process.
1550
* Trigger a re-schedule of this request by returning
1551
* -ENOSPC ("hardware queue is full") to the crypto engine.
1552
* To avoid immediate re-invocation of this callback,
1553
* tell the scheduler to voluntarily give up the CPU here.
1554
*/
1555
cond_resched();
1556
pr_debug("rescheduling request\n");
1557
return -ENOSPC;
1558
} else if (rc) {
1559
skcipher_walk_done(walk, rc);
1560
}
1561
1562
memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1563
pr_debug("request complete with rc=%d\n", rc);
1564
local_bh_disable();
1565
atomic_dec(&ctx->via_engine_ctr);
1566
crypto_finalize_skcipher_request(engine, req, rc);
1567
local_bh_enable();
1568
return rc;
1569
}
1570
1571
static struct skcipher_engine_alg xts_paes_alg = {
1572
.base = {
1573
.base.cra_name = "xts(paes)",
1574
.base.cra_driver_name = "xts-paes-s390",
1575
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
1576
.base.cra_blocksize = AES_BLOCK_SIZE,
1577
.base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
1578
.base.cra_module = THIS_MODULE,
1579
.base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list),
1580
.init = xts_paes_init,
1581
.exit = xts_paes_exit,
1582
.min_keysize = 2 * PAES_MIN_KEYSIZE,
1583
.max_keysize = 2 * PAES_MAX_KEYSIZE,
1584
.ivsize = AES_BLOCK_SIZE,
1585
.setkey = xts_paes_setkey,
1586
.encrypt = xts_paes_encrypt,
1587
.decrypt = xts_paes_decrypt,
1588
},
1589
.op = {
1590
.do_one_request = xts_paes_do_one_request,
1591
},
1592
};
1593
1594
/*
1595
* alg register, unregister, module init, exit
1596
*/
1597
1598
static struct miscdevice paes_dev = {
1599
.name = "paes",
1600
.minor = MISC_DYNAMIC_MINOR,
1601
};
1602
1603
static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg)
1604
{
1605
if (!list_empty(&alg->base.base.cra_list))
1606
crypto_engine_unregister_skcipher(alg);
1607
}
1608
1609
static void paes_s390_fini(void)
1610
{
1611
if (paes_crypto_engine) {
1612
crypto_engine_stop(paes_crypto_engine);
1613
crypto_engine_exit(paes_crypto_engine);
1614
}
1615
__crypto_unregister_skcipher(&ctr_paes_alg);
1616
__crypto_unregister_skcipher(&xts_paes_alg);
1617
__crypto_unregister_skcipher(&cbc_paes_alg);
1618
__crypto_unregister_skcipher(&ecb_paes_alg);
1619
if (ctrblk)
1620
free_page((unsigned long)ctrblk);
1621
misc_deregister(&paes_dev);
1622
}
1623
1624
static int __init paes_s390_init(void)
1625
{
1626
int rc;
1627
1628
/* register a simple paes pseudo misc device */
1629
rc = misc_register(&paes_dev);
1630
if (rc)
1631
return rc;
1632
1633
/* with this pseudo device alloc and start a crypto engine */
1634
paes_crypto_engine =
1635
crypto_engine_alloc_init_and_set(paes_dev.this_device,
1636
true, false, MAX_QLEN);
1637
if (!paes_crypto_engine) {
1638
rc = -ENOMEM;
1639
goto out_err;
1640
}
1641
rc = crypto_engine_start(paes_crypto_engine);
1642
if (rc) {
1643
crypto_engine_exit(paes_crypto_engine);
1644
paes_crypto_engine = NULL;
1645
goto out_err;
1646
}
1647
1648
/* Query available functions for KM, KMC and KMCTR */
1649
cpacf_query(CPACF_KM, &km_functions);
1650
cpacf_query(CPACF_KMC, &kmc_functions);
1651
cpacf_query(CPACF_KMCTR, &kmctr_functions);
1652
1653
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
1654
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
1655
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
1656
rc = crypto_engine_register_skcipher(&ecb_paes_alg);
1657
if (rc)
1658
goto out_err;
1659
pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name);
1660
}
1661
1662
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
1663
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
1664
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
1665
rc = crypto_engine_register_skcipher(&cbc_paes_alg);
1666
if (rc)
1667
goto out_err;
1668
pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name);
1669
}
1670
1671
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
1672
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
1673
rc = crypto_engine_register_skcipher(&xts_paes_alg);
1674
if (rc)
1675
goto out_err;
1676
pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name);
1677
}
1678
1679
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
1680
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
1681
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
1682
ctrblk = (u8 *)__get_free_page(GFP_KERNEL);
1683
if (!ctrblk) {
1684
rc = -ENOMEM;
1685
goto out_err;
1686
}
1687
rc = crypto_engine_register_skcipher(&ctr_paes_alg);
1688
if (rc)
1689
goto out_err;
1690
pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name);
1691
}
1692
1693
return 0;
1694
1695
out_err:
1696
paes_s390_fini();
1697
return rc;
1698
}
1699
1700
module_init(paes_s390_init);
1701
module_exit(paes_s390_fini);
1702
1703
MODULE_ALIAS_CRYPTO("ecb(paes)");
1704
MODULE_ALIAS_CRYPTO("cbc(paes)");
1705
MODULE_ALIAS_CRYPTO("ctr(paes)");
1706
MODULE_ALIAS_CRYPTO("xts(paes)");
1707
1708
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
1709
MODULE_LICENSE("GPL");
1710
1711