GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/crypto/aes_s390.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber ([email protected])
 *		Sebastian Siewior <[email protected]> SW-Fallback
 *		Patrick Steuer <[email protected]>
 *		Harald Freudenberger <[email protected]>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define pr_fmt(fmt) "aes_s390: " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

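/*
 * ctrblk is one shared page used to pre-compute consecutive counter
 * blocks for CPACF KMCTR; concurrent CTR users are serialized by
 * ctrblk_lock. The cpacf_mask_t variables cache the facility query
 * results for the KM, KMC, KMCTR and KMA instructions.
 */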
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
	} fallback;
};

struct s390_xts_ctx {
	union {
		u8 keys[64];
		struct {
			u8 key[32];
			u8 pcc_key[32];
		};
	};
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

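/*
 * State for walking the source/destination scatterlists of a GCM
 * request. Segments that are too short for the current step are
 * collected in the AES_BLOCK_SIZE bounce buffer buf.
 */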
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

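/*
 * Software fallback handling: when the machine does not provide the
 * required CPACF function code, requests are forwarded to the generic
 * skcipher implementation allocated in fallback_init_skcipher().
 */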
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

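/*
 * ECB mode: each walk step is passed to CPACF KM in one call; only
 * complete AES blocks are processed, the remainder is handed back to
 * the skcipher walk.
 */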
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "ecb-aes-s390",
	.base.cra_priority = 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = ecb_aes_set_key,
	.encrypt = ecb_aes_encrypt,
	.decrypt = ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

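/*
 * CBC mode: IV and key are passed to CPACF KMC in one parameter
 * block; the updated chaining value is copied back into walk.iv after
 * each call so the request can be continued.
 */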
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cbc-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_aes_set_key,
	.encrypt = cbc_aes_encrypt,
	.decrypt = cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

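/*
 * XTS mode (two-key variant): CPACF PCC pre-computes the initial
 * "xts parameter" from the tweak and the second key; the result is
 * then fed to KM together with the first key. The offset derived
 * from the key length places a 128-bit key in the upper half of the
 * 32-byte key field so that the parameter block handed to CPACF
 * starts directly at the key.
 */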
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "xts-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_aes_set_key,
	.encrypt = xts_aes_encrypt,
	.decrypt = xts_aes_decrypt,
};

static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Store double-key */
	memcpy(xts_ctx->keys, in_key, key_len);
	xts_ctx->key_len = key_len;
	return 0;
}

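/*
 * Full-XTS: the CPACF_KM_XTS_*_FULL function codes take the complete
 * double key plus tweak and block sequence number in one parameter
 * block, so no separate PCC pre-computation step is needed.
 */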
static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned int offset, nbytes, n;
	struct skcipher_walk walk;
	int ret;
	struct {
		__u8 key[64];
		__u8 tweak[16];
		__u8 nap[16];
	} fxts_param = {
		.nap = {0},
	};

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	offset = xts_ctx->key_len & 0x20;
	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&fxts_param, sizeof(fxts_param));
	return ret;
}

static int fullxts_aes_encrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, 0);
}

static int fullxts_aes_decrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg fullxts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "full-xts-aes-s390",
	.base.cra_priority = 403,	/* aes-xts-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = fullxts_aes_set_key,
	.encrypt = fullxts_aes_encrypt,
	.decrypt = fullxts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

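/*
 * Fill ctrptr with up to one page of consecutive counter blocks,
 * starting from iv. Returns the number of bytes covered by the
 * prepared counters (a multiple of AES_BLOCK_SIZE).
 */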
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

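/*
 * CTR mode: when the shared ctrblk page could be locked and at least
 * two blocks remain, a batch of counter blocks is prepared so a single
 * KMCTR call covers several blocks; a trailing partial block is
 * handled through a stack buffer.
 */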
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "ctr-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_aes_set_key,
	.encrypt = ctr_aes_crypt,
	.decrypt = ctr_aes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	if (gw->walk_bytes_remain == 0)
		return 0;
	gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes, bool out)
{
	gw->walk_bytes_remain -= nbytes;
	if (out)
		scatterwalk_done_dst(&gw->walk, nbytes);
	else
		scatterwalk_done_src(&gw->walk, nbytes);
}

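/*
 * Return a virtual address and byte count of at least minbytesneeded
 * input bytes, collecting data from short scatterlist segments into
 * the bounce buffer when necessary.
 */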
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n, false);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, false);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk.addr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n, true);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, true);

	return bytesdone;
}

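/*
 * Common GCM en-/decryption path: AAD and plain-/ciphertext are fed
 * to CPACF KMA in chunks dictated by the scatterlist walks; on
 * decryption the computed tag is compared against the one in req->src,
 * on encryption it is appended to req->dst.
 */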
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};

static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

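/*
 * Query the CPACF facilities once at module load and register only
 * those algorithms for which at least one AES key size is available
 * in hardware; everything else is left to other implementations.
 */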
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");