GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <[email protected]>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <[email protected]>
 *             Gabriele Paoloni <[email protected]>
 *             Tadeusz Struk ([email protected])
 *             Aidan O'Mahony ([email protected])
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

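/*
 * The aesni_* routines declared below are implemented in assembly and use
 * SSE/XMM registers, so every call site either brackets them with
 * kernel_fpu_begin()/kernel_fpu_end() or falls back to a non-AES-NI path
 * when irq_fpu_usable() says the FPU state cannot be touched here.
 */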
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
		unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
		const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
		const u8 *in, unsigned long plaintext_len, u8 *iv,
		u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
		u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
		const u8 *in, unsigned long ciphertext_len, u8 *iv,
		u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
		u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

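/*
 * cra_alignmask is left at zero for these algorithms; instead, cra_ctxsize is
 * over-allocated by AESNI_ALIGN - 1 bytes (or AESNI_ALIGN for the GCM
 * context) and the helpers above and below align the context pointer at run
 * time, so the expanded key ends up on a 16 byte boundary as the assembly
 * routines expect.
 */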
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
		const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

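/*
 * Single-block cipher operations: when irq_fpu_usable() reports that the
 * SSE state cannot be touched in the current context, fall back to the
 * non-AES-NI x86 implementation (crypto_aes_encrypt_x86()/
 * crypto_aes_decrypt_x86()); otherwise run the AES-NI instruction between
 * kernel_fpu_begin() and kernel_fpu_end().
 */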
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
};

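/*
 * The "__" prefixed variants below always use AES-NI directly (no FPU
 * availability check) and are registered with priority 0: they are internal
 * building blocks, reached through cryptd and the "fpu" template rather than
 * selected by name by normal users.
 */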
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
};

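/*
 * Synchronous block-chaining helpers: each walk below runs entirely between
 * kernel_fpu_begin() and kernel_fpu_end(), so CRYPTO_TFM_REQ_MAY_SLEEP is
 * cleared first (the walk must not sleep while the FPU is claimed). Lengths
 * are rounded down to whole AES blocks with AES_BLOCK_MASK and any remainder
 * is handed back to blkcipher_walk_done().
 */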
static int ecb_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
};

static int cbc_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
};

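/*
 * CTR mode (x86-64 only). The bulk of the data is processed a block at a
 * time by the assembly routine; ctr_crypt_final() handles a trailing partial
 * block by encrypting the current counter block and XOR-ing only the
 * remaining bytes of keystream into the output, then bumping the counter.
 */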
#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
		struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
};
#endif

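/*
 * Asynchronous (ablkcipher) front ends. If the FPU is usable the request is
 * handled synchronously through the inner "__driver-*" blkcipher; otherwise
 * the request is copied and re-targeted at the cryptd transform, whose
 * workqueue runs in process context where the FPU can be used.
 */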
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
		struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

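/*
 * Note for the ctr(aes) wrapper below: CTR decryption is the same keystream
 * operation as encryption, which is why both .encrypt and .decrypt point at
 * ablk_encrypt.
 */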
#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ctr_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
};

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name = "rfc3686(ctr(aes))",
	.cra_driver_name = "rfc3686-ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init = ablk_rfc3686_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
			.geniv = "seqiv",
		},
	},
};
#endif
#endif

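/*
 * The lrw/pcbc/xts wrappers below are only built when the corresponding
 * generic mode is available (see the HAS_* defines above). Each one
 * instantiates the generic template on top of the synchronous
 * "__driver-aes-aesni" cipher, wrapped in the "fpu" template set up by
 * crypto_fpu_init(); the intent is to keep the FPU claimed across the whole
 * templated request rather than per AES block.
 */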
#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_lrw_alg = {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init = ablk_lrw_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_xts_alg = {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init = ablk_xts_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

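/*
 * RFC4106 AES-GCM (x86-64 only). The user-visible "rfc4106(gcm(aes))"
 * algorithm is asynchronous: requests either run the inner
 * "__driver-gcm-aes-aesni" AEAD directly (when the FPU is usable) or are
 * deferred to cryptd. The key passed to setkey is an AES-128 key followed by
 * the 4 byte RFC4106 nonce (salt).
 */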
#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

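/*
 * The GHASH hash subkey H is the encryption of an all-zero block under the
 * session key. It is derived here by running a one-block ctr(aes) encryption
 * of zeroes with a zero counter block, waiting for completion if the ctr
 * implementation picked by the crypto API is asynchronous.
 */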
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
		unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
			key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length and
 * can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
		unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static struct crypto_alg rfc4106_alg = {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
};

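/*
 * The __driver_rfc4106_* routines below implement the inner
 * "__driver-gcm-aes-aesni" AEAD that the wrappers above dispatch to. They
 * expect linear buffers: requests whose src/assoc scatterlists have more
 * than one entry are copied into a temporary allocation first.
 */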
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length equal
	 * to 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length
	 * equal to 8 or 12 bytes. */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}

static struct crypto_alg __rfc4106_alg = {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
};
#endif

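/*
 * Module init: bail out early if the CPU lacks the AES-NI instructions, then
 * register the algorithms. On any registration failure the goto ladder
 * unwinds, unregistering everything that was registered before the failing
 * step, in reverse order.
 */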
static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

	if ((err = crypto_fpu_init()))
		goto fpu_err;
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
	return err;
}

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
	crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");