// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <[email protected]>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

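/*
 * Prototypes for the bit-sliced primitives implemented in NEON assembly.
 * The bit-sliced code operates on up to eight AES blocks in parallel,
 * which is why the skcipher walksize below is set to 8 * AES_BLOCK_SIZE.
 */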
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 ctr[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int);

struct aesbs_ctx {
        int rounds;
        u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
        struct aesbs_ctx key;
        struct aes_enckey fallback;
};

struct aesbs_xts_ctx {
        struct aesbs_ctx key;
        struct aes_key fallback;
        struct aes_enckey tweak_key;
};

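/*
 * Expand the key using the generic AES library and convert the round keys
 * into the bit-sliced format used by the NEON routines.
 */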
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                        unsigned int key_len)
{
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_aes_ctx rk;
        int err;

        err = aes_expandkey(&rk, in_key, key_len);
        if (err)
                return err;

        ctx->rounds = 6 + key_len / 4;

        kernel_neon_begin();
        aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
        kernel_neon_end();

        return 0;
}

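/*
 * Shared ECB walk: hand full blocks to the NEON routine, rounding down to
 * a multiple of the walk stride unless this is the final chunk of the
 * request.
 */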
static int __ecb_crypt(struct skcipher_request *req,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
                   ctx->rounds, blocks);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
        return __ecb_crypt(req, aesbs_ecb_decrypt);
}

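/*
 * CBC keeps two copies of the key schedule: the AES library key for the
 * sequential encryption path and a bit-sliced conversion of the same
 * round keys for parallel decryption on NEON.
 */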
static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = aes_prepareenckey(&ctx->fallback, in_key, key_len);
        if (err)
                return err;

        ctx->key.rounds = 6 + key_len / 4;

        /*
         * Note: this assumes that the arm implementation of the AES library
         * stores the standard round keys in k.rndkeys.
         */
        kernel_neon_begin();
        aesbs_convert_key(ctx->key.rk, ctx->fallback.k.rndkeys,
                          ctx->key.rounds);
        kernel_neon_end();

        return 0;
}

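/* CBC encryption is inherently sequential, so use the AES library per block. */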
static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                u8 *prev = walk.iv;

                do {
                        crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
                        aes_encrypt(&ctx->fallback, dst, dst);
                        prev = dst;
                        src += AES_BLOCK_SIZE;
                        dst += AES_BLOCK_SIZE;
                        nbytes -= AES_BLOCK_SIZE;
                } while (nbytes >= AES_BLOCK_SIZE);
                memcpy(walk.iv, prev, AES_BLOCK_SIZE);
                err = skcipher_walk_done(&walk, nbytes);
        }
        return err;
}

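/* CBC decryption is parallelisable, so use the bit-sliced NEON routine. */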
static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

                if (walk.nbytes < walk.total)
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);

                kernel_neon_begin();
                aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                  ctx->key.rk, ctx->key.rounds, blocks,
                                  walk.iv);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        return err;
}

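/*
 * CTR mode passes a byte count to the NEON routine; a final partial block
 * is staged at the end of a stack buffer so it can be processed in place
 * and copied back afterwards.
 */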
static int ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        u8 buf[AES_BLOCK_SIZE];
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes > 0) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                unsigned int bytes = walk.nbytes;

                if (unlikely(bytes < AES_BLOCK_SIZE))
                        src = dst = memcpy(buf + sizeof(buf) - bytes,
                                           src, bytes);
                else if (walk.nbytes < walk.total)
                        bytes &= ~(8 * AES_BLOCK_SIZE - 1);

                kernel_neon_begin();
                aesbs_ctr_encrypt(dst, src, ctx->rk, ctx->rounds, bytes, walk.iv);
                kernel_neon_end();

                if (unlikely(bytes < AES_BLOCK_SIZE))
                        memcpy(walk.dst.virt.addr,
                               buf + sizeof(buf) - bytes, bytes);

                err = skcipher_walk_done(&walk, walk.nbytes - bytes);
        }

        return err;
}

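/*
 * XTS uses the two halves of the key separately: the first half feeds the
 * bit-sliced data path (with an AES library fallback for ciphertext
 * stealing), the second half encrypts the tweak.
 */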
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        err = xts_verify_key(tfm, in_key, key_len);
        if (err)
                return err;

        key_len /= 2;
        err = aes_preparekey(&ctx->fallback, in_key, key_len);
        if (err)
                return err;
        err = aes_prepareenckey(&ctx->tweak_key, in_key + key_len, key_len);
        if (err)
                return err;

        return aesbs_setkey(tfm, in_key, key_len);
}

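/*
 * Shared XTS walk.  Requests that are not a multiple of the block size are
 * first truncated to full blocks; the remaining tail is then handled with
 * ciphertext stealing using the AES library fallback.
 */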
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
                       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
                                  int rounds, int blocks, u8 iv[], int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        const int rounds = ctx->key.rounds;
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct skcipher_request subreq;
        u8 buf[2 * AES_BLOCK_SIZE];
        struct skcipher_walk walk;
        int err;

        if (req->cryptlen < AES_BLOCK_SIZE)
                return -EINVAL;

        if (unlikely(tail)) {
                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              skcipher_request_flags(req),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           req->cryptlen - tail, req->iv);
                req = &subreq;
        }

        err = skcipher_walk_virt(&walk, req, true);
        if (err)
                return err;

        aes_encrypt(&ctx->tweak_key, walk.iv, walk.iv);

        while (walk.nbytes >= AES_BLOCK_SIZE) {
                unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
                int reorder_last_tweak = !encrypt && tail > 0;

                if (walk.nbytes < walk.total) {
                        blocks = round_down(blocks,
                                            walk.stride / AES_BLOCK_SIZE);
                        reorder_last_tweak = 0;
                }

                kernel_neon_begin();
                fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
                   rounds, blocks, walk.iv, reorder_last_tweak);
                kernel_neon_end();
                err = skcipher_walk_done(&walk,
                                         walk.nbytes - blocks * AES_BLOCK_SIZE);
        }

        if (err || likely(!tail))
                return err;

        /* handle ciphertext stealing */
        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
                                 AES_BLOCK_SIZE, 0);
        memcpy(buf + AES_BLOCK_SIZE, buf, tail);
        scatterwalk_map_and_copy(buf, req->src, req->cryptlen, tail, 0);

        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

        if (encrypt)
                aes_encrypt(&ctx->fallback, buf, buf);
        else
                aes_decrypt(&ctx->fallback, buf, buf);

        crypto_xor(buf, req->iv, AES_BLOCK_SIZE);

        scatterwalk_map_and_copy(buf, req->dst, req->cryptlen - AES_BLOCK_SIZE,
                                 AES_BLOCK_SIZE + tail, 1);
        return 0;
}

static int xts_encrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, true, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
        return __xts_crypt(req, false, aesbs_xts_decrypt);
}

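/*
 * All modes advertise a walksize of eight blocks to match the parallelism
 * of the bit-sliced NEON code.  CTR reuses ctr_encrypt for decryption,
 * since encryption and decryption are identical in CTR mode.
 */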
static struct skcipher_alg aes_algs[] = { {
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "ecb-aes-neonbs",
        .base.cra_priority = 250,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct aesbs_ctx),
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .walksize = 8 * AES_BLOCK_SIZE,
        .setkey = aesbs_setkey,
        .encrypt = ecb_encrypt,
        .decrypt = ecb_decrypt,
}, {
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cbc-aes-neonbs",
        .base.cra_priority = 250,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .walksize = 8 * AES_BLOCK_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = aesbs_cbc_setkey,
        .encrypt = cbc_encrypt,
        .decrypt = cbc_decrypt,
}, {
        .base.cra_name = "ctr(aes)",
        .base.cra_driver_name = "ctr-aes-neonbs",
        .base.cra_priority = 250,
        .base.cra_blocksize = 1,
        .base.cra_ctxsize = sizeof(struct aesbs_ctx),
        .base.cra_module = THIS_MODULE,

        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .chunksize = AES_BLOCK_SIZE,
        .walksize = 8 * AES_BLOCK_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = aesbs_setkey,
        .encrypt = ctr_encrypt,
        .decrypt = ctr_encrypt,
}, {
        .base.cra_name = "xts(aes)",
        .base.cra_driver_name = "xts-aes-neonbs",
        .base.cra_priority = 250,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
        .base.cra_module = THIS_MODULE,

        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .walksize = 8 * AES_BLOCK_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .setkey = aesbs_xts_setkey,
        .encrypt = xts_encrypt,
        .decrypt = xts_decrypt,
} };

static void aes_exit(void)
{
        crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
        if (!(elf_hwcap & HWCAP_NEON))
                return -ENODEV;

        return crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

module_init(aes_init);
module_exit(aes_exit);