GitHub Repository: torvalds/linux
Path: blob/master/crypto/adiantum.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and a 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also because the ε-∆U hash function
 * of HBSH is formally defined to take two inputs (tweak, message), which makes
 * it difficult to wrap with the crypto_shash API; instead, those details are
 * handled at this level.  Nevertheless, if needed in the future, support for
 * other ε-∆U hash functions could be added here.
 */
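
/*
 * Example usage (an illustrative sketch, not from the original file): an
 * in-kernel user instantiates this template by name through the skcipher
 * API.  The names key, src_sg and dst_sg are assumed here, and error
 * handling and cleanup are elided:
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
 *	struct skcipher_request *req;
 *	u8 iv[32];				// the 32-byte Adiantum tweak
 *
 *	crypto_skcipher_setkey(tfm, key, 32);	// K_S, the XChaCha key
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, 4096, iv);
 *	err = crypto_skcipher_encrypt(req);	// one 4096-byte sector
 */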

#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

/*
 * Size of the right-hand part of the input data, in bytes; this is also the
 * block cipher's block size and the size of the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
 * the best performance.  But longer tweaks are useful for fscrypt, to avoid
 * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
 */
#define TWEAK_SIZE		32

struct adiantum_instance_ctx {
        struct crypto_skcipher_spawn streamcipher_spawn;
        struct crypto_cipher_spawn blockcipher_spawn;
        struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
        struct crypto_skcipher *streamcipher;
        struct crypto_cipher *blockcipher;
        struct crypto_shash *hash;
        struct poly1305_core_key header_hash_key;
};

struct adiantum_request_ctx {

        /*
         * Buffer for the right-hand part of the data, i.e.
         *
         *    P_R => P_M => C_M => C_R when encrypting, or
         *    C_R => C_M => P_M => P_R when decrypting.
         *
         * Also used to build the IV for the stream cipher.
         */
        union {
                u8 bytes[XCHACHA_IV_SIZE];
                __le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
                le128 bignum;   /* interpret as element of Z/(2^{128}Z) */
        } rbuf;

        bool enc;               /* true if encrypting, false if decrypting */

        /*
         * The result of the Poly1305 ε-∆U hash function applied to
         * (bulk length, tweak)
         */
        le128 header_hash;

        /* Sub-requests, must be last */
        union {
                struct shash_desc hash_desc;
                struct skcipher_request streamcipher_req;
        } u;
};

/*
 * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
 * hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct {
                u8 iv[XCHACHA_IV_SIZE];
                u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
                struct scatterlist sg;
                struct crypto_wait wait;
                struct skcipher_request req; /* must be last */
        } *data;
        u8 *keyp;
        int err;

        /* Set the stream cipher key (K_S) */
        crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(tctx->streamcipher,
                                  crypto_skcipher_get_flags(tfm) &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
        if (err)
                return err;

        /* Derive the subkeys */
        data = kzalloc(sizeof(*data) +
                       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->iv[0] = 1;
        sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
        crypto_init_wait(&data->wait);
        skcipher_request_set_tfm(&data->req, tctx->streamcipher);
        skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                                  CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &data->wait);
        skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
                                   sizeof(data->derived_keys), data->iv);
        err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
        if (err)
                goto out;
        keyp = data->derived_keys;

        /* Set the block cipher key (K_E) */
        crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(tctx->blockcipher,
                                crypto_skcipher_get_flags(tfm) &
                                CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(tctx->blockcipher, keyp,
                                   BLOCKCIPHER_KEY_SIZE);
        if (err)
                goto out;
        keyp += BLOCKCIPHER_KEY_SIZE;

        /* Set the hash key (K_H) */
        poly1305_core_setkey(&tctx->header_hash_key, keyp);
        keyp += POLY1305_BLOCK_SIZE;

        crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
                                           CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
        keyp += NHPOLY1305_KEY_SIZE;
        WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
        kfree_sensitive(data);
        return err;
}
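
/*
 * Illustrative layout of data->derived_keys above (a sketch; the offsets
 * follow directly from the constants defined in this file):
 *
 *	[0, 32)				K_E, the 256-bit block cipher key
 *	[32, 48)			K_T, the Poly1305 header hash key
 *					(POLY1305_BLOCK_SIZE bytes)
 *	[48, 48 + NHPOLY1305_KEY_SIZE)	the NHPoly1305 bulk hash key
 *
 * All of it is XChaCha keystream: XChaCha(key=K_S, nonce=1||0^191) applied to
 * an all-zeroes buffer, exactly as the comment above adiantum_setkey()
 * describes.
 */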

/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
        u64 x = le64_to_cpu(v1->b);
        u64 y = le64_to_cpu(v2->b);

        r->b = cpu_to_le64(x + y);
        r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
                           (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
        u64 x = le64_to_cpu(v1->b);
        u64 y = le64_to_cpu(v2->b);

        r->b = cpu_to_le64(x - y);
        r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
                           (x - y > x));
}
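
/*
 * Worked example of the carry logic above (illustrative): le128_add() forms
 * the low halves' sum x + y in 64 bits; a wraparound happened iff the
 * truncated sum is smaller than an addend, so (x + y < x) is exactly the
 * carry into the high half.  E.g. x = 0xffffffffffffffff, y = 2 gives
 * x + y = 1 < x, hence carry = 1.  le128_sub() mirrors this: (x - y > x)
 * detects the borrow out of the low half.
 */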

/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting
 * value is reused in both the first and second hash steps.  Specifically, it's
 * added to the result of an independently keyed ε-∆U hash function (for equal
 * length inputs only) taken over the left-hand part (the "bulk") of the
 * message, to give the overall Adiantum hash of the (tweak, left-hand part)
 * pair.
 */
static void adiantum_hash_header(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
        const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
        struct {
                __le64 message_bits;
                __le64 padding;
        } header = {
                .message_bits = cpu_to_le64((u64)bulk_len * 8)
        };
        struct poly1305_state state;

        poly1305_core_init(&state);

        BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
        poly1305_core_blocks(&state, &tctx->header_hash_key,
                             &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

        BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
        poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
                             TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

        poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
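
/*
 * Illustrative layout of the data hashed above (follows directly from the
 * code): the header is one 16-byte Poly1305 block,
 *
 *	le64(bulk_len * 8) || le64(0)
 *
 * followed by the 32-byte tweak (two more blocks), i.e. three Poly1305 blocks
 * in total.  E.g. for a 4096-byte sector, bulk_len = 4080 and
 * message_bits = 32640.
 */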

/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
                                 struct scatterlist *sgl, unsigned int nents,
                                 le128 *digest)
{
        struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
        const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
        struct shash_desc *hash_desc = &rctx->u.hash_desc;
        struct sg_mapping_iter miter;
        unsigned int i, n;
        int err;

        err = crypto_shash_init(hash_desc);
        if (err)
                return err;

        sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        for (i = 0; i < bulk_len; i += n) {
                sg_miter_next(&miter);
                n = min_t(unsigned int, miter.length, bulk_len - i);
                err = crypto_shash_update(hash_desc, miter.addr, n);
                if (err)
                        break;
        }
        sg_miter_stop(&miter);
        if (err)
                return err;

        return crypto_shash_final(hash_desc, (u8 *)digest);
}

/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
        const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
        struct scatterlist *dst = req->dst;
        const unsigned int dst_nents = sg_nents(dst);
        le128 digest;
        int err;

        /* If decrypting, decrypt C_M with the block cipher to get P_M */
        if (!rctx->enc)
                crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
                                          rctx->rbuf.bytes);

        /*
         * Second hash step
         *	enc: C_R = C_M - H_{K_H}(T, C_L)
         *	dec: P_R = P_M - H_{K_H}(T, P_L)
         */
        rctx->u.hash_desc.tfm = tctx->hash;
        le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
        if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
                /* Fast path for single-page destination */
                struct page *page = sg_page(dst);
                void *virt = kmap_local_page(page) + dst->offset;

                err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
                                          (u8 *)&digest);
                if (err) {
                        kunmap_local(virt);
                        return err;
                }
                le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
                memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
                flush_dcache_page(page);
                kunmap_local(virt);
        } else {
                /* Slow path that works for any destination scatterlist */
                err = adiantum_hash_message(req, dst, dst_nents, &digest);
                if (err)
                        return err;
                le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
                scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
                                         bulk_len, sizeof(le128), 1);
        }
        return 0;
}
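
/*
 * Overall algebra (a sketch, using the notation of the comments above; E/D
 * are the block cipher, S the XChaCha keystream generated from nonce C_M):
 *
 *	enc:	P_M = P_R + H_{K_H}(T, P_L);	C_M = E(P_M);
 *		C_L = P_L xor S(C_M);		C_R = C_M - H_{K_H}(T, C_L)
 *	dec:	C_M = C_R + H_{K_H}(T, C_L);	P_L = C_L xor S(C_M);
 *		P_M = D(C_M);			P_R = P_M - H_{K_H}(T, P_L)
 *
 * Decryption recomputes the same C_M (hence the same keystream) from
 * (C_R, C_L), then undoes each encryption step in reverse order.
 */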

static void adiantum_streamcipher_done(void *data, int err)
{
        struct skcipher_request *req = data;

        if (!err)
                err = adiantum_finish(req);

        skcipher_request_complete(req, err);
}

static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
        const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
        struct scatterlist *src = req->src;
        const unsigned int src_nents = sg_nents(src);
        unsigned int stream_len;
        le128 digest;
        int err;

        if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
                return -EINVAL;

        rctx->enc = enc;

        /*
         * First hash step
         *	enc: P_M = P_R + H_{K_H}(T, P_L)
         *	dec: C_M = C_R + H_{K_H}(T, C_L)
         */
        adiantum_hash_header(req);
        rctx->u.hash_desc.tfm = tctx->hash;
        if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
                /* Fast path for single-page source */
                void *virt = kmap_local_page(sg_page(src)) + src->offset;

                err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
                                          (u8 *)&digest);
                memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
                kunmap_local(virt);
        } else {
                /* Slow path that works for any source scatterlist */
                err = adiantum_hash_message(req, src, src_nents, &digest);
                scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
                                         bulk_len, sizeof(le128), 0);
        }
        if (err)
                return err;
        le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
        le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

        /* If encrypting, encrypt P_M with the block cipher to get C_M */
        if (enc)
                crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
                                          rctx->rbuf.bytes);

        /* Initialize the rest of the XChaCha IV (first part is C_M) */
        BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
        BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
        rctx->rbuf.words[4] = cpu_to_le32(1);
        rctx->rbuf.words[5] = 0;
        rctx->rbuf.words[6] = 0;
        rctx->rbuf.words[7] = 0;
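
        /*
         * Resulting IV layout (an illustrative sketch; it follows from the
         * "nonce || stream position" convention noted above, with a 24-byte
         * nonce and a 64-bit stream position):
         *
         *	bytes  0..15	C_M (the block cipher output)
         *	bytes 16..19	0x01 0x00 0x00 0x00 (words[4] = le32(1))
         *	bytes 20..23	zero (end of the nonce)
         *	bytes 24..31	zero (stream position 0)
         *
         * i.e. the bulk nonce is C_M || 1 || 0^32, which cannot collide with
         * the key-derivation nonce 1 || 0^191 used in adiantum_setkey(),
         * since the two differ at byte 16.
         */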

        /*
         * XChaCha needs to be done on all the data except the last 16 bytes;
         * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
         * implementations tend to be most efficient when passed a whole number
         * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
         * And here it doesn't matter whether the last 16 bytes are written to,
         * as the second hash step will overwrite them.  Thus, round the XChaCha
         * length up to the next 64-byte boundary if possible.
         */
        stream_len = bulk_len;
        if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
                stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
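
        /*
         * Worked example (illustrative): for a 4096-byte sector,
         * bulk_len = 4080 and round_up(4080, 64) = 4096 <= cryptlen, so the
         * stream cipher processes the full 4096 bytes (64 whole ChaCha
         * blocks).  For a 512-byte sector, bulk_len = 496 rounds up to 512.
         * For a 48-byte message, round_up(32, 64) = 64 > 48, so stream_len
         * stays 32 and the last 16 bytes are left for the second hash step.
         */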

        skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
        skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
                                   req->dst, stream_len, &rctx->rbuf);
        skcipher_request_set_callback(&rctx->u.streamcipher_req,
                                      req->base.flags,
                                      adiantum_streamcipher_done, req);
        return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
               adiantum_finish(req);
}

static int adiantum_encrypt(struct skcipher_request *req)
{
        return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
        return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *streamcipher;
        struct crypto_cipher *blockcipher;
        struct crypto_shash *hash;
        unsigned int subreq_size;
        int err;

        streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
        if (IS_ERR(streamcipher))
                return PTR_ERR(streamcipher);

        blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
        if (IS_ERR(blockcipher)) {
                err = PTR_ERR(blockcipher);
                goto err_free_streamcipher;
        }

        hash = crypto_spawn_shash(&ictx->hash_spawn);
        if (IS_ERR(hash)) {
                err = PTR_ERR(hash);
                goto err_free_blockcipher;
        }

        tctx->streamcipher = streamcipher;
        tctx->blockcipher = blockcipher;
        tctx->hash = hash;

        BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
                     sizeof(struct adiantum_request_ctx));
        subreq_size = max(sizeof_field(struct adiantum_request_ctx,
                                       u.hash_desc) +
                          crypto_shash_descsize(hash),
                          sizeof_field(struct adiantum_request_ctx,
                                       u.streamcipher_req) +
                          crypto_skcipher_reqsize(streamcipher));

        crypto_skcipher_set_reqsize(tfm,
                                    offsetof(struct adiantum_request_ctx, u) +
                                    subreq_size);
        return 0;

err_free_blockcipher:
        crypto_free_cipher(blockcipher);
err_free_streamcipher:
        crypto_free_skcipher(streamcipher);
        return err;
}

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
        struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(tctx->streamcipher);
        crypto_free_cipher(tctx->blockcipher);
        crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
        struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ictx->streamcipher_spawn);
        crypto_drop_cipher(&ictx->blockcipher_spawn);
        crypto_drop_shash(&ictx->hash_spawn);
        kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
                                          struct crypto_alg *blockcipher_alg,
                                          struct shash_alg *hash_alg)
{
        if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
            strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
                return false;

        if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
            blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
                return false;
        if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
                return false;

        if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
                return false;

        return true;
}

static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        u32 mask;
        const char *nhpoly1305_name;
        struct skcipher_instance *inst;
        struct adiantum_instance_ctx *ictx;
        struct skcipher_alg_common *streamcipher_alg;
        struct crypto_alg *blockcipher_alg;
        struct shash_alg *hash_alg;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
        if (err)
                return err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
        ictx = skcipher_instance_ctx(inst);

        /* Stream cipher, e.g. "xchacha12" */
        err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
                                   skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), 0, mask);
        if (err)
                goto err_free_inst;
        streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);

        /* Block cipher, e.g. "aes" */
        err = crypto_grab_cipher(&ictx->blockcipher_spawn,
                                 skcipher_crypto_instance(inst),
                                 crypto_attr_alg_name(tb[2]), 0, mask);
        if (err)
                goto err_free_inst;
        blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

        /* NHPoly1305 ε-∆U hash function */
        nhpoly1305_name = crypto_attr_alg_name(tb[3]);
        if (nhpoly1305_name == ERR_PTR(-ENOENT))
                nhpoly1305_name = "nhpoly1305";
        err = crypto_grab_shash(&ictx->hash_spawn,
                                skcipher_crypto_instance(inst),
                                nhpoly1305_name, 0, mask);
        if (err)
                goto err_free_inst;
        hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

        /* Check the set of algorithms */
        if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
                                           hash_alg)) {
                pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
                        streamcipher_alg->base.cra_name,
                        blockcipher_alg->cra_name, hash_alg->base.cra_name);
                err = -EINVAL;
                goto err_free_inst;
        }

        /* Instance fields */

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
                     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_free_inst;
        if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "adiantum(%s,%s,%s)",
                     streamcipher_alg->base.cra_driver_name,
                     blockcipher_alg->cra_driver_name,
                     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_free_inst;
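
        /*
         * Example of the resulting names (an illustrative sketch; the driver
         * names shown are hypothetical and depend on which implementations
         * are selected at runtime):
         *
         *	cra_name:        "adiantum(xchacha12,aes)"
         *	cra_driver_name: "adiantum(xchacha12-neon,aes-ce,nhpoly1305-neon)"
         *
         * The generic name omits the hash, since nhpoly1305 is implied, while
         * the driver name records all three chosen implementations.
         */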

        inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
        inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
        inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
        /*
         * The block cipher is only invoked once per message, so for long
         * messages (e.g. sectors for disk encryption) its performance doesn't
         * matter as much as that of the stream cipher and hash function.  Thus,
         * weigh the block cipher's ->cra_priority less.
         */
        inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
                                       2 * hash_alg->base.cra_priority +
                                       blockcipher_alg->cra_priority) / 7;
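
        /*
         * Worked example with hypothetical priorities (illustrative only):
         * a stream cipher at priority 300, a hash at 200, and a block cipher
         * at 100 give (4*300 + 2*200 + 100) / 7 = 1700 / 7 = 242, dominated
         * by the stream cipher as intended.
         */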

        inst->alg.setkey = adiantum_setkey;
        inst->alg.encrypt = adiantum_encrypt;
        inst->alg.decrypt = adiantum_decrypt;
        inst->alg.init = adiantum_init_tfm;
        inst->alg.exit = adiantum_exit_tfm;
        inst->alg.min_keysize = streamcipher_alg->min_keysize;
        inst->alg.max_keysize = streamcipher_alg->max_keysize;
        inst->alg.ivsize = TWEAK_SIZE;

        inst->free = adiantum_free_instance;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                adiantum_free_instance(inst);
        }
        return err;
}

/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
        .name = "adiantum",
        .create = adiantum_create,
        .module = THIS_MODULE,
};

static int __init adiantum_module_init(void)
{
        return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
        crypto_unregister_template(&adiantum_tmpl);
}

module_init(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");