GitHub Repository: awilliam/linux-vfio
Path: blob/master/crypto/chainiv.c

/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
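
/*
 * Usage sketch (illustrative, assuming the ablkcipher API of this kernel
 * generation): callers do not request "chainiv(...)" by name.  The crypto
 * core selects a default IV generator when one is needed -- chainiv for
 * synchronous algorithms -- so a plain allocation such as:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * can end up instantiating "chainiv(cbc(aes))" underneath.
 */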

#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

enum {
	CHAINIV_STATE_INUSE = 0,
};
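
/*
 * Context of the synchronous variant: ctx->lock serializes all access to
 * the chained IV stored in the trailing iv[] array.
 */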
struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};
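
/*
 * Context of the asynchronous variant: the CHAINIV_STATE_INUSE bit in
 * ->state provides exclusion instead of holding a spinlock across the
 * cipher call; requests that lose the race are parked on ->queue and
 * replayed from the ->postponed work item.  (->lock guards only the
 * queue itself.)
 */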
struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};
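
/*
 * Chain step for the synchronous case: under ctx->lock, hand out the
 * stored IV as both the generated IV (req->giv) and the IV of the
 * underlying encryption, then keep the updated IV (for CBC, the last
 * ciphertext block) for the next request.  CRYPTO_TFM_REQ_MAY_SLEEP is
 * masked off because the cipher is invoked with the spinlock held.
 */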
static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}
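
/*
 * First-use hook: seed the IV from the default RNG exactly once, then
 * switch the instance's givencrypt pointer to chainiv_givencrypt so the
 * RNG path is never taken again.  The pointer re-check under the lock
 * handles two requests racing to be first.
 */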
static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return chainiv_givencrypt(req);
}

static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}

static int chainiv_init(struct crypto_tfm *tfm)
{
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	return chainiv_init_common(tfm);
}
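
/*
 * Called with the INUSE bit held.  When the queue is empty the bit is
 * released; the barrier before clear_bit() plus the re-check of qlen
 * closes the race with a request enqueued just before the release.  If
 * work remains (or the bit was won back for a freshly queued request),
 * the postponed worker is kicked on kcrypto_wq.
 */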
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = queue_work(kcrypto_wq, &ctx->postponed);
	BUG_ON(!queued);

out:
	return err;
}
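
/*
 * Slow path: park the request on ctx->queue.  If the INUSE bit is already
 * owned, the owner will notice the non-empty queue and schedule the
 * worker; otherwise take the bit here and schedule it directly.
 */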
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}
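
/*
 * Chain step for the asynchronous case.  Identical IV handling to
 * chainiv_givencrypt(), but exclusion is provided by the INUSE bit the
 * caller already holds rather than by a spinlock, and the bit is released
 * (or the worker rescheduled) via async_chainiv_schedule_work().
 */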
static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}
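
/*
 * Fast path: try to take the INUSE bit without sleeping.  If it is busy,
 * or earlier requests are still waiting on the queue (which must drain
 * first to preserve ordering), fall back to postponing.
 */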
static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}
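
/*
 * Async counterpart of chainiv_givencrypt_first(): seed the IV from the
 * default RNG on first use under the INUSE bit, swap in the regular
 * givencrypt handler, then process the request through the normal path.
 */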
static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

	if (err)
		return err;

out:
	return async_chainiv_givencrypt(req);
}
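
/*
 * Worker for postponed requests.  Only one request is dequeued per run
 * (see the comment below); async_chainiv_schedule_work() re-arms the work
 * item while the queue stays non-empty.  Completion is invoked with BHs
 * disabled, mirroring the softirq context completions normally run in.
 */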
static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;
	int err;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	err = async_chainiv_givencrypt_tail(req);

	local_bh_disable();
	skcipher_givcrypt_complete(req, err);
	local_bh_enable();
}

static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}

static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;
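
/*
 * Template constructor.  A reference to the default RNG is taken up front
 * (dropped again in chainiv_free()) so first-use IV seeding has an RNG to
 * draw from.  The synchronous handlers and context are installed by
 * default; if the caller does not insist on a synchronous algorithm, the
 * instance is flagged CRYPTO_ALG_ASYNC and switched to the async variants.
 * Either way, ivsize bytes are appended to the context for the trailing
 * iv[] member.
 */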
static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto put_rng;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

put_rng:
	crypto_put_default_rng();
	goto out;
}

static void chainiv_free(struct crypto_instance *inst)
{
	skcipher_geniv_free(inst);
	crypto_put_default_rng();
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = chainiv_free,
	.module = THIS_MODULE,
};

static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

static void chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}

module_init(chainiv_module_init);
module_exit(chainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");