GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/omap-aes-gcm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
                                     struct aead_request *req);

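/*
 * Complete the current AEAD request: drop the scatterlist references,
 * report the result back through the crypto engine and release the
 * runtime PM reference so the hardware may autosuspend again.
 */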
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
        struct aead_request *req = dd->aead_req;

        dd->in_sg = NULL;
        dd->out_sg = NULL;

        crypto_finalize_aead_request(dd->engine, req, ret);

        pm_runtime_put_autosuspend(dd->dev);
}

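/*
 * Post-transfer cleanup. Syncs and unmaps the DMA scatterlists, releases
 * any bounce buffers set up by omap_aes_gcm_copy_buffers(), and finishes
 * the tag handling: on encryption the computed tag is copied behind the
 * ciphertext in the destination; on decryption the comparison has already
 * been folded into rctx->auth_tag by the DMA callback, so any non-zero
 * byte means the tags differ and the request fails with -EBADMSG.
 */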
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
        u8 *tag;
        int alen, clen, i, ret = 0, nsg;
        struct omap_aes_reqctx *rctx;

        alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
        clen = ALIGN(dd->total, AES_BLOCK_SIZE);
        rctx = aead_request_ctx(dd->aead_req);

        nsg = !!(dd->assoc_len && dd->total);

        dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
                               DMA_FROM_DEVICE);
        dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
        dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
        omap_aes_crypt_dma_stop(dd);

        omap_crypto_cleanup(dd->out_sg, dd->orig_out,
                            dd->aead_req->assoclen, dd->total,
                            FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

        if (dd->flags & FLAGS_ENCRYPT)
                scatterwalk_map_and_copy(rctx->auth_tag,
                                         dd->aead_req->dst,
                                         dd->total + dd->aead_req->assoclen,
                                         dd->authsize, 1);

        omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
                            FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

        omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
                            FLAGS_IN_DATA_ST_SHIFT, dd->flags);

        if (!(dd->flags & FLAGS_ENCRYPT)) {
                tag = (u8 *)rctx->auth_tag;
                for (i = 0; i < dd->authsize; i++) {
                        if (tag[i]) {
                                ret = -EBADMSG;
                        }
                }
        }

        omap_aes_gcm_finish_req(dd, ret);
}

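/*
 * Build the scatterlists the hardware DMAs from and to. Associated data
 * and payload are aligned to AES_BLOCK_SIZE and, where necessary, copied
 * into zero-padded bounce buffers; omap_crypto_align_sg() records what was
 * copied in the FLAGS_*_DATA_ST_SHIFT bits so omap_aes_gcm_done_task() can
 * release the buffers later. For RFC4106, 8 bytes of the associated data
 * region belong to the IV and are excluded from the hashed AAD; for
 * decryption, the trailing authentication tag is not part of the payload.
 */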
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
                                     struct aead_request *req)
{
        int alen, clen, cryptlen, assoclen, ret;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authlen = crypto_aead_authsize(aead);
        struct scatterlist *tmp, sg_arr[2];
        int nsg;
        u16 flags;

        assoclen = req->assoclen;
        cryptlen = req->cryptlen;

        if (dd->flags & FLAGS_RFC4106_GCM)
                assoclen -= 8;

        if (!(dd->flags & FLAGS_ENCRYPT))
                cryptlen -= authlen;

        alen = ALIGN(assoclen, AES_BLOCK_SIZE);
        clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

        nsg = !!(assoclen && cryptlen);

        omap_aes_clear_copy_flags(dd);

        sg_init_table(dd->in_sgl, nsg + 1);
        if (assoclen) {
                tmp = req->src;
                ret = omap_crypto_align_sg(&tmp, assoclen,
                                           AES_BLOCK_SIZE, dd->in_sgl,
                                           OMAP_CRYPTO_COPY_DATA |
                                           OMAP_CRYPTO_ZERO_BUF |
                                           OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
                                           FLAGS_ASSOC_DATA_ST_SHIFT,
                                           &dd->flags);
                if (ret)
                        return ret;
        }

        if (cryptlen) {
                tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

                if (nsg)
                        sg_unmark_end(dd->in_sgl);

                ret = omap_crypto_align_sg(&tmp, cryptlen,
                                           AES_BLOCK_SIZE, &dd->in_sgl[nsg],
                                           OMAP_CRYPTO_COPY_DATA |
                                           OMAP_CRYPTO_ZERO_BUF |
                                           OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
                                           FLAGS_IN_DATA_ST_SHIFT,
                                           &dd->flags);
                if (ret)
                        return ret;
        }

        dd->in_sg = dd->in_sgl;
        dd->total = cryptlen;
        dd->assoc_len = assoclen;
        dd->authsize = authlen;

        dd->out_sg = req->dst;
        dd->orig_out = req->dst;

        dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);

        flags = 0;
        if (req->src == req->dst || dd->out_sg == sg_arr)
                flags |= OMAP_CRYPTO_FORCE_COPY;

        if (cryptlen) {
                ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
                                           AES_BLOCK_SIZE, &dd->out_sgl,
                                           flags,
                                           FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
                if (ret)
                        return ret;
        }

        dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
        dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

        return 0;
}

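/*
 * Encrypt the initial counter block J0 (the IV with a 32-bit counter of 1)
 * with the software AES key schedule. The result seeds rctx->auth_tag and
 * is later XORed with the hardware tag output to form the final GCM tag.
 */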
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));

        aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
        return 0;
}

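/*
 * DMA completion callback for the output channel. Reads the four 32-bit
 * tag registers and XORs them into the pre-computed E(K, J0) value in
 * rctx->auth_tag (the hardware apparently provides the GHASH part of the
 * tag); for decryption the tag found in the source buffer is XORed in as
 * well, so a matching tag leaves all-zero bytes for
 * omap_aes_gcm_done_task() to check.
 */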
void omap_aes_gcm_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;
        struct omap_aes_reqctx *rctx;
        int i, val;
        u32 *auth_tag, tag[4];

        if (!(dd->flags & FLAGS_ENCRYPT))
                scatterwalk_map_and_copy(tag, dd->aead_req->src,
                                         dd->total + dd->aead_req->assoclen,
                                         dd->authsize, 0);

        rctx = aead_request_ctx(dd->aead_req);
        auth_tag = (u32 *)rctx->auth_tag;
        for (i = 0; i < 4; i++) {
                val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
                auth_tag[i] = val ^ auth_tag[i];
                if (!(dd->flags & FLAGS_ENCRYPT))
                        auth_tag[i] = auth_tag[i] ^ tag[i];
        }

        omap_aes_gcm_done_task(dd);
}

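/*
 * Hand the request to the crypto engine queue; the engine later calls
 * omap_aes_gcm_crypt_req() when it is this request's turn to run.
 */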
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
                                     struct aead_request *req)
{
        if (req)
                return crypto_transfer_aead_request_to_engine(dd->engine, req);

        return 0;
}

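/*
 * Per-request hardware setup: remember the request, fold the requested
 * mode into the device flags, build the DMA scatterlists and program the
 * hardware through omap_aes_write_ctrl().
 */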
static int omap_aes_gcm_prepare_req(struct aead_request *req,
                                    struct omap_aes_dev *dd)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        int err;

        dd->aead_req = req;

        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        err = omap_aes_gcm_copy_buffers(dd, req);
        if (err)
                return err;

        dd->ctx = &ctx->octx;

        return omap_aes_write_ctrl(dd);
}

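/*
 * Common entry point for all GCM variants: pre-compute E(K, J0) for the
 * final tag, handle the degenerate zero-length case entirely in software
 * (the tag then reduces to E(K, J0)), and otherwise queue the request on
 * an available device.
 */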
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authlen = crypto_aead_authsize(aead);
        struct omap_aes_dev *dd;
        __be32 counter = cpu_to_be32(1);
        int err, assoclen;

        memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
        memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

        err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
        if (err)
                return err;

        if (mode & FLAGS_RFC4106_GCM)
                assoclen = req->assoclen - 8;
        else
                assoclen = req->assoclen;
        if (assoclen + req->cryptlen == 0) {
                scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
                                         1);
                return 0;
        }

        dd = omap_aes_find_dev(rctx);
        if (!dd)
                return -ENODEV;
        rctx->mode = mode;

        return omap_aes_gcm_handle_queue(dd, req);
}

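/*
 * gcm(aes) entry points: copy the caller's 96-bit IV into the request
 * context and start the operation in the requested direction.
 */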
int omap_aes_gcm_encrypt(struct aead_request *req)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
        return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
        return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

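/*
 * rfc4106(gcm(aes)) entry points: the 96-bit nonce is the 4-byte salt from
 * the key material followed by the caller's 8-byte IV, and the associated
 * data length must pass crypto_ipsec_check_assoclen().
 */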
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, ctx->octx.nonce, 4);
        memcpy(rctx->iv + 4, req->iv, 8);
        return crypto_ipsec_check_assoclen(req->assoclen) ?:
               omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
                                  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, ctx->octx.nonce, 4);
        memcpy(rctx->iv + 4, req->iv, 8);
        return crypto_ipsec_check_assoclen(req->assoclen) ?:
               omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

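/*
 * Expand the key for the software AES used by do_encrypt_iv() and keep a
 * raw copy for the hardware path.
 */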
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;

        ret = aes_expandkey(&ctx->actx, key, keylen);
        if (ret)
                return ret;

        memcpy(ctx->octx.key, key, keylen);
        ctx->octx.keylen = keylen;

        return 0;
}

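/*
 * RFC4106 keys carry a 4-byte nonce salt after the AES key proper; split
 * it off before expanding and storing the key.
 */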
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;

        if (keylen < 4)
                return -EINVAL;
        keylen -= 4;

        ret = aes_expandkey(&ctx->actx, key, keylen);
        if (ret)
                return ret;

        memcpy(ctx->octx.key, key, keylen);
        memcpy(ctx->octx.nonce, key + keylen, 4);
        ctx->octx.keylen = keylen;

        return 0;
}

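/* Tag-length validation for the gcm(aes) and rfc4106(gcm(aes)) modes. */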
int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        return crypto_gcm_check_authsize(authsize);
}

int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
                                 unsigned int authsize)
{
        return crypto_rfc4106_check_authsize(authsize);
}

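/*
 * Crypto engine callback that runs one queued request: prepare the
 * hardware and start the DMA transfer, or go straight to the tag path via
 * omap_aes_gcm_dma_out_callback() when there is nothing to transfer.
 */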
int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
        struct aead_request *req = container_of(areq, struct aead_request,
                                                base);
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;
        int ret;

        if (!dd)
                return -ENODEV;

        ret = omap_aes_gcm_prepare_req(req, dd);
        if (ret)
                return ret;

        if (dd->in_sg_len)
                ret = omap_aes_crypt_dma_start(dd);
        else
                omap_aes_gcm_dma_out_callback(dd);

        return ret;
}

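/* Reserve room for struct omap_aes_reqctx in every AEAD request. */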
int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

        return 0;
}
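
/*
 * Usage sketch (not part of the driver): a kernel consumer reaches this
 * implementation through the generic AEAD API, assuming the "gcm(aes)"
 * algorithm registered for this hardware wins algorithm selection on the
 * platform. key, iv, src_sg, dst_sg, assoclen and cryptlen are
 * caller-provided placeholders; buffer setup and error handling are
 * omitted for brevity.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */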