GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/nx/nx-aes-gcm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <[email protected]>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

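/*
 * Load an AES key into both the GCM (encrypt/decrypt) and GCA (AAD hashing)
 * coprocessor blocks. Only 128-, 192- and 256-bit keys are accepted.
 */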
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

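/*
 * RFC 4106 key material carries a 4-byte nonce (salt) appended to the AES
 * key. Strip it off, program the AES key as usual and stash the nonce for
 * later IV construction.
 */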
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

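/* RFC 4106 permits only 8-, 12- and 16-byte authentication tags. */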
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

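/*
 * Hash the associated data with the GCA coprocessor function, walking the
 * source scatterlist in chunks bounded by the sg and data-length limits,
 * and return the resulting partial authentication tag in @out. AAD of one
 * block or less is simply copied into @out for the GCM op to consume.
 */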
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		memcpy_from_sglist(out, req->src, 0, nbytes);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

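/*
 * GMAC path: authenticate associated data only (cryptlen == 0). The GCM
 * coprocessor block is temporarily switched to GMAC mode and fed the AAD in
 * sg-limited chunks; the running tag and S0 state are carried between
 * coprocessor calls via the CONTINUATION flag.
 */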
static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

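/*
 * Zero-length plaintext with zero-length AAD: the GCM tag reduces to the
 * encrypted initial counter block, so encrypt the IV/counter with a
 * temporary switch to ECB mode and store the result as the MAC (see the
 * nx_wb 4.8.1.3 note in the body).
 */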
static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

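/*
 * Common GCM encrypt/decrypt path. Hashes the AAD (via nx_gca(), or the
 * GMAC/empty special cases when cryptlen is zero), runs the payload through
 * the GCM coprocessor in chunks, then writes out (encrypt) or verifies
 * (decrypt) the authentication tag.
 */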
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* initialize the counter */
	*(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, rctx->iv, enc);
		else
			rc = gmac(req, rctx->iv, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		memcpy_to_sglist(
			req->dst, req->assoclen + nbytes,
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		memcpy_from_sglist(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
		rc = crypto_memneq(itag, otag,
				   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

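/*
 * "gcm(aes)" entry points: copy the caller's 12-byte IV into the request
 * context and run the common crypt path.
 */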
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

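/*
 * "rfc4106(gcm(aes))" entry points: build the full IV from the 4-byte nonce
 * saved at setkey time plus the 8-byte per-request IV, and subtract the 8 IV
 * bytes that the rfc4106 convention counts inside req->assoclen.
 */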
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

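/*
 * Algorithm descriptors exported to the NX driver core for registration:
 * plain "gcm(aes)" and the RFC 4106 variant used by IPsec.
 */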
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_RFC4106_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};