// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <[email protected]>
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include "nx_csbcpb.h"
#include "nx.h"


struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
};

/* Only 128-bit keys are supported; the key is copied into the coprocessor block */
static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8 *in_key,
			   unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 *
 * A standalone userspace reference sketch of this computation follows the
 * function below.
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;
	int len;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	len = sizeof(keys);
	/* Generate K1 and K3 by encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys))
		return -EINVAL;

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(keys))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0-length message */
	keys[1][0] ^= 0x80;

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys[1]))
		return -EINVAL;

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}
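
/*
 * Userspace reference sketch (not built as part of this driver): the same
 * zero-length XCBC computation as nx_xcbc_empty() above, written against
 * OpenSSL's legacy AES block API purely to illustrate the RFC 3566 steps in
 * the comment above.  The helper name is illustrative; the block is guarded
 * out so the driver itself is unaffected.
 */
#if 0
#include <string.h>
#include <openssl/aes.h>

static void xcbc_empty_reference(const unsigned char key[16],
				 unsigned char mac[16])
{
	AES_KEY enc;
	unsigned char k1[16], k3[16], m1[16];
	int i;

	/* K1 = E(K, 0x01..01), K3 = E(K, 0x03..03) */
	AES_set_encrypt_key(key, 128, &enc);
	memset(k1, 0x01, sizeof(k1));
	AES_encrypt(k1, k1, &enc);
	memset(k3, 0x03, sizeof(k3));
	AES_encrypt(k3, k3, &enc);

	/* M[1] = empty message plus 10* padding; E[0] is all zero */
	memset(m1, 0x00, sizeof(m1));
	m1[0] = 0x80;

	/* Tag = E[1] = E(K1, M[1] ^ E[0] ^ K3) */
	for (i = 0; i < 16; i++)
		m1[i] ^= k3[i];
	AES_set_encrypt_key(k1, 128, &enc);
	AES_encrypt(m1, mac, &enc);
}
#endif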

static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_shash *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int err;

	err = nx_crypto_ctx_aes_xcbc_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof *sctx);

	return 0;
}

/*
 * Hash full AES blocks on the NX coprocessor.  The number of unprocessed
 * trailing bytes is returned to the caller.
 */
static int nx_xcbc_update(struct shash_desc *desc,
			  const u8 *data,
			  unsigned int len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	struct nx_sg *out_sg;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	u32 to_process, total;
	int rc = 0;
	int data_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	memcpy(csbcpb->cpb.aes_xcbc.out_cv_mac, sctx->state, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	total = len;

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, nx_ctx->ap->sglen);

	if (data_len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	do {
		/* round the request down to a multiple of AES_BLOCK_SIZE */
		to_process = total & ~(AES_BLOCK_SIZE - 1);

		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &to_process,
					 max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		total -= to_process;
		data += to_process;
		in_sg = nx_ctx->in_sg;
	} while (total >= AES_BLOCK_SIZE);

	rc = total;
	memcpy(sctx->state, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

/*
 * Hash the final chunk of data and write the MAC to @out.  A zero-length
 * message is handled by nx_xcbc_empty().
 */
static int nx_xcbc_finup(struct shash_desc *desc, const u8 *src,
			 unsigned int nbytes, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc->tfm);
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (nbytes) {
		/* non-zero final, so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv, sctx->state, AES_BLOCK_SIZE);
	} else {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so use ECB mode to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* the final block is hashed by continuing the operation while
	 * indicating that this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = nbytes;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)src, &len,
				 nx_ctx->ap->sglen);

	if (len != nbytes) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.finup      = nx_xcbc_finup,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.init_tfm   = nx_crypto_ctx_aes_xcbc_init2,
	.exit_tfm   = nx_crypto_ctx_shash_exit,
	.base = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_AHASH_ALG_BLOCK_ONLY |
				   CRYPTO_AHASH_ALG_FINAL_NONZERO,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	}
};
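
/*
 * Illustrative only (not part of this driver): a minimal sketch of how
 * another kernel module might request "xcbc(aes)" through the generic shash
 * API, assuming a kernel that provides crypto_shash_tfm_digest().  The NX
 * implementation above is picked when its cra_priority wins; the helper name
 * is hypothetical and the block is guarded out of the build.
 */
#if 0
#include <linux/err.h>
#include <crypto/hash.h>

static int xcbc_aes_mac_example(const u8 *key, unsigned int keylen,
				const u8 *data, unsigned int len,
				u8 mac[AES_BLOCK_SIZE])
{
	struct crypto_shash *tfm;
	int rc;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_shash_setkey(tfm, key, keylen);
	if (!rc)
		rc = crypto_shash_tfm_digest(tfm, data, len, mac);

	crypto_free_shash(tfm);
	return rc;
}
#endif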