GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/bcm/spu2.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * This file works with the SPU2 version of the SPU. SPU2 has different message
 * formats than the previous version of the SPU. All SPU message format
 * differences should be hidden in the spux.c,h files.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/string_choices.h>

#include "util.h"
#include "spu.h"
#include "spu2.h"

#define SPU2_TX_STATUS_LEN 0 /* SPU2 has no STATUS in input packet */

/*
 * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0
 * register. Defaults to 2.
 */
#define SPU2_RX_STATUS_LEN 2

enum spu2_proto_sel {
	SPU2_PROTO_RESV = 0,
	SPU2_MACSEC_SECTAG8_ECB = 1,
	SPU2_MACSEC_SECTAG8_SCB = 2,
	SPU2_MACSEC_SECTAG16 = 3,
	SPU2_MACSEC_SECTAG16_8_XPN = 4,
	SPU2_IPSEC = 5,
	SPU2_IPSEC_ESN = 6,
	SPU2_TLS_CIPHER = 7,
	SPU2_TLS_AEAD = 8,
	SPU2_DTLS_CIPHER = 9,
	SPU2_DTLS_AEAD = 10
};

static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256",
	"DES", "3DES"
};

static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB",
	"XTS", "CCM", "GCM"
};

static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256",
	"Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384",
	"SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256",
	"SHA3-384", "SHA3-512"
};

static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC",
	"Rabin", "CCM", "GCM", "Reserved"
};

static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type)
{
	if (cipher_type >= SPU2_CIPHER_TYPE_LAST)
		return "Reserved";
	return spu2_cipher_type_names[cipher_type];
}

static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode)
{
	if (cipher_mode >= SPU2_CIPHER_MODE_LAST)
		return "Reserved";
	return spu2_cipher_mode_names[cipher_mode];
}

static char *spu2_hash_type_name(enum spu2_hash_type hash_type)
{
	if (hash_type >= SPU2_HASH_TYPE_LAST)
		return "Reserved";
	return spu2_hash_type_names[hash_type];
}

static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode)
{
	if (hash_mode >= SPU2_HASH_MODE_LAST)
		return "Reserved";
	return spu2_hash_mode_names[hash_mode];
}

/*
 * Convert from a software cipher mode value to the corresponding value
 * for SPU2.
 */
static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode,
				  enum spu2_cipher_mode *spu2_mode)
{
	switch (cipher_mode) {
	case CIPHER_MODE_ECB:
		*spu2_mode = SPU2_CIPHER_MODE_ECB;
		break;
	case CIPHER_MODE_CBC:
		*spu2_mode = SPU2_CIPHER_MODE_CBC;
		break;
	case CIPHER_MODE_OFB:
		*spu2_mode = SPU2_CIPHER_MODE_OFB;
		break;
	case CIPHER_MODE_CFB:
		*spu2_mode = SPU2_CIPHER_MODE_CFB;
		break;
	case CIPHER_MODE_CTR:
		*spu2_mode = SPU2_CIPHER_MODE_CTR;
		break;
	case CIPHER_MODE_CCM:
		*spu2_mode = SPU2_CIPHER_MODE_CCM;
		break;
	case CIPHER_MODE_GCM:
		*spu2_mode = SPU2_CIPHER_MODE_GCM;
		break;
	case CIPHER_MODE_XTS:
		*spu2_mode = SPU2_CIPHER_MODE_XTS;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2
 * cipher type and mode.
 * @cipher_alg: [in] cipher algorithm value from software enumeration
 * @cipher_mode: [in] cipher mode value from software enumeration
 * @cipher_type: [in] cipher type value from software enumeration
 * @spu2_type: [out] cipher type value used by spu2 hardware
 * @spu2_mode: [out] cipher mode value used by spu2 hardware
 *
 * Return: 0 if successful
 */
static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     enum spu_cipher_type cipher_type,
			     enum spu2_cipher_type *spu2_type,
			     enum spu2_cipher_mode *spu2_mode)
{
	int err;

	err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode);
	if (err) {
		flow_log("Invalid cipher mode %d\n", cipher_mode);
		return err;
	}

	switch (cipher_alg) {
	case CIPHER_ALG_NONE:
		*spu2_type = SPU2_CIPHER_TYPE_NONE;
		break;
	case CIPHER_ALG_RC4:
		/* SPU2 does not support RC4 */
		err = -EINVAL;
		*spu2_type = SPU2_CIPHER_TYPE_NONE;
		break;
	case CIPHER_ALG_DES:
		*spu2_type = SPU2_CIPHER_TYPE_DES;
		break;
	case CIPHER_ALG_3DES:
		*spu2_type = SPU2_CIPHER_TYPE_3DES;
		break;
	case CIPHER_ALG_AES:
		switch (cipher_type) {
		case CIPHER_TYPE_AES128:
			*spu2_type = SPU2_CIPHER_TYPE_AES128;
			break;
		case CIPHER_TYPE_AES192:
			*spu2_type = SPU2_CIPHER_TYPE_AES192;
			break;
		case CIPHER_TYPE_AES256:
			*spu2_type = SPU2_CIPHER_TYPE_AES256;
			break;
		default:
			err = -EINVAL;
		}
		break;
	case CIPHER_ALG_LAST:
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		flow_log("Invalid cipher alg %d or type %d\n",
			 cipher_alg, cipher_type);
	return err;
}
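
/*
 * Illustrative example (not part of the original driver source): a caller
 * translating an AES-256 XTS request to SPU2 values might do the following,
 * assuming the hypothetical local variables shown:
 *
 *	enum spu2_cipher_type type;
 *	enum spu2_cipher_mode mode;
 *	int rc = spu2_cipher_xlate(CIPHER_ALG_AES, CIPHER_MODE_XTS,
 *				   CIPHER_TYPE_AES256, &type, &mode);
 *
 * On success rc is 0, type is SPU2_CIPHER_TYPE_AES256 and mode is
 * SPU2_CIPHER_MODE_XTS; an unsupported combination (e.g. CIPHER_ALG_RC4)
 * yields -EINVAL with type set to SPU2_CIPHER_TYPE_NONE.
 */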

/*
 * Convert from a software hash mode value to the corresponding value
 * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value.
 */
static int spu2_hash_mode_xlate(enum hash_mode hash_mode,
				enum spu2_hash_mode *spu2_mode)
{
	switch (hash_mode) {
	case HASH_MODE_XCBC:
		*spu2_mode = SPU2_HASH_MODE_XCBC_MAC;
		break;
	case HASH_MODE_CMAC:
		*spu2_mode = SPU2_HASH_MODE_CMAC;
		break;
	case HASH_MODE_HMAC:
		*spu2_mode = SPU2_HASH_MODE_HMAC;
		break;
	case HASH_MODE_CCM:
		*spu2_mode = SPU2_HASH_MODE_CCM;
		break;
	case HASH_MODE_GCM:
		*spu2_mode = SPU2_HASH_MODE_GCM;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type
 * and mode.
 * @hash_alg: [in] hash algorithm value from software enumeration
 * @hash_mode: [in] hash mode value from software enumeration
 * @hash_type: [in] hash type value from software enumeration
 * @ciph_type: [in] cipher type value from software enumeration
 * @spu2_type: [out] hash type value used by SPU2 hardware
 * @spu2_mode: [out] hash mode value used by SPU2 hardware
 *
 * Return: 0 if successful
 */
static int
spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
		enum hash_type hash_type, enum spu_cipher_type ciph_type,
		enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode)
{
	int err;

	err = spu2_hash_mode_xlate(hash_mode, spu2_mode);
	if (err) {
		flow_log("Invalid hash mode %d\n", hash_mode);
		return err;
	}

	switch (hash_alg) {
	case HASH_ALG_NONE:
		*spu2_type = SPU2_HASH_TYPE_NONE;
		break;
	case HASH_ALG_MD5:
		*spu2_type = SPU2_HASH_TYPE_MD5;
		break;
	case HASH_ALG_SHA1:
		*spu2_type = SPU2_HASH_TYPE_SHA1;
		break;
	case HASH_ALG_SHA224:
		*spu2_type = SPU2_HASH_TYPE_SHA224;
		break;
	case HASH_ALG_SHA256:
		*spu2_type = SPU2_HASH_TYPE_SHA256;
		break;
	case HASH_ALG_SHA384:
		*spu2_type = SPU2_HASH_TYPE_SHA384;
		break;
	case HASH_ALG_SHA512:
		*spu2_type = SPU2_HASH_TYPE_SHA512;
		break;
	case HASH_ALG_AES:
		switch (ciph_type) {
		case CIPHER_TYPE_AES128:
			*spu2_type = SPU2_HASH_TYPE_AES128;
			break;
		case CIPHER_TYPE_AES192:
			*spu2_type = SPU2_HASH_TYPE_AES192;
			break;
		case CIPHER_TYPE_AES256:
			*spu2_type = SPU2_HASH_TYPE_AES256;
			break;
		default:
			err = -EINVAL;
		}
		break;
	case HASH_ALG_SHA3_224:
		*spu2_type = SPU2_HASH_TYPE_SHA3_224;
		break;
	case HASH_ALG_SHA3_256:
		*spu2_type = SPU2_HASH_TYPE_SHA3_256;
		break;
	case HASH_ALG_SHA3_384:
		*spu2_type = SPU2_HASH_TYPE_SHA3_384;
		break;
	case HASH_ALG_SHA3_512:
		*spu2_type = SPU2_HASH_TYPE_SHA3_512;
		break;
	case HASH_ALG_LAST:
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		flow_log("Invalid hash alg %d or type %d\n",
			 hash_alg, hash_type);
	return err;
}
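
/*
 * Illustrative example (not part of the original driver source): for an
 * HMAC-SHA256 authenticator the translation below would be expected to
 * succeed, assuming the hypothetical local variables shown:
 *
 *	enum spu2_hash_type type;
 *	enum spu2_hash_mode mode;
 *	int rc = spu2_hash_xlate(HASH_ALG_SHA256, HASH_MODE_HMAC,
 *				 HASH_TYPE_FULL, CIPHER_TYPE_NONE,
 *				 &type, &mode);
 *
 * On success rc is 0, type is SPU2_HASH_TYPE_SHA256 and mode is
 * SPU2_HASH_MODE_HMAC. The ciph_type argument only matters for HASH_ALG_AES,
 * where it selects the AES key size.
 */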

/* Dump FMD ctrl0. The ctrl0 input is in host byte order */
static void spu2_dump_fmd_ctrl0(u64 ctrl0)
{
	enum spu2_cipher_type ciph_type;
	enum spu2_cipher_mode ciph_mode;
	enum spu2_hash_type hash_type;
	enum spu2_hash_mode hash_mode;
	char *ciph_name;
	char *ciph_mode_name;
	char *hash_name;
	char *hash_mode_name;
	u8 cfb;
	u8 proto;

	packet_log(" FMD CTRL0 %#16llx\n", ctrl0);
	if (ctrl0 & SPU2_CIPH_ENCRYPT_EN)
		packet_log(" encrypt\n");
	else
		packet_log(" decrypt\n");

	ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT;
	ciph_name = spu2_ciph_type_name(ciph_type);
	packet_log(" Cipher type: %s\n", ciph_name);

	if (ciph_type != SPU2_CIPHER_TYPE_NONE) {
		ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT;
		ciph_mode_name = spu2_ciph_mode_name(ciph_mode);
		packet_log(" Cipher mode: %s\n", ciph_mode_name);
	}

	cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT;
	packet_log(" CFB %#x\n", cfb);

	proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT;
	packet_log(" protocol %#x\n", proto);

	if (ctrl0 & SPU2_HASH_FIRST)
		packet_log(" hash first\n");
	else
		packet_log(" cipher first\n");

	if (ctrl0 & SPU2_CHK_TAG)
		packet_log(" check tag\n");

	hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT;
	hash_name = spu2_hash_type_name(hash_type);
	packet_log(" Hash type: %s\n", hash_name);

	if (hash_type != SPU2_HASH_TYPE_NONE) {
		hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT;
		hash_mode_name = spu2_hash_mode_name(hash_mode);
		packet_log(" Hash mode: %s\n", hash_mode_name);
	}

	if (ctrl0 & SPU2_CIPH_PAD_EN) {
		packet_log(" Cipher pad: %#2llx\n",
			   (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT);
	}
}

/* Dump FMD ctrl1. The ctrl1 input is in host byte order */
static void spu2_dump_fmd_ctrl1(u64 ctrl1)
{
	u8 hash_key_len;
	u8 ciph_key_len;
	u8 ret_iv_len;
	u8 iv_offset;
	u8 iv_len;
	u8 hash_tag_len;
	u8 ret_md;

	packet_log(" FMD CTRL1 %#16llx\n", ctrl1);
	if (ctrl1 & SPU2_TAG_LOC)
		packet_log(" Tag after payload\n");

	packet_log(" Msg includes ");
	if (ctrl1 & SPU2_HAS_FR_DATA)
		packet_log("FD ");
	if (ctrl1 & SPU2_HAS_AAD1)
		packet_log("AAD1 ");
	if (ctrl1 & SPU2_HAS_NAAD)
		packet_log("NAAD ");
	if (ctrl1 & SPU2_HAS_AAD2)
		packet_log("AAD2 ");
	if (ctrl1 & SPU2_HAS_ESN)
		packet_log("ESN ");
	packet_log("\n");

	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
	packet_log(" Hash key len %u\n", hash_key_len);

	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
	packet_log(" Cipher key len %u\n", ciph_key_len);

	if (ctrl1 & SPU2_GENIV)
		packet_log(" Generate IV\n");

	if (ctrl1 & SPU2_HASH_IV)
		packet_log(" IV included in hash\n");

	if (ctrl1 & SPU2_RET_IV)
		packet_log(" Return IV in output before payload\n");

	ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT;
	packet_log(" Length of returned IV %u bytes\n",
		   ret_iv_len ? ret_iv_len : 16);

	iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT;
	packet_log(" IV offset %u\n", iv_offset);

	iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
	packet_log(" Input IV len %u bytes\n", iv_len);

	hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT;
	packet_log(" Hash tag length %u bytes\n", hash_tag_len);

	packet_log(" Return ");
	ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT;
	if (ret_md)
		packet_log("FMD ");
	if (ret_md == SPU2_RET_FMD_OMD)
		packet_log("OMD ");
	else if (ret_md == SPU2_RET_FMD_OMD_IV)
		packet_log("OMD IV ");
	if (ctrl1 & SPU2_RETURN_FD)
		packet_log("FD ");
	if (ctrl1 & SPU2_RETURN_AAD1)
		packet_log("AAD1 ");
	if (ctrl1 & SPU2_RETURN_NAAD)
		packet_log("NAAD ");
	if (ctrl1 & SPU2_RETURN_AAD2)
		packet_log("AAD2 ");
	if (ctrl1 & SPU2_RETURN_PAY)
		packet_log("Payload");
	packet_log("\n");
}

/* Dump FMD ctrl2. The ctrl2 input is in host byte order */
static void spu2_dump_fmd_ctrl2(u64 ctrl2)
{
	packet_log(" FMD CTRL2 %#16llx\n", ctrl2);

	packet_log(" AAD1 offset %llu length %llu bytes\n",
		   ctrl2 & SPU2_AAD1_OFFSET,
		   (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT);
	packet_log(" AAD2 offset %llu\n",
		   (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT);
	packet_log(" Payload offset %llu\n",
		   (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT);
}

/* Dump FMD ctrl3. The ctrl3 input is in host byte order */
static void spu2_dump_fmd_ctrl3(u64 ctrl3)
{
	packet_log(" FMD CTRL3 %#16llx\n", ctrl3);

	packet_log(" Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN);
	packet_log(" TLS length %llu bytes\n",
		   (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT);
}

static void spu2_dump_fmd(struct SPU2_FMD *fmd)
{
	spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0));
	spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1));
	spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2));
	spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3));
}

static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
			  u16 hash_iv_len, u16 ciph_iv_len)
{
	u8 *ptr = omd;

	packet_log(" OMD:\n");

	if (hash_key_len) {
		packet_log(" Hash Key Length %u bytes\n", hash_key_len);
		packet_dump(" KEY: ", ptr, hash_key_len);
		ptr += hash_key_len;
	}

	if (ciph_key_len) {
		packet_log(" Cipher Key Length %u bytes\n", ciph_key_len);
		packet_dump(" KEY: ", ptr, ciph_key_len);
		ptr += ciph_key_len;
	}

	if (hash_iv_len) {
		packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
		packet_dump(" hash IV: ", ptr, hash_iv_len);
		ptr += hash_iv_len;
	}

	if (ciph_iv_len) {
		packet_log(" Cipher IV Length %u bytes\n", ciph_iv_len);
		packet_dump(" cipher IV: ", ptr, ciph_iv_len);
	}
}

/* Dump a SPU2 header for debug */
void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
	struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf;
	u8 *omd;
	u64 ctrl1;
	u16 hash_key_len;
	u16 ciph_key_len;
	u16 hash_iv_len;
	u16 ciph_iv_len;
	u16 omd_len;

	packet_log("\n");
	packet_log("SPU2 message header %p len: %u\n", buf, buf_len);

	spu2_dump_fmd(fmd);
	omd = (u8 *)(fmd + 1);

	ctrl1 = le64_to_cpu(fmd->ctrl1);
	hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT;
	ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT;
	hash_iv_len = 0;
	ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT;
	spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len,
		      ciph_iv_len);

	/* Double check sanity */
	omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len;
	if (FMD_SIZE + omd_len != buf_len) {
		packet_log
		    (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n",
		     buf_len, FMD_SIZE + omd_len);
	}
	packet_log("\n");
}

/**
 * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
 * subsequent skcipher requests for this context.
 * @fmd: Start of FMD field to be written
 * @spu2_type: Cipher algorithm
 * @spu2_mode: Cipher mode
 * @cipher_key_len: Length of cipher key, in bytes
 * @cipher_iv_len: Length of cipher initialization vector, in bytes
 *
 * Return: 0 (success)
 */
static int spu2_fmd_init(struct SPU2_FMD *fmd,
			 enum spu2_cipher_type spu2_type,
			 enum spu2_cipher_mode spu2_mode,
			 u32 cipher_key_len, u32 cipher_iv_len)
{
	u64 ctrl0;
	u64 ctrl1;
	u64 ctrl2;
	u64 ctrl3;
	u32 aad1_offset;
	u32 aad2_offset;
	u16 aad1_len = 0;
	u64 payload_offset;

	ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) |
	    (spu2_mode << SPU2_CIPH_MODE_SHIFT);

	ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) |
	    ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) |
	    ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY;

	/*
	 * AAD1 offset is from start of FD. FD length is always 0 for this
	 * driver. So AAD1_offset is always 0.
	 */
	aad1_offset = 0;
	aad2_offset = aad1_offset;
	payload_offset = 0;
	ctrl2 = aad1_offset |
	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
	    (payload_offset << SPU2_PL_OFFSET_SHIFT);

	ctrl3 = 0;

	fmd->ctrl0 = cpu_to_le64(ctrl0);
	fmd->ctrl1 = cpu_to_le64(ctrl1);
	fmd->ctrl2 = cpu_to_le64(ctrl2);
	fmd->ctrl3 = cpu_to_le64(ctrl3);

	return 0;
}
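
/*
 * Illustrative example (not part of the original driver source): at setkey
 * time for AES-256-CBC, the FMD could be pre-built as shown below, assuming
 * a hypothetical caller-owned buffer:
 *
 *	struct SPU2_FMD fmd;
 *
 *	spu2_fmd_init(&fmd, SPU2_CIPHER_TYPE_AES256, SPU2_CIPHER_MODE_CBC,
 *		      32, 16);
 *
 * This encodes the cipher type/mode in ctrl0, the 32-byte key length and
 * 16-byte IV length in ctrl1 (along with "return FMD only" and "return
 * payload"), and zeroes the offsets in ctrl2 and the length in ctrl3.
 */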

/**
 * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd: Start of FMD field to be written
 * @is_inbound: true if decrypting. false if encrypting.
 * @auth_first: true if alg authenticates before encrypting
 * @protocol: protocol selector
 * @cipher_type: cipher algorithm
 * @cipher_mode: cipher mode
 * @auth_type: authentication type
 * @auth_mode: authentication mode
 */
static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
				 bool is_inbound, bool auth_first,
				 enum spu2_proto_sel protocol,
				 enum spu2_cipher_type cipher_type,
				 enum spu2_cipher_mode cipher_mode,
				 enum spu2_hash_type auth_type,
				 enum spu2_hash_mode auth_mode)
{
	u64 ctrl0 = 0;

	if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound)
		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;

	ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) |
	    ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT);

	if (protocol)
		ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT;

	if (auth_first)
		ctrl0 |= SPU2_HASH_FIRST;

	if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE))
		ctrl0 |= SPU2_CHK_TAG;

	ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) |
		  ((u64)auth_mode << SPU2_HASH_MODE_SHIFT));

	fmd->ctrl0 = cpu_to_le64(ctrl0);
}

/**
 * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
 * SPU request packet.
 * @fmd: Start of FMD field to be written
 * @is_inbound: true if decrypting. false if encrypting.
 * @assoc_size: Length of additional associated data, in bytes
 * @auth_key_len: Length of authentication key, in bytes
 * @cipher_key_len: Length of cipher key, in bytes
 * @gen_iv: If true, hw generates IV and returns in response
 * @hash_iv: IV participates in hash. Used for IPSEC and TLS.
 * @return_iv: Return IV in output packet before payload
 * @ret_iv_len: Length of IV returned from SPU, in bytes
 * @ret_iv_offset: Offset into full IV of start of returned IV
 * @cipher_iv_len: Length of input cipher IV, in bytes
 * @digest_size: Length of digest (aka, hash tag or ICV), in bytes
 * @return_payload: Return payload in SPU response
 * @return_md: return metadata in SPU response
 *
 * Packet can have AAD2 w/o AAD1. For algorithms currently supported,
 * associated data goes in AAD2.
 */
static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound,
				 u64 assoc_size,
				 u64 auth_key_len, u64 cipher_key_len,
				 bool gen_iv, bool hash_iv, bool return_iv,
				 u64 ret_iv_len, u64 ret_iv_offset,
				 u64 cipher_iv_len, u64 digest_size,
				 bool return_payload, bool return_md)
{
	u64 ctrl1 = 0;

	if (is_inbound && digest_size)
		ctrl1 |= SPU2_TAG_LOC;

	if (assoc_size) {
		ctrl1 |= SPU2_HAS_AAD2;
		ctrl1 |= SPU2_RETURN_AAD2; /* need aad2 for gcm aes esp */
	}

	if (auth_key_len)
		ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) &
			  SPU2_HASH_KEY_LEN);

	if (cipher_key_len)
		ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) &
			  SPU2_CIPH_KEY_LEN);

	if (gen_iv)
		ctrl1 |= SPU2_GENIV;

	if (hash_iv)
		ctrl1 |= SPU2_HASH_IV;

	if (return_iv) {
		ctrl1 |= SPU2_RET_IV;
		ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT;
		ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT;
	}

	ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN);

	if (digest_size)
		ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) &
			  SPU2_HASH_TAG_LEN);

	/* Let's ask for the output pkt to include FMD, but don't need to
	 * get keys and IVs back in OMD.
	 */
	if (return_md)
		ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT);
	else
		ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT);

	/* Crypto API does not get assoc data back. So no need for AAD2. */

	if (return_payload)
		ctrl1 |= SPU2_RETURN_PAY;

	fmd->ctrl1 = cpu_to_le64(ctrl1);
}

/**
 * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of
 * SPU2 header.
 * @fmd: Start of FMD field to be written
 * @cipher_offset: Number of bytes from Start of Packet (end of FD field) where
 * data to be encrypted or decrypted begins
 * @auth_key_len: Length of authentication key, in bytes
 * @auth_iv_len: Length of authentication initialization vector, in bytes
 * @cipher_key_len: Length of cipher key, in bytes
 * @cipher_iv_len: Length of cipher IV, in bytes
 */
static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset,
				 u64 auth_key_len, u64 auth_iv_len,
				 u64 cipher_key_len, u64 cipher_iv_len)
{
	u64 ctrl2;
	u64 aad1_offset;
	u64 aad2_offset;
	u16 aad1_len = 0;
	u64 payload_offset;

	/* AAD1 offset is from start of FD. FD length always 0. */
	aad1_offset = 0;

	aad2_offset = aad1_offset;
	payload_offset = cipher_offset;
	ctrl2 = aad1_offset |
	    (aad1_len << SPU2_AAD1_LEN_SHIFT) |
	    (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) |
	    (payload_offset << SPU2_PL_OFFSET_SHIFT);

	fmd->ctrl2 = cpu_to_le64(ctrl2);
}

/**
 * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD
 * @fmd: Fixed meta data. First field in SPU2 msg header.
 * @payload_len: Length of payload, in bytes
 */
static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len)
{
	u64 ctrl3;

	ctrl3 = payload_len & SPU2_PL_LEN;

	fmd->ctrl3 = cpu_to_le64(ctrl3);
}

/**
 * spu2_ctx_max_payload() - Determine the maximum length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of
 * FMD and just keeps computing until it receives a DMA descriptor with the EOF
 * flag set. So we consider the max payload to be infinite. AES CCM is an
 * exception.
 *
 * Return: Max payload length in bytes
 */
u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			 enum spu_cipher_mode cipher_mode,
			 unsigned int blocksize)
{
	if ((cipher_alg == CIPHER_ALG_AES) &&
	    (cipher_mode == CIPHER_MODE_CCM)) {
		u32 excess = SPU2_MAX_PAYLOAD % blocksize;

		return SPU2_MAX_PAYLOAD - excess;
	} else {
		return SPU_MAX_PAYLOAD_INF;
	}
}
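
/*
 * Illustrative example (not part of the original driver source): with a
 * 16-byte AES block size, spu2_ctx_max_payload(CIPHER_ALG_AES,
 * CIPHER_MODE_CCM, 16) returns SPU2_MAX_PAYLOAD rounded down to a multiple
 * of 16, while any non-CCM combination returns SPU_MAX_PAYLOAD_INF.
 */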

/**
 * spu2_payload_length() - Given a SPU2 message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU message header (FMD)
 *
 * Return: payload length, in bytes
 */
u32 spu2_payload_length(u8 *spu_hdr)
{
	struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr;
	u32 pl_len;
	u64 ctrl3;

	ctrl3 = le64_to_cpu(fmd->ctrl3);
	pl_len = ctrl3 & SPU2_PL_LEN;

	return pl_len;
}

/**
 * spu2_response_hdr_len() - Determine the expected length of a SPU response
 * header.
 * @auth_key_len: Length of authentication key, in bytes
 * @enc_key_len: Length of encryption key, in bytes
 * @is_hash: Unused
 *
 * For SPU2, includes just FMD. OMD is never requested.
 *
 * Return: Length of FMD, in bytes
 */
u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
	return FMD_SIZE;
}

/**
 * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a hash block, in bytes
 *
 * SPU2 hardware does all hash padding
 *
 * Return: length of hash pad in bytes
 */
u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
		      u32 chunksize, u16 hash_block_size)
{
	return 0;
}

/**
 * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
 * the AAD field or the data.
 * @cipher_mode: Unused
 * @data_size: Unused
 *
 * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
 */
u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
			 unsigned int data_size)
{
	return 0;
}

/**
 * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
 * associated data in a SPU2 output packet.
 * @cipher_mode: cipher mode
 * @assoc_len: length of additional associated data, in bytes
 * @iv_len: length of initialization vector, in bytes
 * @is_encrypt: true if encrypting. false if decrypt.
 *
 * Return: Length of buffer to catch associated data in response
 */
u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
			unsigned int assoc_len, unsigned int iv_len,
			bool is_encrypt)
{
	u32 resp_len = assoc_len;

	if (is_encrypt)
		/* gcm aes esp has to write 8-byte IV in response */
		resp_len += iv_len;
	return resp_len;
}

/**
 * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * For SPU2, AEAD IV is included in OMD and does not need to be repeated
 * prior to the payload.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
	return 0;
}

/**
 * spu2_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 * been sent to the SPU to be hashed.
 *
 * SPU2 always does a FULL hash operation
 */
enum hash_type spu2_hash_type(u32 src_sent)
{
	return HASH_TYPE_FULL;
}

/**
 * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 */
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
		     enum hash_type htype)
{
	return alg_digest_size;
}

/**
 * spu2_create_request() - Build a SPU2 request message header, including FMD and
 * OMD.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 * not include length of AAD.
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spu2_create_request(u8 *spu_hdr,
			struct spu_request_opts *req_opts,
			struct spu_cipher_parms *cipher_parms,
			struct spu_hash_parms *hash_parms,
			struct spu_aead_parms *aead_parms,
			unsigned int data_size)
{
	struct SPU2_FMD *fmd;
	u8 *ptr;
	unsigned int buf_len;
	int err;
	enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
	enum spu2_cipher_mode spu2_ciph_mode;
	enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE;
	enum spu2_hash_mode spu2_auth_mode;
	bool return_md = true;
	enum spu2_proto_sel proto = SPU2_PROTO_RESV;

	/* size of the payload */
	unsigned int payload_len =
	    hash_parms->prebuf_len + data_size + hash_parms->pad_len -
	    ((req_opts->is_aead && req_opts->is_inbound) ?
	     hash_parms->digestsize : 0);

	/* offset of prebuf or data from start of AAD2 */
	unsigned int cipher_offset = aead_parms->assoc_size +
	    aead_parms->aad_pad_len + aead_parms->iv_len;

	/* total size of the data following OMD (without STAT word padding) */
	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
						     aead_parms->iv_len,
						     hash_parms->prebuf_len,
						     data_size,
						     aead_parms->aad_pad_len,
						     aead_parms->data_pad_len,
						     hash_parms->pad_len);
	unsigned int assoc_size = aead_parms->assoc_size;

	if (req_opts->is_aead &&
	    (cipher_parms->alg == CIPHER_ALG_AES) &&
	    (cipher_parms->mode == CIPHER_MODE_GCM))
		/*
		 * On SPU 2, aes gcm cipher first on encrypt, auth first on
		 * decrypt
		 */
		req_opts->auth_first = req_opts->is_inbound;

	/* and do opposite for ccm (auth 1st on encrypt) */
	if (req_opts->is_aead &&
	    (cipher_parms->alg == CIPHER_ALG_AES) &&
	    (cipher_parms->mode == CIPHER_MODE_CCM))
		req_opts->auth_first = !req_opts->is_inbound;

	flow_log("%s()\n", __func__);
	flow_log(" in:%u authFirst:%u\n",
		 req_opts->is_inbound, req_opts->auth_first);
	flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
		 cipher_parms->mode, cipher_parms->type);
	flow_log(" is_esp: %s\n", str_yes_no(req_opts->is_esp));
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
	flow_log(" iv: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" auth alg:%u mode:%u type %u\n",
		 hash_parms->alg, hash_parms->mode, hash_parms->type);
	flow_log(" digestsize: %u\n", hash_parms->digestsize);
	flow_log(" authkey: %d\n", hash_parms->key_len);
	flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
	flow_log(" assoc_size:%u\n", assoc_size);
	flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
	flow_log(" data_size:%u\n", data_size);
	flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
	flow_log(" real_db_size:%u\n", real_db_size);
	flow_log(" cipher_offset:%u payload_len:%u\n",
		 cipher_offset, payload_len);
	flow_log(" aead_iv: %u\n", aead_parms->iv_len);

	/* Convert to spu2 values for cipher alg, hash alg */
	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
				cipher_parms->type,
				&spu2_ciph_type, &spu2_ciph_mode);

	/* If we are doing GCM hashing only - either via rfc4543 transform
	 * or because we happen to do GCM with AAD only and no payload - we
	 * need to configure hardware to use hash key rather than cipher key
	 * and put data into payload. This is because unlike SPU-M, running
	 * GCM cipher with 0 size payload is not permitted.
	 */
	if ((req_opts->is_rfc4543) ||
	    ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) &&
	     (payload_len == 0))) {
		/* Use hashing (only) and set up hash key */
		spu2_ciph_type = SPU2_CIPHER_TYPE_NONE;
		hash_parms->key_len = cipher_parms->key_len;
		memcpy(hash_parms->key_buf, cipher_parms->key_buf,
		       cipher_parms->key_len);
		cipher_parms->key_len = 0;

		if (req_opts->is_rfc4543)
			payload_len += assoc_size;
		else
			payload_len = assoc_size;
		cipher_offset = 0;
		assoc_size = 0;
	}

	if (err)
		return 0;

	flow_log("spu2 cipher type %s, cipher mode %s\n",
		 spu2_ciph_type_name(spu2_ciph_type),
		 spu2_ciph_mode_name(spu2_ciph_mode));

	err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode,
			      hash_parms->type,
			      cipher_parms->type,
			      &spu2_auth_type, &spu2_auth_mode);
	if (err)
		return 0;

	flow_log("spu2 hash type %s, hash mode %s\n",
		 spu2_hash_type_name(spu2_auth_type),
		 spu2_hash_mode_name(spu2_auth_mode));

	fmd = (struct SPU2_FMD *)spu_hdr;

	spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first,
			     proto, spu2_ciph_type, spu2_ciph_mode,
			     spu2_auth_type, spu2_auth_mode);

	spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size,
			     hash_parms->key_len, cipher_parms->key_len,
			     false, false,
			     aead_parms->return_iv, aead_parms->ret_iv_len,
			     aead_parms->ret_iv_off,
			     cipher_parms->iv_len, hash_parms->digestsize,
			     !req_opts->bd_suppress, return_md);

	spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0,
			     cipher_parms->key_len, cipher_parms->iv_len);

	spu2_fmd_ctrl3_write(fmd, payload_len);

	ptr = (u8 *)(fmd + 1);
	buf_len = sizeof(struct SPU2_FMD);

	/* Write OMD */
	if (hash_parms->key_len) {
		memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
		ptr += hash_parms->key_len;
		buf_len += hash_parms->key_len;
	}
	if (cipher_parms->key_len) {
		memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len);
		ptr += cipher_parms->key_len;
		buf_len += cipher_parms->key_len;
	}
	if (cipher_parms->iv_len) {
		memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);
		ptr += cipher_parms->iv_len;
		buf_len += cipher_parms->iv_len;
	}

	packet_dump(" SPU request header: ", spu_hdr, buf_len);

	return buf_len;
}
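
/*
 * Illustrative note (not part of the original driver source): on success the
 * buffer at spu_hdr holds the FMD (the four 8-byte ctrl words, FMD_SIZE bytes)
 * immediately followed by the OMD in the order authentication key, cipher key,
 * cipher IV; the returned buf_len is FMD_SIZE plus the lengths of whichever of
 * those three fields were present.
 */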

/**
 * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
 * including FMD and OMD.
 * @spu_hdr: Location of start of SPU request (FMD field)
 * @cipher_parms: Parameters describing cipher request
 *
 * Called at setkey time to initialize a msg header that can be reused for all
 * subsequent skcipher requests. Construct the message starting at spu_hdr.
 * Caller should allocate this buffer in DMA-able memory at least
 * SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an
 * error occurs.
 */
u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
	struct SPU2_FMD *fmd;
	u8 *omd;
	enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE;
	enum spu2_cipher_mode spu2_mode;
	int err;

	flow_log("%s()\n", __func__);
	flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
		 cipher_parms->mode, cipher_parms->type);
	flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);

	/* Convert to spu2 values */
	err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode,
				cipher_parms->type, &spu2_type, &spu2_mode);
	if (err)
		return 0;

	flow_log("spu2 cipher type %s, cipher mode %s\n",
		 spu2_ciph_type_name(spu2_type),
		 spu2_ciph_mode_name(spu2_mode));

	/* Construct the FMD header */
	fmd = (struct SPU2_FMD *)spu_hdr;
	err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len,
			    cipher_parms->iv_len);
	if (err)
		return 0;

	/* Write cipher key to OMD */
	omd = (u8 *)(fmd + 1);
	if (cipher_parms->key_buf && cipher_parms->key_len)
		memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len);

	packet_dump(" SPU request header: ", spu_hdr,
		    FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len);

	return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len;
}

/**
 * spu2_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request.
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @data_size: Length of the data in the BD field
 *
 * Assumes much of the header was already filled in at setkey() time in
 * spu_cipher_req_init().
 * spu_cipher_req_init() fills in the encryption key.
 */
void spu2_cipher_req_finish(u8 *spu_hdr,
			    u16 spu_req_hdr_len,
			    unsigned int is_inbound,
			    struct spu_cipher_parms *cipher_parms,
			    unsigned int data_size)
{
	struct SPU2_FMD *fmd;
	u8 *omd;	/* start of optional metadata */
	u64 ctrl0;
	u64 ctrl3;

	flow_log("%s()\n", __func__);
	flow_log(" in: %u\n", is_inbound);
	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
		 cipher_parms->type);
	flow_log(" iv len: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" data_size: %u\n", data_size);

	fmd = (struct SPU2_FMD *)spu_hdr;
	omd = (u8 *)(fmd + 1);

	/*
	 * FMD ctrl0 was initialized at setkey time. update it to indicate
	 * whether we are encrypting or decrypting.
	 */
	ctrl0 = le64_to_cpu(fmd->ctrl0);
	if (is_inbound)
		ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN;	/* decrypt */
	else
		ctrl0 |= SPU2_CIPH_ENCRYPT_EN;	/* encrypt */
	fmd->ctrl0 = cpu_to_le64(ctrl0);

	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) {
		/* cipher iv provided so put it in here */
		memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf,
		       cipher_parms->iv_len);
	}

	ctrl3 = le64_to_cpu(fmd->ctrl3);
	data_size &= SPU2_PL_LEN;
	ctrl3 |= data_size;
	fmd->ctrl3 = cpu_to_le64(ctrl3);

	packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}

/**
 * spu2_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_padding: Length of GCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding to extend data to full block
 * @auth_alg: Authentication algorithm
 * @auth_mode: Authentication mode
 * @total_sent: Length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 * 1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment
 * 2. hash pad - pad to a block length, with 0x80 data terminator and
 *    size at the end
 * 3. STAT pad - to ensure the STAT field is 4-byte aligned
 */
void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len,
		      enum hash_alg auth_alg, enum hash_mode auth_mode,
		      unsigned int total_sent, u32 status_padding)
{
	u8 *ptr = pad_start;

	/* fix data alignment for GCM */
	if (gcm_padding > 0) {
		flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
			 gcm_padding);
		memset(ptr, 0, gcm_padding);
		ptr += gcm_padding;
	}

	if (hash_pad_len > 0) {
		/* clear the padding section */
		memset(ptr, 0, hash_pad_len);

		/* terminate the data */
		*ptr = 0x80;
		ptr += (hash_pad_len - sizeof(u64));

		/* add the size at the end as required per alg */
		if (auth_alg == HASH_ALG_MD5)
			*(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
		else	/* SHA1, SHA2-224, SHA2-256 */
			*(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
		ptr += sizeof(u64);
	}

	/* pad to a 4byte alignment for STAT */
	if (status_padding > 0) {
		flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
			 status_padding);

		memset(ptr, 0, status_padding);
		ptr += status_padding;
	}
}

/**
 * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS
 * tweak field in the packet payload (it uses IV instead)
 *
 * Return: 0
 */
u8 spu2_xts_tweak_in_payload(void)
{
	return 0;
}

/**
 * spu2_tx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spu2_tx_status_len(void)
{
	return SPU2_TX_STATUS_LEN;
}

/**
 * spu2_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spu2_rx_status_len(void)
{
	return SPU2_RX_STATUS_LEN;
}

/**
 * spu2_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return: 0 - if status is good and response should be processed
 *         !0 - status indicates an error and response is invalid
 */
int spu2_status_process(u8 *statp)
{
	/* SPU2 status is 2 bytes by default - SPU_RX_STATUS_LEN */
	u16 status = le16_to_cpu(*(__le16 *)statp);

	if (status == 0)
		return 0;

	flow_log("rx status is %#x\n", status);
	if (status == SPU2_INVALID_ICV)
		return SPU_INVALID_ICV;

	return -EBADMSG;
}

/**
 * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 *
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 *
 */
void spu2_ccm_update_iv(unsigned int digestsize,
			struct spu_cipher_parms *cipher_parms,
			unsigned int assoclen, unsigned int chunksize,
			bool is_encrypt, bool is_esp)
{
	int L;	/* size of length field, in bytes */

	/*
	 * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from
	 * testmgr contains (L-1) in bottom 3 bits of first byte,
	 * per RFC 3610.
	 */
	if (is_esp)
		L = CCM_ESP_L_VALUE;
	else
		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
		     CCM_B0_L_PRIME_SHIFT) + 1;

	/* SPU2 doesn't want these length bytes nor the first byte... */
	cipher_parms->iv_len -= (1 + L);
	memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1],
		cipher_parms->iv_len);
}
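
/*
 * Illustrative example (not part of the original driver source): for a
 * non-ESP CCM request whose 16-byte B0-style IV starts with a flags byte
 * encoding L' = 3 (so L = 4), spu2_ccm_update_iv() drops the flags byte and
 * the trailing 4 length bytes, leaving an 11-byte value moved to the start
 * of iv_buf with iv_len updated to 11.
 */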

/**
 * spu2_wordalign_padlen() - SPU2 does not require padding.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes (always 0 on SPU2)
 */
u32 spu2_wordalign_padlen(u32 data_size)
{
	return 0;
}