GitHub Repository: torvalds/linux
Path: blob/master/drivers/crypto/bcm/cipher.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright 2016 Broadcom
4
*/
5
6
#include <linux/err.h>
7
#include <linux/module.h>
8
#include <linux/init.h>
9
#include <linux/errno.h>
10
#include <linux/kernel.h>
11
#include <linux/interrupt.h>
12
#include <linux/platform_device.h>
13
#include <linux/scatterlist.h>
14
#include <linux/crypto.h>
15
#include <linux/kthread.h>
16
#include <linux/rtnetlink.h>
17
#include <linux/sched.h>
18
#include <linux/string_choices.h>
19
#include <linux/of.h>
20
#include <linux/io.h>
21
#include <linux/bitops.h>
22
23
#include <crypto/algapi.h>
24
#include <crypto/aead.h>
25
#include <crypto/internal/aead.h>
26
#include <crypto/aes.h>
27
#include <crypto/internal/des.h>
28
#include <crypto/hmac.h>
29
#include <crypto/md5.h>
30
#include <crypto/authenc.h>
31
#include <crypto/skcipher.h>
32
#include <crypto/hash.h>
33
#include <crypto/sha1.h>
34
#include <crypto/sha2.h>
35
#include <crypto/sha3.h>
36
37
#include "util.h"
38
#include "cipher.h"
39
#include "spu.h"
40
#include "spum.h"
41
#include "spu2.h"
42
43
/* ================= Device Structure ================== */
44
45
struct bcm_device_private iproc_priv;
46
47
/* ==================== Parameters ===================== */
48
49
int flow_debug_logging;
50
module_param(flow_debug_logging, int, 0644);
51
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
52
53
int packet_debug_logging;
54
module_param(packet_debug_logging, int, 0644);
55
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
56
57
int debug_logging_sleep;
58
module_param(debug_logging_sleep, int, 0644);
59
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
60
61
/*
62
* The value of these module parameters is used to set the priority for each
63
* algo type when this driver registers algos with the kernel crypto API.
64
* To use a priority other than the default, set the priority on the insmod or
65
* modprobe command line. Changing the module priority after init time has no effect.
66
*
67
* The default priorities are chosen to be lower (less preferred) than ARMv8 CE
68
* algos, but more preferred than generic software algos.
69
*/
70
static int cipher_pri = 150;
71
module_param(cipher_pri, int, 0644);
72
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
73
74
static int hash_pri = 100;
75
module_param(hash_pri, int, 0644);
76
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
77
78
static int aead_pri = 150;
79
module_param(aead_pri, int, 0644);
80
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
81
82
/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
83
* Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
84
* 0x60 - ring 0
85
* 0x68 - ring 1
86
* 0x70 - ring 2
87
* 0x78 - ring 3
88
*/
89
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
90
/*
91
* Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
92
* is set dynamically after reading SPU type from device tree.
93
*/
94
#define BCM_HDR_LEN iproc_priv.bcm_hdr_len
95
96
/* min and max time to sleep before retrying when mbox queue is full. usec */
97
#define MBOX_SLEEP_MIN 800
98
#define MBOX_SLEEP_MAX 1000
99
100
/**
101
* select_channel() - Select a SPU channel to handle a crypto request. Selects
102
* channel in round robin order.
103
*
104
* Return: channel index
105
*/
106
static u8 select_channel(void)
107
{
108
u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
109
110
return chan_idx % iproc_priv.spu.num_chan;
111
}
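/*
 * Editorial sketch (not part of the driver): select_channel() is a plain
 * round-robin pick -- bump a shared counter and reduce it modulo the number
 * of mailbox channels. Parameterized, with a hypothetical example_ name, the
 * same logic looks like:
 */
static u8 __maybe_unused example_select_channel(atomic_t *next_chan, u32 num_chan)
{
	/* one atomic increment per request; the modulo spreads requests */
	u8 idx = (u8)atomic_inc_return(next_chan);

	return idx % num_chan;
}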
112
113
/**
114
* spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
115
* receive a SPU response message for an skcipher request. Includes buffers to
116
* catch SPU message headers and the response data.
117
* @mssg: mailbox message containing the receive sg
118
* @rctx: crypto request context
119
* @rx_frag_num: number of scatterlist elements required to hold the
120
* SPU response message
121
* @chunksize: Number of bytes of response data expected
122
* @stat_pad_len: Number of bytes required to pad the STAT field to
123
* a 4-byte boundary
124
*
125
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
126
* when the request completes, whether the request is handled successfully or
127
* there is an error.
128
*
129
* Return:
130
* 0 if successful
131
* < 0 if an error
132
*/
133
static int
134
spu_skcipher_rx_sg_create(struct brcm_message *mssg,
135
struct iproc_reqctx_s *rctx,
136
u8 rx_frag_num,
137
unsigned int chunksize, u32 stat_pad_len)
138
{
139
struct spu_hw *spu = &iproc_priv.spu;
140
struct scatterlist *sg; /* used to build sgs in mbox message */
141
struct iproc_ctx_s *ctx = rctx->ctx;
142
u32 datalen; /* Number of bytes of response data expected */
143
144
mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
145
rctx->gfp);
146
if (!mssg->spu.dst)
147
return -ENOMEM;
148
149
sg = mssg->spu.dst;
150
sg_init_table(sg, rx_frag_num);
151
/* Space for SPU message header */
152
sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
153
154
/* If XTS tweak in payload, add buffer to receive encrypted tweak */
155
if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
156
spu->spu_xts_tweak_in_payload())
157
sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
158
SPU_XTS_TWEAK_SIZE);
159
160
/* Copy in each dst sg entry from request, up to chunksize */
161
datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
162
rctx->dst_nents, chunksize);
163
if (datalen < chunksize) {
164
pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
165
__func__, chunksize, datalen);
166
return -EFAULT;
167
}
168
169
if (stat_pad_len)
170
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
171
172
memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
173
sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
174
175
return 0;
176
}
177
178
/**
179
* spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
180
* send a SPU request message for an skcipher request. Includes SPU message
181
* headers and the request data.
182
* @mssg: mailbox message containing the transmit sg
183
* @rctx: crypto request context
184
* @tx_frag_num: number of scatterlist elements required to construct the
185
* SPU request message
186
* @chunksize: Number of bytes of request data
187
* @pad_len: Number of pad bytes
188
*
189
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
190
* when the request completes, whether the request is handled successfully or
191
* there is an error.
192
*
193
* Return:
194
* 0 if successful
195
* < 0 if an error
196
*/
197
static int
198
spu_skcipher_tx_sg_create(struct brcm_message *mssg,
199
struct iproc_reqctx_s *rctx,
200
u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
201
{
202
struct spu_hw *spu = &iproc_priv.spu;
203
struct scatterlist *sg; /* used to build sgs in mbox message */
204
struct iproc_ctx_s *ctx = rctx->ctx;
205
u32 datalen; /* Number of bytes of response data expected */
206
u32 stat_len;
207
208
mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
209
rctx->gfp);
210
if (unlikely(!mssg->spu.src))
211
return -ENOMEM;
212
213
sg = mssg->spu.src;
214
sg_init_table(sg, tx_frag_num);
215
216
sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
217
BCM_HDR_LEN + ctx->spu_req_hdr_len);
218
219
/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
220
if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
221
spu->spu_xts_tweak_in_payload())
222
sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
223
224
/* Copy in each src sg entry from request, up to chunksize */
225
datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
226
rctx->src_nents, chunksize);
227
if (unlikely(datalen < chunksize)) {
228
pr_err("%s(): failed to copy src sg to mbox msg",
229
__func__);
230
return -EFAULT;
231
}
232
233
if (pad_len)
234
sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
235
236
stat_len = spu->spu_tx_status_len();
237
if (stat_len) {
238
memset(rctx->msg_buf.tx_stat, 0, stat_len);
239
sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
240
}
241
return 0;
242
}
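/*
 * Editorial sketch (not part of the driver): both sg-creation helpers above
 * follow the same shape -- size the array to the number of message fragments,
 * kmalloc_array() it, sg_init_table() it, then sg_set_buf() each fragment in
 * the order the SPU expects. A minimal three-fragment version with
 * hypothetical header/payload/status buffers:
 */
static int __maybe_unused example_build_spu_sgl(struct scatterlist **sgl,
						void *hdr, unsigned int hdr_len,
						void *data, unsigned int data_len,
						void *stat, unsigned int stat_len,
						gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(3, sizeof(*sg), gfp);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], hdr, hdr_len);	/* SPU message header */
	sg_set_buf(&sg[1], data, data_len);	/* payload */
	sg_set_buf(&sg[2], stat, stat_len);	/* STATUS word */
	*sgl = sg;
	return 0;
}
/*
 * The real helpers add optional fragments (XTS tweak, pad bytes) and splice
 * in the caller's src/dst entries with spu_msg_sg_add().
 */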
243
244
static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
245
u8 chan_idx)
246
{
247
int err;
248
int retry_cnt = 0;
249
struct device *dev = &(iproc_priv.pdev->dev);
250
251
err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
252
if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
253
while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
254
/*
255
* Mailbox queue is full. Since MAY_SLEEP is set, assume
256
* not in atomic context and we can wait and try again.
257
*/
258
retry_cnt++;
259
usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
260
err = mbox_send_message(iproc_priv.mbox[chan_idx],
261
mssg);
262
atomic_inc(&iproc_priv.mb_no_spc);
263
}
264
}
265
if (err < 0) {
266
atomic_inc(&iproc_priv.mb_send_fail);
267
return err;
268
}
269
270
/* Check error returned by mailbox controller */
271
err = mssg->error;
272
if (unlikely(err < 0)) {
273
dev_err(dev, "message error %d", err);
274
275
}
276
277
/* Signal txdone for mailbox channel */
278
mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
279
return err;
280
}
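/*
 * Editorial sketch (not part of the driver): the retry logic above is the
 * generic "bounded retry with a short sleep while the queue is full" pattern,
 * only used when the caller allows sleeping. Condensed, with a hypothetical
 * submit() callback standing in for mbox_send_message():
 */
static int __maybe_unused example_submit_with_retry(int (*submit)(void *msg),
						    void *msg, bool may_sleep)
{
	int retry_cnt = 0;
	int err = submit(msg);

	while (may_sleep && err == -ENOBUFS && retry_cnt < SPU_MB_RETRY_MAX) {
		/* queue full: wait briefly, then try again */
		retry_cnt++;
		usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
		err = submit(msg);
	}
	return err;
}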
281
282
/**
283
* handle_skcipher_req() - Submit as much of a block cipher request as fits in
284
* a single SPU request message, starting at the current position in the request
285
* data.
286
* @rctx: Crypto request context
287
*
288
* This may be called on the crypto API thread, or, when a request is so large
289
* it must be broken into multiple SPU messages, on the thread used to invoke
290
* the response callback. When requests are broken into multiple SPU
291
* messages, we assume subsequent messages depend on previous results, and
292
* thus always wait for previous results before submitting the next message.
293
* Because requests are submitted in lock step like this, there is no need
294
* to synchronize access to request data structures.
295
*
296
* Return: -EINPROGRESS: request has been accepted and result will be returned
297
* asynchronously
298
* Any other value indicates an error
299
*/
300
static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
301
{
302
struct spu_hw *spu = &iproc_priv.spu;
303
struct crypto_async_request *areq = rctx->parent;
304
struct skcipher_request *req =
305
container_of(areq, struct skcipher_request, base);
306
struct iproc_ctx_s *ctx = rctx->ctx;
307
struct spu_cipher_parms cipher_parms;
308
int err;
309
unsigned int chunksize; /* Num bytes of request to submit */
310
int remaining; /* Bytes of request still to process */
311
int chunk_start; /* Beginning of data for current SPU msg */
312
313
/* IV or ctr value to use in this SPU msg */
314
u8 local_iv_ctr[MAX_IV_SIZE];
315
u32 stat_pad_len; /* num bytes to align status field */
316
u32 pad_len; /* total length of all padding */
317
struct brcm_message *mssg; /* mailbox message */
318
319
/* number of entries in src and dst sg in mailbox message. */
320
u8 rx_frag_num = 2; /* response header and STATUS */
321
u8 tx_frag_num = 1; /* request header */
322
323
flow_log("%s\n", __func__);
324
325
cipher_parms.alg = ctx->cipher.alg;
326
cipher_parms.mode = ctx->cipher.mode;
327
cipher_parms.type = ctx->cipher_type;
328
cipher_parms.key_len = ctx->enckeylen;
329
cipher_parms.key_buf = ctx->enckey;
330
cipher_parms.iv_buf = local_iv_ctr;
331
cipher_parms.iv_len = rctx->iv_ctr_len;
332
333
mssg = &rctx->mb_mssg;
334
chunk_start = rctx->src_sent;
335
remaining = rctx->total_todo - chunk_start;
336
337
/* determine the chunk we are breaking off and update the indexes */
338
if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
339
(remaining > ctx->max_payload))
340
chunksize = ctx->max_payload;
341
else
342
chunksize = remaining;
343
344
rctx->src_sent += chunksize;
345
rctx->total_sent = rctx->src_sent;
346
347
/* Count number of sg entries to be included in this request */
348
rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
349
rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
350
351
if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
352
rctx->is_encrypt && chunk_start)
353
/*
354
* Encrypting a non-first chunk. Copy the last block of the
355
* previous result to IV for this chunk.
356
*/
357
sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
358
rctx->iv_ctr_len,
359
chunk_start - rctx->iv_ctr_len);
360
361
if (rctx->iv_ctr_len) {
362
/* get our local copy of the iv */
363
__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
364
rctx->iv_ctr_len);
365
366
/* generate the next IV if possible */
367
if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
368
!rctx->is_encrypt) {
369
/*
370
* CBC Decrypt: next IV is the last ciphertext block in
371
* this chunk
372
*/
373
sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
374
rctx->iv_ctr_len,
375
rctx->src_sent - rctx->iv_ctr_len);
376
} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
377
/*
378
* The SPU hardware increments the counter once for
379
* each AES block of 16 bytes. So update the counter
380
* for the next chunk, if there is one. Note that for
381
* this chunk, the counter has already been copied to
382
* local_iv_ctr. We can assume a block size of 16,
383
* because we only support CTR mode for AES, not for
384
* any other cipher alg.
385
*/
386
add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
387
}
388
}
389
390
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
391
flow_log("max_payload infinite\n");
392
else
393
flow_log("max_payload %u\n", ctx->max_payload);
394
395
flow_log("sent:%u start:%u remains:%u size:%u\n",
396
rctx->src_sent, chunk_start, remaining, chunksize);
397
398
/* Copy SPU header template created at setkey time */
399
memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
400
sizeof(rctx->msg_buf.bcm_spu_req_hdr));
401
402
spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
403
ctx->spu_req_hdr_len, !(rctx->is_encrypt),
404
&cipher_parms, chunksize);
405
406
atomic64_add(chunksize, &iproc_priv.bytes_out);
407
408
stat_pad_len = spu->spu_wordalign_padlen(chunksize);
409
if (stat_pad_len)
410
rx_frag_num++;
411
pad_len = stat_pad_len;
412
if (pad_len) {
413
tx_frag_num++;
414
spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
415
0, ctx->auth.alg, ctx->auth.mode,
416
rctx->total_sent, stat_pad_len);
417
}
418
419
spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
420
ctx->spu_req_hdr_len);
421
packet_log("payload:\n");
422
dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
423
packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
424
425
/*
426
* Build mailbox message containing SPU request msg and rx buffers
427
* to catch response message
428
*/
429
memset(mssg, 0, sizeof(*mssg));
430
mssg->type = BRCM_MESSAGE_SPU;
431
mssg->ctx = rctx; /* Will be returned in response */
432
433
/* Create rx scatterlist to catch result */
434
rx_frag_num += rctx->dst_nents;
435
436
if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
437
spu->spu_xts_tweak_in_payload())
438
rx_frag_num++; /* extra sg to insert tweak */
439
440
err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
441
stat_pad_len);
442
if (err)
443
return err;
444
445
/* Create tx scatterlist containing SPU request message */
446
tx_frag_num += rctx->src_nents;
447
if (spu->spu_tx_status_len())
448
tx_frag_num++;
449
450
if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
451
spu->spu_xts_tweak_in_payload())
452
tx_frag_num++; /* extra sg to insert tweak */
453
454
err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
455
pad_len);
456
if (err)
457
return err;
458
459
err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
460
if (unlikely(err < 0))
461
return err;
462
463
return -EINPROGRESS;
464
}
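/*
 * Editorial sketch (not part of the driver): for CTR mode the code above
 * advances the counter by chunksize >> 4, i.e. one increment per 16-byte AES
 * block, through the driver's add_to_ctr() helper. Assuming that helper adds
 * to the IV as a big-endian 128-bit integer, the operation is roughly:
 */
static void __maybe_unused example_ctr_add(u8 ctr[16], u32 nblocks)
{
	u64 carry = nblocks;
	int i;

	/* big-endian add with carry, starting from the last byte */
	for (i = 15; i >= 0 && carry; i--) {
		carry += ctr[i];
		ctr[i] = carry & 0xff;
		carry >>= 8;
	}
}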
465
466
/**
467
* handle_skcipher_resp() - Process a block cipher SPU response. Updates the
468
* total received count for the request and updates global stats.
469
* @rctx: Crypto request context
470
*/
471
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
472
{
473
struct spu_hw *spu = &iproc_priv.spu;
474
struct crypto_async_request *areq = rctx->parent;
475
struct skcipher_request *req = skcipher_request_cast(areq);
476
struct iproc_ctx_s *ctx = rctx->ctx;
477
u32 payload_len;
478
479
/* See how much data was returned */
480
payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
481
482
/*
483
* In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
484
* encrypted tweak ("i") value; we don't count those.
485
*/
486
if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
487
spu->spu_xts_tweak_in_payload() &&
488
(payload_len >= SPU_XTS_TWEAK_SIZE))
489
payload_len -= SPU_XTS_TWEAK_SIZE;
490
491
atomic64_add(payload_len, &iproc_priv.bytes_in);
492
493
flow_log("%s() offset: %u, bd_len: %u BD:\n",
494
__func__, rctx->total_received, payload_len);
495
496
dump_sg(req->dst, rctx->total_received, payload_len);
497
498
rctx->total_received += payload_len;
499
if (rctx->total_received == rctx->total_todo) {
500
atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
501
atomic_inc(
502
&iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
503
}
504
}
505
506
/**
507
* spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
508
* receive a SPU response message for an ahash request.
509
* @mssg: mailbox message containing the receive sg
510
* @rctx: crypto request context
511
* @rx_frag_num: number of scatterlist elements required to hold the
512
* SPU response message
513
* @digestsize: length of hash digest, in bytes
514
* @stat_pad_len: Number of bytes required to pad the STAT field to
515
* a 4-byte boundary
516
*
517
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
518
* when the request completes, whether the request is handled successfully or
519
* there is an error.
520
*
521
* Return:
522
* 0 if successful
523
* < 0 if an error
524
*/
525
static int
526
spu_ahash_rx_sg_create(struct brcm_message *mssg,
527
struct iproc_reqctx_s *rctx,
528
u8 rx_frag_num, unsigned int digestsize,
529
u32 stat_pad_len)
530
{
531
struct spu_hw *spu = &iproc_priv.spu;
532
struct scatterlist *sg; /* used to build sgs in mbox message */
533
struct iproc_ctx_s *ctx = rctx->ctx;
534
535
mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
536
rctx->gfp);
537
if (!mssg->spu.dst)
538
return -ENOMEM;
539
540
sg = mssg->spu.dst;
541
sg_init_table(sg, rx_frag_num);
542
/* Space for SPU message header */
543
sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
544
545
/* Space for digest */
546
sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
547
548
if (stat_pad_len)
549
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
550
551
memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
552
sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
553
return 0;
554
}
555
556
/**
557
* spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
558
* a SPU request message for an ahash request. Includes SPU message headers and
559
* the request data.
560
* @mssg: mailbox message containing the transmit sg
561
* @rctx: crypto request context
562
* @tx_frag_num: number of scatterlist elements required to construct the
563
* SPU request message
564
* @spu_hdr_len: length in bytes of SPU message header
565
* @hash_carry_len: Number of bytes of data carried over from previous req
566
* @new_data_len: Number of bytes of new request data
567
* @pad_len: Number of pad bytes
568
*
569
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
570
* when the request completes, whether the request is handled successfully or
571
* there is an error.
572
*
573
* Return:
574
* 0 if successful
575
* < 0 if an error
576
*/
577
static int
578
spu_ahash_tx_sg_create(struct brcm_message *mssg,
579
struct iproc_reqctx_s *rctx,
580
u8 tx_frag_num,
581
u32 spu_hdr_len,
582
unsigned int hash_carry_len,
583
unsigned int new_data_len, u32 pad_len)
584
{
585
struct spu_hw *spu = &iproc_priv.spu;
586
struct scatterlist *sg; /* used to build sgs in mbox message */
587
u32 datalen; /* Number of bytes of response data expected */
588
u32 stat_len;
589
590
mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
591
rctx->gfp);
592
if (!mssg->spu.src)
593
return -ENOMEM;
594
595
sg = mssg->spu.src;
596
sg_init_table(sg, tx_frag_num);
597
598
sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
599
BCM_HDR_LEN + spu_hdr_len);
600
601
if (hash_carry_len)
602
sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
603
604
if (new_data_len) {
605
/* Copy in each src sg entry from request, up to chunksize */
606
datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
607
rctx->src_nents, new_data_len);
608
if (datalen < new_data_len) {
609
pr_err("%s(): failed to copy src sg to mbox msg",
610
__func__);
611
return -EFAULT;
612
}
613
}
614
615
if (pad_len)
616
sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
617
618
stat_len = spu->spu_tx_status_len();
619
if (stat_len) {
620
memset(rctx->msg_buf.tx_stat, 0, stat_len);
621
sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
622
}
623
624
return 0;
625
}
626
627
/**
628
* handle_ahash_req() - Process an asynchronous hash request from the crypto
629
* API.
630
* @rctx: Crypto request context
631
*
632
* Builds a SPU request message embedded in a mailbox message and submits the
633
* mailbox message on a selected mailbox channel. The SPU request message is
634
* constructed as a scatterlist, including entries from the crypto API's
635
* src scatterlist to avoid copying the data to be hashed. This function is
636
* called either on the thread from the crypto API, or, in the case that the
637
* crypto API request is too large to fit in a single SPU request message,
638
* on the thread that invokes the receive callback with a response message.
639
* Because some operations require the response from one chunk before the next
640
* chunk can be submitted, we always wait for the response for the previous
641
* chunk before submitting the next chunk. Because requests are submitted in
642
* lock step like this, there is no need to synchronize access to request data
643
* structures.
644
*
645
* Return:
646
* -EINPROGRESS: request has been submitted to SPU and response will be
647
* returned asynchronously
648
* -EAGAIN: non-final request included a small amount of data, which for
649
* efficiency we did not submit to the SPU, but instead stored
650
* to be submitted to the SPU with the next part of the request
651
* other: an error code
652
*/
653
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
654
{
655
struct spu_hw *spu = &iproc_priv.spu;
656
struct crypto_async_request *areq = rctx->parent;
657
struct ahash_request *req = ahash_request_cast(areq);
658
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
659
struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
660
unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
661
struct iproc_ctx_s *ctx = rctx->ctx;
662
663
/* number of bytes still to be hashed in this req */
664
unsigned int nbytes_to_hash = 0;
665
int err;
666
unsigned int chunksize = 0; /* length of hash carry + new data */
667
/*
668
* length of new data, not from hash carry, to be submitted in
669
* this hw request
670
*/
671
unsigned int new_data_len;
672
673
unsigned int __maybe_unused chunk_start = 0;
674
u32 db_size; /* Length of data field, incl gcm and hash padding */
675
int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
676
u32 data_pad_len = 0; /* length of GCM/CCM padding */
677
u32 stat_pad_len = 0; /* length of padding to align STATUS word */
678
struct brcm_message *mssg; /* mailbox message */
679
struct spu_request_opts req_opts;
680
struct spu_cipher_parms cipher_parms;
681
struct spu_hash_parms hash_parms;
682
struct spu_aead_parms aead_parms;
683
unsigned int local_nbuf;
684
u32 spu_hdr_len;
685
unsigned int digestsize;
686
u16 rem = 0;
687
688
/*
689
* number of entries in src and dst sg. Always includes SPU msg header.
690
* rx always includes a buffer to catch digest and STATUS.
691
*/
692
u8 rx_frag_num = 3;
693
u8 tx_frag_num = 1;
694
695
flow_log("total_todo %u, total_sent %u\n",
696
rctx->total_todo, rctx->total_sent);
697
698
memset(&req_opts, 0, sizeof(req_opts));
699
memset(&cipher_parms, 0, sizeof(cipher_parms));
700
memset(&hash_parms, 0, sizeof(hash_parms));
701
memset(&aead_parms, 0, sizeof(aead_parms));
702
703
req_opts.bd_suppress = true;
704
hash_parms.alg = ctx->auth.alg;
705
hash_parms.mode = ctx->auth.mode;
706
hash_parms.type = HASH_TYPE_NONE;
707
hash_parms.key_buf = (u8 *)ctx->authkey;
708
hash_parms.key_len = ctx->authkeylen;
709
710
/*
711
* For hash algorithms, the assignment below looks a bit odd, but
712
* it's needed for AES-XCBC and AES-CMAC hash algorithms
713
* to differentiate between 128, 192, 256 bit key values.
714
* Based on the key value, the hash algorithm is selected.
715
* For example, for a 128-bit key, the hash algorithm is AES-128.
716
*/
717
cipher_parms.type = ctx->cipher_type;
718
719
mssg = &rctx->mb_mssg;
720
chunk_start = rctx->src_sent;
721
722
/*
723
* Compute the amount remaining to hash. This may include data
724
* carried over from previous requests.
725
*/
726
nbytes_to_hash = rctx->total_todo - rctx->total_sent;
727
chunksize = nbytes_to_hash;
728
if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
729
(chunksize > ctx->max_payload))
730
chunksize = ctx->max_payload;
731
732
/*
733
* If this is not a final request and the request data is not a multiple
734
* of a full block, then simply park the extra data and prefix it to the
735
* data for the next request.
736
*/
737
if (!rctx->is_final) {
738
u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
739
u16 new_len; /* len of data to add to hash carry */
740
741
rem = chunksize % blocksize; /* remainder */
742
if (rem) {
743
/* chunksize not a multiple of blocksize */
744
chunksize -= rem;
745
if (chunksize == 0) {
746
/* Don't have a full block to submit to hw */
747
new_len = rem - rctx->hash_carry_len;
748
sg_copy_part_to_buf(req->src, dest, new_len,
749
rctx->src_sent);
750
rctx->hash_carry_len = rem;
751
flow_log("Exiting with hash carry len: %u\n",
752
rctx->hash_carry_len);
753
packet_dump(" buf: ",
754
rctx->hash_carry,
755
rctx->hash_carry_len);
756
return -EAGAIN;
757
}
758
}
759
}
760
761
/* if we have hash carry, then prefix it to the data in this request */
762
local_nbuf = rctx->hash_carry_len;
763
rctx->hash_carry_len = 0;
764
if (local_nbuf)
765
tx_frag_num++;
766
new_data_len = chunksize - local_nbuf;
767
768
/* Count number of sg entries to be used in this request */
769
rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
770
new_data_len);
771
772
/* AES hashing keeps key size in type field, so need to copy it here */
773
if (hash_parms.alg == HASH_ALG_AES)
774
hash_parms.type = (enum hash_type)cipher_parms.type;
775
else
776
hash_parms.type = spu->spu_hash_type(rctx->total_sent);
777
778
digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
779
hash_parms.type);
780
hash_parms.digestsize = digestsize;
781
782
/* update the indexes */
783
rctx->total_sent += chunksize;
784
/* if you sent a prebuf then that wasn't from this req->src */
785
rctx->src_sent += new_data_len;
786
787
if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
788
hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
789
hash_parms.mode,
790
chunksize,
791
blocksize);
792
793
/*
794
* If a non-first chunk, then include the digest returned from the
795
* previous chunk so that hw can add to it (except for AES types).
796
*/
797
if ((hash_parms.type == HASH_TYPE_UPDT) &&
798
(hash_parms.alg != HASH_ALG_AES)) {
799
hash_parms.key_buf = rctx->incr_hash;
800
hash_parms.key_len = digestsize;
801
}
802
803
atomic64_add(chunksize, &iproc_priv.bytes_out);
804
805
flow_log("%s() final: %u nbuf: %u ",
806
__func__, rctx->is_final, local_nbuf);
807
808
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
809
flow_log("max_payload infinite\n");
810
else
811
flow_log("max_payload %u\n", ctx->max_payload);
812
813
flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
814
815
/* Prepend SPU header with type 3 BCM header */
816
memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
817
818
hash_parms.prebuf_len = local_nbuf;
819
spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
820
BCM_HDR_LEN,
821
&req_opts, &cipher_parms,
822
&hash_parms, &aead_parms,
823
new_data_len);
824
825
if (spu_hdr_len == 0) {
826
pr_err("Failed to create SPU request header\n");
827
return -EFAULT;
828
}
829
830
/*
831
* Determine total length of padding required. Put all padding in one
832
* buffer.
833
*/
834
data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
835
db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
836
0, 0, hash_parms.pad_len);
837
if (spu->spu_tx_status_len())
838
stat_pad_len = spu->spu_wordalign_padlen(db_size);
839
if (stat_pad_len)
840
rx_frag_num++;
841
pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
842
if (pad_len) {
843
tx_frag_num++;
844
spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
845
hash_parms.pad_len, ctx->auth.alg,
846
ctx->auth.mode, rctx->total_sent,
847
stat_pad_len);
848
}
849
850
spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
851
spu_hdr_len);
852
packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
853
flow_log("Data:\n");
854
dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
855
packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
856
857
/*
858
* Build mailbox message containing SPU request msg and rx buffers
859
* to catch response message
860
*/
861
memset(mssg, 0, sizeof(*mssg));
862
mssg->type = BRCM_MESSAGE_SPU;
863
mssg->ctx = rctx; /* Will be returned in response */
864
865
/* Create rx scatterlist to catch result */
866
err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
867
stat_pad_len);
868
if (err)
869
return err;
870
871
/* Create tx scatterlist containing SPU request message */
872
tx_frag_num += rctx->src_nents;
873
if (spu->spu_tx_status_len())
874
tx_frag_num++;
875
err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
876
local_nbuf, new_data_len, pad_len);
877
if (err)
878
return err;
879
880
err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
881
if (unlikely(err < 0))
882
return err;
883
884
return -EINPROGRESS;
885
}
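/*
 * Editorial sketch (not part of the driver): for a non-final chunk the code
 * above only hands whole blocks to the hardware; if less than one full block
 * is available, the data is parked in the hash-carry buffer and -EAGAIN is
 * returned. The block rounding it performs reduces to:
 */
static int __maybe_unused example_hash_round_down(unsigned int avail,
						  unsigned int blocksize,
						  unsigned int *to_submit)
{
	unsigned int rem = avail % blocksize;

	*to_submit = avail - rem;	/* whole blocks only */
	if (!*to_submit)
		return -EAGAIN;		/* too little data; carry it instead */
	return 0;
}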
886
887
/**
888
* spu_hmac_outer_hash() - Request synchronous software computation of the outer hash
889
* for an HMAC request.
890
* @req: The HMAC request from the crypto API
891
* @ctx: The session context
892
*
893
* Return: 0 if synchronous hash operation successful
894
* -EINVAL if the hash algo is unrecognized
895
* any other value indicates an error
896
*/
897
static int spu_hmac_outer_hash(struct ahash_request *req,
898
struct iproc_ctx_s *ctx)
899
{
900
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
901
unsigned int blocksize =
902
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
903
int rc;
904
905
switch (ctx->auth.alg) {
906
case HASH_ALG_MD5:
907
rc = do_shash("md5", req->result, ctx->opad, blocksize,
908
req->result, ctx->digestsize, NULL, 0);
909
break;
910
case HASH_ALG_SHA1:
911
rc = do_shash("sha1", req->result, ctx->opad, blocksize,
912
req->result, ctx->digestsize, NULL, 0);
913
break;
914
case HASH_ALG_SHA224:
915
rc = do_shash("sha224", req->result, ctx->opad, blocksize,
916
req->result, ctx->digestsize, NULL, 0);
917
break;
918
case HASH_ALG_SHA256:
919
rc = do_shash("sha256", req->result, ctx->opad, blocksize,
920
req->result, ctx->digestsize, NULL, 0);
921
break;
922
case HASH_ALG_SHA384:
923
rc = do_shash("sha384", req->result, ctx->opad, blocksize,
924
req->result, ctx->digestsize, NULL, 0);
925
break;
926
case HASH_ALG_SHA512:
927
rc = do_shash("sha512", req->result, ctx->opad, blocksize,
928
req->result, ctx->digestsize, NULL, 0);
929
break;
930
default:
931
pr_err("%s() Error : unknown hmac type\n", __func__);
932
rc = -EINVAL;
933
}
934
return rc;
935
}
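/*
 * Editorial sketch (not part of the driver): the switch above performs the
 * outer step of HMAC, H(k_opad || inner_digest), by hashing ctx->opad (one
 * block) followed by the inner digest already sitting in req->result. The
 * opad block is the standard HMAC construction -- the block-sized key XORed
 * with 0x5c -- which the setkey path presumably prepares along these lines
 * (the example_ name is hypothetical):
 */
static void __maybe_unused example_prepare_opad(const u8 *key, unsigned int keylen,
						unsigned int blocksize, u8 *opad)
{
	unsigned int i;

	/* keys longer than one block are hashed down before this point */
	memset(opad, 0, blocksize);
	memcpy(opad, key, keylen);
	for (i = 0; i < blocksize; i++)
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c, from <crypto/hmac.h> */
}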
936
937
/**
938
* ahash_req_done() - Process a hash result from the SPU hardware.
939
* @rctx: Crypto request context
940
*
941
* Return: 0 if successful
942
* < 0 if an error
943
*/
944
static int ahash_req_done(struct iproc_reqctx_s *rctx)
945
{
946
struct spu_hw *spu = &iproc_priv.spu;
947
struct crypto_async_request *areq = rctx->parent;
948
struct ahash_request *req = ahash_request_cast(areq);
949
struct iproc_ctx_s *ctx = rctx->ctx;
950
int err;
951
952
memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
953
954
if (spu->spu_type == SPU_TYPE_SPUM) {
955
/* byte swap the output from the UPDT function to network byte
956
* order
957
*/
958
if (ctx->auth.alg == HASH_ALG_MD5) {
959
__swab32s((u32 *)req->result);
960
__swab32s(((u32 *)req->result) + 1);
961
__swab32s(((u32 *)req->result) + 2);
962
__swab32s(((u32 *)req->result) + 3);
963
__swab32s(((u32 *)req->result) + 4);
964
}
965
}
966
967
flow_dump(" digest ", req->result, ctx->digestsize);
968
969
/* if this an HMAC then do the outer hash */
970
if (rctx->is_sw_hmac) {
971
err = spu_hmac_outer_hash(req, ctx);
972
if (err < 0)
973
return err;
974
flow_dump(" hmac: ", req->result, ctx->digestsize);
975
}
976
977
if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
978
atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
979
atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
980
} else {
981
atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
982
atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
983
}
984
985
return 0;
986
}
987
988
/**
989
* handle_ahash_resp() - Process a SPU response message for a hash request.
990
* Checks if the entire crypto API request has been processed, and if so,
991
* invokes post processing on the result.
992
* @rctx: Crypto request context
993
*/
994
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
995
{
996
struct iproc_ctx_s *ctx = rctx->ctx;
997
struct crypto_async_request *areq = rctx->parent;
998
struct ahash_request *req = ahash_request_cast(areq);
999
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1000
unsigned int blocksize =
1001
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1002
/*
1003
* Save hash to use as input to next op if incremental. Might be copying
1004
* too much, but that's easier than figuring out actual digest size here
1005
*/
1006
memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1007
1008
flow_log("%s() blocksize:%u digestsize:%u\n",
1009
__func__, blocksize, ctx->digestsize);
1010
1011
atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1012
1013
if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1014
ahash_req_done(rctx);
1015
}
1016
1017
/**
1018
* spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1019
* a SPU response message for an AEAD request. Includes buffers to catch SPU
1020
* message headers and the response data.
1021
* @mssg: mailbox message containing the receive sg
1022
* @req: Crypto API request
1023
* @rctx: crypto request context
1024
* @rx_frag_num: number of scatterlist elements required to hold the
1025
* SPU response message
1026
* @assoc_len: Length of associated data included in the crypto request
1027
* @ret_iv_len: Length of IV returned in response
1028
* @resp_len: Number of bytes of response data expected to be written to
1029
* dst buffer from crypto API
1030
* @digestsize: Length of hash digest, in bytes
1031
* @stat_pad_len: Number of bytes required to pad the STAT field to
1032
* a 4-byte boundary
1033
*
1034
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1035
* when the request completes, whether the request is handled successfully or
1036
* there is an error.
1037
*
1038
* Return:
1039
* 0 if successful
1040
* < 0 if an error
1041
*/
1042
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1043
struct aead_request *req,
1044
struct iproc_reqctx_s *rctx,
1045
u8 rx_frag_num,
1046
unsigned int assoc_len,
1047
u32 ret_iv_len, unsigned int resp_len,
1048
unsigned int digestsize, u32 stat_pad_len)
1049
{
1050
struct spu_hw *spu = &iproc_priv.spu;
1051
struct scatterlist *sg; /* used to build sgs in mbox message */
1052
struct iproc_ctx_s *ctx = rctx->ctx;
1053
u32 datalen; /* Number of bytes of response data expected */
1054
u32 assoc_buf_len;
1055
u8 data_padlen = 0;
1056
1057
if (ctx->is_rfc4543) {
1058
/* RFC4543: only pad after data, not after AAD */
1059
data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1060
assoc_len + resp_len);
1061
assoc_buf_len = assoc_len;
1062
} else {
1063
data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1064
resp_len);
1065
assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1066
assoc_len, ret_iv_len,
1067
rctx->is_encrypt);
1068
}
1069
1070
if (ctx->cipher.mode == CIPHER_MODE_CCM)
1071
/* ICV (after data) must be in the next 32-bit word for CCM */
1072
data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1073
resp_len +
1074
data_padlen);
1075
1076
if (data_padlen)
1077
/* have to catch gcm pad in separate buffer */
1078
rx_frag_num++;
1079
1080
mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist),
1081
rctx->gfp);
1082
if (!mssg->spu.dst)
1083
return -ENOMEM;
1084
1085
sg = mssg->spu.dst;
1086
sg_init_table(sg, rx_frag_num);
1087
1088
/* Space for SPU message header */
1089
sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1090
1091
if (assoc_buf_len) {
1092
/*
1093
* Don't write directly to req->dst, because SPU may pad the
1094
* assoc data in the response
1095
*/
1096
memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1097
sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1098
}
1099
1100
if (resp_len) {
1101
/*
1102
* Copy in each dst sg entry from request, up to chunksize.
1103
* dst sg catches just the data. digest caught in separate buf.
1104
*/
1105
datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1106
rctx->dst_nents, resp_len);
1107
if (datalen < (resp_len)) {
1108
pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1109
__func__, resp_len, datalen);
1110
return -EFAULT;
1111
}
1112
}
1113
1114
/* If GCM/CCM data is padded, catch padding in separate buffer */
1115
if (data_padlen) {
1116
memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1117
sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1118
}
1119
1120
/* Always catch ICV in separate buffer */
1121
sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1122
1123
flow_log("stat_pad_len %u\n", stat_pad_len);
1124
if (stat_pad_len) {
1125
memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1126
sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1127
}
1128
1129
memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1130
sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1131
1132
return 0;
1133
}
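/*
 * Editorial sketch (not part of the driver): the stat_pad_len and data_padlen
 * values used above come from the spu_hw ops, but they are ordinary
 * round-up-to-boundary calculations -- the STAT field is padded to a 4-byte
 * boundary and GCM/CCM data to a 16-byte block boundary. For the STAT case:
 */
static u32 __maybe_unused example_wordalign_pad(u32 payload_len)
{
	/* bytes needed to reach the next 4-byte boundary */
	return round_up(payload_len, 4) - payload_len;
}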
1134
1135
/**
1136
* spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1137
* SPU request message for an AEAD request. Includes SPU message headers and the
1138
* request data.
1139
* @mssg: mailbox message containing the transmit sg
1140
* @rctx: crypto request context
1141
* @tx_frag_num: number of scatterlist elements required to construct the
1142
* SPU request message
1143
* @spu_hdr_len: length of SPU message header in bytes
1144
* @assoc: crypto API associated data scatterlist
1145
* @assoc_len: length of associated data
1146
* @assoc_nents: number of scatterlist entries containing assoc data
1147
* @aead_iv_len: length of AEAD IV, if included
1148
* @chunksize: Number of bytes of request data
1149
* @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1150
* @pad_len: Number of pad bytes
1151
* @incl_icv: If true, write separate ICV buffer after data and
1152
* any padding
1153
*
1154
* The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1155
* when the request completes, whether the request is handled successfully or
1156
* there is an error.
1157
*
1158
* Return:
1159
* 0 if successful
1160
* < 0 if an error
1161
*/
1162
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1163
struct iproc_reqctx_s *rctx,
1164
u8 tx_frag_num,
1165
u32 spu_hdr_len,
1166
struct scatterlist *assoc,
1167
unsigned int assoc_len,
1168
int assoc_nents,
1169
unsigned int aead_iv_len,
1170
unsigned int chunksize,
1171
u32 aad_pad_len, u32 pad_len, bool incl_icv)
1172
{
1173
struct spu_hw *spu = &iproc_priv.spu;
1174
struct scatterlist *sg; /* used to build sgs in mbox message */
1175
struct scatterlist *assoc_sg = assoc;
1176
struct iproc_ctx_s *ctx = rctx->ctx;
1177
u32 datalen; /* Number of bytes of data to write */
1178
u32 written; /* Number of bytes of data written */
1179
u32 assoc_offset = 0;
1180
u32 stat_len;
1181
1182
mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist),
1183
rctx->gfp);
1184
if (!mssg->spu.src)
1185
return -ENOMEM;
1186
1187
sg = mssg->spu.src;
1188
sg_init_table(sg, tx_frag_num);
1189
1190
sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1191
BCM_HDR_LEN + spu_hdr_len);
1192
1193
if (assoc_len) {
1194
/* Copy in each associated data sg entry from request */
1195
written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1196
assoc_nents, assoc_len);
1197
if (written < assoc_len) {
1198
pr_err("%s(): failed to copy assoc sg to mbox msg",
1199
__func__);
1200
return -EFAULT;
1201
}
1202
}
1203
1204
if (aead_iv_len)
1205
sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1206
1207
if (aad_pad_len) {
1208
memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1209
sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1210
}
1211
1212
datalen = chunksize;
1213
if ((chunksize > ctx->digestsize) && incl_icv)
1214
datalen -= ctx->digestsize;
1215
if (datalen) {
1216
/* For aead, a single msg should consume the entire src sg */
1217
written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1218
rctx->src_nents, datalen);
1219
if (written < datalen) {
1220
pr_err("%s(): failed to copy src sg to mbox msg",
1221
__func__);
1222
return -EFAULT;
1223
}
1224
}
1225
1226
if (pad_len) {
1227
memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1228
sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1229
}
1230
1231
if (incl_icv)
1232
sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1233
1234
stat_len = spu->spu_tx_status_len();
1235
if (stat_len) {
1236
memset(rctx->msg_buf.tx_stat, 0, stat_len);
1237
sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1238
}
1239
return 0;
1240
}
1241
1242
/**
1243
* handle_aead_req() - Submit a SPU request message for the next chunk of the
1244
* current AEAD request.
1245
* @rctx: Crypto request context
1246
*
1247
* Unlike other operation types, we assume the length of the request fits in
1248
* a single SPU request message. aead_enqueue() makes sure this is true.
1249
* Comments for other op types regarding threads apply here as well.
1250
*
1251
* Unlike incremental hash ops, where the spu returns the entire hash for
1252
* truncated algs like sha-224, the SPU returns just the truncated hash in
1253
* response to aead requests. So digestsize is always ctx->digestsize here.
1254
*
1255
* Return: -EINPROGRESS: crypto request has been accepted and result will be
1256
* returned asynchronously
1257
* Any other value indicates an error
1258
*/
1259
static int handle_aead_req(struct iproc_reqctx_s *rctx)
1260
{
1261
struct spu_hw *spu = &iproc_priv.spu;
1262
struct crypto_async_request *areq = rctx->parent;
1263
struct aead_request *req = container_of(areq,
1264
struct aead_request, base);
1265
struct iproc_ctx_s *ctx = rctx->ctx;
1266
int err;
1267
unsigned int chunksize;
1268
unsigned int resp_len;
1269
u32 spu_hdr_len;
1270
u32 db_size;
1271
u32 stat_pad_len;
1272
u32 pad_len;
1273
struct brcm_message *mssg; /* mailbox message */
1274
struct spu_request_opts req_opts;
1275
struct spu_cipher_parms cipher_parms;
1276
struct spu_hash_parms hash_parms;
1277
struct spu_aead_parms aead_parms;
1278
int assoc_nents = 0;
1279
bool incl_icv = false;
1280
unsigned int digestsize = ctx->digestsize;
1281
1282
/* number of entries in src and dst sg. Always includes SPU msg header.
1283
*/
1284
u8 rx_frag_num = 2; /* and STATUS */
1285
u8 tx_frag_num = 1;
1286
1287
/* doing the whole thing at once */
1288
chunksize = rctx->total_todo;
1289
1290
flow_log("%s: chunksize %u\n", __func__, chunksize);
1291
1292
memset(&req_opts, 0, sizeof(req_opts));
1293
memset(&hash_parms, 0, sizeof(hash_parms));
1294
memset(&aead_parms, 0, sizeof(aead_parms));
1295
1296
req_opts.is_inbound = !(rctx->is_encrypt);
1297
req_opts.auth_first = ctx->auth_first;
1298
req_opts.is_aead = true;
1299
req_opts.is_esp = ctx->is_esp;
1300
1301
cipher_parms.alg = ctx->cipher.alg;
1302
cipher_parms.mode = ctx->cipher.mode;
1303
cipher_parms.type = ctx->cipher_type;
1304
cipher_parms.key_buf = ctx->enckey;
1305
cipher_parms.key_len = ctx->enckeylen;
1306
cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1307
cipher_parms.iv_len = rctx->iv_ctr_len;
1308
1309
hash_parms.alg = ctx->auth.alg;
1310
hash_parms.mode = ctx->auth.mode;
1311
hash_parms.type = HASH_TYPE_NONE;
1312
hash_parms.key_buf = (u8 *)ctx->authkey;
1313
hash_parms.key_len = ctx->authkeylen;
1314
hash_parms.digestsize = digestsize;
1315
1316
if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1317
(ctx->authkeylen < SHA224_DIGEST_SIZE))
1318
hash_parms.key_len = SHA224_DIGEST_SIZE;
1319
1320
aead_parms.assoc_size = req->assoclen;
1321
if (ctx->is_esp && !ctx->is_rfc4543) {
1322
/*
1323
* The 8-byte IV is included in the assoc data of the request. SPU2
1324
* expects AAD to include just SPI and seqno. So
1325
* subtract off the IV len.
1326
*/
1327
aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1328
1329
if (rctx->is_encrypt) {
1330
aead_parms.return_iv = true;
1331
aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1332
aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1333
}
1334
} else {
1335
aead_parms.ret_iv_len = 0;
1336
}
1337
1338
/*
1339
* Count number of sg entries from the crypto API request that are to
1340
* be included in this mailbox message. For dst sg, don't count space
1341
* for digest. Digest gets caught in a separate buffer and copied back
1342
* to dst sg when processing response.
1343
*/
1344
rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1345
rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1346
if (aead_parms.assoc_size)
1347
assoc_nents = spu_sg_count(rctx->assoc, 0,
1348
aead_parms.assoc_size);
1349
1350
mssg = &rctx->mb_mssg;
1351
1352
rctx->total_sent = chunksize;
1353
rctx->src_sent = chunksize;
1354
if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1355
aead_parms.assoc_size,
1356
aead_parms.ret_iv_len,
1357
rctx->is_encrypt))
1358
rx_frag_num++;
1359
1360
aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1361
rctx->iv_ctr_len);
1362
1363
if (ctx->auth.alg == HASH_ALG_AES)
1364
hash_parms.type = (enum hash_type)ctx->cipher_type;
1365
1366
/* General case AAD padding (CCM and RFC4543 special cases below) */
1367
aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1368
aead_parms.assoc_size);
1369
1370
/* General case data padding (CCM decrypt special case below) */
1371
aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1372
chunksize);
1373
1374
if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1375
/*
1376
* for CCM, AAD len + 2 (rather than AAD len) needs to be
1377
* 128-bit aligned
1378
*/
1379
aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1380
ctx->cipher.mode,
1381
aead_parms.assoc_size + 2);
1382
1383
/*
1384
* And when decrypting CCM, need to pad without including
1385
* size of ICV which is tacked on to end of chunk
1386
*/
1387
if (!rctx->is_encrypt)
1388
aead_parms.data_pad_len =
1389
spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1390
chunksize - digestsize);
1391
1392
/* CCM also requires software to rewrite portions of IV: */
1393
spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1394
chunksize, rctx->is_encrypt,
1395
ctx->is_esp);
1396
}
1397
1398
if (ctx->is_rfc4543) {
1399
/*
1400
* RFC4543: data is included in AAD, so don't pad after AAD
1401
* and pad data based on both AAD + data size
1402
*/
1403
aead_parms.aad_pad_len = 0;
1404
if (!rctx->is_encrypt)
1405
aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1406
ctx->cipher.mode,
1407
aead_parms.assoc_size + chunksize -
1408
digestsize);
1409
else
1410
aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1411
ctx->cipher.mode,
1412
aead_parms.assoc_size + chunksize);
1413
1414
req_opts.is_rfc4543 = true;
1415
}
1416
1417
if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1418
incl_icv = true;
1419
tx_frag_num++;
1420
/* Copy ICV from end of src scatterlist to digest buf */
1421
sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1422
req->assoclen + rctx->total_sent -
1423
digestsize);
1424
}
1425
1426
atomic64_add(chunksize, &iproc_priv.bytes_out);
1427
1428
flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1429
1430
/* Prepend SPU header with type 3 BCM header */
1431
memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1432
1433
spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1434
BCM_HDR_LEN, &req_opts,
1435
&cipher_parms, &hash_parms,
1436
&aead_parms, chunksize);
1437
1438
/* Determine total length of padding. Put all padding in one buffer. */
1439
db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1440
chunksize, aead_parms.aad_pad_len,
1441
aead_parms.data_pad_len, 0);
1442
1443
stat_pad_len = spu->spu_wordalign_padlen(db_size);
1444
1445
if (stat_pad_len)
1446
rx_frag_num++;
1447
pad_len = aead_parms.data_pad_len + stat_pad_len;
1448
if (pad_len) {
1449
tx_frag_num++;
1450
spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1451
aead_parms.data_pad_len, 0,
1452
ctx->auth.alg, ctx->auth.mode,
1453
rctx->total_sent, stat_pad_len);
1454
}
1455
1456
spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1457
spu_hdr_len);
1458
dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1459
packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1460
packet_log("BD:\n");
1461
dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1462
packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1463
1464
/*
1465
* Build mailbox message containing SPU request msg and rx buffers
1466
* to catch response message
1467
*/
1468
memset(mssg, 0, sizeof(*mssg));
1469
mssg->type = BRCM_MESSAGE_SPU;
1470
mssg->ctx = rctx; /* Will be returned in response */
1471
1472
/* Create rx scatterlist to catch result */
1473
rx_frag_num += rctx->dst_nents;
1474
resp_len = chunksize;
1475
1476
/*
1477
* Always catch ICV in separate buffer. Have to for GCM/CCM because of
1478
* padding. Have to for SHA-224 and other truncated SHAs because SPU
1479
* sends entire digest back.
1480
*/
1481
rx_frag_num++;
1482
1483
if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1484
(ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1485
/*
1486
* Input is ciphertxt plus ICV, but ICV not incl
1487
* in output.
1488
*/
1489
resp_len -= ctx->digestsize;
1490
if (resp_len == 0)
1491
/* no rx frags to catch output data */
1492
rx_frag_num -= rctx->dst_nents;
1493
}
1494
1495
err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1496
aead_parms.assoc_size,
1497
aead_parms.ret_iv_len, resp_len, digestsize,
1498
stat_pad_len);
1499
if (err)
1500
return err;
1501
1502
/* Create tx scatterlist containing SPU request message */
1503
tx_frag_num += rctx->src_nents;
1504
tx_frag_num += assoc_nents;
1505
if (aead_parms.aad_pad_len)
1506
tx_frag_num++;
1507
if (aead_parms.iv_len)
1508
tx_frag_num++;
1509
if (spu->spu_tx_status_len())
1510
tx_frag_num++;
1511
err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1512
rctx->assoc, aead_parms.assoc_size,
1513
assoc_nents, aead_parms.iv_len, chunksize,
1514
aead_parms.aad_pad_len, pad_len, incl_icv);
1515
if (err)
1516
return err;
1517
1518
err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1519
if (unlikely(err < 0))
1520
return err;
1521
1522
return -EINPROGRESS;
1523
}
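/*
 * Editorial sketch (not part of the driver): the CCM special case above
 * block-aligns (assoc_size + 2) rather than assoc_size because CCM prepends a
 * 2-byte length encoding to the AAD (for AAD shorter than 0xff00 bytes)
 * before padding it to the 16-byte block boundary. As a formula:
 */
static u32 __maybe_unused example_ccm_aad_pad(u32 assoc_size)
{
	u32 encoded = assoc_size + 2;		/* 2-byte AAD length prefix */

	/* pad the length-prefixed AAD to a whole AES block */
	return (16 - (encoded & 15)) & 15;
}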
1524
1525
/**
1526
* handle_aead_resp() - Process a SPU response message for an AEAD request.
1527
* @rctx: Crypto request context
1528
*/
1529
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1530
{
1531
struct spu_hw *spu = &iproc_priv.spu;
1532
struct crypto_async_request *areq = rctx->parent;
1533
struct aead_request *req = container_of(areq,
1534
struct aead_request, base);
1535
struct iproc_ctx_s *ctx = rctx->ctx;
1536
u32 payload_len;
1537
unsigned int icv_offset;
1538
u32 result_len;
1539
1540
/* See how much data was returned */
1541
payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1542
flow_log("payload_len %u\n", payload_len);
1543
1544
/* only count payload */
1545
atomic64_add(payload_len, &iproc_priv.bytes_in);
1546
1547
if (req->assoclen)
1548
packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1549
req->assoclen);
1550
1551
/*
1552
* Copy the ICV back to the destination
1553
* buffer. In decrypt case, SPU gives us back the digest, but crypto
1554
	 * API doesn't expect ICV in dst buffer.
	 */
	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data: ");
	dump_sg(req->dst, req->assoclen, result_len);

	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}

/**
 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
 * @rctx: request context
 *
 * Mailbox scatterlists are allocated for each chunk. So free them after
 * processing each chunk.
 */
static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
{
	/* mailbox message used to tx request */
	struct brcm_message *mssg = &rctx->mb_mssg;

	kfree(mssg->spu.src);
	kfree(mssg->spu.dst);
	memset(mssg, 0, sizeof(struct brcm_message));
}

/**
 * finish_req() - Used to invoke the complete callback from the requester when
 * a request has been handled asynchronously.
 * @rctx: Request context
 * @err: Indicates whether the request was successful or not
 *
 * Ensures that cleanup has been done for request
 */
static void finish_req(struct iproc_reqctx_s *rctx, int err)
{
	struct crypto_async_request *areq = rctx->parent;

	flow_log("%s() err:%d\n\n", __func__, err);

	/* No harm done if already called */
	spu_chunk_cleanup(rctx);

	if (areq)
		crypto_request_complete(areq, err);
}

/**
 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
 * @cl: mailbox client structure for SPU driver
 * @msg: mailbox message containing SPU response
 */
static void spu_rx_callback(struct mbox_client *cl, void *msg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct brcm_message *mssg = msg;
	struct iproc_reqctx_s *rctx;
	int err;

	rctx = mssg->ctx;
	if (unlikely(!rctx)) {
		/* This is fatal */
		pr_err("%s(): no request context", __func__);
		err = -EFAULT;
		goto cb_finish;
	}

	/* process the SPU status */
	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
	if (err != 0) {
		if (err == SPU_INVALID_ICV)
			atomic_inc(&iproc_priv.bad_icv);
		err = -EBADMSG;
		goto cb_finish;
	}

	/* Process the SPU response message */
	switch (rctx->ctx->alg->type) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		handle_skcipher_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		handle_ahash_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		handle_aead_resp(rctx);
		break;
	default:
		err = -EINVAL;
		goto cb_finish;
	}

	/*
	 * If this response does not complete the request, then send the next
	 * request chunk.
	 */
	if (rctx->total_sent < rctx->total_todo) {
		/* Deallocate anything specific to previous chunk */
		spu_chunk_cleanup(rctx);

		switch (rctx->ctx->alg->type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = handle_skcipher_req(rctx);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = handle_ahash_req(rctx);
			if (err == -EAGAIN)
				/*
				 * we saved data in hash carry, but tell crypto
				 * API we successfully completed request.
				 */
				err = 0;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = handle_aead_req(rctx);
			break;
		default:
			err = -EINVAL;
		}

		if (err == -EINPROGRESS)
			/* Successfully submitted request for next chunk */
			return;
	}

cb_finish:
	finish_req(rctx, err);
}

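/*
 * Illustrative sketch (not part of this driver): how a caller that got
 * -EINPROGRESS back from one of the enqueue paths below typically consumes
 * the completion that finish_req() delivers via crypto_request_complete().
 * The function name and parameters are hypothetical; the skcipher handle is
 * assumed to be already allocated and keyed.
 */
static int __maybe_unused example_submit_and_wait(struct crypto_skcipher *tfm,
						  struct scatterlist *src,
						  struct scatterlist *dst,
						  unsigned int nbytes, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* crypto_req_done() is the callback crypto_request_complete() fires */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	/* Blocks until the mailbox callback above reports the final status */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return err;
}
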
/* ==================== Kernel Cryptographic API ==================== */

/**
 * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
 * @req: Crypto API request
 * @encrypt: true if encrypting; false if decrypting
 *
 * Return: -EINPROGRESS if request accepted and result will be returned
 *	   asynchronously
 *	   < 0 if an error
 */
static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
{
	struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
	struct iproc_ctx_s *ctx =
	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	int err;

	flow_log("%s() enc:%u\n", __func__, encrypt);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		     CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->cryptlen;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->ctx = ctx;

	/* Initialize current position in src and dst scatterlists */
	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->src_skip = 0;
	rctx->dst_sg = req->dst;
	rctx->dst_nents = 0;
	rctx->dst_skip = 0;

	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM ||
	    ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len =
		    crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
		memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
	} else {
		rctx->iv_ctr_len = 0;
	}

	/* Choose a SPU to process this request */
	rctx->chan_idx = select_channel();
	err = handle_skcipher_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	return err;
}

static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->cipher_type = CIPHER_TYPE_DES;
	return 0;
}

static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->cipher_type = CIPHER_TYPE_3DES;
	return 0;
}

static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);

	if (ctx->cipher.mode == CIPHER_MODE_XTS)
		/* XTS includes two keys of equal length */
		keylen = keylen / 2;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		return -EINVAL;
	}
	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
	return 0;
}

static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
	struct spu_cipher_parms cipher_parms;
	u32 alloc_len = 0;
	int err;

	flow_log("skcipher_setkey() keylen: %d\n", keylen);
	flow_dump(" key: ", key, keylen);

	switch (ctx->cipher.alg) {
	case CIPHER_ALG_DES:
		err = des_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_3DES:
		err = threedes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_AES:
		err = aes_setkey(cipher, key, keylen);
		break;
	default:
		pr_err("%s() Error: unknown cipher alg\n", __func__);
		err = -EINVAL;
	}
	if (err)
		return err;

	memcpy(ctx->enckey, key, keylen);
	ctx->enckeylen = keylen;

	/* SPU needs XTS keys in the reverse order the crypto API presents */
	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
		unsigned int xts_keylen = keylen / 2;

		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
	}

	if (spu->spu_type == SPU_TYPE_SPUM)
		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
	cipher_parms.iv_buf = NULL;
	cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;

	/* Prepend SPU request message with BCM header */
	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
	ctx->spu_req_hdr_len =
	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
				     &cipher_parms);

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);

	return 0;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);

	return skcipher_enqueue(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
	return skcipher_enqueue(req, false);
}

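/*
 * Illustrative sketch (not part of this driver): allocating and keying a
 * "cbc(aes)" transform from a kernel consumer. With the default module
 * priorities this may resolve to cbc-aes-iproc; request the cra_driver_name
 * directly to force this implementation. The function name is hypothetical.
 */
static struct crypto_skcipher * __maybe_unused example_alloc_cbc_aes(const u8 *key,
								     unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* Lands in skcipher_setkey() above when this driver is selected */
	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}
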
static int ahash_enqueue(struct ahash_request *req)
1904
{
1905
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1906
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1907
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1908
int err;
1909
const char *alg_name;
1910
1911
flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1912
1913
rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1914
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1915
rctx->parent = &req->base;
1916
rctx->ctx = ctx;
1917
rctx->bd_suppress = true;
1918
memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1919
1920
/* Initialize position in src scatterlist */
1921
rctx->src_sg = req->src;
1922
rctx->src_skip = 0;
1923
rctx->src_nents = 0;
1924
rctx->dst_sg = NULL;
1925
rctx->dst_skip = 0;
1926
rctx->dst_nents = 0;
1927
1928
/* SPU2 hardware does not compute hash of zero length data */
1929
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1930
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1931
alg_name = crypto_ahash_alg_name(tfm);
1932
flow_log("Doing %sfinal %s zero-len hash request in software\n",
1933
rctx->is_final ? "" : "non-", alg_name);
1934
err = do_shash((unsigned char *)alg_name, req->result,
1935
NULL, 0, NULL, 0, ctx->authkey,
1936
ctx->authkeylen);
1937
if (err < 0)
1938
flow_log("Hash request failed with error %d\n", err);
1939
return err;
1940
}
1941
/* Choose a SPU to process this request */
1942
rctx->chan_idx = select_channel();
1943
1944
err = handle_ahash_req(rctx);
1945
if (err != -EINPROGRESS)
1946
/* synchronous result */
1947
spu_chunk_cleanup(rctx);
1948
1949
if (err == -EAGAIN)
1950
/*
1951
* we saved data in hash carry, but tell crypto API
1952
* we successfully completed request.
1953
*/
1954
err = 0;
1955
1956
return err;
1957
}
1958
1959
static int __ahash_init(struct ahash_request *req)
1960
{
1961
struct spu_hw *spu = &iproc_priv.spu;
1962
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1963
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1964
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1965
1966
flow_log("%s()\n", __func__);
1967
1968
/* Initialize the context */
1969
rctx->hash_carry_len = 0;
1970
rctx->is_final = 0;
1971
1972
rctx->total_todo = 0;
1973
rctx->src_sent = 0;
1974
rctx->total_sent = 0;
1975
rctx->total_received = 0;
1976
1977
ctx->digestsize = crypto_ahash_digestsize(tfm);
1978
/* If we add a hash whose digest is larger, catch it here. */
1979
WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
1980
1981
rctx->is_sw_hmac = false;
1982
1983
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
1984
true);
1985
1986
return 0;
1987
}
1988
1989
/**
1990
* spu_no_incr_hash() - Determine whether incremental hashing is supported.
1991
* @ctx: Crypto session context
1992
*
1993
* SPU-2 does not support incremental hashing (we'll have to revisit and
1994
* condition based on chip revision or device tree entry if future versions do
1995
* support incremental hash)
1996
*
1997
* SPU-M also doesn't support incremental hashing of AES-XCBC
1998
*
1999
* Return: true if incremental hashing is not supported
2000
* false otherwise
2001
*/
2002
static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2003
{
2004
struct spu_hw *spu = &iproc_priv.spu;
2005
2006
if (spu->spu_type == SPU_TYPE_SPU2)
2007
return true;
2008
2009
if ((ctx->auth.alg == HASH_ALG_AES) &&
2010
(ctx->auth.mode == HASH_MODE_XCBC))
2011
return true;
2012
2013
/* Otherwise, incremental hashing is supported */
2014
return false;
2015
}
2016
2017
static int ahash_init(struct ahash_request *req)
2018
{
2019
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2020
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2021
const char *alg_name;
2022
struct crypto_shash *hash;
2023
int ret;
2024
gfp_t gfp;
2025
2026
if (spu_no_incr_hash(ctx)) {
2027
/*
2028
* If we get an incremental hashing request and it's not
2029
* supported by the hardware, we need to handle it in software
2030
* by calling synchronous hash functions.
2031
*/
2032
alg_name = crypto_ahash_alg_name(tfm);
2033
hash = crypto_alloc_shash(alg_name, 0, 0);
2034
if (IS_ERR(hash)) {
2035
ret = PTR_ERR(hash);
2036
goto err;
2037
}
2038
2039
gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2040
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2041
ctx->shash = kmalloc(sizeof(*ctx->shash) +
2042
crypto_shash_descsize(hash), gfp);
2043
if (!ctx->shash) {
2044
ret = -ENOMEM;
2045
goto err_hash;
2046
}
2047
ctx->shash->tfm = hash;
2048
2049
/* Set the key using data we already have from setkey */
2050
if (ctx->authkeylen > 0) {
2051
ret = crypto_shash_setkey(hash, ctx->authkey,
2052
ctx->authkeylen);
2053
if (ret)
2054
goto err_shash;
2055
}
2056
2057
/* Initialize hash w/ this key and other params */
2058
ret = crypto_shash_init(ctx->shash);
2059
if (ret)
2060
goto err_shash;
2061
} else {
2062
/* Otherwise call the internal function which uses SPU hw */
2063
ret = __ahash_init(req);
2064
}
2065
2066
return ret;
2067
2068
err_shash:
2069
kfree(ctx->shash);
2070
err_hash:
2071
crypto_free_shash(hash);
2072
err:
2073
return ret;
2074
}
2075
2076
static int __ahash_update(struct ahash_request *req)
2077
{
2078
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2079
2080
flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2081
2082
if (!req->nbytes)
2083
return 0;
2084
rctx->total_todo += req->nbytes;
2085
rctx->src_sent = 0;
2086
2087
return ahash_enqueue(req);
2088
}
2089
2090
static int ahash_update(struct ahash_request *req)
2091
{
2092
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2093
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2094
u8 *tmpbuf;
2095
int ret;
2096
int nents;
2097
gfp_t gfp;
2098
2099
if (spu_no_incr_hash(ctx)) {
2100
/*
2101
* If we get an incremental hashing request and it's not
2102
* supported by the hardware, we need to handle it in software
2103
* by calling synchronous hash functions.
2104
*/
2105
if (req->src)
2106
nents = sg_nents(req->src);
2107
else
2108
return -EINVAL;
2109
2110
/* Copy data from req scatterlist to tmp buffer */
2111
gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2112
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2113
tmpbuf = kmalloc(req->nbytes, gfp);
2114
if (!tmpbuf)
2115
return -ENOMEM;
2116
2117
if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2118
req->nbytes) {
2119
kfree(tmpbuf);
2120
return -EINVAL;
2121
}
2122
2123
/* Call synchronous update */
2124
ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2125
kfree(tmpbuf);
2126
} else {
2127
/* Otherwise call the internal function which uses SPU hw */
2128
ret = __ahash_update(req);
2129
}
2130
2131
return ret;
2132
}
2133
2134
static int __ahash_final(struct ahash_request *req)
2135
{
2136
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2137
2138
flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2139
2140
rctx->is_final = 1;
2141
2142
return ahash_enqueue(req);
2143
}
2144
2145
static int ahash_final(struct ahash_request *req)
2146
{
2147
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2148
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2149
int ret;
2150
2151
if (spu_no_incr_hash(ctx)) {
2152
/*
2153
* If we get an incremental hashing request and it's not
2154
* supported by the hardware, we need to handle it in software
2155
* by calling synchronous hash functions.
2156
*/
2157
ret = crypto_shash_final(ctx->shash, req->result);
2158
2159
/* Done with hash, can deallocate it now */
2160
crypto_free_shash(ctx->shash->tfm);
2161
kfree(ctx->shash);
2162
2163
} else {
2164
/* Otherwise call the internal function which uses SPU hw */
2165
ret = __ahash_final(req);
2166
}
2167
2168
return ret;
2169
}
2170
2171
static int __ahash_finup(struct ahash_request *req)
2172
{
2173
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2174
2175
flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2176
2177
rctx->total_todo += req->nbytes;
2178
rctx->src_sent = 0;
2179
rctx->is_final = 1;
2180
2181
return ahash_enqueue(req);
2182
}
2183
2184
static int ahash_finup(struct ahash_request *req)
2185
{
2186
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2187
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2188
u8 *tmpbuf;
2189
int ret;
2190
int nents;
2191
gfp_t gfp;
2192
2193
if (spu_no_incr_hash(ctx)) {
2194
/*
2195
* If we get an incremental hashing request and it's not
2196
* supported by the hardware, we need to handle it in software
2197
* by calling synchronous hash functions.
2198
*/
2199
if (req->src) {
2200
nents = sg_nents(req->src);
2201
} else {
2202
ret = -EINVAL;
2203
goto ahash_finup_exit;
2204
}
2205
2206
/* Copy data from req scatterlist to tmp buffer */
2207
gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2208
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2209
tmpbuf = kmalloc(req->nbytes, gfp);
2210
if (!tmpbuf) {
2211
ret = -ENOMEM;
2212
goto ahash_finup_exit;
2213
}
2214
2215
if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2216
req->nbytes) {
2217
ret = -EINVAL;
2218
goto ahash_finup_free;
2219
}
2220
2221
/* Call synchronous update */
2222
ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2223
req->result);
2224
} else {
2225
/* Otherwise call the internal function which uses SPU hw */
2226
return __ahash_finup(req);
2227
}
2228
ahash_finup_free:
2229
kfree(tmpbuf);
2230
2231
ahash_finup_exit:
2232
/* Done with hash, can deallocate it now */
2233
crypto_free_shash(ctx->shash->tfm);
2234
kfree(ctx->shash);
2235
return ret;
2236
}
2237
2238
static int ahash_digest(struct ahash_request *req)
2239
{
2240
int err;
2241
2242
flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2243
2244
/* whole thing at once */
2245
err = __ahash_init(req);
2246
if (!err)
2247
err = __ahash_finup(req);
2248
2249
return err;
2250
}
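
/*
 * Illustrative sketch (not part of this driver): a one-shot hash from a
 * kernel consumer exercises exactly the init + finup path that
 * ahash_digest() implements above. Names and parameters are hypothetical;
 * out must be at least SHA256_DIGEST_SIZE bytes.
 */
static int __maybe_unused example_sha256_digest(struct scatterlist *sg,
						unsigned int nbytes, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, nbytes);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}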
2251
2252
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2253
unsigned int keylen)
2254
{
2255
struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2256
2257
flow_log("%s() ahash:%p key:%p keylen:%u\n",
2258
__func__, ahash, key, keylen);
2259
flow_dump(" key: ", key, keylen);
2260
2261
if (ctx->auth.alg == HASH_ALG_AES) {
2262
switch (keylen) {
2263
case AES_KEYSIZE_128:
2264
ctx->cipher_type = CIPHER_TYPE_AES128;
2265
break;
2266
case AES_KEYSIZE_192:
2267
ctx->cipher_type = CIPHER_TYPE_AES192;
2268
break;
2269
case AES_KEYSIZE_256:
2270
ctx->cipher_type = CIPHER_TYPE_AES256;
2271
break;
2272
default:
2273
pr_err("%s() Error: Invalid key length\n", __func__);
2274
return -EINVAL;
2275
}
2276
} else {
2277
pr_err("%s() Error: unknown hash alg\n", __func__);
2278
return -EINVAL;
2279
}
2280
memcpy(ctx->authkey, key, keylen);
2281
ctx->authkeylen = keylen;
2282
2283
return 0;
2284
}
2285
2286
static int ahash_export(struct ahash_request *req, void *out)
2287
{
2288
const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2289
struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2290
2291
spu_exp->total_todo = rctx->total_todo;
2292
spu_exp->total_sent = rctx->total_sent;
2293
spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2294
memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2295
spu_exp->hash_carry_len = rctx->hash_carry_len;
2296
memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2297
2298
return 0;
2299
}
2300
2301
static int ahash_import(struct ahash_request *req, const void *in)
2302
{
2303
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2304
struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2305
2306
rctx->total_todo = spu_exp->total_todo;
2307
rctx->total_sent = spu_exp->total_sent;
2308
rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2309
memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2310
rctx->hash_carry_len = spu_exp->hash_carry_len;
2311
memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2312
2313
return 0;
2314
}
2315
2316
static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2317
unsigned int keylen)
2318
{
2319
struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2320
unsigned int blocksize =
2321
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2322
unsigned int digestsize = crypto_ahash_digestsize(ahash);
2323
unsigned int index;
2324
int rc;
2325
2326
flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2327
__func__, ahash, key, keylen, blocksize, digestsize);
2328
flow_dump(" key: ", key, keylen);
2329
2330
if (keylen > blocksize) {
2331
switch (ctx->auth.alg) {
2332
case HASH_ALG_MD5:
2333
rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2334
0, NULL, 0);
2335
break;
2336
case HASH_ALG_SHA1:
2337
rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2338
0, NULL, 0);
2339
break;
2340
case HASH_ALG_SHA224:
2341
rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2342
0, NULL, 0);
2343
break;
2344
case HASH_ALG_SHA256:
2345
rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2346
0, NULL, 0);
2347
break;
2348
case HASH_ALG_SHA384:
2349
rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2350
0, NULL, 0);
2351
break;
2352
case HASH_ALG_SHA512:
2353
rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2354
0, NULL, 0);
2355
break;
2356
case HASH_ALG_SHA3_224:
2357
rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2358
NULL, 0, NULL, 0);
2359
break;
2360
case HASH_ALG_SHA3_256:
2361
rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2362
NULL, 0, NULL, 0);
2363
break;
2364
case HASH_ALG_SHA3_384:
2365
rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2366
NULL, 0, NULL, 0);
2367
break;
2368
case HASH_ALG_SHA3_512:
2369
rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2370
NULL, 0, NULL, 0);
2371
break;
2372
default:
2373
pr_err("%s() Error: unknown hash alg\n", __func__);
2374
return -EINVAL;
2375
}
2376
if (rc < 0) {
2377
pr_err("%s() Error %d computing shash for %s\n",
2378
__func__, rc, hash_alg_name[ctx->auth.alg]);
2379
return rc;
2380
}
2381
ctx->authkeylen = digestsize;
2382
2383
flow_log(" keylen > digestsize... hashed\n");
2384
flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2385
} else {
2386
memcpy(ctx->authkey, key, keylen);
2387
ctx->authkeylen = keylen;
2388
}
2389
2390
/*
 * The full HMAC operation in SPU-M is not verified, so keep the
 * generation of the ipad, opad, and the outer hash in software.
 */
2395
if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2396
memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2397
memset(ctx->ipad + ctx->authkeylen, 0,
2398
blocksize - ctx->authkeylen);
2399
ctx->authkeylen = 0;
2400
unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
2401
"fortified memcpy causes -Wrestrict warning");
2402
2403
for (index = 0; index < blocksize; index++) {
2404
ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2405
ctx->opad[index] ^= HMAC_OPAD_VALUE;
2406
}
2407
2408
flow_dump(" ipad: ", ctx->ipad, blocksize);
2409
flow_dump(" opad: ", ctx->opad, blocksize);
2410
}
2411
ctx->digestsize = digestsize;
2412
atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2413
2414
return 0;
2415
}
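
/*
 * Illustrative sketch (not part of this driver): keying an "hmac(sha256)"
 * transform. Keys longer than the 64-byte SHA-256 block are first hashed
 * down to the digest size by ahash_hmac_setkey() above, per RFC 2104; on
 * SPU-M the ipad/opad are then derived in software. Names are hypothetical.
 */
static struct crypto_ahash * __maybe_unused example_alloc_hmac_sha256(const u8 *key,
								      unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_ahash(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}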
2416
2417
static int ahash_hmac_init(struct ahash_request *req)
2418
{
2419
int ret;
2420
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2421
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2422
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2423
unsigned int blocksize =
2424
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2425
2426
flow_log("ahash_hmac_init()\n");
2427
2428
/* init the context as a hash */
2429
ret = ahash_init(req);
2430
if (ret)
2431
return ret;
2432
2433
if (!spu_no_incr_hash(ctx)) {
2434
/* SPU-M can do incr hashing but needs sw for outer HMAC */
2435
rctx->is_sw_hmac = true;
2436
ctx->auth.mode = HASH_MODE_HASH;
2437
/* start with a prepended ipad */
2438
memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2439
rctx->hash_carry_len = blocksize;
2440
rctx->total_todo += blocksize;
2441
}
2442
2443
return 0;
2444
}
2445
2446
static int ahash_hmac_update(struct ahash_request *req)
2447
{
2448
flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2449
2450
if (!req->nbytes)
2451
return 0;
2452
2453
return ahash_update(req);
2454
}
2455
2456
static int ahash_hmac_final(struct ahash_request *req)
2457
{
2458
flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2459
2460
return ahash_final(req);
2461
}
2462
2463
static int ahash_hmac_finup(struct ahash_request *req)
2464
{
2465
flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
2466
2467
return ahash_finup(req);
2468
}
2469
2470
static int ahash_hmac_digest(struct ahash_request *req)
2471
{
2472
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2473
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2474
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2475
unsigned int blocksize =
2476
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2477
2478
flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2479
2480
/* Perform initialization and then call finup */
2481
__ahash_init(req);
2482
2483
if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2484
/*
 * SPU2 supports the full HMAC implementation in hardware, so there is
 * no need to generate the ipad, opad, or outer hash in software. Only
 * when the hash key is longer than the hash block size does SPU2
 * expect the key to be hashed down to the digest size and fed in as
 * the hash key.
 */
2492
rctx->is_sw_hmac = false;
2493
ctx->auth.mode = HASH_MODE_HMAC;
2494
} else {
2495
rctx->is_sw_hmac = true;
2496
ctx->auth.mode = HASH_MODE_HASH;
2497
/* start with a prepended ipad */
2498
memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2499
rctx->hash_carry_len = blocksize;
2500
rctx->total_todo += blocksize;
2501
}
2502
2503
return __ahash_finup(req);
2504
}
2505
2506
/* aead helpers */
2507
2508
static int aead_need_fallback(struct aead_request *req)
2509
{
2510
struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2511
struct spu_hw *spu = &iproc_priv.spu;
2512
struct crypto_aead *aead = crypto_aead_reqtfm(req);
2513
struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2514
u32 payload_len;
2515
2516
/*
2517
* SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2518
* and AAD are both 0 bytes long. So use fallback in this case.
2519
*/
2520
if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2521
(ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2522
(req->assoclen == 0)) {
2523
if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2524
(!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2525
flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2526
return 1;
2527
}
2528
}
2529
2530
/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2531
if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2532
(spu->spu_type == SPU_TYPE_SPUM) &&
2533
(ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2534
(ctx->digestsize != 16)) {
2535
flow_log("%s() AES CCM needs fallback for digest size %d\n",
2536
__func__, ctx->digestsize);
2537
return 1;
2538
}
2539
2540
/*
2541
* SPU-M on NSP has an issue where AES-CCM hash is not correct
2542
* when AAD size is 0
2543
*/
2544
if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2545
(spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2546
(req->assoclen == 0)) {
2547
flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2548
__func__);
2549
return 1;
2550
}
2551
2552
/*
2553
* RFC4106 and RFC4543 cannot handle the case where AAD is other than
2554
* 16 or 20 bytes long. So use fallback in this case.
2555
*/
2556
if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2557
ctx->cipher.alg == CIPHER_ALG_AES &&
2558
rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2559
req->assoclen != 16 && req->assoclen != 20) {
2560
flow_log("RFC4106/RFC4543 needs fallback for assoclen"
2561
" other than 16 or 20 bytes\n");
2562
return 1;
2563
}
2564
2565
payload_len = req->cryptlen;
2566
if (spu->spu_type == SPU_TYPE_SPUM)
2567
payload_len += req->assoclen;
2568
2569
flow_log("%s() payload len: %u\n", __func__, payload_len);
2570
2571
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2572
return 0;
2573
else
2574
return payload_len > ctx->max_payload;
2575
}
2576
2577
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct aead_request *subreq;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (!ctx->fallback_cipher)
		return -EINVAL;

	subreq = &rctx->req;
	aead_request_set_tfm(subreq, ctx->fallback_cipher);
	aead_request_set_callback(subreq, aead_request_flags(req),
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return is_encrypt ? crypto_aead_encrypt(subreq) :
			    crypto_aead_decrypt(subreq);
}
2601
2602
static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2603
{
2604
struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2605
struct crypto_aead *aead = crypto_aead_reqtfm(req);
2606
struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2607
int err;
2608
2609
flow_log("%s() enc:%u\n", __func__, is_encrypt);
2610
2611
if (req->assoclen > MAX_ASSOC_SIZE) {
2612
pr_err
2613
("%s() Error: associated data too long. (%u > %u bytes)\n",
2614
__func__, req->assoclen, MAX_ASSOC_SIZE);
2615
return -EINVAL;
2616
}
2617
2618
rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2619
CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2620
rctx->parent = &req->base;
2621
rctx->is_encrypt = is_encrypt;
2622
rctx->bd_suppress = false;
2623
rctx->total_todo = req->cryptlen;
2624
rctx->src_sent = 0;
2625
rctx->total_sent = 0;
2626
rctx->total_received = 0;
2627
rctx->is_sw_hmac = false;
2628
rctx->ctx = ctx;
2629
memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2630
2631
/* assoc data is at start of src sg */
2632
rctx->assoc = req->src;
2633
2634
/*
2635
* Init current position in src scatterlist to be after assoc data.
2636
* src_skip set to buffer offset where data begins. (Assoc data could
2637
* end in the middle of a buffer.)
2638
*/
2639
if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2640
&rctx->src_skip) < 0) {
2641
pr_err("%s() Error: Unable to find start of src data\n",
2642
__func__);
2643
return -EINVAL;
2644
}
2645
2646
rctx->src_nents = 0;
2647
rctx->dst_nents = 0;
2648
if (req->dst == req->src) {
2649
rctx->dst_sg = rctx->src_sg;
2650
rctx->dst_skip = rctx->src_skip;
2651
} else {
2652
/*
2653
* Expect req->dst to have room for assoc data followed by
2654
* output data and ICV, if encrypt. So initialize dst_sg
2655
* to point beyond assoc len offset.
2656
*/
2657
if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2658
&rctx->dst_skip) < 0) {
2659
pr_err("%s() Error: Unable to find start of dst data\n",
2660
__func__);
2661
return -EINVAL;
2662
}
2663
}
2664
2665
if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2666
ctx->cipher.mode == CIPHER_MODE_CTR ||
2667
ctx->cipher.mode == CIPHER_MODE_OFB ||
2668
ctx->cipher.mode == CIPHER_MODE_XTS ||
2669
ctx->cipher.mode == CIPHER_MODE_GCM) {
2670
rctx->iv_ctr_len =
2671
ctx->salt_len +
2672
crypto_aead_ivsize(crypto_aead_reqtfm(req));
2673
} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2674
rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2675
} else {
2676
rctx->iv_ctr_len = 0;
2677
}
2678
2679
rctx->hash_carry_len = 0;
2680
2681
flow_log(" src sg: %p\n", req->src);
2682
flow_log(" rctx->src_sg: %p, src_skip %u\n",
2683
rctx->src_sg, rctx->src_skip);
2684
flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2685
flow_log(" dst sg: %p\n", req->dst);
2686
flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2687
rctx->dst_sg, rctx->dst_skip);
2688
flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2689
flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2690
flow_log(" authkeylen:%u\n", ctx->authkeylen);
2691
flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp));
2692
2693
if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2694
flow_log(" max_payload infinite");
2695
else
2696
flow_log(" max_payload: %u\n", ctx->max_payload);
2697
2698
if (unlikely(aead_need_fallback(req)))
2699
return aead_do_fallback(req, is_encrypt);
2700
2701
/*
2702
* Do memory allocations for request after fallback check, because if we
2703
* do fallback, we won't call finish_req() to dealloc.
2704
*/
2705
if (rctx->iv_ctr_len) {
2706
if (ctx->salt_len)
2707
memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2708
ctx->salt, ctx->salt_len);
2709
memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2710
req->iv,
2711
rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2712
}
2713
2714
rctx->chan_idx = select_channel();
2715
err = handle_aead_req(rctx);
2716
if (err != -EINPROGRESS)
2717
/* synchronous result */
2718
spu_chunk_cleanup(rctx);
2719
2720
return err;
2721
}
2722
2723
static int aead_authenc_setkey(struct crypto_aead *cipher,
2724
const u8 *key, unsigned int keylen)
2725
{
2726
struct spu_hw *spu = &iproc_priv.spu;
2727
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2728
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2729
struct crypto_authenc_keys keys;
2730
int ret;
2731
2732
flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2733
keylen);
2734
flow_dump(" key: ", key, keylen);
2735
2736
ret = crypto_authenc_extractkeys(&keys, key, keylen);
2737
if (ret)
2738
goto badkey;
2739
2740
if (keys.enckeylen > MAX_KEY_SIZE ||
2741
keys.authkeylen > MAX_KEY_SIZE)
2742
goto badkey;
2743
2744
ctx->enckeylen = keys.enckeylen;
2745
ctx->authkeylen = keys.authkeylen;
2746
2747
memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2748
/* May end up padding auth key. So make sure it's zeroed. */
2749
memset(ctx->authkey, 0, sizeof(ctx->authkey));
2750
memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2751
2752
switch (ctx->alg->cipher_info.alg) {
2753
case CIPHER_ALG_DES:
2754
if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2755
return -EINVAL;
2756
2757
ctx->cipher_type = CIPHER_TYPE_DES;
2758
break;
2759
case CIPHER_ALG_3DES:
2760
if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2761
return -EINVAL;
2762
2763
ctx->cipher_type = CIPHER_TYPE_3DES;
2764
break;
2765
case CIPHER_ALG_AES:
2766
switch (ctx->enckeylen) {
2767
case AES_KEYSIZE_128:
2768
ctx->cipher_type = CIPHER_TYPE_AES128;
2769
break;
2770
case AES_KEYSIZE_192:
2771
ctx->cipher_type = CIPHER_TYPE_AES192;
2772
break;
2773
case AES_KEYSIZE_256:
2774
ctx->cipher_type = CIPHER_TYPE_AES256;
2775
break;
2776
default:
2777
goto badkey;
2778
}
2779
break;
2780
default:
2781
pr_err("%s() Error: Unknown cipher alg\n", __func__);
2782
return -EINVAL;
2783
}
2784
2785
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2786
ctx->authkeylen);
2787
flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2788
flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2789
2790
/* setkey the fallback just in case we need to use it */
2791
if (ctx->fallback_cipher) {
2792
flow_log(" running fallback setkey()\n");
2793
2794
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2795
ctx->fallback_cipher->base.crt_flags |=
2796
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2797
ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2798
if (ret)
2799
flow_log(" fallback setkey() returned:%d\n", ret);
2800
}
2801
2802
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2803
ctx->enckeylen,
2804
false);
2805
2806
atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2807
2808
return ret;
2809
2810
badkey:
2811
ctx->enckeylen = 0;
2812
ctx->authkeylen = 0;
2813
ctx->digestsize = 0;
2814
2815
return -EINVAL;
2816
}
2817
2818
static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2819
const u8 *key, unsigned int keylen)
2820
{
2821
struct spu_hw *spu = &iproc_priv.spu;
2822
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2823
struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2824
2825
int ret = 0;
2826
2827
flow_log("%s() keylen:%u\n", __func__, keylen);
2828
flow_dump(" key: ", key, keylen);
2829
2830
if (!ctx->is_esp)
2831
ctx->digestsize = keylen;
2832
2833
ctx->enckeylen = keylen;
2834
ctx->authkeylen = 0;
2835
2836
switch (ctx->enckeylen) {
2837
case AES_KEYSIZE_128:
2838
ctx->cipher_type = CIPHER_TYPE_AES128;
2839
break;
2840
case AES_KEYSIZE_192:
2841
ctx->cipher_type = CIPHER_TYPE_AES192;
2842
break;
2843
case AES_KEYSIZE_256:
2844
ctx->cipher_type = CIPHER_TYPE_AES256;
2845
break;
2846
default:
2847
goto badkey;
2848
}
2849
2850
memcpy(ctx->enckey, key, ctx->enckeylen);
2851
2852
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2853
ctx->authkeylen);
2854
flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2855
flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2856
2857
/* setkey the fallback just in case we need to use it */
2858
if (ctx->fallback_cipher) {
2859
flow_log(" running fallback setkey()\n");
2860
2861
ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2862
ctx->fallback_cipher->base.crt_flags |=
2863
tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2864
ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2865
keylen + ctx->salt_len);
2866
if (ret)
2867
flow_log(" fallback setkey() returned:%d\n", ret);
2868
}
2869
2870
ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2871
ctx->enckeylen,
2872
false);
2873
2874
atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2875
2876
flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2877
ctx->authkeylen);
2878
2879
return ret;
2880
2881
badkey:
2882
ctx->enckeylen = 0;
2883
ctx->authkeylen = 0;
2884
ctx->digestsize = 0;
2885
2886
return -EINVAL;
2887
}
2888
2889
/**
2890
* aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
2891
* @cipher: AEAD structure
2892
* @key: Key followed by 4 bytes of salt
2893
* @keylen: Length of key plus salt, in bytes
2894
*
2895
* Extracts salt from key and stores it to be prepended to IV on each request.
2896
* Digest is always 16 bytes
2897
*
2898
* Return: Value from generic gcm setkey.
2899
*/
2900
static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2901
const u8 *key, unsigned int keylen)
2902
{
2903
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2904
2905
flow_log("%s\n", __func__);
2906
2907
if (keylen < GCM_ESP_SALT_SIZE)
2908
return -EINVAL;
2909
2910
ctx->salt_len = GCM_ESP_SALT_SIZE;
2911
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2912
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2913
keylen -= GCM_ESP_SALT_SIZE;
2914
ctx->digestsize = GCM_ESP_DIGESTSIZE;
2915
ctx->is_esp = true;
2916
flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2917
2918
return aead_gcm_ccm_setkey(cipher, key, keylen);
2919
}
2920
2921
/**
2922
* rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
2923
* @cipher: AEAD structure
2924
* @key: Key followed by 4 bytes of salt
2925
* @keylen: Length of key plus salt, in bytes
2926
*
2927
* Extracts salt from key and stores it to be prepended to IV on each request.
2928
* Digest is always 16 bytes
2929
*
2930
* Return: Value from generic gcm setkey.
2931
*/
2932
static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
2933
const u8 *key, unsigned int keylen)
2934
{
2935
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2936
2937
flow_log("%s\n", __func__);
2938
2939
if (keylen < GCM_ESP_SALT_SIZE)
2940
return -EINVAL;
2941
2942
ctx->salt_len = GCM_ESP_SALT_SIZE;
2943
ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2944
memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2945
keylen -= GCM_ESP_SALT_SIZE;
2946
ctx->digestsize = GCM_ESP_DIGESTSIZE;
2947
ctx->is_esp = true;
2948
ctx->is_rfc4543 = true;
2949
flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2950
2951
return aead_gcm_ccm_setkey(cipher, key, keylen);
2952
}
2953
2954
/**
2955
* aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
2956
* @cipher: AEAD structure
2957
* @key: Key followed by 4 bytes of salt
2958
* @keylen: Length of key plus salt, in bytes
2959
*
2960
* Extracts salt from key and stores it to be prepended to IV on each request.
2961
* Digest is always 16 bytes
2962
*
2963
* Return: Value from generic ccm setkey.
2964
*/
2965
static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
2966
const u8 *key, unsigned int keylen)
2967
{
2968
struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2969
2970
flow_log("%s\n", __func__);
2971
2972
if (keylen < CCM_ESP_SALT_SIZE)
2973
return -EINVAL;
2974
2975
ctx->salt_len = CCM_ESP_SALT_SIZE;
2976
ctx->salt_offset = CCM_ESP_SALT_OFFSET;
2977
memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
2978
keylen -= CCM_ESP_SALT_SIZE;
2979
ctx->is_esp = true;
2980
flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
2981
2982
return aead_gcm_ccm_setkey(cipher, key, keylen);
2983
}
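
/*
 * Illustrative sketch (not part of this driver): key layout expected by the
 * three ESP setkey handlers above. The cipher key is immediately followed
 * by the 4-byte salt, so for rfc4106(gcm(aes)) with AES-128 the total
 * keylen is 20 bytes. The buffer below is a zeroed placeholder, not a real
 * key, and the function name is hypothetical.
 */
static int __maybe_unused example_rfc4106_setkey(struct crypto_aead *tfm)
{
	static const u8 key_and_salt[AES_KEYSIZE_128 + GCM_ESP_SALT_SIZE];

	/* aead_gcm_esp_setkey() splits off the trailing salt bytes */
	return crypto_aead_setkey(tfm, key_and_salt, sizeof(key_and_salt));
}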

static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
{
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
	int ret = 0;

	flow_log("%s() authkeylen:%u authsize:%u\n",
		 __func__, ctx->authkeylen, authsize);

	ctx->digestsize = authsize;

	/* setkey the fallback just in case we need to use it */
	if (ctx->fallback_cipher) {
		flow_log(" running fallback setauth()\n");

		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
		if (ret)
			flow_log(" fallback setauth() returned:%d\n", ret);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
		 req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log(" assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log(" assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, false);
}

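/*
 * Illustrative sketch (not part of this driver): driving one of the AEAD
 * algorithms registered below from a kernel consumer. src must hold
 * AAD || plaintext and dst must have room for AAD || ciphertext || ICV, as
 * handle_aead_resp() expects. For "gcm(aes)" the IV is 12 bytes. All names
 * and parameters here are hypothetical.
 */
static int __maybe_unused example_gcm_aes_encrypt(struct scatterlist *src,
						  struct scatterlist *dst,
						  unsigned int assoclen,
						  unsigned int cryptlen,
						  u8 *iv, const u8 *key,
						  unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
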
/* ==================== Supported Cipher Algorithms ==================== */
3027
3028
static struct iproc_alg_s driver_algs[] = {
3029
{
3030
.type = CRYPTO_ALG_TYPE_AEAD,
3031
.alg.aead = {
3032
.base = {
3033
.cra_name = "gcm(aes)",
3034
.cra_driver_name = "gcm-aes-iproc",
3035
.cra_blocksize = AES_BLOCK_SIZE,
3036
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3037
},
3038
.setkey = aead_gcm_ccm_setkey,
3039
.ivsize = GCM_AES_IV_SIZE,
3040
.maxauthsize = AES_BLOCK_SIZE,
3041
},
3042
.cipher_info = {
3043
.alg = CIPHER_ALG_AES,
3044
.mode = CIPHER_MODE_GCM,
3045
},
3046
.auth_info = {
3047
.alg = HASH_ALG_AES,
3048
.mode = HASH_MODE_GCM,
3049
},
3050
.auth_first = 0,
3051
},
3052
{
3053
.type = CRYPTO_ALG_TYPE_AEAD,
3054
.alg.aead = {
3055
.base = {
3056
.cra_name = "ccm(aes)",
3057
.cra_driver_name = "ccm-aes-iproc",
3058
.cra_blocksize = AES_BLOCK_SIZE,
3059
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3060
},
3061
.setkey = aead_gcm_ccm_setkey,
3062
.ivsize = CCM_AES_IV_SIZE,
3063
.maxauthsize = AES_BLOCK_SIZE,
3064
},
3065
.cipher_info = {
3066
.alg = CIPHER_ALG_AES,
3067
.mode = CIPHER_MODE_CCM,
3068
},
3069
.auth_info = {
3070
.alg = HASH_ALG_AES,
3071
.mode = HASH_MODE_CCM,
3072
},
3073
.auth_first = 0,
3074
},
3075
{
3076
.type = CRYPTO_ALG_TYPE_AEAD,
3077
.alg.aead = {
3078
.base = {
3079
.cra_name = "rfc4106(gcm(aes))",
3080
.cra_driver_name = "gcm-aes-esp-iproc",
3081
.cra_blocksize = AES_BLOCK_SIZE,
3082
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3083
},
3084
.setkey = aead_gcm_esp_setkey,
3085
.ivsize = GCM_RFC4106_IV_SIZE,
3086
.maxauthsize = AES_BLOCK_SIZE,
3087
},
3088
.cipher_info = {
3089
.alg = CIPHER_ALG_AES,
3090
.mode = CIPHER_MODE_GCM,
3091
},
3092
.auth_info = {
3093
.alg = HASH_ALG_AES,
3094
.mode = HASH_MODE_GCM,
3095
},
3096
.auth_first = 0,
3097
},
3098
{
3099
.type = CRYPTO_ALG_TYPE_AEAD,
3100
.alg.aead = {
3101
.base = {
3102
.cra_name = "rfc4309(ccm(aes))",
3103
.cra_driver_name = "ccm-aes-esp-iproc",
3104
.cra_blocksize = AES_BLOCK_SIZE,
3105
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3106
},
3107
.setkey = aead_ccm_esp_setkey,
3108
.ivsize = CCM_AES_IV_SIZE,
3109
.maxauthsize = AES_BLOCK_SIZE,
3110
},
3111
.cipher_info = {
3112
.alg = CIPHER_ALG_AES,
3113
.mode = CIPHER_MODE_CCM,
3114
},
3115
.auth_info = {
3116
.alg = HASH_ALG_AES,
3117
.mode = HASH_MODE_CCM,
3118
},
3119
.auth_first = 0,
3120
},
3121
{
3122
.type = CRYPTO_ALG_TYPE_AEAD,
3123
.alg.aead = {
3124
.base = {
3125
.cra_name = "rfc4543(gcm(aes))",
3126
.cra_driver_name = "gmac-aes-esp-iproc",
3127
.cra_blocksize = AES_BLOCK_SIZE,
3128
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3129
},
3130
.setkey = rfc4543_gcm_esp_setkey,
3131
.ivsize = GCM_RFC4106_IV_SIZE,
3132
.maxauthsize = AES_BLOCK_SIZE,
3133
},
3134
.cipher_info = {
3135
.alg = CIPHER_ALG_AES,
3136
.mode = CIPHER_MODE_GCM,
3137
},
3138
.auth_info = {
3139
.alg = HASH_ALG_AES,
3140
.mode = HASH_MODE_GCM,
3141
},
3142
.auth_first = 0,
3143
},
3144
{
3145
.type = CRYPTO_ALG_TYPE_AEAD,
3146
.alg.aead = {
3147
.base = {
3148
.cra_name = "authenc(hmac(md5),cbc(aes))",
3149
.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3150
.cra_blocksize = AES_BLOCK_SIZE,
3151
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3152
CRYPTO_ALG_ASYNC |
3153
CRYPTO_ALG_ALLOCATES_MEMORY
3154
},
3155
.setkey = aead_authenc_setkey,
3156
.ivsize = AES_BLOCK_SIZE,
3157
.maxauthsize = MD5_DIGEST_SIZE,
3158
},
3159
.cipher_info = {
3160
.alg = CIPHER_ALG_AES,
3161
.mode = CIPHER_MODE_CBC,
3162
},
3163
.auth_info = {
3164
.alg = HASH_ALG_MD5,
3165
.mode = HASH_MODE_HMAC,
3166
},
3167
.auth_first = 0,
3168
},
3169
{
3170
.type = CRYPTO_ALG_TYPE_AEAD,
3171
.alg.aead = {
3172
.base = {
3173
.cra_name = "authenc(hmac(sha1),cbc(aes))",
3174
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3175
.cra_blocksize = AES_BLOCK_SIZE,
3176
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3177
CRYPTO_ALG_ASYNC |
3178
CRYPTO_ALG_ALLOCATES_MEMORY
3179
},
3180
.setkey = aead_authenc_setkey,
3181
.ivsize = AES_BLOCK_SIZE,
3182
.maxauthsize = SHA1_DIGEST_SIZE,
3183
},
3184
.cipher_info = {
3185
.alg = CIPHER_ALG_AES,
3186
.mode = CIPHER_MODE_CBC,
3187
},
3188
.auth_info = {
3189
.alg = HASH_ALG_SHA1,
3190
.mode = HASH_MODE_HMAC,
3191
},
3192
.auth_first = 0,
3193
},
3194
{
3195
.type = CRYPTO_ALG_TYPE_AEAD,
3196
.alg.aead = {
3197
.base = {
3198
.cra_name = "authenc(hmac(sha256),cbc(aes))",
3199
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3200
.cra_blocksize = AES_BLOCK_SIZE,
3201
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3202
CRYPTO_ALG_ASYNC |
3203
CRYPTO_ALG_ALLOCATES_MEMORY
3204
},
3205
.setkey = aead_authenc_setkey,
3206
.ivsize = AES_BLOCK_SIZE,
3207
.maxauthsize = SHA256_DIGEST_SIZE,
3208
},
3209
.cipher_info = {
3210
.alg = CIPHER_ALG_AES,
3211
.mode = CIPHER_MODE_CBC,
3212
},
3213
.auth_info = {
3214
.alg = HASH_ALG_SHA256,
3215
.mode = HASH_MODE_HMAC,
3216
},
3217
.auth_first = 0,
3218
},
3219
{
3220
.type = CRYPTO_ALG_TYPE_AEAD,
3221
.alg.aead = {
3222
.base = {
3223
.cra_name = "authenc(hmac(md5),cbc(des))",
3224
.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3225
.cra_blocksize = DES_BLOCK_SIZE,
3226
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3227
CRYPTO_ALG_ASYNC |
3228
CRYPTO_ALG_ALLOCATES_MEMORY
3229
},
3230
.setkey = aead_authenc_setkey,
3231
.ivsize = DES_BLOCK_SIZE,
3232
.maxauthsize = MD5_DIGEST_SIZE,
3233
},
3234
.cipher_info = {
3235
.alg = CIPHER_ALG_DES,
3236
.mode = CIPHER_MODE_CBC,
3237
},
3238
.auth_info = {
3239
.alg = HASH_ALG_MD5,
3240
.mode = HASH_MODE_HMAC,
3241
},
3242
.auth_first = 0,
3243
},
3244
{
3245
.type = CRYPTO_ALG_TYPE_AEAD,
3246
.alg.aead = {
3247
.base = {
3248
.cra_name = "authenc(hmac(sha1),cbc(des))",
3249
.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3250
.cra_blocksize = DES_BLOCK_SIZE,
3251
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3252
CRYPTO_ALG_ASYNC |
3253
CRYPTO_ALG_ALLOCATES_MEMORY
3254
},
3255
.setkey = aead_authenc_setkey,
3256
.ivsize = DES_BLOCK_SIZE,
3257
.maxauthsize = SHA1_DIGEST_SIZE,
3258
},
3259
.cipher_info = {
3260
.alg = CIPHER_ALG_DES,
3261
.mode = CIPHER_MODE_CBC,
3262
},
3263
.auth_info = {
3264
.alg = HASH_ALG_SHA1,
3265
.mode = HASH_MODE_HMAC,
3266
},
3267
.auth_first = 0,
3268
},
3269
{
3270
.type = CRYPTO_ALG_TYPE_AEAD,
3271
.alg.aead = {
3272
.base = {
3273
.cra_name = "authenc(hmac(sha224),cbc(des))",
3274
.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3275
.cra_blocksize = DES_BLOCK_SIZE,
3276
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3277
CRYPTO_ALG_ASYNC |
3278
CRYPTO_ALG_ALLOCATES_MEMORY
3279
},
3280
.setkey = aead_authenc_setkey,
3281
.ivsize = DES_BLOCK_SIZE,
3282
.maxauthsize = SHA224_DIGEST_SIZE,
3283
},
3284
.cipher_info = {
3285
.alg = CIPHER_ALG_DES,
3286
.mode = CIPHER_MODE_CBC,
3287
},
3288
.auth_info = {
3289
.alg = HASH_ALG_SHA224,
3290
.mode = HASH_MODE_HMAC,
3291
},
3292
.auth_first = 0,
3293
},
3294
{
3295
.type = CRYPTO_ALG_TYPE_AEAD,
3296
.alg.aead = {
3297
.base = {
3298
.cra_name = "authenc(hmac(sha256),cbc(des))",
3299
.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3300
.cra_blocksize = DES_BLOCK_SIZE,
3301
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3302
CRYPTO_ALG_ASYNC |
3303
CRYPTO_ALG_ALLOCATES_MEMORY
3304
},
3305
.setkey = aead_authenc_setkey,
3306
.ivsize = DES_BLOCK_SIZE,
3307
.maxauthsize = SHA256_DIGEST_SIZE,
3308
},
3309
.cipher_info = {
3310
.alg = CIPHER_ALG_DES,
3311
.mode = CIPHER_MODE_CBC,
3312
},
3313
.auth_info = {
3314
.alg = HASH_ALG_SHA256,
3315
.mode = HASH_MODE_HMAC,
3316
},
3317
.auth_first = 0,
3318
},
3319
{
3320
.type = CRYPTO_ALG_TYPE_AEAD,
3321
.alg.aead = {
3322
.base = {
3323
.cra_name = "authenc(hmac(sha384),cbc(des))",
3324
.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3325
.cra_blocksize = DES_BLOCK_SIZE,
3326
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3327
CRYPTO_ALG_ASYNC |
3328
CRYPTO_ALG_ALLOCATES_MEMORY
3329
},
3330
.setkey = aead_authenc_setkey,
3331
.ivsize = DES_BLOCK_SIZE,
3332
.maxauthsize = SHA384_DIGEST_SIZE,
3333
},
3334
.cipher_info = {
3335
.alg = CIPHER_ALG_DES,
3336
.mode = CIPHER_MODE_CBC,
3337
},
3338
.auth_info = {
3339
.alg = HASH_ALG_SHA384,
3340
.mode = HASH_MODE_HMAC,
3341
},
3342
.auth_first = 0,
3343
},
3344
{
3345
.type = CRYPTO_ALG_TYPE_AEAD,
3346
.alg.aead = {
3347
.base = {
3348
.cra_name = "authenc(hmac(sha512),cbc(des))",
3349
.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3350
.cra_blocksize = DES_BLOCK_SIZE,
3351
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3352
CRYPTO_ALG_ASYNC |
3353
CRYPTO_ALG_ALLOCATES_MEMORY
3354
},
3355
.setkey = aead_authenc_setkey,
3356
.ivsize = DES_BLOCK_SIZE,
3357
.maxauthsize = SHA512_DIGEST_SIZE,
3358
},
3359
.cipher_info = {
3360
.alg = CIPHER_ALG_DES,
3361
.mode = CIPHER_MODE_CBC,
3362
},
3363
.auth_info = {
3364
.alg = HASH_ALG_SHA512,
3365
.mode = HASH_MODE_HMAC,
3366
},
3367
.auth_first = 0,
3368
},
3369
{
3370
.type = CRYPTO_ALG_TYPE_AEAD,
3371
.alg.aead = {
3372
.base = {
3373
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3374
.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3375
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3376
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3377
CRYPTO_ALG_ASYNC |
3378
CRYPTO_ALG_ALLOCATES_MEMORY
3379
},
3380
.setkey = aead_authenc_setkey,
3381
.ivsize = DES3_EDE_BLOCK_SIZE,
3382
.maxauthsize = MD5_DIGEST_SIZE,
3383
},
3384
.cipher_info = {
3385
.alg = CIPHER_ALG_3DES,
3386
.mode = CIPHER_MODE_CBC,
3387
},
3388
.auth_info = {
3389
.alg = HASH_ALG_MD5,
3390
.mode = HASH_MODE_HMAC,
3391
},
3392
.auth_first = 0,
3393
},
3394
{
3395
.type = CRYPTO_ALG_TYPE_AEAD,
3396
.alg.aead = {
3397
.base = {
3398
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3399
.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3400
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3401
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3402
CRYPTO_ALG_ASYNC |
3403
CRYPTO_ALG_ALLOCATES_MEMORY
3404
},
3405
.setkey = aead_authenc_setkey,
3406
.ivsize = DES3_EDE_BLOCK_SIZE,
3407
.maxauthsize = SHA1_DIGEST_SIZE,
3408
},
3409
.cipher_info = {
3410
.alg = CIPHER_ALG_3DES,
3411
.mode = CIPHER_MODE_CBC,
3412
},
3413
.auth_info = {
3414
.alg = HASH_ALG_SHA1,
3415
.mode = HASH_MODE_HMAC,
3416
},
3417
.auth_first = 0,
3418
},
3419
{
3420
.type = CRYPTO_ALG_TYPE_AEAD,
3421
.alg.aead = {
3422
.base = {
3423
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3424
.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3425
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3426
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3427
CRYPTO_ALG_ASYNC |
3428
CRYPTO_ALG_ALLOCATES_MEMORY
3429
},
3430
.setkey = aead_authenc_setkey,
3431
.ivsize = DES3_EDE_BLOCK_SIZE,
3432
.maxauthsize = SHA224_DIGEST_SIZE,
3433
},
3434
.cipher_info = {
3435
.alg = CIPHER_ALG_3DES,
3436
.mode = CIPHER_MODE_CBC,
3437
},
3438
.auth_info = {
3439
.alg = HASH_ALG_SHA224,
3440
.mode = HASH_MODE_HMAC,
3441
},
3442
.auth_first = 0,
3443
},
3444
{
3445
.type = CRYPTO_ALG_TYPE_AEAD,
3446
.alg.aead = {
3447
.base = {
3448
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3449
.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3450
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3451
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3452
CRYPTO_ALG_ASYNC |
3453
CRYPTO_ALG_ALLOCATES_MEMORY
3454
},
3455
.setkey = aead_authenc_setkey,
3456
.ivsize = DES3_EDE_BLOCK_SIZE,
3457
.maxauthsize = SHA256_DIGEST_SIZE,
3458
},
3459
.cipher_info = {
3460
.alg = CIPHER_ALG_3DES,
3461
.mode = CIPHER_MODE_CBC,
3462
},
3463
.auth_info = {
3464
.alg = HASH_ALG_SHA256,
3465
.mode = HASH_MODE_HMAC,
3466
},
3467
.auth_first = 0,
3468
},
3469
{
3470
.type = CRYPTO_ALG_TYPE_AEAD,
3471
.alg.aead = {
3472
.base = {
3473
.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3474
.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3475
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3476
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3477
CRYPTO_ALG_ASYNC |
3478
CRYPTO_ALG_ALLOCATES_MEMORY
3479
},
3480
.setkey = aead_authenc_setkey,
3481
.ivsize = DES3_EDE_BLOCK_SIZE,
3482
.maxauthsize = SHA384_DIGEST_SIZE,
3483
},
3484
.cipher_info = {
3485
.alg = CIPHER_ALG_3DES,
3486
.mode = CIPHER_MODE_CBC,
3487
},
3488
.auth_info = {
3489
.alg = HASH_ALG_SHA384,
3490
.mode = HASH_MODE_HMAC,
3491
},
3492
.auth_first = 0,
3493
},
3494
{
3495
.type = CRYPTO_ALG_TYPE_AEAD,
3496
.alg.aead = {
3497
.base = {
3498
.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3499
.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3500
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3501
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3502
CRYPTO_ALG_ASYNC |
3503
CRYPTO_ALG_ALLOCATES_MEMORY
3504
},
3505
.setkey = aead_authenc_setkey,
3506
.ivsize = DES3_EDE_BLOCK_SIZE,
3507
.maxauthsize = SHA512_DIGEST_SIZE,
3508
},
3509
.cipher_info = {
3510
.alg = CIPHER_ALG_3DES,
3511
.mode = CIPHER_MODE_CBC,
3512
},
3513
.auth_info = {
3514
.alg = HASH_ALG_SHA512,
3515
.mode = HASH_MODE_HMAC,
3516
},
3517
.auth_first = 0,
3518
},
3519
3520
	/* SKCIPHER algorithms. */
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des)",
			.base.cra_driver_name = "cbc-des-iproc",
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_DES,
			.mode = CIPHER_MODE_CBC,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des)",
			.base.cra_driver_name = "ecb-des-iproc",
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_DES,
			.mode = CIPHER_MODE_ECB,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-iproc",
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_3DES,
			.mode = CIPHER_MODE_CBC,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-iproc",
			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_3DES,
			.mode = CIPHER_MODE_ECB,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-iproc",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_AES,
			.mode = CIPHER_MODE_CBC,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-iproc",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_AES,
			.mode = CIPHER_MODE_ECB,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-iproc",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_AES,
			.mode = CIPHER_MODE_CTR,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "xts(aes)",
			.base.cra_driver_name = "xts-aes-iproc",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_info = {
			.alg = CIPHER_ALG_AES,
			.mode = CIPHER_MODE_XTS,
		},
		.auth_info = {
			.alg = HASH_ALG_NONE,
			.mode = HASH_MODE_NONE,
		},
	},

	/* AHASH algorithms. */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-iproc",
				.cra_blocksize = MD5_BLOCK_WORDS * 4,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_ALLOCATES_MEMORY,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_MD5,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-iproc",
				.cra_blocksize = MD5_BLOCK_WORDS * 4,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_MD5,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-iproc",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA1,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-iproc",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA1,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-iproc",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA224,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-iproc",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA224,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-iproc",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA256,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-iproc",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA256,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-iproc",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA384,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-iproc",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA384,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-iproc",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA512,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-iproc",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA512,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha3-224",
				.cra_driver_name = "sha3-224-iproc",
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_224,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha3-224)",
				.cra_driver_name = "hmac-sha3-224-iproc",
				.cra_blocksize = SHA3_224_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_224,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha3-256",
				.cra_driver_name = "sha3-256-iproc",
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_256,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha3-256)",
				.cra_driver_name = "hmac-sha3-256-iproc",
				.cra_blocksize = SHA3_256_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_256,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha3-384",
				.cra_driver_name = "sha3-384-iproc",
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_384,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha3-384)",
				.cra_driver_name = "hmac-sha3-384-iproc",
				.cra_blocksize = SHA3_384_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_384,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha3-512",
				.cra_driver_name = "sha3-512-iproc",
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_512,
			.mode = HASH_MODE_HASH,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA3_512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha3-512)",
				.cra_driver_name = "hmac-sha3-512-iproc",
				.cra_blocksize = SHA3_512_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_SHA3_512,
			.mode = HASH_MODE_HMAC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.base = {
				.cra_name = "xcbc(aes)",
				.cra_driver_name = "xcbc-aes-iproc",
				.cra_blocksize = AES_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_AES,
			.mode = HASH_MODE_XCBC,
		},
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = AES_BLOCK_SIZE,
			.halg.base = {
				.cra_name = "cmac(aes)",
				.cra_driver_name = "cmac-aes-iproc",
				.cra_blocksize = AES_BLOCK_SIZE,
			}
		},
		.cipher_info = {
			.alg = CIPHER_ALG_NONE,
			.mode = CIPHER_MODE_NONE,
		},
		.auth_info = {
			.alg = HASH_ALG_AES,
			.mode = HASH_MODE_CMAC,
		},
	},
};

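/*
 * Illustrative sketch, not part of this driver: once the entries above have
 * been registered (see spu_algs_register() below), a kernel consumer reaches
 * them through the regular crypto API. Requesting by cra_name (e.g.
 * "cbc(aes)") picks the highest-priority implementation, while requesting by
 * cra_driver_name pins this hardware implementation explicitly:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc-aes-iproc", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_skcipher(tfm);
 */
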
static int generic_cra_init(struct crypto_tfm *tfm,
			    struct iproc_alg_s *cipher_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);

	flow_log("%s()\n", __func__);

	ctx->alg = cipher_alg;
	ctx->cipher = cipher_alg->cipher_info;
	ctx->auth = cipher_alg->auth_info;
	ctx->auth_first = cipher_alg->auth_first;
	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
						    ctx->cipher.mode,
						    blocksize);
	ctx->fallback_cipher = NULL;

	ctx->enckeylen = 0;
	ctx->authkeylen = 0;

	atomic_inc(&iproc_priv.stream_count);
	atomic_inc(&iproc_priv.session_count);

	return 0;
}

static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
	struct iproc_alg_s *cipher_alg;

	flow_log("%s()\n", __func__);

	crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));

	cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
	return generic_cra_init(tfm, cipher_alg);
}

static int ahash_cra_init(struct crypto_tfm *tfm)
{
	int err;
	struct crypto_alg *alg = tfm->__crt_alg;
	struct iproc_alg_s *cipher_alg;

	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
				  alg.hash);

	err = generic_cra_init(tfm, cipher_alg);
	flow_log("%s()\n", __func__);

	/*
	 * export state size has to be < 512 bytes. So don't include msg bufs
	 * in state size.
	 */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct iproc_reqctx_s));

	return err;
}

static int aead_cra_init(struct crypto_aead *aead)
{
	unsigned int reqsize = sizeof(struct iproc_reqctx_s);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
						      alg.aead);

	int err = generic_cra_init(tfm, cipher_alg);

	flow_log("%s()\n", __func__);

	ctx->is_esp = false;
	ctx->salt_len = 0;
	ctx->salt_offset = 0;

	/* random first IV */
	get_random_bytes(ctx->iv, MAX_IV_SIZE);
	flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);

	if (err)
		goto out;

	if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
		goto reqsize;

	flow_log("%s() creating fallback cipher\n", __func__);

	ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
						 CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_cipher)) {
		pr_err("%s() Error: failed to allocate fallback for %s\n",
		       __func__, alg->cra_name);
		return PTR_ERR(ctx->fallback_cipher);
	}

	reqsize += crypto_aead_reqsize(ctx->fallback_cipher);

reqsize:
	crypto_aead_set_reqsize(aead, reqsize);

out:
	return err;
}

static void generic_cra_exit(struct crypto_tfm *tfm)
{
	atomic_dec(&iproc_priv.session_count);
}

static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	generic_cra_exit(crypto_skcipher_tfm(tfm));
}

static void aead_cra_exit(struct crypto_aead *aead)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);

	generic_cra_exit(tfm);

	if (ctx->fallback_cipher) {
		crypto_free_aead(ctx->fallback_cipher);
		ctx->fallback_cipher = NULL;
	}
}

/**
 * spu_functions_register() - Specify hardware-specific SPU functions based on
 * SPU type read from device tree.
 * @dev: device structure
 * @spu_type: SPU hardware generation
 * @spu_subtype: SPU hardware version
 */
static void spu_functions_register(struct device *dev,
				   enum spu_spu_type spu_type,
				   enum spu_spu_subtype spu_subtype)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu_type == SPU_TYPE_SPUM) {
		dev_dbg(dev, "Registering SPUM functions");
		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
		spu->spu_payload_length = spum_payload_length;
		spu->spu_response_hdr_len = spum_response_hdr_len;
		spu->spu_hash_pad_len = spum_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spum_assoc_resp_len;
		spu->spu_aead_ivlen = spum_aead_ivlen;
		spu->spu_hash_type = spum_hash_type;
		spu->spu_digest_size = spum_digest_size;
		spu->spu_create_request = spum_create_request;
		spu->spu_cipher_req_init = spum_cipher_req_init;
		spu->spu_cipher_req_finish = spum_cipher_req_finish;
		spu->spu_request_pad = spum_request_pad;
		spu->spu_tx_status_len = spum_tx_status_len;
		spu->spu_rx_status_len = spum_rx_status_len;
		spu->spu_status_process = spum_status_process;
		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spum_ccm_update_iv;
		spu->spu_wordalign_padlen = spum_wordalign_padlen;
		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
		else
			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
	} else {
		dev_dbg(dev, "Registering SPU2 functions");
		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
		spu->spu_payload_length = spu2_payload_length;
		spu->spu_response_hdr_len = spu2_response_hdr_len;
		spu->spu_hash_pad_len = spu2_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
		spu->spu_aead_ivlen = spu2_aead_ivlen;
		spu->spu_hash_type = spu2_hash_type;
		spu->spu_digest_size = spu2_digest_size;
		spu->spu_create_request = spu2_create_request;
		spu->spu_cipher_req_init = spu2_cipher_req_init;
		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
		spu->spu_request_pad = spu2_request_pad;
		spu->spu_tx_status_len = spu2_tx_status_len;
		spu->spu_rx_status_len = spu2_rx_status_len;
		spu->spu_status_process = spu2_status_process;
		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
	}
}

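/*
 * Illustrative note (hypothetical caller shown, not code from this file):
 * once spu_functions_register() has filled in the ops table, the rest of the
 * driver stays SPU-generation agnostic by calling through the function
 * pointers rather than calling the spum_ or spu2_ helpers directly, e.g. as
 * generic_cra_init() does above:
 *
 *	struct spu_hw *spu = &iproc_priv.spu;
 *
 *	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
 *						    ctx->cipher.mode,
 *						    blocksize);
 */
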
/**
 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
 * channel for the SPU being probed.
 * @dev: SPU driver device structure
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */
static int spu_mb_init(struct device *dev)
{
	struct mbox_client *mcl = &iproc_priv.mcl;
	int err, i;

	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
				       sizeof(struct mbox_chan *), GFP_KERNEL);
	if (!iproc_priv.mbox)
		return -ENOMEM;

	mcl->dev = dev;
	mcl->tx_block = false;
	mcl->tx_tout = 0;
	mcl->knows_txdone = true;
	mcl->rx_callback = spu_rx_callback;
	mcl->tx_done = NULL;

	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
		if (IS_ERR(iproc_priv.mbox[i])) {
			err = PTR_ERR(iproc_priv.mbox[i]);
			dev_err(dev,
				"Mbox channel %d request failed with err %d",
				i, err);
			iproc_priv.mbox[i] = NULL;
			goto free_channels;
		}
	}

	return 0;
free_channels:
	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
		if (iproc_priv.mbox[i])
			mbox_free_channel(iproc_priv.mbox[i]);
	}

	return err;
}

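/*
 * Illustrative sketch (hedged and simplified; chan_idx and mssg are
 * placeholders): with the channels requested in spu_mb_init() above, a
 * crypto request is handed to the SPU hardware by posting a message on one
 * of the owned mailbox channels, along the lines of:
 *
 *	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
 *	if (err < 0)
 *		atomic_inc(&iproc_priv.mb_send_fail);
 *
 * The driver's real send path (earlier in this file) also retries with a
 * short sleep when the mailbox queue is temporarily full.
 */
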
static void spu_mb_release(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < iproc_priv.spu.num_chan; i++)
		mbox_free_channel(iproc_priv.mbox[i]);
}

static void spu_counters_init(void)
{
	int i;
	int j;

	atomic_set(&iproc_priv.session_count, 0);
	atomic_set(&iproc_priv.stream_count, 0);
	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
	atomic64_set(&iproc_priv.bytes_in, 0);
	atomic64_set(&iproc_priv.bytes_out, 0);
	for (i = 0; i < SPU_OP_NUM; i++) {
		atomic_set(&iproc_priv.op_counts[i], 0);
		atomic_set(&iproc_priv.setkey_cnt[i], 0);
	}
	for (i = 0; i < CIPHER_ALG_LAST; i++)
		for (j = 0; j < CIPHER_MODE_LAST; j++)
			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);

	for (i = 0; i < HASH_ALG_LAST; i++) {
		atomic_set(&iproc_priv.hash_cnt[i], 0);
		atomic_set(&iproc_priv.hmac_cnt[i], 0);
	}
	for (i = 0; i < AEAD_TYPE_LAST; i++)
		atomic_set(&iproc_priv.aead_cnt[i], 0);

	atomic_set(&iproc_priv.mb_no_spc, 0);
	atomic_set(&iproc_priv.mb_send_fail, 0);
	atomic_set(&iproc_priv.bad_icv, 0);
}

static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
{
	struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
	int err;

	crypto->base.cra_module = THIS_MODULE;
	crypto->base.cra_priority = cipher_pri;
	crypto->base.cra_alignmask = 0;
	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
				 CRYPTO_ALG_ALLOCATES_MEMORY |
				 CRYPTO_ALG_KERN_DRIVER_ONLY;

	crypto->init = skcipher_init_tfm;
	crypto->exit = skcipher_exit_tfm;
	crypto->setkey = skcipher_setkey;
	crypto->encrypt = skcipher_encrypt;
	crypto->decrypt = skcipher_decrypt;

	err = crypto_register_skcipher(crypto);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered skcipher %s\n", crypto->base.cra_driver_name);
	return err;
}

static int spu_register_ahash(struct iproc_alg_s *driver_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct ahash_alg *hash = &driver_alg->alg.hash;
	int err;

	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
	    (spu->spu_type == SPU_TYPE_SPUM))
		return 0;

	/*
	 * SHA3 algorithm variants are only supported on SPU2 version 2, so
	 * skip registering them on SPU-M and SPU2 v1.
	 */
	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
		return 0;

	hash->halg.base.cra_module = THIS_MODULE;
	hash->halg.base.cra_priority = hash_pri;
	hash->halg.base.cra_alignmask = 0;
	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	hash->halg.base.cra_init = ahash_cra_init;
	hash->halg.base.cra_exit = generic_cra_exit;
	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
				    CRYPTO_ALG_ALLOCATES_MEMORY;
	hash->halg.statesize = sizeof(struct spu_hash_export_s);

	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
		hash->init = ahash_init;
		hash->update = ahash_update;
		hash->final = ahash_final;
		hash->finup = ahash_finup;
		hash->digest = ahash_digest;
		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
		     (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
			hash->setkey = ahash_setkey;
		}
	} else {
		hash->setkey = ahash_hmac_setkey;
		hash->init = ahash_hmac_init;
		hash->update = ahash_hmac_update;
		hash->final = ahash_hmac_final;
		hash->finup = ahash_hmac_finup;
		hash->digest = ahash_hmac_digest;
	}
	hash->export = ahash_export;
	hash->import = ahash_import;

	err = crypto_register_ahash(hash);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered ahash %s\n",
		 hash->halg.base.cra_driver_name);
	return err;
}

static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);

	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
	/* setkey set in alg initialization */
	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
	return err;
}

/* register crypto algorithms the device supports */
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = spu_register_skcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++) {
		/* Skip any algorithm not registered */
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}

/* ==================== Kernel Platform API ==================== */

static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);

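/*
 * Illustrative device tree sketch (the address, size and mailbox phandle
 * below are placeholders; see the Broadcom SPU crypto DT binding for the
 * authoritative format). A node matching one of the compatibles above might
 * look like:
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spum-crypto";
 *		reg = <0x612d0000 0x900>;
 *		mboxes = <&pdc0 0>;
 *	};
 *
 * spu_dt_read() below sizes the mailbox channel array from the "mboxes"
 * entries and maps each memory region as one SPU instance.
 */
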
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {

		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}

static int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(dev);
	if (err < 0)
		goto failure;

	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}

static void bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * hardware is SPU or SPU2. So here we make sure to skip
		 * those algorithms that were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
			dev_dbg(dev, "  unregistered cipher %s\n",
				driver_algs[i].alg.skcipher.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, "  unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, "  unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
}

/* ===== Kernel Module API ===== */

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <[email protected]>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");