GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/cesa/cesa.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2009-2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CESA SRAM Memory Map:
 *
 * +------------------------+ <= sc->sc_sram_base_va + CESA_SRAM_SIZE
 * |                        |
 * |          DATA          |
 * |                        |
 * +------------------------+ <= sc->sc_sram_base_va + CESA_DATA(0)
 * |  struct cesa_sa_data   |
 * +------------------------+
 * |  struct cesa_sa_hdesc  |
 * +------------------------+ <= sc->sc_sram_base_va
 */
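
/*
 * Editor's note: per the map above, the staging area for payload data
 * starts at CESA_DATA(0), directly above the per-request cesa_sa_hdesc
 * and cesa_sa_data control structures; CESA_DATA() is presumably an
 * offset-from-SRAM-base macro (see cesa.h for the actual definition).
 */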

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#include <machine/fdt.h>

#include <dev/fdt/simplebus.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <crypto/sha1.h>
#include <crypto/sha2/sha256.h>
#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#include "cryptodev_if.h"

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include "cesa.h"

static int cesa_probe(device_t);
static int cesa_attach(device_t);
static int cesa_attach_late(device_t);
static int cesa_detach(device_t);
static void cesa_intr(void *);
static int cesa_probesession(device_t,
    const struct crypto_session_params *);
static int cesa_newsession(device_t, crypto_session_t,
    const struct crypto_session_params *);
static int cesa_process(device_t, struct cryptop *, int);

static struct resource_spec cesa_res_spec[] = {
    { SYS_RES_MEMORY, 0, RF_ACTIVE },
    { SYS_RES_MEMORY, 1, RF_ACTIVE },
    { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
    { -1, 0 }
};

static device_method_t cesa_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, cesa_probe),
    DEVMETHOD(device_attach, cesa_attach),
    DEVMETHOD(device_detach, cesa_detach),

    /* Crypto device methods */
    DEVMETHOD(cryptodev_probesession, cesa_probesession),
    DEVMETHOD(cryptodev_newsession, cesa_newsession),
    DEVMETHOD(cryptodev_process, cesa_process),

    DEVMETHOD_END
};

static driver_t cesa_driver = {
    "cesa",
    cesa_methods,
    sizeof (struct cesa_softc)
};

DRIVER_MODULE(cesa, simplebus, cesa_driver, 0, 0);
MODULE_DEPEND(cesa, crypto, 1, 1, 1);
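
/*
 * Editor's sketch (not part of the driver): once this module is loaded,
 * the engine is reached through the OCF framework, e.g. from userland
 * via cryptodev(4).  A minimal, illustrative AES-CBC session is shown
 * below; the buffers and the helper name are hypothetical, and very old
 * systems may additionally require a CRIOGET ioctl to clone the fd.
 */
#if 0
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>

static int
cesa_example(uint8_t *buf, size_t len, uint8_t *key, uint8_t *iv)
{
    struct session_op sess = { 0 };
    struct crypt_op cop = { 0 };
    int fd;

    fd = open("/dev/crypto", O_RDWR);
    if (fd < 0)
        return (-1);

    sess.cipher = CRYPTO_AES_CBC;    /* handled by cesa(4) */
    sess.keylen = 16;
    sess.key = (caddr_t)key;
    if (ioctl(fd, CIOCGSESSION, &sess) < 0)
        return (-1);

    cop.ses = sess.ses;
    cop.op = COP_ENCRYPT;
    cop.len = len;                   /* must be a multiple of AES_BLOCK_LEN */
    cop.src = cop.dst = (caddr_t)buf;
    cop.iv = (caddr_t)iv;
    if (ioctl(fd, CIOCCRYPT, &cop) < 0)
        return (-1);

    return (ioctl(fd, CIOCFSESSION, &sess.ses));
}
#endif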

static void
cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd)
{
#ifdef DEBUG
    device_t dev;

    dev = sc->sc_dev;
    device_printf(dev, "CESA SA Hardware Descriptor:\n");
    device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config);
    device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src);
    device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst);
    device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen);
    device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key);
    device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv);
    device_printf(dev, "\t\te_iv_2: 0x%08X\n", cshd->cshd_enc_iv_buf);
    device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src);
    device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst);
    device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen);
    device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen);
    device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in);
    device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out);
#endif
}

static void
cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct cesa_dma_mem *cdm;

    if (error)
        return;

    KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1."));
    cdm = arg;
    cdm->cdm_paddr = segs->ds_addr;
}

static int
cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm,
    bus_size_t size)
{
    int error;

    KASSERT(cdm->cdm_vaddr == NULL,
        ("%s(): DMA memory descriptor in use.", __func__));

    error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
        PAGE_SIZE, 0,               /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filtfunc, filtfuncarg */
        size, 1,                    /* maxsize, nsegments */
        size, 0,                    /* maxsegsz, flags */
        NULL, NULL,                 /* lockfunc, lockfuncarg */
        &cdm->cdm_tag);             /* dmat */
    if (error) {
        device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
            " %i!\n", error);

        goto err1;
    }

    error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map);
    if (error) {
        device_printf(sc->sc_dev, "failed to allocate DMA safe"
            " memory, error %i!\n", error);

        goto err2;
    }

    error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr,
        size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT);
    if (error) {
        device_printf(sc->sc_dev, "cannot get address of the DMA"
            " memory, error %i\n", error);

        goto err3;
    }

    return (0);
err3:
    bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
err2:
    bus_dma_tag_destroy(cdm->cdm_tag);
err1:
    cdm->cdm_vaddr = NULL;
    return (error);
}

static void
cesa_free_dma_mem(struct cesa_dma_mem *cdm)
{

    bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map);
    bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map);
    bus_dma_tag_destroy(cdm->cdm_tag);
    cdm->cdm_vaddr = NULL;
}

static void
cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op)
{

    /* Sync only if dma memory is valid */
    if (cdm->cdm_vaddr != NULL)
        bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op);
}

static void
cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op)
{

    cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op);
    cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op);
    cesa_sync_dma_mem(&sc->sc_requests_cdm, op);
}

static struct cesa_request *
cesa_alloc_request(struct cesa_softc *sc)
{
    struct cesa_request *cr;

    CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests);
    if (!cr)
        return (NULL);

    STAILQ_INIT(&cr->cr_tdesc);
    STAILQ_INIT(&cr->cr_sdesc);

    return (cr);
}

static void
cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr)
{

    /* Free TDMA descriptors assigned to this request */
    CESA_LOCK(sc, tdesc);
    STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc);
    CESA_UNLOCK(sc, tdesc);

    /* Free SA descriptors assigned to this request */
    CESA_LOCK(sc, sdesc);
    STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc);
    CESA_UNLOCK(sc, sdesc);

    /* Unload DMA memory associated with request */
    if (cr->cr_dmap_loaded) {
        bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap);
        cr->cr_dmap_loaded = 0;
    }

    CESA_GENERIC_FREE_LOCKED(sc, cr, requests);
}

static void
cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr)
{

    CESA_LOCK(sc, requests);
    STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq);
    CESA_UNLOCK(sc, requests);
}

static struct cesa_tdma_desc *
cesa_alloc_tdesc(struct cesa_softc *sc)
{
    struct cesa_tdma_desc *ctd;

    CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc);

    if (!ctd)
        device_printf(sc->sc_dev, "TDMA descriptors pool exhausted. "
            "Consider increasing CESA_TDMA_DESCRIPTORS.\n");

    return (ctd);
}

static struct cesa_sa_desc *
cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr)
{
    struct cesa_sa_desc *csd;

    CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc);
    if (!csd) {
        device_printf(sc->sc_dev, "SA descriptors pool exhausted. "
            "Consider increasing CESA_SA_DESCRIPTORS.\n");
        return (NULL);
    }

    STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq);

    /* Fill-in SA descriptor with default values */
    csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key);
    csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv);
    csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv);
    csd->csd_cshd->cshd_enc_src = 0;
    csd->csd_cshd->cshd_enc_dst = 0;
    csd->csd_cshd->cshd_enc_dlen = 0;
    csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash);
    csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in);
    csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out);
    csd->csd_cshd->cshd_mac_src = 0;
    csd->csd_cshd->cshd_mac_dlen = 0;

    return (csd);
}

static struct cesa_tdma_desc *
cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src,
    bus_size_t size)
{
    struct cesa_tdma_desc *ctd;

    ctd = cesa_alloc_tdesc(sc);
    if (!ctd)
        return (NULL);

    ctd->ctd_cthd->cthd_dst = dst;
    ctd->ctd_cthd->cthd_src = src;
    ctd->ctd_cthd->cthd_byte_count = size;

    /* Handle special control packet */
    if (size != 0)
        ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED;
    else
        ctd->ctd_cthd->cthd_flags = 0;

    return (ctd);
}

static struct cesa_tdma_desc *
cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

    return (cesa_tdma_copy(sc, sc->sc_sram_base_pa +
        sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr,
        sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr)
{

    return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base_pa +
        sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data)));
}

static struct cesa_tdma_desc *
cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd)
{

    return (cesa_tdma_copy(sc, sc->sc_sram_base_pa, csd->csd_cshd_paddr,
        sizeof(struct cesa_sa_hdesc)));
}

static void
cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd)
{
    struct cesa_tdma_desc *ctd_prev;

    if (!STAILQ_EMPTY(&cr->cr_tdesc)) {
        ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq);
        ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr;
    }

    ctd->ctd_cthd->cthd_next = 0;
    STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq);
}

static int
cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr,
    struct cesa_packet *cp, struct cesa_sa_desc *csd)
{
    struct cesa_tdma_desc *ctd, *tmp;

    /* Copy SA descriptor for this packet */
    ctd = cesa_tdma_copy_sdesc(sc, csd);
    if (!ctd)
        return (ENOMEM);

    cesa_append_tdesc(cr, ctd);

    /* Copy data to be processed */
    STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
        cesa_append_tdesc(cr, ctd);
    STAILQ_INIT(&cp->cp_copyin);

    /* Insert control descriptor */
    ctd = cesa_tdma_copy(sc, 0, 0, 0);
    if (!ctd)
        return (ENOMEM);

    cesa_append_tdesc(cr, ctd);

    /* Copy back results */
    STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp)
        cesa_append_tdesc(cr, ctd);
    STAILQ_INIT(&cp->cp_copyout);

    return (0);
}
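
/*
 * Editor's note on cesa_append_packet(): each processed packet expands
 * into the TDMA chain sketched below; the zero-length control
 * descriptor appears to act as a barrier, letting the crypto engine
 * process the staged data before the copy-out descriptors run.
 *
 *   [copy SA hdesc to SRAM] -> [copy input data to SRAM] ... ->
 *   [zero-length control descriptor] -> [copy results from SRAM] ...
 */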

static void
cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
{
    union authctx auth_ctx;
    uint32_t *hout;
    uint32_t *hin;
    int i;

    hin = (uint32_t *)cs->cs_hiv_in;
    hout = (uint32_t *)cs->cs_hiv_out;

    switch (alg) {
    case CRYPTO_SHA1_HMAC:
        hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
        memcpy(hin, auth_ctx.sha1ctx.h.b32,
            sizeof(auth_ctx.sha1ctx.h.b32));
        hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
        memcpy(hout, auth_ctx.sha1ctx.h.b32,
            sizeof(auth_ctx.sha1ctx.h.b32));
        break;
    case CRYPTO_SHA2_256_HMAC:
        hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen,
            &auth_ctx);
        memcpy(hin, auth_ctx.sha256ctx.state,
            sizeof(auth_ctx.sha256ctx.state));
        hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen,
            &auth_ctx);
        memcpy(hout, auth_ctx.sha256ctx.state,
            sizeof(auth_ctx.sha256ctx.state));
        break;
    default:
        panic("shouldn't get here");
    }

    for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
        hin[i] = htobe32(hin[i]);
        hout[i] = htobe32(hout[i]);
    }
    explicit_bzero(&auth_ctx, sizeof(auth_ctx));
}
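
/*
 * Editor's note on cesa_set_mkey(): hin/hout receive the SHA1/SHA256
 * internal states left after hashing (key ^ ipad) and (key ^ opad), so
 * the engine can resume the inner and outer HMAC hashes from these
 * precomputed states instead of being handed the raw key.
 */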

static int
cesa_prep_aes_key(struct cesa_session *cs,
    const struct crypto_session_params *csp)
{
    uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
    uint32_t *dkey;
    int i;

    rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8);

    cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
    dkey = (uint32_t *)cs->cs_aes_dkey;

    switch (csp->csp_cipher_klen) {
    case 16:
        cs->cs_config |= CESA_CSH_AES_KLEN_128;
        for (i = 0; i < 4; i++)
            *dkey++ = htobe32(ek[4 * 10 + i]);
        break;
    case 24:
        cs->cs_config |= CESA_CSH_AES_KLEN_192;
        for (i = 0; i < 4; i++)
            *dkey++ = htobe32(ek[4 * 12 + i]);
        for (i = 0; i < 2; i++)
            *dkey++ = htobe32(ek[4 * 11 + 2 + i]);
        break;
    case 32:
        cs->cs_config |= CESA_CSH_AES_KLEN_256;
        for (i = 0; i < 4; i++)
            *dkey++ = htobe32(ek[4 * 14 + i]);
        for (i = 0; i < 4; i++)
            *dkey++ = htobe32(ek[4 * 13 + i]);
        break;
    default:
        return (EINVAL);
    }

    return (0);
}
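
/*
 * Editor's note on cesa_prep_aes_key(): AES decryption walks the key
 * schedule backwards, so the "decryption key" handed to the engine is
 * taken from the tail of the encryption schedule computed by
 * rijndaelKeySetupEnc() (the last round keys for the given key size).
 */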

static void
cesa_start_packet(struct cesa_packet *cp, unsigned int size)
{

    cp->cp_size = size;
    cp->cp_offset = 0;
    STAILQ_INIT(&cp->cp_copyin);
    STAILQ_INIT(&cp->cp_copyout);
}

static int
cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp,
    bus_dma_segment_t *seg)
{
    struct cesa_tdma_desc *ctd;
    unsigned int bsize;

    /* Calculate size of block copy */
    bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset);

    if (bsize > 0) {
        ctd = cesa_tdma_copy(sc, sc->sc_sram_base_pa +
            CESA_DATA(cp->cp_offset), seg->ds_addr, bsize);
        if (!ctd)
            return (-ENOMEM);

        STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq);

        ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base_pa +
            CESA_DATA(cp->cp_offset), bsize);
        if (!ctd)
            return (-ENOMEM);

        STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq);

        seg->ds_len -= bsize;
        seg->ds_addr += bsize;
        cp->cp_offset += bsize;
    }

    return (bsize);
}

static void
cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    unsigned int mpsize, fragmented;
    unsigned int mlen, mskip, tmlen;
    struct cesa_chain_info *cci;
    unsigned int elen, eskip;
    unsigned int skip, len;
    struct cesa_sa_desc *csd;
    struct cesa_request *cr;
    struct cryptop *crp;
    struct cesa_softc *sc;
    struct cesa_packet cp;
    bus_dma_segment_t seg;
    uint32_t config;
    int size;

    cci = arg;
    sc = cci->cci_sc;
    cr = cci->cci_cr;
    crp = cr->cr_crp;

    if (error) {
        cci->cci_error = error;
        return;
    }

    /*
     * Only do a combined op if the AAD is adjacent to the payload
     * and the AAD length is a multiple of the IV length.  The
     * checks against 'config' are to avoid recursing when the
     * logic below invokes separate operations.
     */
    config = cci->cci_config;
    if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC ||
        (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) &&
        crp->crp_aad_length != 0 &&
        (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) {
        /*
         * Data alignment in the request does not meet CESA requirements
         * for combined encryption/decryption and hashing.  We have to
         * split the request into separate operations and process them
         * one by one.
         */
        if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
            config &= ~CESA_CSHD_OP_MASK;

            cci->cci_config = config | CESA_CSHD_MAC;
            cesa_create_chain_cb(cci, segs, nseg, 0);

            cci->cci_config = config | CESA_CSHD_ENC;
            cesa_create_chain_cb(cci, segs, nseg, 0);
        } else {
            config &= ~CESA_CSHD_OP_MASK;

            cci->cci_config = config | CESA_CSHD_ENC;
            cesa_create_chain_cb(cci, segs, nseg, 0);

            cci->cci_config = config | CESA_CSHD_MAC;
            cesa_create_chain_cb(cci, segs, nseg, 0);
        }

        return;
    }

    mskip = mlen = eskip = elen = 0;

    if (crp->crp_aad_length == 0) {
        skip = crp->crp_payload_start;
        len = crp->crp_payload_length;
        switch (config & CESA_CSHD_OP_MASK) {
        case CESA_CSHD_ENC:
            eskip = skip;
            elen = len;
            break;
        case CESA_CSHD_MAC:
            mskip = skip;
            mlen = len;
            break;
        default:
            eskip = skip;
            elen = len;
            mskip = skip;
            mlen = len;
            break;
        }
    } else {
        /*
         * For an encryption-only separate request, only
         * process the payload.  For combined requests and
         * hash-only requests, process the entire region.
         */
        switch (config & CESA_CSHD_OP_MASK) {
        case CESA_CSHD_ENC:
            skip = crp->crp_payload_start;
            len = crp->crp_payload_length;
            eskip = skip;
            elen = len;
            break;
        case CESA_CSHD_MAC:
            skip = crp->crp_aad_start;
            len = crp->crp_aad_length + crp->crp_payload_length;
            mskip = skip;
            mlen = len;
            break;
        default:
            skip = crp->crp_aad_start;
            len = crp->crp_aad_length + crp->crp_payload_length;
            mskip = skip;
            mlen = len;
            eskip = crp->crp_payload_start;
            elen = crp->crp_payload_length;
            break;
        }
    }

    tmlen = mlen;
    fragmented = 0;
    mpsize = CESA_MAX_PACKET_SIZE;
    mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));

    /* Start first packet in chain */
    cesa_start_packet(&cp, MIN(mpsize, len));

    while (nseg-- && len > 0) {
        seg = *(segs++);

        /*
         * Skip data in buffer on which neither ENC nor MAC operation
         * is requested.
         */
        if (skip > 0) {
            size = MIN(skip, seg.ds_len);
            skip -= size;

            seg.ds_addr += size;
            seg.ds_len -= size;

            if (eskip > 0)
                eskip -= size;

            if (mskip > 0)
                mskip -= size;

            if (seg.ds_len == 0)
                continue;
        }

        while (1) {
            /*
             * Fill in current packet with data. Break if there is
             * no more data in current DMA segment or an error
             * occurred.
             */
            size = cesa_fill_packet(sc, &cp, &seg);
            if (size <= 0) {
                error = -size;
                break;
            }

            len -= size;

            /* If packet is full, append it to the chain */
            if (cp.cp_size == cp.cp_offset) {
                csd = cesa_alloc_sdesc(sc, cr);
                if (!csd) {
                    error = ENOMEM;
                    break;
                }

                /* Create SA descriptor for this packet */
                csd->csd_cshd->cshd_config = cci->cci_config;
                csd->csd_cshd->cshd_mac_total_dlen = tmlen;

                /*
                 * Enable fragmentation if request will not fit
                 * into one packet.
                 */
                if (len > 0) {
                    if (!fragmented) {
                        fragmented = 1;
                        csd->csd_cshd->cshd_config |=
                            CESA_CSHD_FRAG_FIRST;
                    } else
                        csd->csd_cshd->cshd_config |=
                            CESA_CSHD_FRAG_MIDDLE;
                } else if (fragmented)
                    csd->csd_cshd->cshd_config |=
                        CESA_CSHD_FRAG_LAST;

                if (eskip < cp.cp_size && elen > 0) {
                    csd->csd_cshd->cshd_enc_src =
                        CESA_DATA(eskip);
                    csd->csd_cshd->cshd_enc_dst =
                        CESA_DATA(eskip);
                    csd->csd_cshd->cshd_enc_dlen =
                        MIN(elen, cp.cp_size - eskip);
                }

                if (mskip < cp.cp_size && mlen > 0) {
                    csd->csd_cshd->cshd_mac_src =
                        CESA_DATA(mskip);
                    csd->csd_cshd->cshd_mac_dlen =
                        MIN(mlen, cp.cp_size - mskip);
                }

                elen -= csd->csd_cshd->cshd_enc_dlen;
                eskip -= MIN(eskip, cp.cp_size);
                mlen -= csd->csd_cshd->cshd_mac_dlen;
                mskip -= MIN(mskip, cp.cp_size);

                cesa_dump_cshd(sc, csd->csd_cshd);

                /* Append packet to the request */
                error = cesa_append_packet(sc, cr, &cp, csd);
                if (error)
                    break;

                /* Start a new packet, as current is full */
                cesa_start_packet(&cp, MIN(mpsize, len));
            }
        }

        if (error)
            break;
    }

    if (error) {
        /*
         * Move all allocated resources to the request. They will be
         * freed later.
         */
        STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin);
        STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout);
        cci->cci_error = error;
    }
}
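
/*
 * Editor's sketch of the fragmentation flags emitted above for a
 * request spanning three maximum-size packets:
 *
 *   packet 0: CESA_CSHD_FRAG_FIRST
 *   packet 1: CESA_CSHD_FRAG_MIDDLE
 *   packet 2: CESA_CSHD_FRAG_LAST
 *
 * A request that fits into a single packet carries no FRAG_* flag.
 */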

static int
cesa_create_chain(struct cesa_softc *sc,
    const struct crypto_session_params *csp, struct cesa_request *cr)
{
    struct cesa_chain_info cci;
    struct cesa_tdma_desc *ctd;
    uint32_t config;
    int error;

    error = 0;
    CESA_LOCK_ASSERT(sc, sessions);

    /* Create request metadata */
    if (csp->csp_cipher_klen != 0) {
        if (csp->csp_cipher_alg == CRYPTO_AES_CBC &&
            !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
            memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
                csp->csp_cipher_klen);
        else
            memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
                csp->csp_cipher_klen);
    }

    if (csp->csp_auth_klen != 0) {
        memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
            CESA_MAX_HASH_LEN);
        memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
            CESA_MAX_HASH_LEN);
    }

    ctd = cesa_tdma_copyin_sa_data(sc, cr);
    if (!ctd)
        return (ENOMEM);

    cesa_append_tdesc(cr, ctd);

    /* Prepare SA configuration */
    config = cr->cr_cs->cs_config;

    if (csp->csp_cipher_alg != 0 &&
        !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
        config |= CESA_CSHD_DECRYPT;
    switch (csp->csp_mode) {
    case CSP_MODE_CIPHER:
        config |= CESA_CSHD_ENC;
        break;
    case CSP_MODE_DIGEST:
        config |= CESA_CSHD_MAC;
        break;
    case CSP_MODE_ETA:
        config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
            CESA_CSHD_ENC_AND_MAC;
        break;
    }

    /* Create data packets */
    cci.cci_sc = sc;
    cci.cci_cr = cr;
    cci.cci_config = config;
    cci.cci_error = 0;

    error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp,
        cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT);

    if (!error)
        cr->cr_dmap_loaded = 1;

    if (cci.cci_error)
        error = cci.cci_error;

    if (error)
        return (error);

    /* Read back request metadata */
    ctd = cesa_tdma_copyout_sa_data(sc, cr);
    if (!ctd)
        return (ENOMEM);

    cesa_append_tdesc(cr, ctd);

    return (0);
}

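/*
 * Editor's note: cesa_execute() stitches all queued requests into one
 * hardware chain by pointing the last TDMA descriptor of each request
 * at the first descriptor of the next, so a single activation command
 * covers the whole batch.
 */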
static void
cesa_execute(struct cesa_softc *sc)
{
    struct cesa_tdma_desc *prev_ctd, *ctd;
    struct cesa_request *prev_cr, *cr;

    CESA_LOCK(sc, requests);

    /*
     * If ready list is empty, there is nothing to execute. If queued list
     * is not empty, the hardware is busy and we cannot start another
     * execution.
     */
    if (STAILQ_EMPTY(&sc->sc_ready_requests) ||
        !STAILQ_EMPTY(&sc->sc_queued_requests)) {
        CESA_UNLOCK(sc, requests);
        return;
    }

    /* Move all ready requests to queued list */
    STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests);
    STAILQ_INIT(&sc->sc_ready_requests);

    /* Create one execution chain from all requests on the list */
    if (STAILQ_FIRST(&sc->sc_queued_requests) !=
        STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) {
        prev_cr = NULL;
        cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);

        STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) {
            if (prev_cr) {
                ctd = STAILQ_FIRST(&cr->cr_tdesc);
                prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc,
                    cesa_tdma_desc, ctd_stq);

                prev_ctd->ctd_cthd->cthd_next =
                    ctd->ctd_cthd_paddr;
            }

            prev_cr = cr;
        }

        cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);
    }

    /* Start chain execution in hardware */
    cr = STAILQ_FIRST(&sc->sc_queued_requests);
    ctd = STAILQ_FIRST(&cr->cr_tdesc);

    CESA_TDMA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr);

    if (sc->sc_soc_id == MV_DEV_88F6828 ||
        sc->sc_soc_id == MV_DEV_88F6820 ||
        sc->sc_soc_id == MV_DEV_88F6810)
        CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE | CESA_SA_CMD_SHA2);
    else
        CESA_REG_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE);

    CESA_UNLOCK(sc, requests);
}

static int
cesa_setup_sram(struct cesa_softc *sc)
{
    phandle_t sram_node;
    ihandle_t sram_ihandle;
    pcell_t sram_handle, sram_reg[2];
    void *sram_va;
    int rv;

    rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "sram-handle",
        (void *)&sram_handle, sizeof(sram_handle));
    if (rv <= 0)
        return (rv);

    sram_ihandle = (ihandle_t)sram_handle;
    sram_node = OF_instance_to_package(sram_ihandle);

    rv = OF_getencprop(sram_node, "reg", (void *)sram_reg, sizeof(sram_reg));
    if (rv <= 0)
        return (rv);

    sc->sc_sram_base_pa = sram_reg[0];
    /* Store SRAM size to be able to unmap in detach() */
    sc->sc_sram_size = sram_reg[1];

    if (sc->sc_soc_id != MV_DEV_88F6828 &&
        sc->sc_soc_id != MV_DEV_88F6820 &&
        sc->sc_soc_id != MV_DEV_88F6810)
        return (0);

    /* SRAM memory was not mapped in platform_sram_devmap(), map it now */
    sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
    if (sram_va == NULL)
        return (ENOMEM);
    sc->sc_sram_base_va = sram_va;

    return (0);
}

/*
 * Function: device_from_node
 * Returns the device_t that corresponds to the given phandle_t.
 * Parameters:
 * root - device at which to start the search;
 *        if NULL is provided here, the "root0"
 *        device is used as the root.
 * node - the node to match each device_t against.
 */
static device_t
device_from_node(device_t root, phandle_t node)
{
    device_t *children, retval;
    int nkid, i;

    /* Nothing matches no node */
    if (node == -1)
        return (NULL);

    if (root == NULL)
        /* Get root of device tree */
        if ((root = device_lookup_by_name("root0")) == NULL)
            return (NULL);

    if (device_get_children(root, &children, &nkid) != 0)
        return (NULL);

    retval = NULL;
    for (i = 0; i < nkid; i++) {
        /* Check if device and node match */
        if (OFW_BUS_GET_NODE(root, children[i]) == node) {
            retval = children[i];
            break;
        }
        /* or go deeper */
        if ((retval = device_from_node(children[i], node)) != NULL)
            break;
    }
    free(children, M_TEMP);

    return (retval);
}

static int
cesa_setup_sram_armada(struct cesa_softc *sc)
{
    phandle_t sram_node;
    ihandle_t sram_ihandle;
    pcell_t sram_handle[2];
    void *sram_va;
    int rv, j;
    struct resource_list rl;
    struct resource_list_entry *rle;
    struct simplebus_softc *ssc;
    device_t sdev;

    /* Get refs to SRAMs from the CESA node */
    rv = OF_getencprop(ofw_bus_get_node(sc->sc_dev), "marvell,crypto-srams",
        (void *)sram_handle, sizeof(sram_handle));
    if (rv <= 0)
        return (rv);

    if (sc->sc_cesa_engine_id >= 2)
        return (ENXIO);

    /* Get SRAM node on the basis of sc_cesa_engine_id */
    sram_ihandle = (ihandle_t)sram_handle[sc->sc_cesa_engine_id];
    sram_node = OF_instance_to_package(sram_ihandle);

    /* Get device_t of simplebus (sram_node parent) */
    sdev = device_from_node(NULL, OF_parent(sram_node));
    if (!sdev)
        return (ENXIO);

    ssc = device_get_softc(sdev);

    resource_list_init(&rl);
    /* Parse reg property to resource list */
    ofw_bus_reg_to_rl(sdev, sram_node, ssc->acells,
        ssc->scells, &rl);

    /* We expect only one resource */
    rle = resource_list_find(&rl, SYS_RES_MEMORY, 0);
    if (rle == NULL)
        return (ENXIO);

    /* Remap through ranges property */
    for (j = 0; j < ssc->nranges; j++) {
        if (rle->start >= ssc->ranges[j].bus &&
            rle->end < ssc->ranges[j].bus + ssc->ranges[j].size) {
            rle->start -= ssc->ranges[j].bus;
            rle->start += ssc->ranges[j].host;
            rle->end -= ssc->ranges[j].bus;
            rle->end += ssc->ranges[j].host;
        }
    }

    sc->sc_sram_base_pa = rle->start;
    sc->sc_sram_size = rle->count;

    /* SRAM memory was not mapped in platform_sram_devmap(), map it now */
    sram_va = pmap_mapdev(sc->sc_sram_base_pa, sc->sc_sram_size);
    if (sram_va == NULL)
        return (ENOMEM);
    sc->sc_sram_base_va = sram_va;

    return (0);
}

struct ofw_compat_data cesa_devices[] = {
    { "mrvl,cesa", (uintptr_t)true },
    { "marvell,armada-38x-crypto", (uintptr_t)true },
    { NULL, 0 }
};

static int
cesa_probe(device_t dev)
{

    if (!ofw_bus_status_okay(dev))
        return (ENXIO);

    if (!ofw_bus_search_compatible(dev, cesa_devices)->ocd_data)
        return (ENXIO);

    device_set_desc(dev, "Marvell Cryptographic Engine and Security "
        "Accelerator");

    return (BUS_PROBE_DEFAULT);
}

static int
cesa_attach(device_t dev)
{
    static int engine_idx = 0;
    struct simplebus_devinfo *ndi;
    struct resource_list *rl;
    struct cesa_softc *sc;

    if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
        return (cesa_attach_late(dev));

    /*
     * Get the simplebus_devinfo that contains a resource list
     * filled with the addresses and interrupts read from the FDT,
     * and correct it by splitting the resources between the two
     * engines.
     */
    if ((ndi = device_get_ivars(dev)) == NULL)
        return (ENXIO);

    rl = &ndi->rl;

    switch (engine_idx) {
    case 0:
        /* Update regs values */
        resource_list_add(rl, SYS_RES_MEMORY, 0, CESA0_TDMA_ADDR,
            CESA0_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
        resource_list_add(rl, SYS_RES_MEMORY, 1, CESA0_CESA_ADDR,
            CESA0_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

        /* Remove unused interrupt */
        resource_list_delete(rl, SYS_RES_IRQ, 1);
        break;

    case 1:
        /* Update regs values */
        resource_list_add(rl, SYS_RES_MEMORY, 0, CESA1_TDMA_ADDR,
            CESA1_TDMA_ADDR + CESA_TDMA_SIZE - 1, CESA_TDMA_SIZE);
        resource_list_add(rl, SYS_RES_MEMORY, 1, CESA1_CESA_ADDR,
            CESA1_CESA_ADDR + CESA_CESA_SIZE - 1, CESA_CESA_SIZE);

        /* Remove unused interrupt */
        resource_list_delete(rl, SYS_RES_IRQ, 0);
        resource_list_find(rl, SYS_RES_IRQ, 1)->rid = 0;
        break;

    default:
        device_printf(dev, "Bad cesa engine_idx\n");
        return (ENXIO);
    }

    sc = device_get_softc(dev);
    sc->sc_cesa_engine_id = engine_idx;

    /*
     * Call simplebus_add_device() only once.  It will create a second
     * cesa driver instance with the same FDT node as the first one.
     * When the second instance reaches this function, it will be
     * configured to use the second cesa engine.
     */
    if (engine_idx == 0)
        simplebus_add_device(device_get_parent(dev), ofw_bus_get_node(dev),
            0, "cesa", 1, NULL);

    engine_idx++;

    return (cesa_attach_late(dev));
}

static int
cesa_attach_late(device_t dev)
{
    struct cesa_softc *sc;
    uint32_t d, r, val;
    int error;
    int i;

    sc = device_get_softc(dev);
    sc->sc_blocked = 0;
    sc->sc_error = 0;
    sc->sc_dev = dev;

    soc_id(&d, &r);

    switch (d) {
    case MV_DEV_88F6828:
    case MV_DEV_88F6820:
    case MV_DEV_88F6810:
        sc->sc_tperr = 0;
        break;
    default:
        return (ENXIO);
    }

    sc->sc_soc_id = d;

    /* Initialize mutexes */
    mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev),
        "CESA Shared Data", MTX_DEF);
    mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev),
        "CESA TDMA Descriptors Pool", MTX_DEF);
    mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev),
        "CESA SA Descriptors Pool", MTX_DEF);
    mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev),
        "CESA Requests Pool", MTX_DEF);
    mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
        "CESA Sessions Pool", MTX_DEF);

    /* Allocate I/O and IRQ resources */
    error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res);
    if (error) {
        device_printf(dev, "could not allocate resources\n");
        goto err0;
    }

    /* Acquire SRAM base address */
    if (!ofw_bus_is_compatible(dev, "marvell,armada-38x-crypto"))
        error = cesa_setup_sram(sc);
    else
        error = cesa_setup_sram_armada(sc);

    if (error) {
        device_printf(dev, "could not setup SRAM\n");
        goto err1;
    }

    /* Setup interrupt handler */
    error = bus_setup_intr(dev, sc->sc_res[RES_CESA_IRQ], INTR_TYPE_NET |
        INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie));
    if (error) {
        device_printf(dev, "could not setup engine completion irq\n");
        goto err2;
    }

    /* Create DMA tag for processed data */
    error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filtfunc, filtfuncarg */
        CESA_MAX_REQUEST_SIZE,      /* maxsize */
        CESA_MAX_FRAGMENTS,         /* nsegments */
        CESA_MAX_REQUEST_SIZE, 0,   /* maxsegsz, flags */
        NULL, NULL,                 /* lockfunc, lockfuncarg */
        &sc->sc_data_dtag);         /* dmat */
    if (error)
        goto err3;

    /* Initialize data structures: TDMA Descriptors Pool */
    error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm,
        CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc));
    if (error)
        goto err4;

    STAILQ_INIT(&sc->sc_free_tdesc);
    for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) {
        sc->sc_tdesc[i].ctd_cthd =
            (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i;
        sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr +
            (i * sizeof(struct cesa_tdma_hdesc));
        STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i],
            ctd_stq);
    }

    /* Initialize data structures: SA Descriptors Pool */
    error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm,
        CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc));
    if (error)
        goto err5;

    STAILQ_INIT(&sc->sc_free_sdesc);
    for (i = 0; i < CESA_SA_DESCRIPTORS; i++) {
        sc->sc_sdesc[i].csd_cshd =
            (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i;
        sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr +
            (i * sizeof(struct cesa_sa_hdesc));
        STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i],
            csd_stq);
    }

    /* Initialize data structures: Requests Pool */
    error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm,
        CESA_REQUESTS * sizeof(struct cesa_sa_data));
    if (error)
        goto err6;

    STAILQ_INIT(&sc->sc_free_requests);
    STAILQ_INIT(&sc->sc_ready_requests);
    STAILQ_INIT(&sc->sc_queued_requests);
    for (i = 0; i < CESA_REQUESTS; i++) {
        sc->sc_requests[i].cr_csd =
            (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i;
        sc->sc_requests[i].cr_csd_paddr =
            sc->sc_requests_cdm.cdm_paddr +
            (i * sizeof(struct cesa_sa_data));

        /* Preallocate DMA maps */
        error = bus_dmamap_create(sc->sc_data_dtag, 0,
            &sc->sc_requests[i].cr_dmap);
        if (error && i > 0) {
            i--;
            do {
                bus_dmamap_destroy(sc->sc_data_dtag,
                    sc->sc_requests[i].cr_dmap);
            } while (i--);

            goto err7;
        }

        STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i],
            cr_stq);
    }

    /*
     * Initialize TDMA:
     * - Burst limit: 128 bytes,
     * - Outstanding reads enabled,
     * - No byte-swap.
     */
    val = CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 |
        CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE;

    if (sc->sc_soc_id == MV_DEV_88F6828 ||
        sc->sc_soc_id == MV_DEV_88F6820 ||
        sc->sc_soc_id == MV_DEV_88F6810)
        val |= CESA_TDMA_NUM_OUTSTAND;

    CESA_TDMA_WRITE(sc, CESA_TDMA_CR, val);

    /*
     * Initialize SA:
     * - SA descriptor is present at beginning of CESA SRAM,
     * - Multi-packet chain mode,
     * - Cooperation with TDMA enabled.
     */
    CESA_REG_WRITE(sc, CESA_SA_DPR, 0);
    CESA_REG_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA |
        CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE);

    /* Unmask interrupts */
    CESA_REG_WRITE(sc, CESA_ICR, 0);
    CESA_REG_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr);
    CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
    CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS |
        CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT |
        CESA_TDMA_EMR_DATA_ERROR);

    /* Register in OCF */
    sc->sc_cid = crypto_get_driverid(dev, sizeof(struct cesa_session),
        CRYPTOCAP_F_HARDWARE);
    if (sc->sc_cid < 0) {
        device_printf(dev, "could not get crypto driver id\n");
        goto err8;
    }

    return (0);
err8:
    for (i = 0; i < CESA_REQUESTS; i++)
        bus_dmamap_destroy(sc->sc_data_dtag,
            sc->sc_requests[i].cr_dmap);
err7:
    cesa_free_dma_mem(&sc->sc_requests_cdm);
err6:
    cesa_free_dma_mem(&sc->sc_sdesc_cdm);
err5:
    cesa_free_dma_mem(&sc->sc_tdesc_cdm);
err4:
    bus_dma_tag_destroy(sc->sc_data_dtag);
err3:
    bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);
err2:
    if (sc->sc_soc_id == MV_DEV_88F6828 ||
        sc->sc_soc_id == MV_DEV_88F6820 ||
        sc->sc_soc_id == MV_DEV_88F6810)
        pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);
err1:
    bus_release_resources(dev, cesa_res_spec, sc->sc_res);
err0:
    mtx_destroy(&sc->sc_sessions_lock);
    mtx_destroy(&sc->sc_requests_lock);
    mtx_destroy(&sc->sc_sdesc_lock);
    mtx_destroy(&sc->sc_tdesc_lock);
    mtx_destroy(&sc->sc_sc_lock);
    return (ENXIO);
}

static int
cesa_detach(device_t dev)
{
    struct cesa_softc *sc;
    int i;

    sc = device_get_softc(dev);

    /* TODO: Wait for queued requests completion before shutdown. */

    /* Mask interrupts */
    CESA_REG_WRITE(sc, CESA_ICM, 0);
    CESA_TDMA_WRITE(sc, CESA_TDMA_EMR, 0);

    /* Unregister from OCF */
    crypto_unregister_all(sc->sc_cid);

    /* Free DMA Maps */
    for (i = 0; i < CESA_REQUESTS; i++)
        bus_dmamap_destroy(sc->sc_data_dtag,
            sc->sc_requests[i].cr_dmap);

    /* Free DMA Memory */
    cesa_free_dma_mem(&sc->sc_requests_cdm);
    cesa_free_dma_mem(&sc->sc_sdesc_cdm);
    cesa_free_dma_mem(&sc->sc_tdesc_cdm);

    /* Free DMA Tag */
    bus_dma_tag_destroy(sc->sc_data_dtag);

    /* Stop interrupt */
    bus_teardown_intr(dev, sc->sc_res[RES_CESA_IRQ], sc->sc_icookie);

    /* Release I/O and IRQ resources */
    bus_release_resources(dev, cesa_res_spec, sc->sc_res);

    /* Unmap SRAM memory */
    if (sc->sc_soc_id == MV_DEV_88F6828 ||
        sc->sc_soc_id == MV_DEV_88F6820 ||
        sc->sc_soc_id == MV_DEV_88F6810)
        pmap_unmapdev(sc->sc_sram_base_va, sc->sc_sram_size);

    /* Destroy mutexes */
    mtx_destroy(&sc->sc_sessions_lock);
    mtx_destroy(&sc->sc_requests_lock);
    mtx_destroy(&sc->sc_sdesc_lock);
    mtx_destroy(&sc->sc_tdesc_lock);
    mtx_destroy(&sc->sc_sc_lock);

    return (0);
}

static void
cesa_intr(void *arg)
{
    STAILQ_HEAD(, cesa_request) requests;
    struct cesa_request *cr, *tmp;
    struct cesa_softc *sc;
    uint32_t ecr, icr;
    uint8_t hash[HASH_MAX_LEN];
    int blocked;

    sc = arg;

    /* Ack interrupt */
    ecr = CESA_TDMA_READ(sc, CESA_TDMA_ECR);
    CESA_TDMA_WRITE(sc, CESA_TDMA_ECR, 0);
    icr = CESA_REG_READ(sc, CESA_ICR);
    CESA_REG_WRITE(sc, CESA_ICR, 0);

    /* Check for TDMA errors */
    if (ecr & CESA_TDMA_ECR_MISS) {
        device_printf(sc->sc_dev, "TDMA Miss error detected!\n");
        sc->sc_error = EIO;
    }

    if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) {
        device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n");
        sc->sc_error = EIO;
    }

    if (ecr & CESA_TDMA_ECR_BOTH_HIT) {
        device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n");
        sc->sc_error = EIO;
    }

    if (ecr & CESA_TDMA_ECR_DATA_ERROR) {
        device_printf(sc->sc_dev, "TDMA Data error detected!\n");
        sc->sc_error = EIO;
    }

    /* Check for CESA errors */
    if (icr & sc->sc_tperr) {
        device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n");
        sc->sc_error = EIO;
    }

    /* If there is nothing more to do, return */
    if ((icr & CESA_ICR_ACCTDMA) == 0)
        return;

    /* Get all finished requests */
    CESA_LOCK(sc, requests);
    STAILQ_INIT(&requests);
    STAILQ_CONCAT(&requests, &sc->sc_queued_requests);
    STAILQ_INIT(&sc->sc_queued_requests);
    CESA_UNLOCK(sc, requests);

    /* Execute all ready requests */
    cesa_execute(sc);

    /* Process completed requests */
    cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD |
        BUS_DMASYNC_POSTWRITE);

    STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) {
        bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cr->cr_crp->crp_etype = sc->sc_error;
        if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
            if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
                crypto_copydata(cr->cr_crp,
                    cr->cr_crp->crp_digest_start,
                    cr->cr_cs->cs_hlen, hash);
                if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash,
                    cr->cr_cs->cs_hlen) != 0)
                    cr->cr_crp->crp_etype = EBADMSG;
            } else
                crypto_copyback(cr->cr_crp,
                    cr->cr_crp->crp_digest_start,
                    cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
        }
        crypto_done(cr->cr_crp);
        cesa_free_request(sc, cr);
    }

    cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD |
        BUS_DMASYNC_PREWRITE);

    sc->sc_error = 0;

    /* Unblock driver if it ran out of resources */
    CESA_LOCK(sc, sc);
    blocked = sc->sc_blocked;
    sc->sc_blocked = 0;
    CESA_UNLOCK(sc, sc);

    if (blocked)
        crypto_unblock(sc->sc_cid, blocked);
}

static bool
cesa_cipher_supported(const struct crypto_session_params *csp)
{

    switch (csp->csp_cipher_alg) {
    case CRYPTO_AES_CBC:
        if (csp->csp_ivlen != AES_BLOCK_LEN)
            return (false);
        break;
    default:
        return (false);
    }

    if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN)
        return (false);

    return (true);
}

static bool
cesa_auth_supported(struct cesa_softc *sc,
    const struct crypto_session_params *csp)
{

    switch (csp->csp_auth_alg) {
    case CRYPTO_SHA2_256_HMAC:
        if (!(sc->sc_soc_id == MV_DEV_88F6828 ||
            sc->sc_soc_id == MV_DEV_88F6820 ||
            sc->sc_soc_id == MV_DEV_88F6810))
            return (false);
        /* FALLTHROUGH */
    case CRYPTO_SHA1:
    case CRYPTO_SHA1_HMAC:
        break;
    default:
        return (false);
    }

    if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN)
        return (false);

    return (true);
}

static int
cesa_probesession(device_t dev, const struct crypto_session_params *csp)
{
    struct cesa_softc *sc;

    sc = device_get_softc(dev);
    if (csp->csp_flags != 0)
        return (EINVAL);
    switch (csp->csp_mode) {
    case CSP_MODE_DIGEST:
        if (!cesa_auth_supported(sc, csp))
            return (EINVAL);
        break;
    case CSP_MODE_CIPHER:
        if (!cesa_cipher_supported(csp))
            return (EINVAL);
        break;
    case CSP_MODE_ETA:
        if (!cesa_auth_supported(sc, csp) ||
            !cesa_cipher_supported(csp))
            return (EINVAL);
        break;
    default:
        return (EINVAL);
    }
    return (CRYPTODEV_PROBE_HARDWARE);
}

static int
cesa_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
    struct cesa_session *cs;
    int error;

    error = 0;

    /* Allocate session */
    cs = crypto_get_driver_session(cses);

    /* Prepare CESA configuration */
    cs->cs_config = 0;
    cs->cs_ivlen = 1;
    cs->cs_mblen = 1;

    switch (csp->csp_cipher_alg) {
    case CRYPTO_AES_CBC:
        cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
        cs->cs_ivlen = AES_BLOCK_LEN;
        break;
    }

    switch (csp->csp_auth_alg) {
    case CRYPTO_SHA1:
        cs->cs_mblen = 1;
        cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
            csp->csp_auth_mlen;
        cs->cs_config |= CESA_CSHD_SHA1;
        break;
    case CRYPTO_SHA1_HMAC:
        cs->cs_mblen = SHA1_BLOCK_LEN;
        cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
            csp->csp_auth_mlen;
        cs->cs_config |= CESA_CSHD_SHA1_HMAC;
        if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
            cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
        break;
    case CRYPTO_SHA2_256_HMAC:
        cs->cs_mblen = SHA2_256_BLOCK_LEN;
        cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN :
            csp->csp_auth_mlen;
        cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
        break;
    }

    /* Save cipher key */
    if (csp->csp_cipher_key != NULL) {
        memcpy(cs->cs_key, csp->csp_cipher_key,
            csp->csp_cipher_klen);
        if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
            error = cesa_prep_aes_key(cs, csp);
    }

    /* Save digest key */
    if (csp->csp_auth_key != NULL)
        cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key,
            csp->csp_auth_klen);

    return (error);
}

static int
cesa_process(device_t dev, struct cryptop *crp, int hint)
{
    const struct crypto_session_params *csp;
    struct cesa_request *cr;
    struct cesa_session *cs;
    struct cesa_softc *sc;
    int error;

    sc = device_get_softc(dev);
    error = 0;

    cs = crypto_get_driver_session(crp->crp_session);
    csp = crypto_get_params(crp->crp_session);

    /* Check and parse input */
    if (crypto_buffer_len(&crp->crp_buf) > CESA_MAX_REQUEST_SIZE) {
        crp->crp_etype = E2BIG;
        crypto_done(crp);
        return (0);
    }

    /*
     * For requests with AAD, only requests where the AAD is
     * immediately adjacent to the payload are supported.
     */
    if (crp->crp_aad_length != 0 &&
        (crp->crp_aad_start + crp->crp_aad_length) !=
        crp->crp_payload_start) {
        crp->crp_etype = EINVAL;
        crypto_done(crp);
        return (0);
    }

    /*
     * Get request descriptor.  Block the driver if there are no
     * free descriptors in the pool.
     */
    cr = cesa_alloc_request(sc);
    if (!cr) {
        CESA_LOCK(sc, sc);
        sc->sc_blocked = CRYPTO_SYMQ;
        CESA_UNLOCK(sc, sc);
        return (ERESTART);
    }

    /* Prepare request */
    cr->cr_crp = crp;
    cr->cr_cs = cs;

    CESA_LOCK(sc, sessions);
    cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    if (csp->csp_cipher_alg != 0)
        crypto_read_iv(crp, cr->cr_csd->csd_iv);

    if (crp->crp_cipher_key != NULL) {
        memcpy(cs->cs_key, crp->crp_cipher_key,
            csp->csp_cipher_klen);
        if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
            error = cesa_prep_aes_key(cs, csp);
    }

    if (!error && crp->crp_auth_key != NULL)
        cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key,
            csp->csp_auth_klen);

    /* Convert request to chain of TDMA and SA descriptors */
    if (!error)
        error = cesa_create_chain(sc, csp, cr);

    cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    CESA_UNLOCK(sc, sessions);

    if (error) {
        cesa_free_request(sc, cr);
        crp->crp_etype = error;
        crypto_done(crp);
        return (0);
    }

    bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD |
        BUS_DMASYNC_PREWRITE);

    /* Enqueue request to execution */
    cesa_enqueue_request(sc, cr);

    /* Start execution if we have no more requests in the queue */
    if ((hint & CRYPTO_HINT_MORE) == 0)
        cesa_execute(sc);

    return (0);
}