GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/opencrypto/crypto.c
/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid().
 */
static struct mtx crypto_drivers_mtx;	/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Not tagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	uint32_t	cc_sessions;	/* (d) # of sessions */

	int		cc_flags;	/* (d) flags */
#define	CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;	/* (q) symmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static struct cryptocap **crypto_drivers = NULL;
static int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};

static int crp_sleep = 0;
static TAILQ_HEAD(cryptop_q, cryptop) crp_q;	/* request queues */
static struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests submitted with
 * crypto_dispatch_async().
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define	CRYPTO_SEQ_GT(a, b)	((int)((a) - (b)) > 0)

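/*
 * Reducing the comparison to the sign of the 32-bit difference makes
 * CRYPTO_SEQ_GT() robust across wraparound, the same trick used for
 * TCP sequence number comparison.  An illustrative sketch:
 *
 *	CRYPTO_SEQ_GT(0x00000002, 0xfffffffe) is true, since
 *	    (int)(0x00000002 - 0xfffffffe) == 4 > 0, even though the
 *	    first argument is numerically smaller;
 *	CRYPTO_SEQ_GT(0xfffffffe, 0x00000002) is false.
 */
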
struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(, cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(, cryptop) crp_ret_q;	 /* callback queue for symmetric jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct thread *td;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define	CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define	CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define	FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static uma_zone_t cryptop_zone;

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

#ifdef DIAGNOSTIC
bool crypto_destroyreq_check;
SYSCTL_BOOL(_kern_crypto, OID_AUTO, destroyreq_check, CTLFLAG_RWTUN,
    &crypto_destroyreq_check, 0,
    "Enable checks when destroying a request");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void crypto_dispatch_thread(void *arg);
static struct thread *cryptotd;
static void crypto_ret_thread(void *arg);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)

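/*
 * The offsetof() arithmetic above works because struct cryptostats is
 * laid out as consecutive uint64_t fields, so each field maps to one
 * slot of the parallel counter array.  An illustrative expansion using
 * the cs_ops field (incremented later in this file):
 *
 *	CRYPTOSTAT_INC(cs_ops);
 * becomes
 *	counter_u64_add(cryptostats[
 *	    offsetof(struct cryptostats, cs_ops) / sizeof(uint64_t)], 1);
 */
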
static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	keybuf = (struct keybuf *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));

	free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	struct proc *p;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto driver table", NULL, MTX_DEF);

	TAILQ_INIT(&crp_q);
	mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	p = NULL;
	error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd,
	    0, 0, "crypto", "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
		    error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto return queues",
		    NULL, MTX_DEF);

		error = kthread_add(crypto_ret_thread, ret_worker, p,
		    &ret_worker->td, 0, 0, "crypto returns %td",
		    CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
			    error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct thread **tdp, void *q)
{
	struct thread *td;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	td = *tdp;
	*tdp = NULL;
	if (td != NULL) {
		wakeup_one(q);
		mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0);
	}
}

static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}

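/*
 * Together these implement the key setup for the standard HMAC
 * construction (RFC 2104):
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded (or first hashed, when longer than
 * the block size) out to the hash's block size.  A caller runs one
 * context initialized with hmac_init_ipad() over the message, then
 * feeds the resulting digest through a second context initialized
 * with hmac_init_opad().
 */
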
static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptotd, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}

const struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_RIPEMD160:
		return (&auth_hash_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}

const struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		return (&enc_xform_aes_cbc);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	case CRYPTO_CHACHA20_POLY1305:
		return (&enc_xform_chacha20_poly1305);
	case CRYPTO_XCHACHA20_POLY1305:
		return (&enc_xform_xchacha20_poly1305);
	default:
		return (NULL);
	}
}

static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return best;
}

static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
	[CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
	[CRYPTO_XCHACHA20_POLY1305] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}

static bool
ccm_tag_length_valid(int len)
{
	/* RFC 3610 */
	switch (len) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		return (true);
	default:
		return (false);
	}
}

#define	SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_CCM_CBC_MAC:
			if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
				return (false);
			break;
		case CRYPTO_AES_NIST_GMAC:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (false);
			break;
		default:
			if (csp->csp_ivlen != 0)
				return (false);
			break;
		}

		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);

			if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC &&
			    !ccm_tag_length_valid(csp->csp_auth_mlen))
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CCM_16:
			if (csp->csp_auth_mlen != 0 &&
			    !ccm_tag_length_valid(csp->csp_auth_mlen))
				return (false);

			if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
				return (false);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
				return (false);

			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (false);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12)
				return (false);
			if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
				return (false);
			break;
		case CRYPTO_XCHACHA20_POLY1305:
			if (csp->csp_ivlen != XCHACHA20_POLY1305_IV_LEN)
				return (false);
			if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}

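/*
 * Illustrative consumer-side sketch (not part of this file); the
 * AES-GCM parameters are an example only and "key" stands for the
 * caller's 32-byte key buffer:
 *
 *	struct crypto_session_params csp;
 *	crypto_session_t sid;
 *	int error;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_AEAD;
 *	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
 *	csp.csp_cipher_key = key;
 *	csp.csp_cipher_klen = 256 / 8;
 *	csp.csp_ivlen = AES_GCM_IV_LEN;
 *	error = crypto_newsession(&sid, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *
 * A zeroed csp with only the needed fields set satisfies check_csp()
 * above, provided the CSP_MODE_AEAD constraints hold (e.g. the GCM IV
 * length must equal AES_GCM_IV_LEN).
 */
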
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return -1;
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return i;
}

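/*
 * Illustrative driver-side sketch (hypothetical names, not part of
 * this file): a driver typically registers from its attach routine,
 * passing the size of its per-session state.
 *
 *	sc->sc_cid = crypto_get_driverid(dev,
 *	    sizeof(struct example_session), CRYPTOCAP_F_SOFTWARE);
 *	if (sc->sc_cid < 0)
 *		return (ENXIO);
 *
 * The sessionsize argument tells the framework how much driver softc
 * space to allocate after struct crypto_session; the driver later
 * retrieves it with crypto_get_driver_session().
 */
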
/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}

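/*
 * Since the comparison uses strlen(match) characters, for example
 * crypto_find_driver("aesni0") matches that specific unit, while
 * crypto_find_driver("aesni") matches the first registered unit of
 * the driver regardless of unit number; "aesni" is used here purely
 * as an illustration.
 */
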
/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_SINGLE_MBUF:
		return (cb->cb_mbuf->m_len);
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on supported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size"
		    , name));
		break;
	default:
		break;
	}
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
		    ("AEAD without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else if (csp->csp_mode == CSP_MODE_DIGEST) {
		KASSERT(!(crp->crp_op & CRYPTO_OP_VERIFY_DIGEST),
		    ("digest verify with separate output buffer"));
		KASSERT(crp->crp_payload_output_start == 0,
		    ("digest operation with non-zero payload output start"));
	} else {
		KASSERT(crp->crp_payload_output_start == 0 ||
		    crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

	/*
	 * Caller marked the request to be processed immediately; dispatch it
	 * directly to the driver unless the driver is currently blocked, in
	 * which case it is queued for deferred dispatch.
	 */
	cap = crp->crp_session->cap;
	if (!atomic_load_int(&cap->cc_qblocked)) {
		result = crypto_invoke(cap, crp, hint);
		if (result != ERESTART)
			return (result);

		/*
		 * The driver ran out of resources, put the request on the
		 * queue.
		 */
	}
	crypto_batch_enqueue(crp);
	return (0);
}

int
crypto_dispatch(struct cryptop *crp)
{
	return (crypto_dispatch_one(crp, 0));
}

int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
	struct crypto_ret_worker *ret_worker;

	if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
		/*
		 * The driver issues completions asynchronously, don't bother
		 * deferring dispatch to a worker thread.
		 */
		return (crypto_dispatch(crp));
	}

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
	if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
		crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
		CRYPTO_RETW_LOCK(ret_worker);
		crp->crp_seq = ret_worker->reorder_ops++;
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
	TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
	taskqueue_enqueue(crypto_tq, &crp->crp_task);
	return (0);
}

void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
	struct cryptop *crp;
	int hint;

	while ((crp = TAILQ_FIRST(crpq)) != NULL) {
		hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
		TAILQ_REMOVE(crpq, crp, crp_next);
		if (crypto_dispatch_one(crp, hint) != 0)
			crypto_batch_enqueue(crp);
	}
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}

static void
crypto_task_invoke(void *ctx, int pending)
{
	struct cryptocap *cap;
	struct cryptop *crp;
	int result;

	crp = (struct cryptop *)ctx;
	cap = crp->crp_session->cap;
	result = crypto_invoke(cap, crp, 0);
	if (result == ERESTART)
		crypto_batch_enqueue(crp);
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{
	int error;

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_session != NULL,
	    ("%s: crp->crp_session == NULL", __func__));

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct crypto_session_params csp;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 *
		 * XXX: Real solution is to make sessions refcounted
		 * and force callers to hold a reference when
		 * assigning to crp_session.  Could maybe change
		 * crypto_getreq to accept a session pointer to make
		 * that work.  Alternatively, we could abandon the
		 * notion of rewriting crp_session in requests forcing
		 * the caller to deal with allocating a new session.
		 * Perhaps provide a method to allow a crp's session to
		 * be swapped that callers could use.
		 */
		csp = crp->crp_session->csp;
		crypto_freesession(crp->crp_session);

		/*
		 * XXX: Key pointers may no longer be valid.  If we
		 * really want to support this we need to define the
		 * KPI such that 'csp' is required to be valid for the
		 * duration of a session by the caller perhaps.
		 *
		 * XXX: If the keys have been changed this will reuse
		 * the old keys.  This probably suggests making
		 * rekeying more explicit and updating the key
		 * pointers in 'csp' when the keys change.
		 */
		if (crypto_newsession(&nses, &csp,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		error = 0;
	} else {
		/*
		 * Invoke the driver to process the request.  Errors are
		 * signaled by setting crp_etype before invoking the completion
		 * callback.
		 */
		error = CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
		KASSERT(error == 0 || error == ERESTART,
		    ("%s: invalid error %d from CRYPTODEV_PROCESS",
		    __func__, error));
	}
	return (error);
}

void
crypto_destroyreq(struct cryptop *crp)
{
#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		struct crypto_ret_worker *ret_worker;

		if (!crypto_destroyreq_check)
			return;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();

		FOREACH_CRYPTO_RETW(ret_worker) {
			CRYPTO_RETW_LOCK(ret_worker);
			TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
				KASSERT(crp2 != crp,
				    ("Freeing cryptop from the return queue (%p).",
				    crp));
			}
			CRYPTO_RETW_UNLOCK(ret_worker);
		}
	}
#endif
}

void
crypto_freereq(struct cryptop *crp)
{
	if (crp == NULL)
		return;

	crypto_destroyreq(crp);
	uma_zfree(cryptop_zone, crp);
}

void
crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	memset(crp, 0, sizeof(*crp));
	crp->crp_session = cses;
}

struct cryptop *
crypto_getreq(crypto_session_t cses, int how)
{
	struct cryptop *crp;

	MPASS(how == M_WAITOK || how == M_NOWAIT);
	crp = uma_zalloc(cryptop_zone, how);
	if (crp != NULL)
		crypto_initreq(crp, cses);
	return (crp);
}

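/*
 * Illustrative sketch of a request's life cycle (hypothetical
 * variables: sid from crypto_newsession(), mbuf m of length len, and
 * an IV in iv[]; not part of this file):
 *
 *	static void
 *	example_done(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype != 0)
 *			printf("crypto request failed: %d\n", crp->crp_etype);
 *		crypto_freereq(crp);
 *	}
 *
 *	crp = crypto_getreq(sid, M_WAITOK);
 *	crypto_use_mbuf(crp, m);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
 *	crp->crp_flags = CRYPTO_F_IV_SEPARATE;
 *	memcpy(crp->crp_iv, iv, AES_GCM_IV_LEN);
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = len;
 *	crp->crp_callback = example_done;
 *	(void)crypto_dispatch(crp);
 *
 * Completion is reported by invoking crp_callback with crp_etype set
 * on error; the callback owns the request and normally frees it.
 * Details such as digest placement (crp_digest_start) are omitted.
 */
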
/*
 * Clone a crypto request, but associate it with the specified session
 * rather than inheriting the session from the original request.  The
 * fields describing the request buffers are copied, but not the
 * opaque field or callback function.
 */
struct cryptop *
crypto_clonereq(struct cryptop *crp, crypto_session_t cses, int how)
{
	struct cryptop *new;

	new = crypto_getreq(cses, how);
	if (new == NULL)
		return (NULL);

	memcpy(&new->crp_startcopy, &crp->crp_startcopy,
	    __rangeof(struct cryptop, crp_startcopy, crp_endcopy));
	return (new);
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		CRYPTOSTAT_INC(cs_errs);

	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
	    ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
	    CRYPTO_SESS_SYNC(crp->crp_session)))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_callback(crp);
	} else {
		struct crypto_ret_worker *ret_worker;
		bool wake;

		ret_worker = CRYPTO_RETW(crp->crp_retw_id);

		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETW_LOCK(ret_worker);
		if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
			struct cryptop *tmp;

			TAILQ_FOREACH_REVERSE(tmp,
			    &ret_worker->crp_ordered_ret_q, cryptop_q,
			    crp_next) {
				if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
					TAILQ_INSERT_AFTER(
					    &ret_worker->crp_ordered_ret_q, tmp,
					    crp, crp_next);
					break;
				}
			}
			if (tmp == NULL) {
				TAILQ_INSERT_HEAD(
				    &ret_worker->crp_ordered_ret_q, crp,
				    crp_next);
			}

			wake = crp->crp_seq == ret_worker->reorder_cur_seq;
		} else {
			wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
			TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
			    crp_next);
		}

		if (wake)
			wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
}

/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kthread_exit();
}

/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_dispatch_thread(void *arg __unused)
{
	struct cryptop *crp, *submit;
	struct cryptocap *cap;
	int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	fpu_kern_thread(FPU_KERN_NORMAL);
#endif

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			cap = crp->crp_session->cap;
			/*
			 * The driver cannot disappear while there is
			 * an active session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for
					 * the same driver or not.  We could
					 * keep searching the queue but it
					 * might be better to just use a
					 * per-driver queue instead.
					 */
					if (submit->crp_session->cap == cap)
						hint = CRYPTO_HINT_MORE;
				} else {
					submit = crp;
				}
				break;
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			cap = submit->crp_session->cap;
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			CRYPTO_Q_UNLOCK();
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				cap->cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				CRYPTOSTAT_INC(cs_blocks);
			}
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front.  It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			crp_sleep = 1;
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			crp_sleep = 0;
			if (cryptotd == NULL)
				break;
			CRYPTOSTAT_INC(cs_intrs);
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_thread(void *arg)
{
	struct crypto_ret_worker *ret_worker = arg;
	struct cryptop *crpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next);
		}

		if (crpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			crpt->crp_callback(crpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT,
			    "crypto_ret_wait", 0);
			if (ret_worker->td == NULL)
				break;
			CRYPTOSTAT_INC(cs_rets);
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}

#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %8s %2s\n"
		, "Device"
		, "Ses"
		, "Flags"
		, "QB"
	);
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];
		if (cap == NULL)
			continue;
		db_printf("%-12s %4u %08x %2u\n"
			, device_get_nameunit(cap->cc_dev)
			, cap->cc_sessions
			, cap->cc_flags
			, cap->cc_qblocked
		);
	}
}

DB_SHOW_COMMAND_FLAGS(crypto, db_show_crypto, DB_CMD_MEMSAFE)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Olen", "Etype", "Flags",
	    "Device", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %4u %04x %8s %8p\n"
		    , crp->crp_session->cap->cc_hid
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_olen
		    , crp->crp_etype
		    , crp->crp_flags
		    , device_get_nameunit(crp->crp_session->cap->cc_dev)
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , crp->crp_session->cap->cc_hid
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}
#endif

int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/* XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		return 0;
	}
	return error;
}
MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);