/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 * Largely borrowed from ccr(4), Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <dev/pci/pcivar.h>

#include <dev/random/randomdev.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include "ccp.h"
#include "ccp_hardware.h"

MALLOC_DEFINE(M_CCP, "ccp", "AMD CCP crypto");

/*
 * Need a global softc available for garbage random_source API, which lacks any
 * context pointer.  It's also handy for debugging.
 */
struct ccp_softc *g_ccp_softc;

bool g_debug_print = false;
SYSCTL_BOOL(_hw_ccp, OID_AUTO, debug, CTLFLAG_RWTUN, &g_debug_print, 0,
    "Set to enable debugging log messages");

static struct pciid {
        uint32_t devid;
        const char *desc;
} ccp_ids[] = {
        { 0x14561022, "AMD CCP-5a" },
        { 0x14681022, "AMD CCP-5b" },
        { 0x15df1022, "AMD CCP-5a" },
};
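
/*
 * Each devid above packs the PCI device ID in the upper 16 bits and the
 * vendor ID in the lower 16 bits, matching the format pci_get_devid()
 * returns; e.g., 0x14561022 is device 0x1456, vendor 0x1022 (AMD).
 */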

static const struct random_source random_ccp = {
        .rs_ident = "AMD CCP TRNG",
        .rs_source = RANDOM_PURE_CCP,
        .rs_read = random_ccp_read,
};

/*
 * ccp_populate_sglist() generates a scatter/gather list that covers the entire
 * crypto operation buffer.
 */
static int
ccp_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
{
        int error;

        sglist_reset(sg);
        switch (cb->cb_type) {
        case CRYPTO_BUF_MBUF:
                error = sglist_append_mbuf(sg, cb->cb_mbuf);
                break;
        case CRYPTO_BUF_SINGLE_MBUF:
                error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
                break;
        case CRYPTO_BUF_UIO:
                error = sglist_append_uio(sg, cb->cb_uio);
                break;
        case CRYPTO_BUF_CONTIG:
                error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
                break;
        case CRYPTO_BUF_VMPAGE:
                error = sglist_append_vmpages(sg, cb->cb_vm_page,
                    cb->cb_vm_page_offset, cb->cb_vm_page_len);
                break;
        default:
                error = EINVAL;
        }
        return (error);
}
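
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how a
 * request's buffer is mapped through ccp_populate_sglist() before any
 * descriptors are built, mirroring the call in ccp_process() below.
 */
#if 0
static int
ccp_map_request_example(struct ccp_queue *qp, struct cryptop *crp)
{
        int error;

        /* Reset cq_sg_crp and append one segment per piece of the buffer. */
        error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf);
        if (error != 0)
                return (error);
        /* cq_sg_crp now covers the entire crypto operation buffer. */
        return (0);
}
#endif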

static int
ccp_probe(device_t dev)
{
        struct pciid *ip;
        uint32_t id;

        id = pci_get_devid(dev);
        for (ip = ccp_ids; ip < &ccp_ids[nitems(ccp_ids)]; ip++) {
                if (id == ip->devid) {
                        device_set_desc(dev, ip->desc);
                        return (0);
                }
        }
        return (ENXIO);
}

static void
ccp_initialize_queues(struct ccp_softc *sc)
{
        struct ccp_queue *qp;
        size_t i;

        for (i = 0; i < nitems(sc->queues); i++) {
                qp = &sc->queues[i];

                qp->cq_softc = sc;
                qp->cq_qindex = i;
                mtx_init(&qp->cq_lock, "ccp queue", NULL, MTX_DEF);
                /* XXX - arbitrarily chosen sizes */
                qp->cq_sg_crp = sglist_alloc(32, M_WAITOK);
                /* Two more SGEs than sg_crp to accommodate ipad. */
                qp->cq_sg_ulptx = sglist_alloc(34, M_WAITOK);
                qp->cq_sg_dst = sglist_alloc(2, M_WAITOK);
        }
}

static void
ccp_free_queues(struct ccp_softc *sc)
{
        struct ccp_queue *qp;
        size_t i;

        for (i = 0; i < nitems(sc->queues); i++) {
                qp = &sc->queues[i];

                mtx_destroy(&qp->cq_lock);
                sglist_free(qp->cq_sg_crp);
                sglist_free(qp->cq_sg_ulptx);
                sglist_free(qp->cq_sg_dst);
        }
}

static int
ccp_attach(device_t dev)
{
        struct ccp_softc *sc;
        int error;

        sc = device_get_softc(dev);
        sc->dev = dev;

        sc->cid = crypto_get_driverid(dev, sizeof(struct ccp_session),
            CRYPTOCAP_F_HARDWARE);
        if (sc->cid < 0) {
                device_printf(dev, "could not get crypto driver id\n");
                return (ENXIO);
        }

        error = ccp_hw_attach(dev);
        if (error != 0)
                return (error);

        mtx_init(&sc->lock, "ccp", NULL, MTX_DEF);

        ccp_initialize_queues(sc);

        if (g_ccp_softc == NULL) {
                g_ccp_softc = sc;
                if ((sc->hw_features & VERSION_CAP_TRNG) != 0)
                        random_source_register(&random_ccp);
        }

        return (0);
}

static int
ccp_detach(device_t dev)
{
        struct ccp_softc *sc;

        sc = device_get_softc(dev);

        mtx_lock(&sc->lock);
        sc->detaching = true;
        mtx_unlock(&sc->lock);

        crypto_unregister_all(sc->cid);
        if (g_ccp_softc == sc && (sc->hw_features & VERSION_CAP_TRNG) != 0)
                random_source_deregister(&random_ccp);

        ccp_hw_detach(dev);
        ccp_free_queues(sc);

        if (g_ccp_softc == sc)
                g_ccp_softc = NULL;

        mtx_destroy(&sc->lock);
        return (0);
}

static void
ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen)
{
        union authctx auth_ctx;
        const struct auth_hash *axf;
        u_int i;

        /*
         * If the key is larger than the block size, use the digest of
         * the key as the key instead.
         */
        axf = s->hmac.auth_hash;
        if (klen > axf->blocksize) {
                axf->Init(&auth_ctx);
                axf->Update(&auth_ctx, key, klen);
                axf->Final(s->hmac.ipad, &auth_ctx);
                explicit_bzero(&auth_ctx, sizeof(auth_ctx));
                klen = axf->hashsize;
        } else
                memcpy(s->hmac.ipad, key, klen);

        memset(s->hmac.ipad + klen, 0, axf->blocksize - klen);
        memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize);

        for (i = 0; i < axf->blocksize; i++) {
                s->hmac.ipad[i] ^= HMAC_IPAD_VAL;
                s->hmac.opad[i] ^= HMAC_OPAD_VAL;
        }
}
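
/*
 * For reference, the pads prepared above implement the standard HMAC
 * construction (RFC 2104):
 *
 *      HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded to the hash block size (or first hashed,
 * if it is longer than the block size), so only the two hash passes over
 * the padded keys and the message remain to be computed per request.
 */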

static bool
ccp_aes_check_keylen(int alg, int klen)
{

        switch (klen * 8) {
        case 128:
        case 192:
                if (alg == CRYPTO_AES_XTS)
                        return (false);
                break;
        case 256:
                break;
        case 512:
                if (alg != CRYPTO_AES_XTS)
                        return (false);
                break;
        default:
                return (false);
        }
        return (true);
}

static void
ccp_aes_setkey(struct ccp_session *s, int alg, const void *key, int klen)
{
        unsigned kbits;

        if (alg == CRYPTO_AES_XTS)
                kbits = (klen / 2) * 8;
        else
                kbits = klen * 8;

        switch (kbits) {
        case 128:
                s->blkcipher.cipher_type = CCP_AES_TYPE_128;
                break;
        case 192:
                s->blkcipher.cipher_type = CCP_AES_TYPE_192;
                break;
        case 256:
                s->blkcipher.cipher_type = CCP_AES_TYPE_256;
                break;
        default:
                panic("should not get here");
        }

        s->blkcipher.key_len = klen;
        memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}
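
/*
 * Worked example: an AES-XTS key blob holds both the data key and the tweak
 * key, so a 64-byte (512-bit) XTS key gives kbits = (64 / 2) * 8 = 256 above
 * and selects CCP_AES_TYPE_256, while key_len still records all 64 bytes
 * copied into enckey.
 */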

static bool
ccp_auth_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

        if ((sc->hw_features & VERSION_CAP_SHA) == 0)
                return (false);
        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
                if (csp->csp_auth_key == NULL)
                        return (false);
                break;
        default:
                return (false);
        }
        return (true);
}

static bool
ccp_cipher_supported(struct ccp_softc *sc,
    const struct crypto_session_params *csp)
{

        if ((sc->hw_features & VERSION_CAP_AES) == 0)
                return (false);
        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
                if (csp->csp_ivlen != AES_BLOCK_LEN)
                        return (false);
                break;
        case CRYPTO_AES_ICM:
                if (csp->csp_ivlen != AES_BLOCK_LEN)
                        return (false);
                break;
        case CRYPTO_AES_XTS:
                if (csp->csp_ivlen != AES_XTS_IV_LEN)
                        return (false);
                break;
        default:
                return (false);
        }
        return (ccp_aes_check_keylen(csp->csp_cipher_alg,
            csp->csp_cipher_klen));
}

static int
ccp_probesession(device_t dev, const struct crypto_session_params *csp)
{
        struct ccp_softc *sc;

        if (csp->csp_flags != 0)
                return (EINVAL);
        sc = device_get_softc(dev);
        switch (csp->csp_mode) {
        case CSP_MODE_DIGEST:
                if (!ccp_auth_supported(sc, csp))
                        return (EINVAL);
                break;
        case CSP_MODE_CIPHER:
                if (!ccp_cipher_supported(sc, csp))
                        return (EINVAL);
                break;
        case CSP_MODE_AEAD:
                switch (csp->csp_cipher_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                        if ((sc->hw_features & VERSION_CAP_AES) == 0)
                                return (EINVAL);
                        break;
                default:
                        return (EINVAL);
                }
                break;
        case CSP_MODE_ETA:
                if (!ccp_auth_supported(sc, csp) ||
                    !ccp_cipher_supported(sc, csp))
                        return (EINVAL);
                break;
        default:
                return (EINVAL);
        }

        return (CRYPTODEV_PROBE_HARDWARE);
}

static int
ccp_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
        struct ccp_softc *sc;
        struct ccp_session *s;
        const struct auth_hash *auth_hash;
        enum ccp_aes_mode cipher_mode;
        unsigned auth_mode;
        unsigned q;

        /* XXX reconcile auth_mode with use by ccp_sha */
        switch (csp->csp_auth_alg) {
        case CRYPTO_SHA1_HMAC:
                auth_hash = &auth_hash_hmac_sha1;
                auth_mode = SHA1;
                break;
        case CRYPTO_SHA2_256_HMAC:
                auth_hash = &auth_hash_hmac_sha2_256;
                auth_mode = SHA2_256;
                break;
        case CRYPTO_SHA2_384_HMAC:
                auth_hash = &auth_hash_hmac_sha2_384;
                auth_mode = SHA2_384;
                break;
        case CRYPTO_SHA2_512_HMAC:
                auth_hash = &auth_hash_hmac_sha2_512;
                auth_mode = SHA2_512;
                break;
        default:
                auth_hash = NULL;
                auth_mode = 0;
                break;
        }

        switch (csp->csp_cipher_alg) {
        case CRYPTO_AES_CBC:
                cipher_mode = CCP_AES_MODE_CBC;
                break;
        case CRYPTO_AES_ICM:
                cipher_mode = CCP_AES_MODE_CTR;
                break;
        case CRYPTO_AES_NIST_GCM_16:
                cipher_mode = CCP_AES_MODE_GCTR;
                break;
        case CRYPTO_AES_XTS:
                cipher_mode = CCP_AES_MODE_XTS;
                break;
        default:
                cipher_mode = CCP_AES_MODE_ECB;
                break;
        }

        sc = device_get_softc(dev);
        mtx_lock(&sc->lock);
        if (sc->detaching) {
                mtx_unlock(&sc->lock);
                return (ENXIO);
        }

        s = crypto_get_driver_session(cses);

        /* Just grab the first usable queue for now. */
        for (q = 0; q < nitems(sc->queues); q++)
                if ((sc->valid_queues & (1 << q)) != 0)
                        break;
        if (q == nitems(sc->queues)) {
                mtx_unlock(&sc->lock);
                return (ENXIO);
        }
        s->queue = q;

        switch (csp->csp_mode) {
        case CSP_MODE_AEAD:
                s->mode = GCM;
                break;
        case CSP_MODE_ETA:
                s->mode = AUTHENC;
                break;
        case CSP_MODE_DIGEST:
                s->mode = HMAC;
                break;
        case CSP_MODE_CIPHER:
                s->mode = BLKCIPHER;
                break;
        }

        if (s->mode == GCM) {
                if (csp->csp_auth_mlen == 0)
                        s->gmac.hash_len = AES_GMAC_HASH_LEN;
                else
                        s->gmac.hash_len = csp->csp_auth_mlen;
        } else if (auth_hash != NULL) {
                s->hmac.auth_hash = auth_hash;
                s->hmac.auth_mode = auth_mode;
                if (csp->csp_auth_mlen == 0)
                        s->hmac.hash_len = auth_hash->hashsize;
                else
                        s->hmac.hash_len = csp->csp_auth_mlen;
                ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen);
        }
        if (cipher_mode != CCP_AES_MODE_ECB) {
                s->blkcipher.cipher_mode = cipher_mode;
                if (csp->csp_cipher_key != NULL)
                        ccp_aes_setkey(s, csp->csp_cipher_alg,
                            csp->csp_cipher_key, csp->csp_cipher_klen);
        }

        s->active = true;
        mtx_unlock(&sc->lock);

        return (0);
}

static void
ccp_freesession(device_t dev, crypto_session_t cses)
{
        struct ccp_session *s;

        s = crypto_get_driver_session(cses);

        if (s->pending != 0)
                device_printf(dev,
                    "session %p freed with %d pending requests\n", s,
                    s->pending);
        s->active = false;
}

static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
        const struct crypto_session_params *csp;
        struct ccp_softc *sc;
        struct ccp_queue *qp;
        struct ccp_session *s;
        int error;
        bool qpheld;

        qpheld = false;
        qp = NULL;

        csp = crypto_get_params(crp->crp_session);
        s = crypto_get_driver_session(crp->crp_session);
        sc = device_get_softc(dev);
        mtx_lock(&sc->lock);
        qp = &sc->queues[s->queue];
        mtx_unlock(&sc->lock);
        error = ccp_queue_acquire_reserve(qp, 1 /* placeholder */, M_NOWAIT);
        if (error != 0)
                goto out;
        qpheld = true;

        error = ccp_populate_sglist(qp->cq_sg_crp, &crp->crp_buf);
        if (error != 0)
                goto out;

        if (crp->crp_auth_key != NULL) {
                KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC"));
                ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen);
        }
        if (crp->crp_cipher_key != NULL)
                ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key,
                    csp->csp_cipher_klen);

        switch (s->mode) {
        case HMAC:
                if (s->pending != 0) {
                        error = EAGAIN;
                        break;
                }
                error = ccp_hmac(qp, s, crp);
                break;
        case BLKCIPHER:
                if (s->pending != 0) {
                        error = EAGAIN;
                        break;
                }
                error = ccp_blkcipher(qp, s, crp);
                break;
        case AUTHENC:
                if (s->pending != 0) {
                        error = EAGAIN;
                        break;
                }
                error = ccp_authenc(qp, s, crp);
                break;
        case GCM:
                if (s->pending != 0) {
                        error = EAGAIN;
                        break;
                }
                error = ccp_gcm(qp, s, crp);
                break;
        }

        if (error == 0)
                s->pending++;

out:
        if (qpheld) {
                if (error != 0) {
                        /*
                         * Squash EAGAIN so callers don't uselessly and
                         * expensively retry if the ring was full.
                         */
                        if (error == EAGAIN)
                                error = ENOMEM;
                        ccp_queue_abort(qp);
                } else
                        ccp_queue_release(qp);
        }

        if (error != 0) {
                DPRINTF(dev, "%s: early error:%d\n", __func__, error);
                crp->crp_etype = error;
                crypto_done(crp);
        }
        return (0);
}

static device_method_t ccp_methods[] = {
        DEVMETHOD(device_probe, ccp_probe),
        DEVMETHOD(device_attach, ccp_attach),
        DEVMETHOD(device_detach, ccp_detach),

        DEVMETHOD(cryptodev_probesession, ccp_probesession),
        DEVMETHOD(cryptodev_newsession, ccp_newsession),
        DEVMETHOD(cryptodev_freesession, ccp_freesession),
        DEVMETHOD(cryptodev_process, ccp_process),

        DEVMETHOD_END
};

static driver_t ccp_driver = {
        "ccp",
        ccp_methods,
        sizeof(struct ccp_softc)
};

DRIVER_MODULE(ccp, pci, ccp_driver, NULL, NULL);
MODULE_VERSION(ccp, 1);
MODULE_DEPEND(ccp, crypto, 1, 1, 1);
MODULE_DEPEND(ccp, random_device, 1, 1, 1);
#if 0 /* There are enough known issues that we shouldn't load automatically */
MODULE_PNP_INFO("W32:vendor/device", pci, ccp, ccp_ids,
    nitems(ccp_ids));
#endif

static int
ccp_queue_reserve_space(struct ccp_queue *qp, unsigned n, int mflags)
{
        struct ccp_softc *sc;

        mtx_assert(&qp->cq_lock, MA_OWNED);
        sc = qp->cq_softc;

        if (n < 1 || n >= (1 << sc->ring_size_order))
                return (EINVAL);

        while (true) {
                if (ccp_queue_get_ring_space(qp) >= n)
                        return (0);
                if ((mflags & M_WAITOK) == 0)
                        return (EAGAIN);
                qp->cq_waiting = true;
                msleep(&qp->cq_tail, &qp->cq_lock, 0, "ccpqfull", 0);
        }
}

int
ccp_queue_acquire_reserve(struct ccp_queue *qp, unsigned n, int mflags)
{
        int error;

        mtx_lock(&qp->cq_lock);
        qp->cq_acq_tail = qp->cq_tail;
        error = ccp_queue_reserve_space(qp, n, mflags);
        if (error != 0)
                mtx_unlock(&qp->cq_lock);
        return (error);
}

void
ccp_queue_release(struct ccp_queue *qp)
{

        mtx_assert(&qp->cq_lock, MA_OWNED);
        if (qp->cq_tail != qp->cq_acq_tail) {
                wmb();
                ccp_queue_write_tail(qp);
        }
        mtx_unlock(&qp->cq_lock);
}

void
ccp_queue_abort(struct ccp_queue *qp)
{
        unsigned i;

        mtx_assert(&qp->cq_lock, MA_OWNED);

        /* Wipe out any descriptors associated with this aborted txn. */
        for (i = qp->cq_acq_tail; i != qp->cq_tail;
            i = (i + 1) % (1 << qp->cq_softc->ring_size_order)) {
                memset(&qp->desc_ring[i], 0, sizeof(qp->desc_ring[i]));
        }
        qp->cq_tail = qp->cq_acq_tail;

        mtx_unlock(&qp->cq_lock);
}
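
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): the
 * intended lifecycle of the three queue helpers above.  A submitter reserves
 * ring space, stages descriptors at cq_tail, then either commits them
 * (ccp_queue_release() publishes the new tail to hardware) or backs them out
 * (ccp_queue_abort() wipes the staged descriptors and rewinds cq_tail).
 */
#if 0
static int
ccp_queue_usage_example(struct ccp_queue *qp, unsigned ndesc)
{
        int error;

        /* On success the queue lock is held; M_NOWAIT fails with EAGAIN. */
        error = ccp_queue_acquire_reserve(qp, ndesc, M_NOWAIT);
        if (error != 0)
                return (error);

        /* ... stage 'ndesc' descriptors at qp->cq_tail here ... */
        error = 0;              /* result of descriptor construction */

        if (error != 0)
                ccp_queue_abort(qp);    /* rewind tail and drop the lock */
        else
                ccp_queue_release(qp);  /* ring doorbell and drop the lock */
        return (error);
}
#endif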

#ifdef DDB
#define _db_show_lock(lo) LOCK_CLASS(lo)->lc_ddb_show(lo)
#define db_show_lock(lk) _db_show_lock(&(lk)->lock_object)
static void
db_show_ccp_sc(struct ccp_softc *sc)
{

        db_printf("ccp softc at %p\n", sc);
        db_printf(" cid: %d\n", (int)sc->cid);

        db_printf(" lock: ");
        db_show_lock(&sc->lock);

        db_printf(" detaching: %d\n", (int)sc->detaching);
        db_printf(" ring_size_order: %u\n", sc->ring_size_order);

        db_printf(" hw_version: %d\n", (int)sc->hw_version);
        db_printf(" hw_features: %b\n", (int)sc->hw_features,
            "\20\24ELFC\23TRNG\22Zip_Compress\16Zip_Decompress\13ECC\12RSA"
            "\11SHA\0103DES\07AES");

        db_printf(" hw status:\n");
        db_ccp_show_hw(sc);
}

static void
db_show_ccp_qp(struct ccp_queue *qp)
{

        db_printf(" lock: ");
        db_show_lock(&qp->cq_lock);

        db_printf(" cq_qindex: %u\n", qp->cq_qindex);
        db_printf(" cq_softc: %p\n", qp->cq_softc);

        db_printf(" head: %u\n", qp->cq_head);
        db_printf(" tail: %u\n", qp->cq_tail);
        db_printf(" acq_tail: %u\n", qp->cq_acq_tail);
        db_printf(" desc_ring: %p\n", qp->desc_ring);
        db_printf(" completions_ring: %p\n", qp->completions_ring);
        db_printf(" descriptors (phys): 0x%jx\n",
            (uintmax_t)qp->desc_ring_bus_addr);

        db_printf(" hw status:\n");
        db_ccp_show_queue_hw(qp);
}

DB_SHOW_COMMAND(ccp, db_show_ccp)
{
        struct ccp_softc *sc;
        unsigned unit, qindex;

        if (!have_addr)
                goto usage;

        unit = (unsigned)addr;

        sc = devclass_get_softc(devclass_find("ccp"), unit);
        if (sc == NULL) {
                db_printf("No such device ccp%u\n", unit);
                goto usage;
        }

        if (count == -1) {
                db_show_ccp_sc(sc);
                return;
        }

        qindex = (unsigned)count;
        if (qindex >= nitems(sc->queues)) {
                db_printf("No such queue %u\n", qindex);
                goto usage;
        }
        db_show_ccp_qp(&sc->queues[qindex]);
        return;

usage:
        db_printf("usage: show ccp <unit>[,<qindex>]\n");
        return;
}
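
/*
 * Example (illustrative): at the DDB prompt, "show ccp 0" dumps the softc
 * for ccp0, while "show ccp 0,1" dumps queue index 1 of that device.
 */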
#endif /* DDB */