GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/kern/kern_conf.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1999-2002 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/poll.h>
#include <sys/sx.h>
#include <sys/ctype.h>
#include <sys/stdarg.h>
#include <sys/ucred.h>
#include <sys/taskqueue.h>

#include <fs/devfs/devfs_int.h>
#include <vm/vm.h>

static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");

struct mtx devmtx;
static void destroy_devl(struct cdev *dev);
static int destroy_dev_sched_cbl(struct cdev *dev,
    void (*cb)(void *), void *arg);
static void destroy_dev_tq(void *ctx, int pending);
static void destroy_dev_tq_giant(void *ctx, int pending);
static int make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw,
    int unit, struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap);

static struct cdev_priv_list cdevp_free_list =
    TAILQ_HEAD_INITIALIZER(cdevp_free_list);
static SLIST_HEAD(free_cdevsw, cdevsw) cdevsw_gt_post_list =
    SLIST_HEAD_INITIALIZER(cdevsw_gt_post_list);

void
dev_lock(void)
{

	mtx_lock(&devmtx);
}

/*
 * Free all the memory collected while the cdev mutex was
 * locked. Since devmtx is after the system map mutex, free() cannot
 * be called immediately and is postponed until cdev mutex can be
 * dropped.
 */
static void
dev_unlock_and_free(void)
{
	struct cdev_priv_list cdp_free;
	struct free_cdevsw csw_free;
	struct cdev_priv *cdp;
	struct cdevsw *csw;

	dev_lock_assert_locked();

	/*
	 * Make the local copy of the list heads while the dev_mtx is
	 * held. Free it later.
	 */
	TAILQ_INIT(&cdp_free);
	TAILQ_CONCAT(&cdp_free, &cdevp_free_list, cdp_list);
	csw_free = cdevsw_gt_post_list;
	SLIST_INIT(&cdevsw_gt_post_list);

	mtx_unlock(&devmtx);

	while ((cdp = TAILQ_FIRST(&cdp_free)) != NULL) {
		TAILQ_REMOVE(&cdp_free, cdp, cdp_list);
		devfs_free(&cdp->cdp_c);
	}
	while ((csw = SLIST_FIRST(&csw_free)) != NULL) {
		SLIST_REMOVE_HEAD(&csw_free, d_postfree_list);
		free(csw, M_DEVT);
	}
}

static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	cdp = cdev2priv(cdev);
	KASSERT((cdp->cdp_flags & CDP_UNREF_DTR) == 0,
	    ("destroy_dev() was not called after delist_dev(%p)", cdev));
	KASSERT((cdp->cdp_flags & CDP_ON_ACTIVE_LIST) == 0,
	    ("%s: cdp %p (%s) on active list", __func__, cdp, cdev->si_name));
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}

static void
cdevsw_free_devlocked(struct cdevsw *csw)
{

	dev_lock_assert_locked();
	SLIST_INSERT_HEAD(&cdevsw_gt_post_list, csw, d_postfree_list);
}

void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}

void
dev_ref(struct cdev *dev)
{

	dev_lock_assert_unlocked();
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}

void
dev_refl(struct cdev *dev)
{

	dev_lock_assert_locked();
	dev->si_refcount++;
}

void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	dev_lock_assert_unlocked();
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}

struct cdevsw *
dev_refthread(struct cdev *dev, int *ref)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	dev_lock_assert_unlocked();
	if ((dev->si_flags & SI_ETERNAL) != 0) {
		*ref = 0;
		return (dev->si_devsw);
	}
	cdp = cdev2priv(dev);
	mtx_lock(&cdp->cdp_threadlock);
	csw = dev->si_devsw;
	if (csw != NULL) {
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			atomic_add_long(&dev->si_threadcount, 1);
		else
			csw = NULL;
	}
	mtx_unlock(&cdp->cdp_threadlock);
	if (csw != NULL)
		*ref = 1;
	return (csw);
}

struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;
	struct cdev *dev;

	dev_lock_assert_unlocked();
	if ((vp->v_vflag & VV_ETERNALDEV) != 0) {
		dev = vp->v_rdev;
		if (dev == NULL)
			return (NULL);
		KASSERT((dev->si_flags & SI_ETERNAL) != 0,
		    ("Not eternal cdev"));
		*ref = 0;
		csw = dev->si_devsw;
		KASSERT(csw != NULL, ("Eternal cdev is destroyed"));
		*devp = dev;
		return (csw);
	}

	csw = NULL;
	VI_LOCK(vp);
	dev = vp->v_rdev;
	if (dev == NULL) {
		VI_UNLOCK(vp);
		return (NULL);
	}
	cdp = cdev2priv(dev);
	mtx_lock(&cdp->cdp_threadlock);
	if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
		csw = dev->si_devsw;
		if (csw != NULL)
			atomic_add_long(&dev->si_threadcount, 1);
	}
	mtx_unlock(&cdp->cdp_threadlock);
	VI_UNLOCK(vp);
	if (csw != NULL) {
		*devp = dev;
		*ref = 1;
	}
	return (csw);
}

void
dev_relthread(struct cdev *dev, int ref)
{

	dev_lock_assert_unlocked();
	if (!ref)
		return;
	KASSERT(dev->si_threadcount > 0,
	    ("%s threadcount is wrong", dev->si_name));
	atomic_subtract_rel_long(&dev->si_threadcount, 1);
}

int
nullop(void)
{

	return (0);
}

int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}

static int
enxio(void)
{
	return (ENXIO);
}

static int
enodev(void)
{
	return (ENODEV);
}

/* Define a dead_cdevsw for use when devices leave unexpectedly. */

#define dead_open (d_open_t *)enxio
#define dead_close (d_close_t *)enxio
#define dead_read (d_read_t *)enxio
#define dead_write (d_write_t *)enxio
#define dead_ioctl (d_ioctl_t *)enxio
#define dead_poll (d_poll_t *)enodev
#define dead_mmap (d_mmap_t *)enodev

static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}

#define dead_kqfilter (d_kqfilter_t *)enxio
#define dead_mmap_single (d_mmap_single_t *)enodev

static struct cdevsw dead_cdevsw = {
	.d_version = D_VERSION,
	.d_open = dead_open,
	.d_close = dead_close,
	.d_read = dead_read,
	.d_write = dead_write,
	.d_ioctl = dead_ioctl,
	.d_poll = dead_poll,
	.d_mmap = dead_mmap,
	.d_strategy = dead_strategy,
	.d_name = "dead",
	.d_kqfilter = dead_kqfilter,
	.d_mmap_single = dead_mmap_single
};

/* Default methods if driver does not specify method */

#define null_open (d_open_t *)nullop
#define null_close (d_close_t *)nullop
#define no_read (d_read_t *)enodev
#define no_write (d_write_t *)enodev
#define no_ioctl (d_ioctl_t *)enodev
#define no_mmap (d_mmap_t *)enodev
#define no_kqfilter (d_kqfilter_t *)enodev
#define no_mmap_single (d_mmap_single_t *)enodev

static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}

static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{

	return (poll_no_poll(events));
}

static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static void
giant_strategy(struct bio *bp)
{
	struct cdevsw *dsw;
	struct cdev *dev;
	int ref;

	dev = bp->bio_dev;
	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	mtx_lock(&Giant);
	dsw->d_gianttrick->d_strategy(bp);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
}

static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_poll(dev, events, td);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
    vm_memattr_t *memattr)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot,
	    memattr);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static int
giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *object, int nprot)
{
	struct cdevsw *dsw;
	int ref, retval;

	dsw = dev_refthread(dev, &ref);
	if (dsw == NULL)
		return (ENXIO);
	mtx_lock(&Giant);
	retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object,
	    nprot);
	mtx_unlock(&Giant);
	dev_relthread(dev, ref);
	return (retval);
}

static void
notify(struct cdev *dev, const char *ev, int flags)
{
	static const char prefix[] = "cdev=";
	char *data;
	int namelen, mflags;

	if (cold)
		return;
	mflags = (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK;
	namelen = strlen(dev->si_name);
	data = malloc(namelen + sizeof(prefix), M_TEMP, mflags);
	if (data == NULL)
		return;
	memcpy(data, prefix, sizeof(prefix) - 1);
	memcpy(data + sizeof(prefix) - 1, dev->si_name, namelen + 1);
	devctl_notify("DEVFS", "CDEV", ev, data);
	free(data, M_TEMP);
}

static void
notify_create(struct cdev *dev, int flags)
{

	notify(dev, "CREATE", flags);
}

static void
notify_destroy(struct cdev *dev)
{

	notify(dev, "DESTROY", MAKEDEV_WAITOK);
}

static struct cdev *
newdev(struct make_dev_args *args, struct cdev *si)
{
	struct cdev *si2;
	struct cdevsw *csw;

	dev_lock_assert_locked();
	csw = args->mda_devsw;
	si2 = NULL;
	if (csw->d_flags & D_NEEDMINOR) {
		/* We may want to return an existing device */
		LIST_FOREACH(si2, &csw->d_devs, si_list) {
			if (dev2unit(si2) == args->mda_unit) {
				dev_free_devlocked(si);
				si = si2;
				break;
			}
		}

		/*
		 * If we're returning an existing device, we should make sure
		 * it isn't already initialized. This would have been caught
		 * in consumers anyways, but it's good to catch such a case
		 * early. We still need to complete initialization of the
		 * device, and we'll use whatever make_dev_args were passed in
		 * to do so.
		 */
		KASSERT(si2 == NULL || (si2->si_flags & SI_NAMED) == 0,
		    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
		    args->mda_devsw->d_name, dev2unit(si2), devtoname(si2)));
	}
	si->si_drv0 = args->mda_unit;
	si->si_drv1 = args->mda_si_drv1;
	si->si_drv2 = args->mda_si_drv2;
	/* Only push to csw->d_devs if it's not a cloned device. */
	if (si2 == NULL) {
		si->si_devsw = csw;
		LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	} else {
		KASSERT(si->si_devsw == csw,
		    ("%s: inconsistent devsw between clone_create() and make_dev()",
		    __func__));
	}
	return (si);
}

static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		cdevsw_free_devlocked(gt);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}

static int
prep_cdevsw(struct cdevsw *devsw, int flags)
{
	struct cdevsw *dsw2;

	dev_lock_assert_locked();
	if (devsw->d_flags & D_INIT)
		return (0);
	if (devsw->d_flags & D_NEEDGIANT) {
		dev_unlock();
		dsw2 = malloc(sizeof *dsw2, M_DEVT,
		    (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK);
		dev_lock();
		if (dsw2 == NULL && !(devsw->d_flags & D_INIT))
			return (ENOMEM);
	} else
		dsw2 = NULL;
	if (devsw->d_flags & D_INIT) {
		if (dsw2 != NULL)
			cdevsw_free_devlocked(dsw2);
		return (0);
	}

	if (devsw->d_version != D_VERSION_04) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled. Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_mmap_single = dead_mmap_single;
		devsw->d_strategy = dead_strategy;
		devsw->d_kqfilter = dead_kqfilter;
	}

	if ((devsw->d_flags & D_NEEDGIANT) != 0) {
		if ((devsw->d_flags & D_GIANTOK) == 0) {
			printf(
			    "WARNING: Device \"%s\" is Giant locked and may be "
			    "deleted before FreeBSD 15.0.\n",
			    devsw->d_name == NULL ? "???" : devsw->d_name);
		}
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
			dsw2 = NULL;
		}
	}

#define FIXUP(member, noop, giant)				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open, null_open, giant_open);
	FIXUP(d_fdopen, NULL, giant_fdopen);
	FIXUP(d_close, null_close, giant_close);
	FIXUP(d_read, no_read, giant_read);
	FIXUP(d_write, no_write, giant_write);
	FIXUP(d_ioctl, no_ioctl, giant_ioctl);
	FIXUP(d_poll, no_poll, giant_poll);
	FIXUP(d_mmap, no_mmap, giant_mmap);
	FIXUP(d_strategy, no_strategy, giant_strategy);
	FIXUP(d_kqfilter, no_kqfilter, giant_kqfilter);
	FIXUP(d_mmap_single, no_mmap_single, giant_mmap_single);

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	if (dsw2 != NULL)
		cdevsw_free_devlocked(dsw2);
	return (0);
}

static int
prep_devname(struct cdev *dev, const char *fmt, va_list ap)
{
	int len;
	char *from, *q, *s, *to;

	dev_lock_assert_locked();

	len = vsnrprintf(dev->si_name, sizeof(dev->si_name), 32, fmt, ap);
	if (len > sizeof(dev->si_name) - 1)
		return (ENAMETOOLONG);

	/* Strip leading slashes. */
	for (from = dev->si_name; *from == '/'; from++)
		;

	for (to = dev->si_name; *from != '\0'; from++, to++) {
		/*
		 * Spaces and double quotation marks cause
		 * problems for the devctl(4) protocol.
		 * Reject names containing those characters.
		 */
		if (isspace(*from) || *from == '"')
			return (EINVAL);
		/* Treat multiple sequential slashes as single. */
		while (from[0] == '/' && from[1] == '/')
			from++;
		/* Trailing slash is considered invalid. */
		if (from[0] == '/' && from[1] == '\0')
			return (EINVAL);
		*to = *from;
	}
	*to = '\0';

	if (dev->si_name[0] == '\0')
		return (EINVAL);

	/* Disallow "." and ".." components. */
	for (s = dev->si_name;;) {
		for (q = s; *q != '/' && *q != '\0'; q++)
			;
		if (q - s == 1 && s[0] == '.')
			return (EINVAL);
		if (q - s == 2 && s[0] == '.' && s[1] == '.')
			return (EINVAL);
		if (*q != '/')
			break;
		s = q + 1;
	}

	if (devfs_dev_exists(dev->si_name) != 0)
		return (EEXIST);

	return (0);
}

void
make_dev_args_init_impl(struct make_dev_args *args, size_t sz)
{

	bzero(args, sz);
	args->mda_size = sz;
}

static int
make_dev_sv(struct make_dev_args *args1, struct cdev **dres,
    const char *fmt, va_list ap)
{
	struct cdev *dev, *dev_new;
	struct make_dev_args args;
	int res;

	bzero(&args, sizeof(args));
	if (sizeof(args) < args1->mda_size)
		return (EINVAL);
	bcopy(args1, &args, args1->mda_size);
	KASSERT((args.mda_flags & MAKEDEV_WAITOK) == 0 ||
	    (args.mda_flags & MAKEDEV_NOWAIT) == 0,
	    ("make_dev_sv: both WAITOK and NOWAIT specified"));
	dev_new = devfs_alloc(args.mda_flags);
	if (dev_new == NULL)
		return (ENOMEM);
	dev_lock();
	res = prep_cdevsw(args.mda_devsw, args.mda_flags);
	if (res != 0) {
		dev_unlock();
		devfs_free(dev_new);
		return (res);
	}
	dev = newdev(&args, dev_new);
	if ((dev->si_flags & SI_NAMED) == 0) {
		res = prep_devname(dev, fmt, ap);
		if (res != 0) {
			if ((args.mda_flags & MAKEDEV_CHECKNAME) == 0) {
				panic(
				    "make_dev_sv: bad si_name (error=%d, si_name=%s)",
				    res, dev->si_name);
			}
			if (dev == dev_new) {
				LIST_REMOVE(dev, si_list);
				dev_unlock();
				devfs_free(dev);
			} else
				dev_unlock();
			return (res);
		}
	}
	if ((args.mda_flags & MAKEDEV_REF) != 0)
		dev_refl(dev);
	if ((args.mda_flags & MAKEDEV_ETERNAL) != 0)
		dev->si_flags |= SI_ETERNAL;
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    args.mda_devsw->d_name, dev2unit(dev), devtoname(dev)));
	dev->si_flags |= SI_NAMED;
	if (args.mda_cr != NULL)
		dev->si_cred = crhold(args.mda_cr);
	dev->si_uid = args.mda_uid;
	dev->si_gid = args.mda_gid;
	dev->si_mode = args.mda_mode;

	devfs_create(dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock_and_free();

	notify_create(dev, args.mda_flags);

	*dres = dev;
	return (0);
}

int
make_dev_s(struct make_dev_args *args, struct cdev **dres,
    const char *fmt, ...)
{
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_sv(args, dres, fmt, ap);
	va_end(ap);
	return (res);
}
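
/*
 * Illustrative sketch (not part of the build): a typical make_dev_s()
 * consumer.  The example_* names and softc are hypothetical; only the
 * make_dev_args/make_dev_s API is taken from the code above.
 */
#if 0
static struct cdev *
example_create(struct cdevsw *csw, void *softc, int unit)
{
	struct make_dev_args args;
	struct cdev *dev;
	int error;

	make_dev_args_init(&args);	/* zeroes args and records mda_size */
	args.mda_devsw = csw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_WHEEL;
	args.mda_mode = 0600;
	args.mda_unit = unit;
	args.mda_si_drv1 = softc;	/* later available as dev->si_drv1 */
	args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	error = make_dev_s(&args, &dev, "example%d", unit);
	if (error != 0)			/* e.g. EEXIST or ENAMETOOLONG */
		return (NULL);
	return (dev);
}
#endif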

static int
make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
    va_list ap)
{
	struct make_dev_args args;

	make_dev_args_init(&args);
	args.mda_flags = flags;
	args.mda_devsw = devsw;
	args.mda_cr = cr;
	args.mda_uid = uid;
	args.mda_gid = gid;
	args.mda_mode = mode;
	args.mda_unit = unit;
	return (make_dev_sv(&args, dres, fmt, ap));
}

struct cdev *
make_dev(struct cdevsw *devsw, int unit, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res __unused;

	va_start(ap, fmt);
	res = make_dev_credv(0, &dev, devsw, unit, NULL, uid, gid, mode, fmt,
	    ap);
	va_end(ap);
	KASSERT(res == 0 && dev != NULL,
	    ("make_dev: failed make_dev_credv (error=%d)", res));
	return (dev);
}

struct cdev *
make_dev_cred(struct cdevsw *devsw, int unit, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res __unused;

	va_start(ap, fmt);
	res = make_dev_credv(0, &dev, devsw, unit, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	KASSERT(res == 0 && dev != NULL,
	    ("make_dev_cred: failed make_dev_credv (error=%d)", res));
	return (dev);
}

struct cdev *
make_dev_credf(int flags, struct cdevsw *devsw, int unit, struct ucred *cr,
    uid_t uid, gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(flags, &dev, devsw, unit, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) ||
	    ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0,
	    ("make_dev_credf: failed make_dev_credv (error=%d)", res));
	return (res == 0 ? dev : NULL);
}

int
make_dev_p(int flags, struct cdev **cdev, struct cdevsw *devsw,
    struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt, ...)
{
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_credv(flags, cdev, devsw, 0, cr, uid, gid, mode,
	    fmt, ap);
	va_end(ap);

	KASSERT(((flags & MAKEDEV_NOWAIT) != 0 && res == ENOMEM) ||
	    ((flags & MAKEDEV_CHECKNAME) != 0 && res != ENOMEM) || res == 0,
	    ("make_dev_p: failed make_dev_credv (error=%d)", res));
	return (res);
}

static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}

void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}

static int
make_dev_alias_v(int flags, struct cdev **cdev, struct cdev *pdev,
    const char *fmt, va_list ap)
{
	struct cdev *dev;
	int error;

	KASSERT(pdev != NULL, ("make_dev_alias_v: pdev is NULL"));
	KASSERT((flags & MAKEDEV_WAITOK) == 0 || (flags & MAKEDEV_NOWAIT) == 0,
	    ("make_dev_alias_v: both WAITOK and NOWAIT specified"));
	KASSERT((flags & ~(MAKEDEV_WAITOK | MAKEDEV_NOWAIT |
	    MAKEDEV_CHECKNAME)) == 0,
	    ("make_dev_alias_v: invalid flags specified (flags=%02x)", flags));

	dev = devfs_alloc(flags);
	if (dev == NULL)
		return (ENOMEM);
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	error = prep_devname(dev, fmt, ap);
	if (error != 0) {
		if ((flags & MAKEDEV_CHECKNAME) == 0) {
			panic("make_dev_alias_v: bad si_name "
			    "(error=%d, si_name=%s)", error, dev->si_name);
		}
		dev_unlock();
		devfs_free(dev);
		return (error);
	}
	dev->si_flags |= SI_NAMED;
	devfs_create(dev);
	dev_dependsl(pdev, dev);
	clean_unrhdrl(devfs_inos);
	dev_unlock();

	notify_create(dev, flags);
	*cdev = dev;

	return (0);
}

struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int res __unused;

	va_start(ap, fmt);
	res = make_dev_alias_v(MAKEDEV_WAITOK, &dev, pdev, fmt, ap);
	va_end(ap);

	KASSERT(res == 0 && dev != NULL,
	    ("make_dev_alias: failed make_dev_alias_v (error=%d)", res));
	return (dev);
}

int
make_dev_alias_p(int flags, struct cdev **cdev, struct cdev *pdev,
    const char *fmt, ...)
{
	va_list ap;
	int res;

	va_start(ap, fmt);
	res = make_dev_alias_v(flags, cdev, pdev, fmt, ap);
	va_end(ap);
	return (res);
}

int
make_dev_physpath_alias(int flags, struct cdev **cdev, struct cdev *pdev,
    struct cdev *old_alias, const char *physpath)
{
	char *devfspath;
	int physpath_len;
	int max_parentpath_len;
	int parentpath_len;
	int devfspathbuf_len;
	int mflags;
	int ret;

	*cdev = NULL;
	devfspath = NULL;
	physpath_len = strlen(physpath);
	ret = EINVAL;
	if (physpath_len == 0)
		goto out;

	if (strncmp("id1,", physpath, 4) == 0) {
		physpath += 4;
		physpath_len -= 4;
		if (physpath_len == 0)
			goto out;
	}

	max_parentpath_len = SPECNAMELEN - physpath_len - /*/*/1;
	parentpath_len = strlen(pdev->si_name);
	if (max_parentpath_len < parentpath_len) {
		if (bootverbose)
			printf("WARNING: Unable to alias %s "
			    "to %s/%s - path too long\n",
			    pdev->si_name, physpath, pdev->si_name);
		ret = ENAMETOOLONG;
		goto out;
	}

	mflags = (flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK;
	devfspathbuf_len = physpath_len + /*/*/1 + parentpath_len + /*NUL*/1;
	devfspath = malloc(devfspathbuf_len, M_DEVBUF, mflags);
	if (devfspath == NULL) {
		ret = ENOMEM;
		goto out;
	}

	sprintf(devfspath, "%s/%s", physpath, pdev->si_name);
	if (old_alias != NULL && strcmp(old_alias->si_name, devfspath) == 0) {
		/* Retain the existing alias. */
		*cdev = old_alias;
		old_alias = NULL;
		ret = 0;
	} else {
		ret = make_dev_alias_p(flags, cdev, pdev, "%s", devfspath);
	}
out:
	if (old_alias != NULL)
		destroy_dev(old_alias);
	if (devfspath != NULL)
		free(devfspath, M_DEVBUF);
	return (ret);
}

static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_privdata *p;
	struct cdev_priv *cdp;

	dev_lock_assert_locked();
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev)));
	KASSERT((dev->si_flags & SI_ETERNAL) == 0,
	    ("WARNING: Driver mistake: destroy_dev on eternal %d\n",
	    dev2unit(dev)));

	cdp = cdev2priv(dev);
	if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) {
		/*
		 * Avoid race with dev_rel(), e.g. from the populate
		 * loop. If CDP_UNREF_DTR flag is set, the reference
		 * to be dropped at the end of destroy_devl() was
		 * already taken by delist_dev_locked().
		 */
		dev_refl(dev);

		devfs_destroy(dev);
	}

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parent's list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	mtx_lock(&cdp->cdp_threadlock);
	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		csw->d_purge(dev);
		mtx_unlock(&cdp->cdp_threadlock);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
		mtx_lock(&cdp->cdp_threadlock);
		if (dev->si_threadcount)
			printf("Still %lu threads in %s\n",
			    dev->si_threadcount, devtoname(dev));
	}
	while (dev->si_threadcount != 0) {
		/* Use unique dummy wait ident */
		mtx_unlock(&cdp->cdp_threadlock);
		msleep(&csw, &devmtx, PRIBIO, "devdrn", hz / 10);
		mtx_lock(&cdp->cdp_threadlock);
	}

	mtx_unlock(&cdp->cdp_threadlock);
	dev_unlock();
	if ((cdp->cdp_flags & CDP_UNREF_DTR) == 0) {
		/* avoid out of order notify events */
		notify_destroy(dev);
	}
	mtx_lock(&cdevpriv_mtx);
	while ((p = LIST_FIRST(&cdp->cdp_fdpriv)) != NULL) {
		devfs_destroy_cdevpriv(p);
		mtx_lock(&cdevpriv_mtx);
	}
	mtx_unlock(&cdevpriv_mtx);
	dev_lock();

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs)) {
			fini_cdevsw(csw);
			wakeup(&csw->d_devs);
		}
	}
	dev->si_flags &= ~SI_ALIAS;
	cdp->cdp_flags &= ~CDP_UNREF_DTR;
	dev->si_refcount--;

	if (dev->si_refcount > 0)
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	else
		dev_free_devlocked(dev);
}

static void
delist_dev_locked(struct cdev *dev)
{
	struct cdev_priv *cdp;
	struct cdev *child;

	dev_lock_assert_locked();
	cdp = cdev2priv(dev);
	if ((cdp->cdp_flags & CDP_UNREF_DTR) != 0)
		return;
	cdp->cdp_flags |= CDP_UNREF_DTR;
	dev_refl(dev);
	devfs_destroy(dev);
	LIST_FOREACH(child, &dev->si_children, si_siblings)
		delist_dev_locked(child);
	dev_unlock();
	/* ensure the destroy event is queued in order */
	notify_destroy(dev);
	dev_lock();
}

/*
 * This function will delist a character device and its children from
 * the directory listing and create a destroy event without waiting
 * for all character device references to go away. At some later point
 * destroy_dev() must be called to complete the character device
 * destruction. After calling this function the character device name
 * can instantly be re-used.
 */
void
delist_dev(struct cdev *dev)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "delist_dev");
	dev_lock();
	delist_dev_locked(dev);
	dev_unlock();
}
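
/*
 * Illustrative sketch (not part of the build): delist_dev() frees up
 * the name immediately, while destroy_dev() completes teardown later.
 * The example_* names are hypothetical.
 */
#if 0
static void
example_replace(struct make_dev_args *args, struct cdev **devp, int unit)
{
	struct cdev *olddev;

	olddev = *devp;
	delist_dev(olddev);		/* name leaves devfs right away */
	(void)make_dev_s(args, devp, "example%d", unit); /* reuse name */
	destroy_dev(olddev);		/* finish destroying the old cdev */
}
#endif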

void
destroy_dev(struct cdev *dev)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "destroy_dev");
	dev_lock();
	destroy_devl(dev);
	dev_unlock_and_free();
}

const char *
devtoname(struct cdev *dev)
{

	return (dev->si_name);
}

void
dev_copyname(struct cdev *dev, char *path, size_t len)
{
	struct cdevsw *csw;
	int ref;

	csw = dev_refthread(dev, &ref);
	if (csw != NULL) {
		strlcpy(path, dev->si_name, len);
		dev_relthread(dev, ref);
	}
}

int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (strncmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}
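
/*
 * Illustrative sketch (not part of the build): dev_stdclone() splits a
 * name such as "example3" into stem and unit.  Per the code above it
 * returns 0 on no match (missing digits, leading zero, or unit above
 * 0xffffff), 1 on an exact "<stem><unit>" match, and 2 when a suffix
 * remains (*namep then points at it).
 */
#if 0
static int
example_unit(char *name)
{
	int unit;

	if (dev_stdclone(name, NULL, "example", &unit) != 1)
		return (-1);		/* not exactly "example<unit>" */
	return (unit);
}
#endif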

/*
 * Helper functions for cloning device drivers.
 *
 * The objective here is to make it unnecessary for the device drivers to
 * use rman or similar to manage their unit number space. Due to the way
 * we do "on-demand" devices, using rman or other "private" methods
 * will be very tricky to lock down properly once we lock down this file.
 *
 * Instead we give the drivers these routines which put the struct cdev *'s
 * that are to be managed on their own list, and gives the driver the ability
 * to ask for the first free unit number or a given specified unit number.
 *
 * In addition these routines support paired devices (pty, nmdm and similar)
 * by respecting a number of "flag" bits in the minor number.
 *
 */

struct clonedevs {
	LIST_HEAD(,cdev)	head;
};

void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}

int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up,
    struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	struct make_dev_args args;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));
	KASSERT(csw->d_flags & D_NEEDMINOR,
	    ("clone_create() on cdevsw without minor numbers"));

	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *     in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *     the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc(MAKEDEV_WAITOK);
	dev_lock();
	prep_cdevsw(csw, MAKEDEV_WAITOK);
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			*dp = dev;
			dev_unlock();
			devfs_free(ndev);
			return (0);
		}
		if (unit == -1 && u == low) {
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	make_dev_args_init(&args);
	args.mda_unit = unit | extra;
	args.mda_devsw = csw;
	dev = newdev(&args, ndev);
	if (dev->si_flags & SI_CLONELIST) {
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock_and_free();
	return (1);
}
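
/*
 * Illustrative sketch (not part of the build): a typical clone_create()
 * consumer is a dev_clone eventhandler.  The example_* names are
 * hypothetical; a return value of 1 from clone_create() means the unit
 * is new and the cdev must still be created with make_dev_credf().
 */
#if 0
static struct clonedevs *example_clones;
static struct cdevsw example_cdevsw;	/* hypothetical, D_NEEDMINOR set */

static void
example_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int u;

	if (*dev != NULL)
		return;			/* somebody else resolved it */
	if (dev_stdclone(name, NULL, "example", &u) != 1)
		return;			/* not one of our names */
	if (clone_create(&example_clones, &example_cdevsw, &u, dev, 0))
		*dev = make_dev_credf(MAKEDEV_REF, &example_cdevsw, u,
		    cred, UID_ROOT, GID_WHEEL, 0600, "example%d", u);
}
#endif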

/*
 * Kill everything still on the list. The driver should already have
 * disposed of any softc hung off the struct cdev *'s at this time.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev;
	struct cdev_priv *cp;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	while (!LIST_EMPTY(&cd->head)) {
		dev = LIST_FIRST(&cd->head);
		LIST_REMOVE(dev, si_clone);
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		dev->si_flags &= ~SI_CLONELIST;
		cp = cdev2priv(dev);
		if (!(cp->cdp_flags & CDP_SCHED_DTR)) {
			cp->cdp_flags |= CDP_SCHED_DTR;
			KASSERT(dev->si_flags & SI_NAMED,
			    ("Driver has goofed in cloning underways udev %jx unit %x",
			    (uintmax_t)dev2udev(dev), dev2unit(dev)));
			destroy_devl(dev);
		}
	}
	dev_unlock_and_free();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}

static TAILQ_HEAD(, cdev_priv) dev_ddtr =
    TAILQ_HEAD_INITIALIZER(dev_ddtr);
static TAILQ_HEAD(, cdev_priv) dev_ddtr_giant =
    TAILQ_HEAD_INITIALIZER(dev_ddtr_giant);
static struct task dev_dtr_task = TASK_INITIALIZER(0, destroy_dev_tq, &dev_ddtr);
static struct task dev_dtr_task_giant = TASK_INITIALIZER(0, destroy_dev_tq_giant,
    &dev_ddtr_giant);

static void
destroy_dev_tq(void *ctx, int pending)
{
	TAILQ_HEAD(, cdev_priv) *ddtr = ctx;
	struct cdev_priv *cp;
	struct cdev *dev;
	void (*cb)(void *);
	void *cb_arg;

	dev_lock();
	while (!TAILQ_EMPTY(ddtr)) {
		cp = TAILQ_FIRST(ddtr);
		dev = &cp->cdp_c;
		KASSERT(cp->cdp_flags & CDP_SCHED_DTR,
		    ("cdev %p in dev_destroy_tq without CDP_SCHED_DTR", cp));
		TAILQ_REMOVE(ddtr, cp, cdp_dtr_list);
		cb = cp->cdp_dtr_cb;
		cb_arg = cp->cdp_dtr_cb_arg;
		destroy_devl(dev);
		dev_unlock_and_free();
		dev_rel(dev);
		if (cb != NULL)
			cb(cb_arg);
		dev_lock();
	}
	dev_unlock();
}

static void
destroy_dev_tq_giant(void *ctx, int pending)
{
	mtx_lock(&Giant);
	destroy_dev_tq(ctx, pending);
	mtx_unlock(&Giant);
}

/*
 * devmtx shall be locked on entry. devmtx will be unlocked after
 * function return.
 */
static int
destroy_dev_sched_cbl(struct cdev *dev, void (*cb)(void *), void *arg)
{
	struct cdev_priv *cp;
	bool need_giant;

	dev_lock_assert_locked();
	cp = cdev2priv(dev);
	if (cp->cdp_flags & CDP_SCHED_DTR) {
		dev_unlock();
		return (0);
	}
	dev_refl(dev);
	cp->cdp_flags |= CDP_SCHED_DTR;
	cp->cdp_dtr_cb = cb;
	cp->cdp_dtr_cb_arg = arg;
	need_giant = (dev->si_devsw->d_flags & D_NEEDGIANT) != 0;
	if (need_giant)
		TAILQ_INSERT_TAIL(&dev_ddtr_giant, cp, cdp_dtr_list);
	else
		TAILQ_INSERT_TAIL(&dev_ddtr, cp, cdp_dtr_list);
	dev_unlock();
	if (need_giant)
		taskqueue_enqueue(taskqueue_thread, &dev_dtr_task_giant);
	else
		taskqueue_enqueue(taskqueue_thread, &dev_dtr_task);
	return (1);
}

int
destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg)
{

	dev_lock();
	return (destroy_dev_sched_cbl(dev, cb, arg));
}

int
destroy_dev_sched(struct cdev *dev)
{

	return (destroy_dev_sched_cb(dev, NULL, NULL));
}
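
/*
 * Illustrative sketch (not part of the build): destroy_dev_sched_cb()
 * suits contexts that may themselves hold a thread reference on the
 * cdev, where a synchronous destroy_dev() would wait on itself.  The
 * example_* names are hypothetical.
 */
#if 0
static void
example_dtr_cb(void *arg)
{
	free(arg, M_DEVBUF);		/* softc released after teardown */
}

static void
example_detach(struct cdev *dev, void *softc)
{
	(void)destroy_dev_sched_cb(dev, example_dtr_cb, softc);
}
#endif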

void
destroy_dev_drain(struct cdevsw *csw)
{

	dev_lock();
	while (!LIST_EMPTY(&csw->d_devs)) {
		msleep(&csw->d_devs, &devmtx, PRIBIO, "devscd", hz/10);
	}
	dev_unlock();
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(cdev, db_show_cdev)
{
	struct cdev_priv *cdp;
	struct cdev *dev;
	u_int flags;
	char buf[512];

	if (!have_addr) {
		TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {
			dev = &cdp->cdp_c;
			db_printf("%s %p\n", dev->si_name, dev);
			if (db_pager_quit)
				break;
		}
		return;
	}

	dev = (struct cdev *)addr;
	cdp = cdev2priv(dev);
	db_printf("dev %s ref %d use %ld thr %ld inuse %u fdpriv %p\n",
	    dev->si_name, dev->si_refcount, dev->si_usecount,
	    dev->si_threadcount, cdp->cdp_inuse, cdp->cdp_fdpriv.lh_first);
	db_printf("devsw %p si_drv0 %d si_drv1 %p si_drv2 %p\n",
	    dev->si_devsw, dev->si_drv0, dev->si_drv1, dev->si_drv2);
	flags = dev->si_flags;
#define SI_FLAG(flag) do {						\
	if (flags & (flag)) {						\
		if (buf[0] != '\0')					\
			strlcat(buf, ", ", sizeof(buf));		\
		strlcat(buf, (#flag) + 3, sizeof(buf));			\
		flags &= ~(flag);					\
	}								\
} while (0)
	buf[0] = '\0';
	SI_FLAG(SI_ETERNAL);
	SI_FLAG(SI_ALIAS);
	SI_FLAG(SI_NAMED);
	SI_FLAG(SI_CHILD);
	SI_FLAG(SI_DUMPDEV);
	SI_FLAG(SI_CLONELIST);
	db_printf("si_flags %s\n", buf);

	flags = cdp->cdp_flags;
#define CDP_FLAG(flag) do {						\
	if (flags & (flag)) {						\
		if (buf[0] != '\0')					\
			strlcat(buf, ", ", sizeof(buf));		\
		strlcat(buf, (#flag) + 4, sizeof(buf));			\
		flags &= ~(flag);					\
	}								\
} while (0)
	buf[0] = '\0';
	CDP_FLAG(CDP_ACTIVE);
	CDP_FLAG(CDP_SCHED_DTR);
	db_printf("cdp_flags %s\n", buf);
}
#endif