GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cam/scsi/scsi_pass.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/devicestat.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/sdt.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_pass.h>

#define PERIPH_NAME "pass"

typedef enum {
	PASS_FLAG_OPEN = 0x01,
	PASS_FLAG_LOCKED = 0x02,
	PASS_FLAG_INVALID = 0x04,
	PASS_FLAG_INITIAL_PHYSPATH = 0x08,
	PASS_FLAG_ZONE_INPROG = 0x10,
	PASS_FLAG_ZONE_VALID = 0x20,
	PASS_FLAG_UNMAPPED_CAPABLE = 0x40,
	PASS_FLAG_ABANDONED_REF_SET = 0x80
} pass_flags;

typedef enum {
	PASS_STATE_NORMAL
} pass_state;

typedef enum {
	PASS_CCB_BUFFER_IO,
	PASS_CCB_QUEUED_IO
} pass_ccb_types;

#define ccb_type ppriv_field0
#define ccb_ioreq ppriv_ptr1

/*
 * The maximum number of memory segments we preallocate.
 */
#define PASS_MAX_SEGS 16

typedef enum {
	PASS_IO_NONE = 0x00,
	PASS_IO_USER_SEG_MALLOC = 0x01,
	PASS_IO_KERN_SEG_MALLOC = 0x02,
	PASS_IO_ABANDONED = 0x04
} pass_io_flags;

struct pass_io_req {
	union ccb ccb;
	union ccb *alloced_ccb;
	union ccb *user_ccb_ptr;
	camq_entry user_periph_links;
	ccb_ppriv_area user_periph_priv;
	struct cam_periph_map_info mapinfo;
	pass_io_flags flags;
	ccb_flags data_flags;
	int num_user_segs;
	bus_dma_segment_t user_segs[PASS_MAX_SEGS];
	int num_kern_segs;
	bus_dma_segment_t kern_segs[PASS_MAX_SEGS];
	bus_dma_segment_t *user_segptr;
	bus_dma_segment_t *kern_segptr;
	int num_bufs;
	uint32_t dirs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint8_t *user_bufs[CAM_PERIPH_MAXMAPS];
	uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS];
	struct bintime start_time;
	TAILQ_ENTRY(pass_io_req) links;
};

struct pass_softc {
	pass_state state;
	pass_flags flags;
	uint8_t pd_type;
	int open_count;
	u_int maxio;
	struct devstat *device_stats;
	struct cdev *dev;
	struct cdev *alias_dev;
	struct task add_physpath_task;
	struct task shutdown_kqueue_task;
	struct selinfo read_select;
	TAILQ_HEAD(, pass_io_req) incoming_queue;
	TAILQ_HEAD(, pass_io_req) active_queue;
	TAILQ_HEAD(, pass_io_req) abandoned_queue;
	TAILQ_HEAD(, pass_io_req) done_queue;
	struct cam_periph *periph;
	char zone_name[12];
	char io_zone_name[12];
	uma_zone_t pass_zone;
	uma_zone_t pass_io_zone;
	size_t io_zone_size;
};

static d_open_t passopen;
static d_close_t passclose;
static d_ioctl_t passioctl;
static d_ioctl_t passdoioctl;
static d_poll_t passpoll;
static d_kqfilter_t passkqfilter;
static void passreadfiltdetach(struct knote *kn);
static int passreadfilt(struct knote *kn, long hint);

static periph_init_t passinit;
static periph_ctor_t passregister;
static periph_oninv_t passoninvalidate;
static periph_dtor_t passcleanup;
static periph_start_t passstart;
static void pass_shutdown_kqueue(void *context, int pending);
static void pass_add_physpath(void *context, int pending);
static void passasync(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
static void passdone(struct cam_periph *periph,
    union ccb *done_ccb);
static int passcreatezone(struct cam_periph *periph);
static void passiocleanup(struct pass_softc *softc,
    struct pass_io_req *io_req);
static int passcopysglist(struct cam_periph *periph,
    struct pass_io_req *io_req,
    ccb_flags direction);
static int passmemsetup(struct cam_periph *periph,
    struct pass_io_req *io_req);
static int passmemdone(struct cam_periph *periph,
    struct pass_io_req *io_req);
static int passerror(union ccb *ccb, uint32_t cam_flags,
    uint32_t sense_flags);
static int passsendccb(struct cam_periph *periph, union ccb *ccb,
    union ccb *inccb);
static void passflags(union ccb *ccb, uint32_t *cam_flags,
    uint32_t *sense_flags);

static struct periph_driver passdriver =
{
	passinit, PERIPH_NAME,
	TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(pass, passdriver);

static struct cdevsw pass_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_TRACKCLOSE,
	.d_open = passopen,
	.d_close = passclose,
	.d_ioctl = passioctl,
	.d_poll = passpoll,
	.d_kqfilter = passkqfilter,
	.d_name = PERIPH_NAME,
};

static const struct filterops passread_filtops = {
	.f_isfd = 1,
	.f_detach = passreadfiltdetach,
	.f_event = passreadfilt
};

static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");

static void
passinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("pass: Failed to attach master async callback "
		    "due to status 0x%x!\n", status);
	}

}

static void
passrejectios(struct cam_periph *periph)
{
	struct pass_io_req *io_req, *io_req2;
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * The user can no longer get status for I/O on the done queue, so
	 * clean up all outstanding I/O on the done queue.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->done_queue, io_req, links);
		passiocleanup(softc, io_req);
		uma_zfree(softc->pass_zone, io_req);
	}

	/*
	 * The underlying device is gone, so we can't issue these I/Os.
	 * The devfs node has been shut down, so we can't return status to
	 * the user.  Free any I/O left on the incoming queue.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
		passiocleanup(softc, io_req);
		uma_zfree(softc->pass_zone, io_req);
	}

	/*
	 * Normally we would put I/Os on the abandoned queue and acquire a
	 * reference when we saw the final close.  But, the device went
	 * away and devfs may have moved everything off to deadfs by the
	 * time the I/O done callback is called; as a result, we won't see
	 * any more closes.  So, if we have any active I/Os, we need to put
	 * them on the abandoned queue.  When the abandoned queue is empty,
	 * we'll release the remaining reference (see below) to the peripheral.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->active_queue, io_req, links);
		io_req->flags |= PASS_IO_ABANDONED;
		TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
	}

	/*
	 * If we put any I/O on the abandoned queue, acquire a reference.
	 */
	if ((!TAILQ_EMPTY(&softc->abandoned_queue))
	 && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
		cam_periph_doacquire(periph);
		softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
	}
}

static void
passdevgonecb(void *arg)
{
	struct cam_periph *periph;
	struct mtx *mtx;
	struct pass_softc *softc;
	int i;

	periph = (struct cam_periph *)arg;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	softc = (struct pass_softc *)periph->softc;
	KASSERT(softc->open_count >= 0, ("Negative open count %d",
	    softc->open_count));

	/*
	 * When we get this callback, we will get no more close calls from
	 * devfs.  So if we have any dangling opens, we need to release the
	 * reference held for that particular context.
	 */
	for (i = 0; i < softc->open_count; i++)
		cam_periph_release_locked(periph);

	softc->open_count = 0;

	/*
	 * Release the reference held for the device node, it is gone now.
	 * Accordingly, inform all queued I/Os of their fate.
	 */
	cam_periph_release_locked(periph);
	passrejectios(periph);

	/*
	 * We reference the SIM lock directly here, instead of using
	 * cam_periph_unlock().  The reason is that the final call to
	 * cam_periph_release_locked() above could result in the periph
	 * getting freed.  If that is the case, dereferencing the periph
	 * with a cam_periph_unlock() call would cause a page fault.
	 */
	mtx_unlock(mtx);

	/*
	 * We have to remove our kqueue context from a thread because it
	 * may sleep.  It would be nice if we could get a callback from
	 * kqueue when it is done cleaning up resources.
	 */
	taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
}

static void
passoninvalidate(struct cam_periph *periph)
{
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, passasync, periph, periph->path);

	softc->flags |= PASS_FLAG_INVALID;

	/*
	 * Tell devfs this device has gone away, and ask for a callback
	 * when it has cleaned up its state.
	 */
	destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
}

static void
passcleanup(struct cam_periph *periph)
{
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(TAILQ_EMPTY(&softc->active_queue),
	    ("%s called when there are commands on the active queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
	    ("%s called when there are commands on the abandoned queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
	    ("%s called when there are commands on the incoming queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->done_queue),
	    ("%s called when there are commands on the done queue!\n",
	    __func__));

	devstat_remove_entry(softc->device_stats);

	cam_periph_unlock(periph);

	/*
	 * We call taskqueue_drain() for the physpath task to make sure it
	 * is complete.  We drop the lock because this can potentially
	 * sleep.  XXX KDM that is bad.  Need a way to get a callback when
	 * a taskqueue is drained.
	 *
	 * Note that we don't drain the kqueue shutdown task queue.  This
	 * is because we hold a reference on the periph for kqueue, and
	 * release that reference from the kqueue shutdown task queue.  So
	 * we cannot come into this routine unless we've released that
	 * reference.  Also, because that could be the last reference, we
	 * could be called from the cam_periph_release() call in
	 * pass_shutdown_kqueue().  In that case, the taskqueue_drain()
	 * would deadlock.  It would be preferable if we had a way to
	 * get a callback when a taskqueue is done.
	 */
	taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);

	/*
	 * It should be safe to destroy the zones from here, because all
	 * of the references to this peripheral have been freed, and all
	 * I/O has been terminated and freed.  We check the zones for NULL
	 * because they may not have been allocated yet if the device went
	 * away before any asynchronous I/O has been issued.
	 */
	if (softc->pass_zone != NULL)
		uma_zdestroy(softc->pass_zone);
	if (softc->pass_io_zone != NULL)
		uma_zdestroy(softc->pass_io_zone);

	cam_periph_lock(periph);

	free(softc, M_DEVBUF);
}

static void
pass_shutdown_kqueue(void *context, int pending)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = context;
	softc = periph->softc;

	knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
	knlist_destroy(&softc->read_select.si_note);

	/*
	 * Release the reference we held for kqueue.
	 */
	cam_periph_release(periph);
}

static void
pass_add_physpath(void *context, int pending)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	struct mtx *mtx;
	char *physpath;

	/*
	 * If we have one, create a devfs alias for our
	 * physical path.
	 */
	periph = context;
	softc = periph->softc;
	physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	if (periph->flags & CAM_PERIPH_INVALID)
		goto out;

	if (xpt_getattr(physpath, MAXPATHLEN,
	    "GEOM::physpath", periph->path) == 0
	 && strlen(physpath) != 0) {
		mtx_unlock(mtx);
		make_dev_physpath_alias(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME,
		    &softc->alias_dev, softc->dev,
		    softc->alias_dev, physpath);
		mtx_lock(mtx);
	}

out:
	/*
	 * Now that we've made our alias, we no longer have to have a
	 * reference to the device.
	 */
	if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
		softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;

	/*
	 * We always acquire a reference to the periph before queueing this
	 * task queue function, so it won't go away before we run.
	 */
	while (pending-- > 0)
		cam_periph_release_locked(periph);
	mtx_unlock(mtx);

	free(physpath, M_DEVBUF);
}

static void
passasync(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;

	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(passregister, passoninvalidate,
		    passcleanup, passstart, PERIPH_NAME,
		    CAM_PERIPH_BIO, path,
		    passasync, AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG) {
			const struct cam_status_entry *entry;

			entry = cam_fetch_status_entry(status);

			printf("passasync: Unable to attach new device "
			    "due to status %#x: %s\n", status, entry ?
			    entry->status_text : "Unknown");
		}

		break;
	}
	case AC_ADVINFO_CHANGED:
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct pass_softc *softc;

			softc = (struct pass_softc *)periph->softc;
			/*
			 * Acquire a reference to the periph before we
			 * start the taskqueue, so that we don't run into
			 * a situation where the periph goes away before
			 * the task queue has a chance to run.
			 */
			if (cam_periph_acquire(periph) != 0)
				break;

			taskqueue_enqueue(taskqueue_thread,
			    &softc->add_physpath_task);
		}
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static cam_status
passregister(struct cam_periph *periph, void *arg)
{
	struct pass_softc *softc;
	struct ccb_getdev *cgd;
	struct ccb_pathinq cpi;
	struct make_dev_args args;
	int error, no_tags;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("%s: no getdev CCB, can't register device\n", __func__);
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct pass_softc *)malloc(sizeof(*softc),
	    M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("%s: Unable to probe new device. "
		    "Unable to allocate softc\n", __func__);
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	softc->state = PASS_STATE_NORMAL;
	if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI)
		softc->pd_type = SID_TYPE(&cgd->inq_data);
	else if (cgd->protocol == PROTO_SATAPM)
		softc->pd_type = T_ENCLOSURE;
	else
		softc->pd_type = T_DIRECT;

	periph->softc = softc;
	softc->periph = periph;
	TAILQ_INIT(&softc->incoming_queue);
	TAILQ_INIT(&softc->active_queue);
	TAILQ_INIT(&softc->abandoned_queue);
	TAILQ_INIT(&softc->done_queue);
	snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d",
	    periph->periph_name, periph->unit_number);
	snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
	    periph->periph_name, periph->unit_number);
	softc->io_zone_size = maxphys;
	knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));

	xpt_path_inq(&cpi, periph->path);

	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > maxphys)
		softc->maxio = maxphys;		/* for safety */
	else
		softc->maxio = cpi.maxio;	/* real value */

	if (cpi.hba_misc & PIM_UNMAPPED)
		softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE;

	/*
	 * We pass in 0 for a blocksize, since we don't know what the blocksize
	 * of this device is, if it even has a blocksize.
	 *
	 * Note: no_tags is valid only for SCSI peripherals, but we don't do any
	 * devstat accounting for tags on any other transport. SCSI is the only
	 * transport that uses the tag_action (ata has only vestigial references
	 * to it, others ignore it entirely).
	 */
	cam_periph_unlock(periph);
	no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0;
	softc->device_stats = devstat_new_entry(PERIPH_NAME,
	    periph->unit_number, 0,
	    DEVSTAT_NO_BLOCKSIZE
	    | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0),
	    softc->pd_type |
	    XPORT_DEVSTAT_TYPE(cpi.transport) |
	    DEVSTAT_TYPE_PASS,
	    DEVSTAT_PRIORITY_PASS);

	/*
	 * Initialize the taskqueue handler for shutting down kqueue.
	 */
	TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0,
	    pass_shutdown_kqueue, periph);

	/*
	 * Acquire a reference to the periph that we can release once we've
	 * cleaned up the kqueue.
	 */
	if (cam_periph_acquire(periph) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
		    "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Acquire a reference to the periph before we create the devfs
	 * instance for it.  We'll release this reference once the devfs
	 * instance has been freed.
	 */
	if (cam_periph_acquire(periph) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
		    "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/* Register the device */
	make_dev_args_init(&args);
	args.mda_devsw = &pass_cdevsw;
	args.mda_unit = periph->unit_number;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = periph;
	args.mda_flags = MAKEDEV_NOWAIT;
	error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name,
	    periph->unit_number);
	if (error != 0) {
		cam_periph_lock(periph);
		cam_periph_release_locked(periph);
		return (CAM_REQ_CMP_ERR);
	}

	/*
	 * Hold a reference to the periph before we create the physical
	 * path alias so it can't go away.
	 */
	if (cam_periph_acquire(periph) != 0) {
		xpt_print(periph->path, "%s: lost periph during "
		    "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	cam_periph_lock(periph);

	TASK_INIT(&softc->add_physpath_task, /*priority*/0,
	    pass_add_physpath, periph);

	/*
	 * See if physical path information is already available.
	 */
	taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task);

	/*
	 * Add an async callback so that we get notified if
	 * this device goes away or its physical path
	 * (stored in the advanced info data of the EDT) has
	 * changed.
	 */
	xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED,
	    passasync, periph, periph->path);

	if (bootverbose)
		xpt_announce_periph(periph, NULL);

	return(CAM_REQ_CMP);
}

static int
passopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	int error;

	periph = (struct cam_periph *)dev->si_drv1;
	if (cam_periph_acquire(periph) != 0)
		return (ENXIO);

	cam_periph_lock(periph);

	softc = (struct pass_softc *)periph->softc;

	if (softc->flags & PASS_FLAG_INVALID) {
		cam_periph_release_locked(periph);
		cam_periph_unlock(periph);
		return(ENXIO);
	}

	/*
	 * Don't allow access when we're running at a high securelevel.
	 */
	error = securelevel_gt(td->td_ucred, 1);
	if (error) {
		cam_periph_release_locked(periph);
		cam_periph_unlock(periph);
		return(error);
	}

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) {
		cam_periph_release_locked(periph);
		cam_periph_unlock(periph);
		return(EPERM);
	}

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		xpt_print(periph->path, "can't do nonblocking access\n");
		cam_periph_release_locked(periph);
		cam_periph_unlock(periph);
		return(EINVAL);
	}

	softc->open_count++;

	cam_periph_unlock(periph);

	return (error);
}

static int
passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	struct mtx *mtx;

	periph = (struct cam_periph *)dev->si_drv1;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	softc = periph->softc;
	softc->open_count--;

	if (softc->open_count == 0) {
		struct pass_io_req *io_req, *io_req2;

		TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
			TAILQ_REMOVE(&softc->done_queue, io_req, links);
			passiocleanup(softc, io_req);
			uma_zfree(softc->pass_zone, io_req);
		}

		TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
		    io_req2) {
			TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
			passiocleanup(softc, io_req);
			uma_zfree(softc->pass_zone, io_req);
		}

		/*
		 * If there are any active I/Os, we need to forcibly acquire a
		 * reference to the peripheral so that we don't go away
		 * before they complete.  We'll release the reference when
		 * the abandoned queue is empty.
		 */
		io_req = TAILQ_FIRST(&softc->active_queue);
		if ((io_req != NULL)
		 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) {
			cam_periph_doacquire(periph);
			softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
		}

		/*
		 * Since the I/O in the active queue is not under our
		 * control, just set a flag so that we can clean it up when
		 * it completes and put it on the abandoned queue.  This
		 * will prevent our sending spurious completions in the
		 * event that the device is opened again before these I/Os
		 * complete.
		 */
		TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
		    io_req2) {
			TAILQ_REMOVE(&softc->active_queue, io_req, links);
			io_req->flags |= PASS_IO_ABANDONED;
			TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
			    links);
		}
	}

	cam_periph_release_locked(periph);

	/*
	 * We reference the lock directly here, instead of using
	 * cam_periph_unlock().  The reason is that the call to
	 * cam_periph_release_locked() above could result in the periph
	 * getting freed.  If that is the case, dereferencing the periph
	 * with a cam_periph_unlock() call would cause a page fault.
	 *
	 * cam_periph_release() avoids this problem using the same method,
	 * but we're manually acquiring and dropping the lock here to
	 * protect the open count and avoid another lock acquisition and
	 * release.
	 */
	mtx_unlock(mtx);

	return (0);
}

static void
passstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	switch (softc->state) {
	case PASS_STATE_NORMAL: {
		struct pass_io_req *io_req;

		/*
		 * Check for any queued I/O requests that require an
		 * allocated slot.
		 */
		io_req = TAILQ_FIRST(&softc->incoming_queue);
		if (io_req == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}
		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
		/*
		 * Merge the user's CCB into the allocated CCB.
		 */
		xpt_merge_ccb(start_ccb, &io_req->ccb);
		start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO;
		start_ccb->ccb_h.ccb_ioreq = io_req;
		start_ccb->ccb_h.cbfcnp = passdone;
		io_req->alloced_ccb = start_ccb;
		binuptime(&io_req->start_time);
		devstat_start_transaction(softc->device_stats,
		    &io_req->start_time);

		xpt_action(start_ccb);

		/*
		 * If we have any more I/O waiting, schedule ourselves again.
		 */
		if (!TAILQ_EMPTY(&softc->incoming_queue))
			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
		break;
	}
	default:
		break;
	}
}

static void
passdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pass_softc *softc;
	struct ccb_hdr *hdr;

	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	hdr = &done_ccb->ccb_h;
	switch (hdr->ccb_type) {
	case PASS_CCB_QUEUED_IO: {
		struct pass_io_req *io_req;

		io_req = hdr->ccb_ioreq;
#if 0
		xpt_print(periph->path, "%s: called for user CCB %p\n",
		    __func__, io_req->user_ccb_ptr);
#endif
		if (((hdr->status & CAM_STATUS_MASK) != CAM_REQ_CMP) &&
		    ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
			int error;
			uint32_t cam_flags, sense_flags;

			passflags(done_ccb, &cam_flags, &sense_flags);
			error = passerror(done_ccb, cam_flags, sense_flags);

			if (error == ERESTART) {
				KASSERT(((sense_flags & SF_NO_RETRY) == 0),
				    ("passerror returned ERESTART with no retry requested\n"));
				return;
			}
		}

		/*
		 * Copy the allocated CCB contents back to the malloced CCB
		 * so we can give status back to the user when he requests it.
		 */
		bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));

		/*
		 * Log data/transaction completion with devstat(9).
		 */
		switch (hdr->func_code) {
		case XPT_SCSI_IO:
			devstat_end_transaction(softc->device_stats,
			    done_ccb->csio.dxfer_len - done_ccb->csio.resid,
			    done_ccb->csio.tag_action & 0x3,
			    ((hdr->flags & CAM_DIR_MASK) ==
			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
			    (hdr->flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
			    &io_req->start_time);
			break;
		case XPT_ATA_IO:
			devstat_end_transaction(softc->device_stats,
			    done_ccb->ataio.dxfer_len - done_ccb->ataio.resid,
			    0, /* Not used in ATA */
			    ((hdr->flags & CAM_DIR_MASK) ==
			    CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
			    (hdr->flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL,
			    &io_req->start_time);
			break;
		case XPT_SMP_IO:
			/*
			 * XXX KDM this isn't quite right, but there isn't
			 * currently an easy way to represent a bidirectional
			 * transfer in devstat.  The only way to do it
			 * and have the byte counts come out right would
			 * mean that we would have to record two
			 * transactions, one for the request and one for the
			 * response.  For now, so that we report something,
			 * just treat the entire thing as a read.
			 */
			devstat_end_transaction(softc->device_stats,
			    done_ccb->smpio.smp_request_len +
			    done_ccb->smpio.smp_response_len,
			    DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL,
			    &io_req->start_time);
			break;
		/* XXX XPT_NVME_IO and XPT_NVME_ADMIN need cases here for resid */
		default:
			devstat_end_transaction(softc->device_stats, 0,
			    DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL,
			    &io_req->start_time);
			break;
		}

		/*
		 * In the normal case, take the completed I/O off of the
		 * active queue and put it on the done queue.  Notify the
		 * user that we have a completed I/O.
		 */
		if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
			TAILQ_REMOVE(&softc->active_queue, io_req, links);
			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
			selwakeuppri(&softc->read_select, PRIBIO);
			KNOTE_LOCKED(&softc->read_select.si_note, 0);
		} else {
			/*
			 * In the case of an abandoned I/O (final close
			 * without fetching the I/O), take it off of the
			 * abandoned queue and free it.
			 */
			TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
			passiocleanup(softc, io_req);
			uma_zfree(softc->pass_zone, io_req);

			/*
			 * Release the done_ccb here, since we may wind up
			 * freeing the peripheral when we decrement the
			 * reference count below.
			 */
			xpt_release_ccb(done_ccb);

			/*
			 * If the abandoned queue is empty, we can release
			 * our reference to the periph since we won't have
			 * any more completions coming.
			 */
			if ((TAILQ_EMPTY(&softc->abandoned_queue))
			 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) {
				softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET;
				cam_periph_release_locked(periph);
			}

			/*
			 * We have already released the CCB, so we can
			 * return.
			 */
			return;
		}
		break;
	}
	}
	xpt_release_ccb(done_ccb);
}

static int
passcreatezone(struct cam_periph *periph)
{
	struct pass_softc *softc;
	int error;

	error = 0;
	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0),
	    ("%s called when the pass(4) zone is valid!\n", __func__));
	KASSERT((softc->pass_zone == NULL),
	    ("%s called when the pass(4) zone is allocated!\n", __func__));

	if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) {
		/*
		 * We're the first context through, so we need to create
		 * the pass(4) UMA zone for I/O requests.
		 */
		softc->flags |= PASS_FLAG_ZONE_INPROG;

		/*
		 * uma_zcreate() does a blocking (M_WAITOK) allocation,
		 * so we cannot hold a mutex while we call it.
		 */
		cam_periph_unlock(periph);

		softc->pass_zone = uma_zcreate(softc->zone_name,
		    sizeof(struct pass_io_req), NULL, NULL, NULL, NULL,
		    /*align*/ 0, /*flags*/ 0);

		softc->pass_io_zone = uma_zcreate(softc->io_zone_name,
		    softc->io_zone_size, NULL, NULL, NULL, NULL,
		    /*align*/ 0, /*flags*/ 0);

		cam_periph_lock(periph);

		if ((softc->pass_zone == NULL)
		 || (softc->pass_io_zone == NULL)) {
			if (softc->pass_zone == NULL)
				xpt_print(periph->path, "unable to allocate "
				    "IO Req UMA zone\n");
			else
				xpt_print(periph->path, "unable to allocate "
				    "IO UMA zone\n");
			softc->flags &= ~PASS_FLAG_ZONE_INPROG;
			goto bailout;
		}

		/*
		 * Set the flags appropriately and notify any other waiters.
		 */
		softc->flags &= ~PASS_FLAG_ZONE_INPROG;
		softc->flags |= PASS_FLAG_ZONE_VALID;
		wakeup(&softc->pass_zone);
	} else {
		/*
		 * In this case, the UMA zone has not yet been created, but
		 * another context is in the process of creating it.  We
		 * need to sleep until the creation is either done or has
		 * failed.
		 */
		while ((softc->flags & PASS_FLAG_ZONE_INPROG)
		    && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) {
			error = msleep(&softc->pass_zone,
			    cam_periph_mtx(periph), PRIBIO,
			    "paszon", 0);
			if (error != 0)
				goto bailout;
		}
		/*
		 * If the zone creation failed, no luck for the user.
		 */
		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
			error = ENOMEM;
			goto bailout;
		}
	}
bailout:
	return (error);
}

static void
passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
{
	union ccb *ccb;
	struct ccb_hdr *hdr;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	int i, numbufs;

	ccb = &io_req->ccb;
	hdr = &ccb->ccb_h;

	switch (hdr->func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(io_req->num_bufs, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(io_req->num_bufs, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(io_req->num_bufs, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(io_req->num_bufs, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_DEV_ADVINFO:
		numbufs = min(io_req->num_bufs, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		numbufs = min(io_req->num_bufs, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		return;
		break; /* NOTREACHED */
	}

	if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
		free(io_req->user_segptr, M_SCSIPASS);
		io_req->user_segptr = NULL;
	}

	/*
	 * We only want to free memory we malloced.
	 */
	if (io_req->data_flags == CAM_DATA_VADDR) {
		for (i = 0; i < io_req->num_bufs; i++) {
			if (io_req->kern_bufs[i] == NULL)
				continue;

			free(io_req->kern_bufs[i], M_SCSIPASS);
			io_req->kern_bufs[i] = NULL;
		}
	} else if (io_req->data_flags == CAM_DATA_SG) {
		for (i = 0; i < io_req->num_kern_segs; i++) {
			if ((uint8_t *)(uintptr_t)
			    io_req->kern_segptr[i].ds_addr == NULL)
				continue;

			uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t)
			    io_req->kern_segptr[i].ds_addr);
			io_req->kern_segptr[i].ds_addr = 0;
		}
	}

	if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
		free(io_req->kern_segptr, M_SCSIPASS);
		io_req->kern_segptr = NULL;
	}

	if (io_req->data_flags != CAM_DATA_PADDR) {
		for (i = 0; i < numbufs; i++) {
			/*
			 * Restore the user's buffer pointers to their
			 * previous values.
			 */
			if (io_req->user_bufs[i] != NULL)
				*data_ptrs[i] = io_req->user_bufs[i];
		}
	}

}

static int
passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
    ccb_flags direction)
{
	bus_size_t kern_watermark, user_watermark, len_to_copy;
	bus_dma_segment_t *user_sglist, *kern_sglist;
	int i, j, error;

	error = 0;
	kern_watermark = 0;
	user_watermark = 0;
	len_to_copy = 0;
	user_sglist = io_req->user_segptr;
	kern_sglist = io_req->kern_segptr;

	for (i = 0, j = 0; i < io_req->num_user_segs &&
	    j < io_req->num_kern_segs;) {
		uint8_t *user_ptr, *kern_ptr;

		len_to_copy = min(user_sglist[i].ds_len - user_watermark,
		    kern_sglist[j].ds_len - kern_watermark);

		user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr;
		user_ptr = user_ptr + user_watermark;
		kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr;
		kern_ptr = kern_ptr + kern_watermark;

		user_watermark += len_to_copy;
		kern_watermark += len_to_copy;

		if (direction == CAM_DIR_IN) {
			error = copyout(kern_ptr, user_ptr, len_to_copy);
			if (error != 0) {
				xpt_print(periph->path, "%s: copyout of %u "
				    "bytes from %p to %p failed with "
				    "error %d\n", __func__, len_to_copy,
				    kern_ptr, user_ptr, error);
				goto bailout;
			}
		} else {
			error = copyin(user_ptr, kern_ptr, len_to_copy);
			if (error != 0) {
				xpt_print(periph->path, "%s: copyin of %u "
				    "bytes from %p to %p failed with "
				    "error %d\n", __func__, len_to_copy,
				    user_ptr, kern_ptr, error);
				goto bailout;
			}
		}

		if (user_sglist[i].ds_len == user_watermark) {
			i++;
			user_watermark = 0;
		}

		if (kern_sglist[j].ds_len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

bailout:

	return (error);
}

static int
passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
{
	union ccb *ccb;
	struct ccb_hdr *hdr;
	struct pass_softc *softc;
	int numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];
	uint32_t num_segs;
	uint16_t *seg_cnt_ptr;
	size_t maxmap;
	int error;

	cam_periph_assert(periph, MA_NOTOWNED);

	softc = periph->softc;

	error = 0;
	ccb = &io_req->ccb;
	hdr = &ccb->ccb_h;
	maxmap = 0;
	num_segs = 0;
	seg_cnt_ptr = NULL;

	switch(hdr->func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("%s: invalid match buffer length 0\n", __func__);
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		io_req->data_flags = CAM_DATA_VADDR;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((hdr->flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		/*
		 * The user shouldn't be able to supply a bio.
		 */
		if ((hdr->flags & CAM_DATA_MASK) == CAM_DATA_BIO)
			return (EINVAL);

		io_req->data_flags = hdr->flags & CAM_DATA_MASK;

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = hdr->flags & CAM_DIR_MASK;
		num_segs = ccb->csio.sglist_cnt;
		seg_cnt_ptr = &ccb->csio.sglist_cnt;
		numbufs = 1;
		maxmap = softc->maxio;
		break;
	case XPT_ATA_IO:
		if ((hdr->flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		/*
		 * We only support a single virtual address for ATA I/O.
		 */
		if ((hdr->flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);

		io_req->data_flags = CAM_DATA_VADDR;

		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = hdr->flags & CAM_DIR_MASK;
		numbufs = 1;
		maxmap = softc->maxio;
		break;
	case XPT_SMP_IO:
		io_req->data_flags = CAM_DATA_VADDR;

		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		maxmap = softc->maxio;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		io_req->data_flags = CAM_DATA_VADDR;

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	case XPT_NVME_ADMIN:
	case XPT_NVME_IO:
		if ((hdr->flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);

		io_req->data_flags = hdr->flags & CAM_DATA_MASK;

		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = hdr->flags & CAM_DIR_MASK;
		num_segs = ccb->nvmeio.sglist_cnt;
		seg_cnt_ptr = &ccb->nvmeio.sglist_cnt;
		numbufs = 1;
		maxmap = softc->maxio;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	io_req->num_bufs = numbufs;

	/*
	 * If there is a maximum, check to make sure that the user's
	 * request fits within the limit.  In general, we should only have
	 * a maximum length for requests that go to hardware.  Otherwise it
	 * is whatever we're able to malloc.
	 */
	for (i = 0; i < numbufs; i++) {
		io_req->user_bufs[i] = *data_ptrs[i];
		io_req->dirs[i] = dirs[i];
		io_req->lengths[i] = lengths[i];

		if (maxmap == 0)
			continue;

		if (lengths[i] <= maxmap)
			continue;

		xpt_print(periph->path, "%s: data length %u > max allowed %u "
		    "bytes\n", __func__, lengths[i], maxmap);
		error = EINVAL;
		goto bailout;
	}

	switch (io_req->data_flags) {
	case CAM_DATA_VADDR:
		/* Map or copy the buffer into kernel address space */
		for (i = 0; i < numbufs; i++) {
			uint8_t *tmp_buf;

			/*
			 * If for some reason no length is specified, we
			 * don't need to allocate anything.
			 */
			if (io_req->lengths[i] == 0)
				continue;

			tmp_buf = malloc(lengths[i], M_SCSIPASS,
			    M_WAITOK | M_ZERO);
			io_req->kern_bufs[i] = tmp_buf;
			*data_ptrs[i] = tmp_buf;

#if 0
			xpt_print(periph->path, "%s: malloced %p len %u, user "
			    "buffer %p, operation: %s\n", __func__,
			    tmp_buf, lengths[i], io_req->user_bufs[i],
			    (dirs[i] == CAM_DIR_IN) ? "read" : "write");
#endif
			/*
			 * We only need to copy in if the user is writing.
			 */
			if (dirs[i] != CAM_DIR_OUT)
				continue;

			error = copyin(io_req->user_bufs[i],
			    io_req->kern_bufs[i], lengths[i]);
			if (error != 0) {
				xpt_print(periph->path, "%s: copy of user "
				    "buffer from %p to %p failed with "
				    "error %d\n", __func__,
				    io_req->user_bufs[i],
				    io_req->kern_bufs[i], error);
				goto bailout;
			}
		}
		break;
	case CAM_DATA_PADDR:
		/* Pass down the pointer as-is */
		break;
	case CAM_DATA_SG: {
		size_t sg_length, size_to_go, alloc_size;
		uint32_t num_segs_needed;

		/*
		 * Copy the user S/G list in, and then copy in the
		 * individual segments.
		 */
		/*
		 * We shouldn't see this, but check just in case.
		 */
		if (numbufs != 1) {
			xpt_print(periph->path, "%s: cannot currently handle "
			    "more than one S/G list per CCB\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/*
		 * We have to have at least one segment.
		 */
		if (num_segs == 0) {
			xpt_print(periph->path, "%s: CAM_DATA_SG flag set, "
			    "but sglist_cnt=0!\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/*
		 * Make sure the user specified the total length and didn't
		 * just leave it to us to decode the S/G list.
		 */
		if (lengths[0] == 0) {
			xpt_print(periph->path, "%s: no dxfer_len specified, "
			    "but CAM_DATA_SG flag is set!\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/*
		 * We allocate buffers in io_zone_size increments for an
		 * S/G list.  This will generally be maxphys.
		 */
		if (lengths[0] <= softc->io_zone_size)
			num_segs_needed = 1;
		else {
			num_segs_needed = lengths[0] / softc->io_zone_size;
			if ((lengths[0] % softc->io_zone_size) != 0)
				num_segs_needed++;
		}

		/* Figure out the size of the S/G list */
		sg_length = num_segs * sizeof(bus_dma_segment_t);
		io_req->num_user_segs = num_segs;
		io_req->num_kern_segs = num_segs_needed;

		/* Save the user's S/G list pointer for later restoration */
		io_req->user_bufs[0] = *data_ptrs[0];

		/*
		 * If we have enough segments allocated by default to handle
		 * the length of the user's S/G list, use the preallocated
		 * array; otherwise, allocate one large enough.
		 */
		if (num_segs > PASS_MAX_SEGS) {
			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
		} else
			io_req->user_segptr = io_req->user_segs;

		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
		if (error != 0) {
			xpt_print(periph->path, "%s: copy of user S/G list "
			    "from %p to %p failed with error %d\n",
			    __func__, *data_ptrs[0], io_req->user_segptr,
			    error);
			goto bailout;
		}

		if (num_segs_needed > PASS_MAX_SEGS) {
			io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
			    num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO);
			io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
		} else {
			io_req->kern_segptr = io_req->kern_segs;
		}

		/*
		 * Allocate the kernel S/G list.
		 */
		for (size_to_go = lengths[0], i = 0;
		    size_to_go > 0 && i < num_segs_needed;
		    i++, size_to_go -= alloc_size) {
			uint8_t *kern_ptr;

			alloc_size = min(size_to_go, softc->io_zone_size);
			kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK);
			io_req->kern_segptr[i].ds_addr =
			    (bus_addr_t)(uintptr_t)kern_ptr;
			io_req->kern_segptr[i].ds_len = alloc_size;
		}
		if (size_to_go > 0) {
			printf("%s: size_to_go = %zu, software error!\n",
			    __func__, size_to_go);
			error = EINVAL;
			goto bailout;
		}

		*data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
		*seg_cnt_ptr = io_req->num_kern_segs;

		/*
		 * We only need to copy data here if the user is writing.
		 */
		if (dirs[0] == CAM_DIR_OUT)
			error = passcopysglist(periph, io_req, dirs[0]);
		break;
	}
	case CAM_DATA_SG_PADDR: {
		size_t sg_length;

		/*
		 * We shouldn't see this, but check just in case.
		 */
		if (numbufs != 1) {
			printf("%s: cannot currently handle more than one "
			    "S/G list per CCB\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/*
		 * We have to have at least one segment.
		 */
		if (num_segs == 0) {
			xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag "
			    "set, but sglist_cnt=0!\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/*
		 * Make sure the user specified the total length and didn't
		 * just leave it to us to decode the S/G list.
		 */
		if (lengths[0] == 0) {
			xpt_print(periph->path, "%s: no dxfer_len specified, "
			    "but CAM_DATA_SG flag is set!\n", __func__);
			error = EINVAL;
			goto bailout;
		}

		/* Figure out the size of the S/G list */
		sg_length = num_segs * sizeof(bus_dma_segment_t);
		io_req->num_user_segs = num_segs;
		io_req->num_kern_segs = io_req->num_user_segs;

		/* Save the user's S/G list pointer for later restoration */
		io_req->user_bufs[0] = *data_ptrs[0];

		if (num_segs > PASS_MAX_SEGS) {
			io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
			    num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
			io_req->flags |= PASS_IO_USER_SEG_MALLOC;
		} else
			io_req->user_segptr = io_req->user_segs;

		io_req->kern_segptr = io_req->user_segptr;

		error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
		if (error != 0) {
			xpt_print(periph->path, "%s: copy of user S/G list "
			    "from %p to %p failed with error %d\n",
			    __func__, *data_ptrs[0], io_req->user_segptr,
			    error);
			goto bailout;
		}
		break;
	}
	default:
	case CAM_DATA_BIO:
		/*
		 * A user shouldn't be attaching a bio to the CCB.  It
		 * isn't a user-accessible structure.
		 */
		error = EINVAL;
		break;
	}

bailout:
	if (error != 0)
		passiocleanup(softc, io_req);

	return (error);
}

static int
passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
{
	struct pass_softc *softc;
	int error;
	int i;

	error = 0;
	softc = (struct pass_softc *)periph->softc;

	switch (io_req->data_flags) {
	case CAM_DATA_VADDR:
		/*
		 * Copy back to the user buffer if this was a read.
		 */
		for (i = 0; i < io_req->num_bufs; i++) {
			if (io_req->dirs[i] != CAM_DIR_IN)
				continue;

			error = copyout(io_req->kern_bufs[i],
			    io_req->user_bufs[i], io_req->lengths[i]);
			if (error != 0) {
				xpt_print(periph->path, "Unable to copy %u "
				    "bytes from %p to user address %p\n",
				    io_req->lengths[i],
				    io_req->kern_bufs[i],
				    io_req->user_bufs[i]);
				goto bailout;
			}
		}
		break;
	case CAM_DATA_PADDR:
		/* Do nothing.  The pointer is a physical address already */
		break;
	case CAM_DATA_SG:
		/*
		 * Copy back to the user buffer if this was a read.
		 * Restore the user's S/G list buffer pointer.
		 */
		if (io_req->dirs[0] == CAM_DIR_IN)
			error = passcopysglist(periph, io_req, io_req->dirs[0]);
		break;
	case CAM_DATA_SG_PADDR:
		/*
		 * Restore the user's S/G list buffer pointer.  No need to
		 * copy.
		 */
		break;
	default:
	case CAM_DATA_BIO:
		error = EINVAL;
		break;
	}

bailout:
	/*
	 * Reset the user's pointers to their original values and free
	 * allocated memory.
	 */
	passiocleanup(softc, io_req);

	return (error);
}

static int
passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl);
	}
	return (error);
}
1755
1756
static int
1757
passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
1758
{
1759
struct cam_periph *periph;
1760
struct pass_softc *softc;
1761
int error;
1762
uint32_t priority;
1763
1764
periph = (struct cam_periph *)dev->si_drv1;
1765
cam_periph_lock(periph);
1766
softc = (struct pass_softc *)periph->softc;
1767
1768
error = 0;
1769
1770
switch (cmd) {
1771
case CAMIOCOMMAND:
1772
{
1773
union ccb *inccb;
1774
union ccb *ccb;
1775
int ccb_malloced;
1776
1777
inccb = (union ccb *)addr;
1778
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1779
if (inccb->ccb_h.func_code == XPT_SCSI_IO)
1780
inccb->csio.bio = NULL;
1781
#endif
1782
1783
if (inccb->ccb_h.flags & CAM_UNLOCKED) {
1784
error = EINVAL;
1785
break;
1786
}
1787
1788
/*
1789
* Some CCB types, like scan bus and scan lun can only go
1790
* through the transport layer device.
1791
*/
1792
if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
1793
xpt_print(periph->path, "CCB function code %#x is "
1794
"restricted to the XPT device\n",
1795
inccb->ccb_h.func_code);
1796
error = ENODEV;
1797
break;
1798
}
1799
1800
/* Compatibility for RL/priority-unaware code. */
1801
priority = inccb->ccb_h.pinfo.priority;
1802
if (priority <= CAM_PRIORITY_OOB)
1803
priority += CAM_PRIORITY_OOB + 1;
1804
1805
/*
1806
* Non-immediate CCBs need a CCB from the per-device pool
1807
* of CCBs, which is scheduled by the transport layer.
1808
* Immediate CCBs and user-supplied CCBs should just be
1809
* malloced.
1810
*/
1811
if ((inccb->ccb_h.func_code & XPT_FC_QUEUED)
1812
&& ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) {
1813
ccb = cam_periph_getccb(periph, priority);
1814
ccb_malloced = 0;
1815
} else {
1816
ccb = xpt_alloc_ccb_nowait();
1817
1818
if (ccb != NULL)
1819
xpt_setup_ccb(&ccb->ccb_h, periph->path,
1820
priority);
1821
ccb_malloced = 1;
1822
}
1823
1824
if (ccb == NULL) {
1825
xpt_print(periph->path, "unable to allocate CCB\n");
1826
error = ENOMEM;
1827
break;
1828
}
1829
1830
error = passsendccb(periph, ccb, inccb);
1831
1832
if (ccb_malloced)
1833
xpt_free_ccb(ccb);
1834
else
1835
xpt_release_ccb(ccb);
1836
1837
break;
1838
}
1839
	case CAMIOQUEUE:
	{
		struct pass_io_req *io_req;
		union ccb **user_ccb, *ccb;
		xpt_opcode fc;

#ifdef COMPAT_FREEBSD32
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			error = ENOTTY;
			goto bailout;
		}
#endif
		if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
			error = passcreatezone(periph);
			if (error != 0)
				goto bailout;
		}

		/*
		 * We're going to do a blocking allocation for this I/O
		 * request, so we have to drop the lock.
		 */
		cam_periph_unlock(periph);

		io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
		ccb = &io_req->ccb;
		user_ccb = (union ccb **)addr;

		/*
		 * Unlike the CAMIOCOMMAND ioctl above, we only have a
		 * pointer to the user's CCB, so we have to copy the whole
		 * thing into a buffer we have allocated (above) instead
		 * of allowing the ioctl code to malloc a buffer and copy
		 * it in.
		 *
		 * This is an advantage for this asynchronous interface,
		 * since we don't want the memory to get freed while the
		 * CCB is outstanding.
		 */
#if 0
		xpt_print(periph->path, "Copying user CCB %p to "
		    "kernel address %p\n", *user_ccb, ccb);
#endif
		error = copyin(*user_ccb, ccb, sizeof(*ccb));
		if (error != 0) {
			xpt_print(periph->path, "Copy of user CCB %p to "
			    "kernel address %p failed with error %d\n",
			    *user_ccb, ccb, error);
			goto camioqueue_error;
		}
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.bio = NULL;
#endif

		if (ccb->ccb_h.flags & CAM_UNLOCKED) {
			error = EINVAL;
			goto camioqueue_error;
		}

		if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
			if (ccb->csio.cdb_len > IOCDBLEN) {
				error = EINVAL;
				goto camioqueue_error;
			}
			error = copyin(ccb->csio.cdb_io.cdb_ptr,
			    ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len);
			if (error != 0)
				goto camioqueue_error;
			ccb->ccb_h.flags &= ~CAM_CDB_POINTER;
		}

		/*
		 * Some CCB types, like scan bus and scan lun, can only go
		 * through the transport layer device.
		 */
		if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
			xpt_print(periph->path, "CCB function code %#x is "
			    "restricted to the XPT device\n",
			    ccb->ccb_h.func_code);
			error = ENODEV;
			goto camioqueue_error;
		}

		/*
		 * Save the user's CCB pointer as well as his linked list
		 * pointers and peripheral private area so that we can
		 * restore these later.
		 */
		io_req->user_ccb_ptr = *user_ccb;
		io_req->user_periph_links = ccb->ccb_h.periph_links;
		io_req->user_periph_priv = ccb->ccb_h.periph_priv;

		/*
		 * Now that we've saved the user's values, we can set our
		 * own peripheral private entry.
		 */
		ccb->ccb_h.ccb_ioreq = io_req;

		/* Compatibility for RL/priority-unaware code. */
		priority = ccb->ccb_h.pinfo.priority;
		if (priority <= CAM_PRIORITY_OOB)
			priority += CAM_PRIORITY_OOB + 1;

		/*
		 * Set up fields in the CCB like the path and the priority.
		 * The path in particular cannot be done in userland, since
		 * it is a pointer to a kernel data structure.
		 */
		xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
		    ccb->ccb_h.flags);

		/*
		 * Set up our done routine.  There is no way for the user to
		 * have a valid pointer here.
		 */
		ccb->ccb_h.cbfcnp = passdone;

		fc = ccb->ccb_h.func_code;
		/*
		 * If this function code has memory that can be mapped in
		 * or out, we need to call passmemsetup().
		 */
		if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
		 || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
		 || (fc == XPT_DEV_ADVINFO)
		 || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
			error = passmemsetup(periph, io_req);
			if (error != 0)
				goto camioqueue_error;
		} else
			io_req->mapinfo.num_bufs_used = 0;

		cam_periph_lock(periph);

		/*
		 * Everything goes on the incoming queue initially.
		 */
		TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);

		/*
		 * If the CCB is queued, and is not a user CCB, then
		 * we need to allocate a slot for it.  Call xpt_schedule()
		 * so that our start routine will get called when a CCB is
		 * available.
		 */
		if ((fc & XPT_FC_QUEUED)
		 && ((fc & XPT_FC_USER_CCB) == 0)) {
			xpt_schedule(periph, priority);
			break;
		}

		/*
		 * At this point, the CCB in question is either an
		 * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
		 * and therefore should be malloced, not allocated via a slot.
		 * Remove the CCB from the incoming queue and add it to the
		 * active queue.
		 */
		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
		TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);

		xpt_action(ccb);

		/*
		 * If this is not a queued CCB (i.e. it is an immediate CCB),
		 * then it is already done.  We need to put it on the done
		 * queue for the user to fetch.
		 */
		if ((fc & XPT_FC_QUEUED) == 0) {
			TAILQ_REMOVE(&softc->active_queue, io_req, links);
			TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
		}
		break;

camioqueue_error:
		uma_zfree(softc->pass_zone, io_req);
		cam_periph_lock(periph);
		break;
	}
	case CAMIOGET:
	{
		union ccb **user_ccb;
		struct pass_io_req *io_req;
		int old_error;

#ifdef COMPAT_FREEBSD32
		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
			error = ENOTTY;
			goto bailout;
		}
#endif
		user_ccb = (union ccb **)addr;
		old_error = 0;

		io_req = TAILQ_FIRST(&softc->done_queue);
		if (io_req == NULL) {
			error = ENOENT;
			break;
		}

		/*
		 * Remove the I/O from the done queue.
		 */
		TAILQ_REMOVE(&softc->done_queue, io_req, links);

		/*
		 * We have to drop the lock during the copyout because the
		 * copyout can result in VM faults that require sleeping.
		 */
		cam_periph_unlock(periph);

		/*
		 * Do any needed copies (e.g. for reads) and revert the
		 * pointers in the CCB back to the user's pointers.
		 */
		error = passmemdone(periph, io_req);

		old_error = error;

		io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
		io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;

#if 0
		xpt_print(periph->path, "Copying to user CCB %p from "
		    "kernel address %p\n", *user_ccb, &io_req->ccb);
#endif

		error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
		if (error != 0) {
			xpt_print(periph->path, "Copy to user CCB %p from "
			    "kernel address %p failed with error %d\n",
			    *user_ccb, &io_req->ccb, error);
		}

		/*
		 * Prefer the first error we got back, and make sure we
		 * don't overwrite bad status with good.
		 */
		if (old_error != 0)
			error = old_error;

		cam_periph_lock(periph);

		/*
		 * At this point, if there was an error, we could potentially
		 * re-queue the I/O and try again.  But why?  The error
		 * would almost certainly happen again.  We might as well
		 * not leak memory.
		 */
		uma_zfree(softc->pass_zone, io_req);
		break;
	}
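	/*
	 * A minimal userland sketch of the asynchronous CAMIOQUEUE/
	 * CAMIOGET pair handled above (illustrative only; assumes "fd"
	 * is an open pass(4) device, "ccb" points to a fully prepared
	 * CCB, a single request is outstanding, and error handling is
	 * elided):
	 *
	 *	ioctl(fd, CAMIOQUEUE, &ccb);	// submit; returns at once
	 *
	 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	 *	poll(&pfd, 1, INFTIM);		// block until a completion
	 *
	 *	ioctl(fd, CAMIOGET, &ccb);	// copy the done CCB back out
	 */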
	default:
		error = cam_periph_ioctl(periph, cmd, addr, passerror);
		break;
	}

bailout:
	cam_periph_unlock(periph);

	return(error);
}

static int
passpoll(struct cdev *dev, int poll_events, struct thread *td)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	int revents;

	periph = (struct cam_periph *)dev->si_drv1;
	softc = (struct pass_softc *)periph->softc;

	revents = poll_events & (POLLOUT | POLLWRNORM);
	if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
		cam_periph_lock(periph);

		if (!TAILQ_EMPTY(&softc->done_queue)) {
			revents |= poll_events & (POLLIN | POLLRDNORM);
		}
		cam_periph_unlock(periph);
		if (revents == 0)
			selrecord(td, &softc->read_select);
	}

	return (revents);
}

static int
passkqfilter(struct cdev *dev, struct knote *kn)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = (struct cam_periph *)dev->si_drv1;
	softc = (struct pass_softc *)periph->softc;

	kn->kn_hook = (caddr_t)periph;
	kn->kn_fop = &passread_filtops;
	knlist_add(&softc->read_select.si_note, kn, 0);

	return (0);
}

static void
passreadfiltdetach(struct knote *kn)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = (struct cam_periph *)kn->kn_hook;
	softc = (struct pass_softc *)periph->softc;

	knlist_remove(&softc->read_select.si_note, kn, 0);
}

static int
passreadfilt(struct knote *kn, long hint)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	int retval;

	periph = (struct cam_periph *)kn->kn_hook;
	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	if (TAILQ_EMPTY(&softc->done_queue))
		retval = 0;
	else
		retval = 1;

	return (retval);
}

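/*
 * A minimal userland sketch of waiting for completions with kqueue(2)
 * rather than poll(2) (illustrative only; assumes "fd" is an open
 * pass(4) device and error handling is elided):
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the filter
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// fires once the done
 *						// queue is non-empty
 */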
/*
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 */
static int
passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
	struct pass_softc *softc;
	struct cam_periph_map_info mapinfo;
	uint8_t *cmd;
	xpt_opcode fc;
	int error;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * There are some fields in the CCB header that need to be
	 * preserved, the rest we get from the user.
	 */
	xpt_merge_ccb(ccb, inccb);

	if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
		/*
		 * Bound the user-supplied CDB length before the alloca
		 * below, as the CAMIOQUEUE path does; an unchecked length
		 * would overflow the stack allocation.
		 */
		if (ccb->csio.cdb_len > IOCDBLEN)
			return (EINVAL);
		cmd = __builtin_alloca(ccb->csio.cdb_len);
		error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd, ccb->csio.cdb_len);
		if (error)
			return (error);
		ccb->csio.cdb_io.cdb_ptr = cmd;
	}

	/*
	 * Let cam_periph_mapmem do a sanity check on the data pointer format.
	 * Even if no data transfer is needed, it's a cheap check and it
	 * simplifies the code.
	 */
	fc = ccb->ccb_h.func_code;
	if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO)
	 || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_MMC_IO)
	 || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
		bzero(&mapinfo, sizeof(mapinfo));

		/*
		 * cam_periph_mapmem calls into proc and vm functions that can
		 * sleep as well as trigger I/O, so we can't hold the lock.
		 * Dropping it here is reasonably safe.
		 */
		cam_periph_unlock(periph);
		error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
		cam_periph_lock(periph);

		/*
		 * cam_periph_mapmem returned an error, we can't continue.
		 * Return the error to the user.
		 */
		if (error)
			return(error);
	} else
		/* Ensure that the unmap call later on is a no-op. */
		mapinfo.num_bufs_used = 0;

	/*
	 * If the user wants us to perform any error recovery, then honor
	 * that request.  Otherwise, it's up to the user to perform any
	 * error recovery.
	 */
	{
		uint32_t cam_flags, sense_flags;

		passflags(ccb, &cam_flags, &sense_flags);
		cam_periph_runccb(ccb, passerror, cam_flags,
		    sense_flags, softc->device_stats);
	}

	cam_periph_unlock(periph);
	error = cam_periph_unmapmem(ccb, &mapinfo);
	cam_periph_lock(periph);

	ccb->ccb_h.cbfcnp = NULL;
	ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
	bcopy(ccb, inccb, sizeof(union ccb));

	return (error);
}

/*
 * Set the cam_flags and sense_flags based on whether or not the request wants
 * error recovery.  In order to log errors via devctl, we need to do at least
 * minimal recovery.  We do this by not retrying unit attention (we let the
 * requester do it, or not, if appropriate) and specifically asking for no
 * recovery, like we do during device probing.
 */
static void
passflags(union ccb *ccb, uint32_t *cam_flags, uint32_t *sense_flags)
{
	if ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) != 0) {
		*cam_flags = CAM_RETRY_SELTO;
		*sense_flags = SF_RETRY_UA | SF_NO_PRINT;
	} else {
		*cam_flags = 0;
		*sense_flags = SF_NO_RETRY | SF_NO_RECOVERY | SF_NO_PRINT;
	}
}
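/*
 * For example, a caller that wants the kernel to retry selection
 * timeouts and unit attention conditions on its behalf sets
 * CAM_PASS_ERR_RECOVER before submitting the CCB (illustrative only):
 *
 *	ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
 */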

static int
passerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags)
{

	return(cam_periph_error(ccb, cam_flags, sense_flags));
}