Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cam/cam_xpt.c
39475 views
1
/*-
2
* Implementation of the Common Access Method Transport (XPT) layer.
3
*
4
* SPDX-License-Identifier: BSD-2-Clause
5
*
6
* Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
7
* Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
8
* All rights reserved.
9
*
10
* Redistribution and use in source and binary forms, with or without
11
* modification, are permitted provided that the following conditions
12
* are met:
13
* 1. Redistributions of source code must retain the above copyright
14
* notice, this list of conditions, and the following disclaimer,
15
* without modification, immediately at the beginning of the file.
16
* 2. The name of the author may not be used to endorse or promote products
17
* derived from this software without specific prior written permission.
18
*
19
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
* SUCH DAMAGE.
30
*/
31
32
#include "opt_printf.h"
33
34
#include <sys/param.h>
35
#include <sys/bio.h>
36
#include <sys/bus.h>
37
#include <sys/systm.h>
38
#include <sys/types.h>
39
#include <sys/malloc.h>
40
#include <sys/kernel.h>
41
#include <sys/time.h>
42
#include <sys/conf.h>
43
#include <sys/fcntl.h>
44
#include <sys/proc.h>
45
#include <sys/sbuf.h>
46
#include <sys/smp.h>
47
#include <sys/stdarg.h>
48
#include <sys/taskqueue.h>
49
50
#include <sys/lock.h>
51
#include <sys/mutex.h>
52
#include <sys/sysctl.h>
53
#include <sys/kthread.h>
54
55
#include <cam/cam.h>
56
#include <cam/cam_ccb.h>
57
#include <cam/cam_iosched.h>
58
#include <cam/cam_periph.h>
59
#include <cam/cam_queue.h>
60
#include <cam/cam_sim.h>
61
#include <cam/cam_xpt.h>
62
#include <cam/cam_xpt_sim.h>
63
#include <cam/cam_xpt_periph.h>
64
#include <cam/cam_xpt_internal.h>
65
#include <cam/cam_debug.h>
66
#include <cam/cam_compat.h>
67
68
#include <cam/scsi/scsi_all.h>
69
#include <cam/scsi/scsi_message.h>
70
#include <cam/scsi/scsi_pass.h>
71
72
73
/* Wild guess based on not wanting to grow the stack too much */
74
#define XPT_PRINT_MAXLEN 512
75
#ifdef PRINTF_BUFR_SIZE
76
#define XPT_PRINT_LEN PRINTF_BUFR_SIZE
77
#else
78
#define XPT_PRINT_LEN 128
79
#endif
80
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");
81
82
/*
83
* This is the maximum number of high powered commands (e.g. start unit)
84
* that can be outstanding at a particular time.
85
*/
86
#ifndef CAM_MAX_HIGHPOWER
87
#define CAM_MAX_HIGHPOWER 4
88
#endif
89
90
/* Datastructures internal to the xpt layer */
91
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
92
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
93
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
94
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");
95
96
struct xpt_softc {
97
uint32_t xpt_generation;
98
99
/* number of high powered commands that can go through right now */
100
struct mtx xpt_highpower_lock;
101
STAILQ_HEAD(highpowerlist, cam_ed) highpowerq;
102
int num_highpower;
103
104
/* queue for handling async rescan requests. */
105
TAILQ_HEAD(, ccb_hdr) ccb_scanq;
106
int buses_to_config;
107
int buses_config_done;
108
109
/*
110
* Registered buses
111
*
112
* N.B., "busses" is an archaic spelling of "buses". In new code
113
* "buses" is preferred.
114
*/
115
TAILQ_HEAD(,cam_eb) xpt_busses;
116
u_int bus_generation;
117
118
int boot_delay;
119
struct callout boot_callout;
120
struct task boot_task;
121
struct root_hold_token xpt_rootmount;
122
123
struct mtx xpt_topo_lock;
124
struct taskqueue *xpt_taskq;
125
};
126
127
typedef enum {
128
DM_RET_COPY = 0x01,
129
DM_RET_FLAG_MASK = 0x0f,
130
DM_RET_NONE = 0x00,
131
DM_RET_STOP = 0x10,
132
DM_RET_DESCEND = 0x20,
133
DM_RET_ERROR = 0x30,
134
DM_RET_ACTION_MASK = 0xf0
135
} dev_match_ret;
136
137
typedef enum {
138
XPT_DEPTH_BUS,
139
XPT_DEPTH_TARGET,
140
XPT_DEPTH_DEVICE,
141
XPT_DEPTH_PERIPH
142
} xpt_traverse_depth;
143
144
struct xpt_traverse_config {
145
xpt_traverse_depth depth;
146
void *tr_func;
147
void *tr_arg;
148
};
149
150
typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
151
typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
152
typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
153
typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
154
typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
155
156
/* Transport layer configuration information */
157
static struct xpt_softc xsoftc;
158
159
MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);
160
161
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
162
&xsoftc.boot_delay, 0, "Bus registration wait time");
163
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
164
&xsoftc.xpt_generation, 0, "CAM peripheral generation count");
165
166
struct cam_doneq {
167
struct mtx_padalign cam_doneq_mtx;
168
STAILQ_HEAD(, ccb_hdr) cam_doneq;
169
int cam_doneq_sleep;
170
};
171
172
static struct cam_doneq cam_doneqs[MAXCPU];
173
static u_int __read_mostly cam_num_doneqs;
174
static struct proc *cam_proc;
175
static struct cam_doneq cam_async;
176
177
SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
178
&cam_num_doneqs, 0, "Number of completion queues/threads");
179
180
struct cam_periph *xpt_periph;
181
182
static periph_init_t xpt_periph_init;
183
184
static struct periph_driver xpt_driver =
185
{
186
xpt_periph_init, "xpt",
187
TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
188
CAM_PERIPH_DRV_EARLY
189
};
190
191
PERIPHDRIVER_DECLARE(xpt, xpt_driver);
192
193
static d_open_t xptopen;
194
static d_close_t xptclose;
195
static d_ioctl_t xptioctl;
196
static d_ioctl_t xptdoioctl;
197
198
static struct cdevsw xpt_cdevsw = {
199
.d_version = D_VERSION,
200
.d_flags = 0,
201
.d_open = xptopen,
202
.d_close = xptclose,
203
.d_ioctl = xptioctl,
204
.d_name = "xpt",
205
};
206
207
/* Storage for debugging datastructures */
208
struct cam_path *cam_dpath;
209
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
210
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
211
&cam_dflags, 0, "Enabled debug flags");
212
uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
213
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
214
&cam_debug_delay, 0, "Delay in us after each debug message");
215
216
/* Our boot-time initialization hook */
217
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
218
219
static moduledata_t cam_moduledata = {
220
"cam",
221
cam_module_event_handler,
222
NULL
223
};
224
225
static int xpt_init(void *);
226
227
DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
228
MODULE_VERSION(cam, 1);
229
230
static void xpt_async_bcast(struct async_list *async_head,
231
uint32_t async_code,
232
struct cam_path *path,
233
void *async_arg);
234
static path_id_t xptnextfreepathid(void);
235
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
236
static union ccb *xpt_get_ccb(struct cam_periph *periph);
237
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
238
static void xpt_run_allocq(struct cam_periph *periph, int sleep);
239
static void xpt_run_allocq_task(void *context, int pending);
240
static void xpt_run_devq(struct cam_devq *devq);
241
static callout_func_t xpt_release_devq_timeout;
242
static void xpt_acquire_bus(struct cam_eb *bus);
243
static void xpt_release_bus(struct cam_eb *bus);
244
static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
245
static int xpt_release_devq_device(struct cam_ed *dev, u_int count,
246
int run_queue);
247
static struct cam_et*
248
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
249
static void xpt_acquire_target(struct cam_et *target);
250
static void xpt_release_target(struct cam_et *target);
251
static struct cam_eb*
252
xpt_find_bus(path_id_t path_id);
253
static struct cam_et*
254
xpt_find_target(struct cam_eb *bus, target_id_t target_id);
255
static struct cam_ed*
256
xpt_find_device(struct cam_et *target, lun_id_t lun_id);
257
static void xpt_config(void *arg);
258
static void xpt_hold_boot_locked(void);
259
static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
260
uint32_t new_priority);
261
static xpt_devicefunc_t xptpassannouncefunc;
262
static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
263
static void xptpoll(struct cam_sim *sim);
264
static void camisr_runqueue(void);
265
static void xpt_done_process(struct ccb_hdr *ccb_h);
266
static void xpt_done_td(void *);
267
static void xpt_async_td(void *);
268
static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
269
u_int num_patterns, struct cam_eb *bus);
270
static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
271
u_int num_patterns,
272
struct cam_ed *device);
273
static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
274
u_int num_patterns,
275
struct cam_periph *periph);
276
static xpt_busfunc_t xptedtbusfunc;
277
static xpt_targetfunc_t xptedttargetfunc;
278
static xpt_devicefunc_t xptedtdevicefunc;
279
static xpt_periphfunc_t xptedtperiphfunc;
280
static xpt_pdrvfunc_t xptplistpdrvfunc;
281
static xpt_periphfunc_t xptplistperiphfunc;
282
static int xptedtmatch(struct ccb_dev_match *cdm);
283
static int xptperiphlistmatch(struct ccb_dev_match *cdm);
284
static int xptbustraverse(struct cam_eb *start_bus,
285
xpt_busfunc_t *tr_func, void *arg);
286
static int xpttargettraverse(struct cam_eb *bus,
287
struct cam_et *start_target,
288
xpt_targetfunc_t *tr_func, void *arg);
289
static int xptdevicetraverse(struct cam_et *target,
290
struct cam_ed *start_device,
291
xpt_devicefunc_t *tr_func, void *arg);
292
static int xptperiphtraverse(struct cam_ed *device,
293
struct cam_periph *start_periph,
294
xpt_periphfunc_t *tr_func, void *arg);
295
static int xptpdrvtraverse(struct periph_driver **start_pdrv,
296
xpt_pdrvfunc_t *tr_func, void *arg);
297
static int xptpdperiphtraverse(struct periph_driver **pdrv,
298
struct cam_periph *start_periph,
299
xpt_periphfunc_t *tr_func,
300
void *arg);
301
static xpt_busfunc_t xptdefbusfunc;
302
static xpt_targetfunc_t xptdeftargetfunc;
303
static xpt_devicefunc_t xptdefdevicefunc;
304
static xpt_periphfunc_t xptdefperiphfunc;
305
static void xpt_finishconfig_task(void *context, int pending);
306
static void xpt_dev_async_default(uint32_t async_code,
307
struct cam_eb *bus,
308
struct cam_et *target,
309
struct cam_ed *device,
310
void *async_arg);
311
static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus,
312
struct cam_et *target,
313
lun_id_t lun_id);
314
static xpt_devicefunc_t xptsetasyncfunc;
315
static xpt_busfunc_t xptsetasyncbusfunc;
316
static cam_status xptregister(struct cam_periph *periph,
317
void *arg);
318
319
static __inline int
320
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
321
{
322
int retval;
323
324
mtx_assert(&devq->send_mtx, MA_OWNED);
325
if ((dev->ccbq.queue.entries > 0) &&
326
(dev->ccbq.dev_openings > 0) &&
327
(dev->ccbq.queue.qfrozen_cnt == 0)) {
328
/*
329
* The priority of a device waiting for controller
330
* resources is that of the highest priority CCB
331
* enqueued.
332
*/
333
retval =
334
xpt_schedule_dev(&devq->send_queue,
335
&dev->devq_entry,
336
CAMQ_GET_PRIO(&dev->ccbq.queue));
337
} else {
338
retval = 0;
339
}
340
return (retval);
341
}
342
343
static __inline int
344
device_is_queued(struct cam_ed *device)
345
{
346
return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
347
}
348
349
static void
350
xpt_periph_init(void)
351
{
352
make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
353
}
354
355
static int
356
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
357
{
358
359
/*
360
* Only allow read-write access.
361
*/
362
if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
363
return(EPERM);
364
365
/*
366
* We don't allow nonblocking access.
367
*/
368
if ((flags & O_NONBLOCK) != 0) {
369
printf("%s: can't do nonblocking access\n", devtoname(dev));
370
return(ENODEV);
371
}
372
373
return(0);
374
}
375
376
static int
377
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
378
{
379
380
return(0);
381
}
382
383
/*
384
* Don't automatically grab the xpt softc lock here even though this is going
385
* through the xpt device. The xpt device is really just a back door for
386
* accessing other devices and SIMs, so the right thing to do is to grab
387
* the appropriate SIM lock once the bus/SIM is located.
388
*/
389
static int
390
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
391
{
392
int error;
393
394
if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
395
error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
396
}
397
return (error);
398
}
399
400
static int
401
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
402
{
403
int error;
404
405
error = 0;
406
407
switch(cmd) {
408
/*
409
* For the transport layer CAMIOCOMMAND ioctl, we really only want
410
* to accept CCB types that don't quite make sense to send through a
411
* passthrough driver. XPT_PATH_INQ is an exception to this, as stated
412
* in the CAM spec.
413
*/
414
case CAMIOCOMMAND: {
415
union ccb *ccb;
416
union ccb *inccb;
417
struct cam_eb *bus;
418
419
inccb = (union ccb *)addr;
420
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
421
if (inccb->ccb_h.func_code == XPT_SCSI_IO)
422
inccb->csio.bio = NULL;
423
#endif
424
425
if (inccb->ccb_h.flags & CAM_UNLOCKED)
426
return (EINVAL);
427
428
bus = xpt_find_bus(inccb->ccb_h.path_id);
429
if (bus == NULL)
430
return (EINVAL);
431
432
switch (inccb->ccb_h.func_code) {
433
case XPT_SCAN_BUS:
434
case XPT_RESET_BUS:
435
if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
436
inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
437
xpt_release_bus(bus);
438
return (EINVAL);
439
}
440
break;
441
case XPT_SCAN_TGT:
442
if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
443
inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
444
xpt_release_bus(bus);
445
return (EINVAL);
446
}
447
break;
448
default:
449
break;
450
}
451
452
switch(inccb->ccb_h.func_code) {
453
case XPT_SCAN_BUS:
454
case XPT_RESET_BUS:
455
case XPT_PATH_INQ:
456
case XPT_ENG_INQ:
457
case XPT_SCAN_LUN:
458
case XPT_SCAN_TGT:
459
460
ccb = xpt_alloc_ccb();
461
462
/*
463
* Create a path using the bus, target, and lun the
464
* user passed in.
465
*/
466
if (xpt_create_path(&ccb->ccb_h.path, NULL,
467
inccb->ccb_h.path_id,
468
inccb->ccb_h.target_id,
469
inccb->ccb_h.target_lun) !=
470
CAM_REQ_CMP){
471
error = EINVAL;
472
xpt_free_ccb(ccb);
473
break;
474
}
475
/* Ensure all of our fields are correct */
476
xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
477
inccb->ccb_h.pinfo.priority);
478
xpt_merge_ccb(ccb, inccb);
479
xpt_path_lock(ccb->ccb_h.path);
480
cam_periph_runccb(ccb, NULL, 0, 0, NULL);
481
xpt_path_unlock(ccb->ccb_h.path);
482
bcopy(ccb, inccb, sizeof(union ccb));
483
xpt_free_path(ccb->ccb_h.path);
484
xpt_free_ccb(ccb);
485
break;
486
487
case XPT_DEBUG: {
488
union ccb ccb;
489
490
/*
491
* This is an immediate CCB, so it's okay to
492
* allocate it on the stack.
493
*/
494
memset(&ccb, 0, sizeof(ccb));
495
496
/*
497
* Create a path using the bus, target, and lun the
498
* user passed in.
499
*/
500
if (xpt_create_path(&ccb.ccb_h.path, NULL,
501
inccb->ccb_h.path_id,
502
inccb->ccb_h.target_id,
503
inccb->ccb_h.target_lun) !=
504
CAM_REQ_CMP){
505
error = EINVAL;
506
break;
507
}
508
/* Ensure all of our fields are correct */
509
xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
510
inccb->ccb_h.pinfo.priority);
511
xpt_merge_ccb(&ccb, inccb);
512
xpt_action(&ccb);
513
bcopy(&ccb, inccb, sizeof(union ccb));
514
xpt_free_path(ccb.ccb_h.path);
515
break;
516
}
517
case XPT_DEV_MATCH: {
518
struct cam_periph_map_info mapinfo;
519
struct cam_path *old_path;
520
521
/*
522
* We can't deal with physical addresses for this
523
* type of transaction.
524
*/
525
if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
526
CAM_DATA_VADDR) {
527
error = EINVAL;
528
break;
529
}
530
531
/*
532
* Save this in case the caller had it set to
533
* something in particular.
534
*/
535
old_path = inccb->ccb_h.path;
536
537
/*
538
* We really don't need a path for the matching
539
* code. The path is needed because of the
540
* debugging statements in xpt_action(). They
541
* assume that the CCB has a valid path.
542
*/
543
inccb->ccb_h.path = xpt_periph->path;
544
545
bzero(&mapinfo, sizeof(mapinfo));
546
547
/*
548
* Map the pattern and match buffers into kernel
549
* virtual address space.
550
*/
551
error = cam_periph_mapmem(inccb, &mapinfo, maxphys);
552
553
if (error) {
554
inccb->ccb_h.path = old_path;
555
break;
556
}
557
558
/*
559
* This is an immediate CCB, we can send it on directly.
560
*/
561
xpt_action(inccb);
562
563
/*
564
* Map the buffers back into user space.
565
*/
566
error = cam_periph_unmapmem(inccb, &mapinfo);
567
568
inccb->ccb_h.path = old_path;
569
break;
570
}
571
default:
572
error = ENOTSUP;
573
break;
574
}
575
xpt_release_bus(bus);
576
break;
577
}
578
/*
579
* This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
580
* with the periphal driver name and unit name filled in. The other
581
* fields don't really matter as input. The passthrough driver name
582
* ("pass"), and unit number are passed back in the ccb. The current
583
* device generation number, and the index into the device peripheral
584
* driver list, and the status are also passed back. Note that
585
* since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
586
* we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
587
* (or rather should be) impossible for the device peripheral driver
588
* list to change since we look at the whole thing in one pass, and
589
* we do it with lock protection.
590
*
591
*/
592
case CAMGETPASSTHRU: {
593
union ccb *ccb;
594
struct cam_periph *periph;
595
struct periph_driver **p_drv;
596
char *name;
597
u_int unit;
598
bool base_periph_found;
599
600
ccb = (union ccb *)addr;
601
unit = ccb->cgdl.unit_number;
602
name = ccb->cgdl.periph_name;
603
base_periph_found = false;
604
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
605
if (ccb->ccb_h.func_code == XPT_SCSI_IO)
606
ccb->csio.bio = NULL;
607
#endif
608
609
/*
610
* Sanity check -- make sure we don't get a null peripheral
611
* driver name.
612
*/
613
if (*ccb->cgdl.periph_name == '\0') {
614
error = EINVAL;
615
break;
616
}
617
618
/* Keep the list from changing while we traverse it */
619
xpt_lock_buses();
620
621
/* first find our driver in the list of drivers */
622
for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
623
if (strcmp((*p_drv)->driver_name, name) == 0)
624
break;
625
626
if (*p_drv == NULL) {
627
xpt_unlock_buses();
628
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
629
ccb->cgdl.status = CAM_GDEVLIST_ERROR;
630
*ccb->cgdl.periph_name = '\0';
631
ccb->cgdl.unit_number = 0;
632
error = ENOENT;
633
break;
634
}
635
636
/*
637
* Run through every peripheral instance of this driver
638
* and check to see whether it matches the unit passed
639
* in by the user. If it does, get out of the loops and
640
* find the passthrough driver associated with that
641
* peripheral driver.
642
*/
643
for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
644
periph = TAILQ_NEXT(periph, unit_links)) {
645
if (periph->unit_number == unit)
646
break;
647
}
648
/*
649
* If we found the peripheral driver that the user passed
650
* in, go through all of the peripheral drivers for that
651
* particular device and look for a passthrough driver.
652
*/
653
if (periph != NULL) {
654
struct cam_ed *device;
655
int i;
656
657
base_periph_found = true;
658
device = periph->path->device;
659
for (i = 0, periph = SLIST_FIRST(&device->periphs);
660
periph != NULL;
661
periph = SLIST_NEXT(periph, periph_links), i++) {
662
/*
663
* Check to see whether we have a
664
* passthrough device or not.
665
*/
666
if (strcmp(periph->periph_name, "pass") == 0) {
667
/*
668
* Fill in the getdevlist fields.
669
*/
670
strlcpy(ccb->cgdl.periph_name,
671
periph->periph_name,
672
sizeof(ccb->cgdl.periph_name));
673
ccb->cgdl.unit_number =
674
periph->unit_number;
675
if (SLIST_NEXT(periph, periph_links))
676
ccb->cgdl.status =
677
CAM_GDEVLIST_MORE_DEVS;
678
else
679
ccb->cgdl.status =
680
CAM_GDEVLIST_LAST_DEVICE;
681
ccb->cgdl.generation =
682
device->generation;
683
ccb->cgdl.index = i;
684
/*
685
* Fill in some CCB header fields
686
* that the user may want.
687
*/
688
ccb->ccb_h.path_id =
689
periph->path->bus->path_id;
690
ccb->ccb_h.target_id =
691
periph->path->target->target_id;
692
ccb->ccb_h.target_lun =
693
periph->path->device->lun_id;
694
ccb->ccb_h.status = CAM_REQ_CMP;
695
break;
696
}
697
}
698
}
699
700
/*
701
* If the periph is null here, one of two things has
702
* happened. The first possibility is that we couldn't
703
* find the unit number of the particular peripheral driver
704
* that the user is asking about. e.g. the user asks for
705
* the passthrough driver for "da11". We find the list of
706
* "da" peripherals all right, but there is no unit 11.
707
* The other possibility is that we went through the list
708
* of peripheral drivers attached to the device structure,
709
* but didn't find one with the name "pass". Either way,
710
* we return ENOENT, since we couldn't find something.
711
*/
712
if (periph == NULL) {
713
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
714
ccb->cgdl.status = CAM_GDEVLIST_ERROR;
715
*ccb->cgdl.periph_name = '\0';
716
ccb->cgdl.unit_number = 0;
717
error = ENOENT;
718
/*
719
* It is unfortunate that this is even necessary,
720
* but there are many, many clueless users out there.
721
* If this is true, the user is looking for the
722
* passthrough driver, but doesn't have one in his
723
* kernel.
724
*/
725
if (base_periph_found) {
726
printf(
727
"xptioctl: pass driver is not in the kernel\n"
728
"xptioctl: put \"device pass\" in your kernel config file\n");
729
}
730
}
731
xpt_unlock_buses();
732
break;
733
}
734
default:
735
error = ENOTTY;
736
break;
737
}
738
739
return(error);
740
}
741
742
static int
743
cam_module_event_handler(module_t mod, int what, void *arg)
744
{
745
int error;
746
747
switch (what) {
748
case MOD_LOAD:
749
if ((error = xpt_init(NULL)) != 0)
750
return (error);
751
break;
752
case MOD_UNLOAD:
753
return EBUSY;
754
default:
755
return EOPNOTSUPP;
756
}
757
758
return 0;
759
}
760
761
static struct xpt_proto *
762
xpt_proto_find(cam_proto proto)
763
{
764
struct xpt_proto **pp;
765
766
SET_FOREACH(pp, cam_xpt_proto_set) {
767
if ((*pp)->proto == proto)
768
return *pp;
769
}
770
771
return NULL;
772
}
773
774
static void
775
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
776
{
777
778
if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
779
xpt_free_path(done_ccb->ccb_h.path);
780
xpt_free_ccb(done_ccb);
781
} else {
782
done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
783
(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
784
}
785
xpt_release_boot();
786
}
787
788
/* thread to handle bus rescans */
789
static void
790
xpt_scanner_thread(void *dummy)
791
{
792
union ccb *ccb;
793
struct mtx *mtx;
794
struct cam_ed *device;
795
796
xpt_lock_buses();
797
for (;;) {
798
if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
799
msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
800
"-", 0);
801
if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
802
TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
803
xpt_unlock_buses();
804
805
/*
806
* We need to lock the device's mutex which we use as
807
* the path mutex. We can't do it directly because the
808
* cam_path in the ccb may wind up going away because
809
* the path lock may be dropped and the path retired in
810
* the completion callback. We do this directly to keep
811
* the reference counts in cam_path sane. We also have
812
* to copy the device pointer because ccb_h.path may
813
* be freed in the callback.
814
*/
815
mtx = xpt_path_mtx(ccb->ccb_h.path);
816
device = ccb->ccb_h.path->device;
817
xpt_acquire_device(device);
818
mtx_lock(mtx);
819
xpt_action(ccb);
820
mtx_unlock(mtx);
821
xpt_release_device(device);
822
823
xpt_lock_buses();
824
}
825
}
826
}
827
828
void
829
xpt_rescan(union ccb *ccb)
830
{
831
struct ccb_hdr *hdr;
832
833
/* Prepare request */
834
if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
835
ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
836
ccb->ccb_h.func_code = XPT_SCAN_BUS;
837
else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
838
ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
839
ccb->ccb_h.func_code = XPT_SCAN_TGT;
840
else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
841
ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
842
ccb->ccb_h.func_code = XPT_SCAN_LUN;
843
else {
844
xpt_print(ccb->ccb_h.path, "illegal scan path\n");
845
xpt_free_path(ccb->ccb_h.path);
846
xpt_free_ccb(ccb);
847
return;
848
}
849
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
850
("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
851
xpt_action_name(ccb->ccb_h.func_code)));
852
853
ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
854
ccb->ccb_h.cbfcnp = xpt_rescan_done;
855
xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
856
/* Don't make duplicate entries for the same paths. */
857
xpt_lock_buses();
858
if (ccb->ccb_h.ppriv_ptr1 == NULL) {
859
TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
860
if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
861
wakeup(&xsoftc.ccb_scanq);
862
xpt_unlock_buses();
863
xpt_print(ccb->ccb_h.path, "rescan already queued\n");
864
xpt_free_path(ccb->ccb_h.path);
865
xpt_free_ccb(ccb);
866
return;
867
}
868
}
869
}
870
TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
871
xpt_hold_boot_locked();
872
wakeup(&xsoftc.ccb_scanq);
873
xpt_unlock_buses();
874
}
875
876
/* Functions accessed by the peripheral drivers */
877
static int
878
xpt_init(void *dummy)
879
{
880
struct cam_sim *xpt_sim;
881
struct cam_path *path;
882
struct cam_devq *devq;
883
cam_status status;
884
int error, i;
885
886
TAILQ_INIT(&xsoftc.xpt_busses);
887
TAILQ_INIT(&xsoftc.ccb_scanq);
888
STAILQ_INIT(&xsoftc.highpowerq);
889
xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
890
891
mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
892
xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
893
taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);
894
895
#ifdef CAM_BOOT_DELAY
896
/*
897
* Override this value at compile time to assist our users
898
* who don't use loader to boot a kernel.
899
*/
900
xsoftc.boot_delay = CAM_BOOT_DELAY;
901
#endif
902
903
/*
904
* The xpt layer is, itself, the equivalent of a SIM.
905
* Allow 16 ccbs in the ccb pool for it. This should
906
* give decent parallelism when we probe buses and
907
* perform other XPT functions.
908
*/
909
devq = cam_simq_alloc(16);
910
if (devq == NULL)
911
return (ENOMEM);
912
xpt_sim = cam_sim_alloc(xptaction,
913
xptpoll,
914
"xpt",
915
/*softc*/NULL,
916
/*unit*/0,
917
/*mtx*/NULL,
918
/*max_dev_transactions*/0,
919
/*max_tagged_dev_transactions*/0,
920
devq);
921
if (xpt_sim == NULL)
922
return (ENOMEM);
923
924
if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
925
printf(
926
"xpt_init: xpt_bus_register failed with errno %d, failing attach\n",
927
error);
928
return (EINVAL);
929
}
930
931
/*
932
* Looking at the XPT from the SIM layer, the XPT is
933
* the equivalent of a peripheral driver. Allocate
934
* a peripheral driver entry for us.
935
*/
936
if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
937
CAM_TARGET_WILDCARD,
938
CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
939
printf(
940
"xpt_init: xpt_create_path failed with status %#x, failing attach\n",
941
status);
942
return (EINVAL);
943
}
944
xpt_path_lock(path);
945
cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
946
path, NULL, 0, xpt_sim);
947
xpt_path_unlock(path);
948
xpt_free_path(path);
949
950
if (cam_num_doneqs < 1)
951
cam_num_doneqs = 1 + mp_ncpus / 6;
952
else if (cam_num_doneqs > MAXCPU)
953
cam_num_doneqs = MAXCPU;
954
for (i = 0; i < cam_num_doneqs; i++) {
955
mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
956
MTX_DEF);
957
STAILQ_INIT(&cam_doneqs[i].cam_doneq);
958
error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
959
&cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
960
if (error != 0) {
961
cam_num_doneqs = i;
962
break;
963
}
964
}
965
if (cam_num_doneqs < 1) {
966
printf("xpt_init: Cannot init completion queues - failing attach\n");
967
return (ENOMEM);
968
}
969
970
mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
971
STAILQ_INIT(&cam_async.cam_doneq);
972
if (kproc_kthread_add(xpt_async_td, &cam_async,
973
&cam_proc, NULL, 0, 0, "cam", "async") != 0) {
974
printf("xpt_init: Cannot init async thread - failing attach\n");
975
return (ENOMEM);
976
}
977
978
/*
979
* Register a callback for when interrupts are enabled.
980
*/
981
config_intrhook_oneshot(xpt_config, NULL);
982
983
return (0);
984
}
985
986
static cam_status
987
xptregister(struct cam_periph *periph, void *arg)
988
{
989
struct cam_sim *xpt_sim;
990
991
if (periph == NULL) {
992
printf("xptregister: periph was NULL!!\n");
993
return(CAM_REQ_CMP_ERR);
994
}
995
996
xpt_sim = (struct cam_sim *)arg;
997
xpt_sim->softc = periph;
998
xpt_periph = periph;
999
periph->softc = NULL;
1000
1001
return(CAM_REQ_CMP);
1002
}
1003
1004
int32_t
1005
xpt_add_periph(struct cam_periph *periph)
1006
{
1007
struct cam_ed *device;
1008
int32_t status;
1009
1010
TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
1011
device = periph->path->device;
1012
status = CAM_REQ_CMP;
1013
if (device != NULL) {
1014
mtx_lock(&device->target->bus->eb_mtx);
1015
device->generation++;
1016
SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
1017
mtx_unlock(&device->target->bus->eb_mtx);
1018
atomic_add_32(&xsoftc.xpt_generation, 1);
1019
}
1020
1021
return (status);
1022
}
1023
1024
void
1025
xpt_remove_periph(struct cam_periph *periph)
1026
{
1027
struct cam_ed *device;
1028
1029
device = periph->path->device;
1030
if (device != NULL) {
1031
mtx_lock(&device->target->bus->eb_mtx);
1032
device->generation++;
1033
SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
1034
mtx_unlock(&device->target->bus->eb_mtx);
1035
atomic_add_32(&xsoftc.xpt_generation, 1);
1036
}
1037
}
1038
1039
void
1040
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1041
{
1042
char buf[128];
1043
struct sbuf sb;
1044
1045
(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1046
sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1047
xpt_announce_periph_sbuf(periph, &sb, announce_string);
1048
(void)sbuf_finish(&sb);
1049
}
1050
1051
void
1052
xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
1053
char *announce_string)
1054
{
1055
struct cam_path *path = periph->path;
1056
struct xpt_proto *proto;
1057
1058
cam_periph_assert(periph, MA_OWNED);
1059
periph->flags |= CAM_PERIPH_ANNOUNCED;
1060
1061
sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1062
periph->periph_name, periph->unit_number,
1063
path->bus->sim->sim_name,
1064
path->bus->sim->unit_number,
1065
path->bus->sim->bus_id,
1066
path->bus->path_id,
1067
path->target->target_id,
1068
(uintmax_t)path->device->lun_id);
1069
sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1070
proto = xpt_proto_find(path->device->protocol);
1071
if (proto)
1072
proto->ops->announce_sbuf(path->device, sb);
1073
else
1074
sbuf_printf(sb, "Unknown protocol device %d\n",
1075
path->device->protocol);
1076
if (path->device->serial_num_len > 0) {
1077
/* Don't wrap the screen - print only the first 60 chars */
1078
sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
1079
periph->periph_name, periph->unit_number,
1080
path->device->serial_num);
1081
}
1082
/* Announce transport details. */
1083
path->bus->xport->ops->announce_sbuf(periph, sb);
1084
/* Announce command queueing. */
1085
if (path->device->inq_flags & SID_CmdQue
1086
|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1087
sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
1088
periph->periph_name, periph->unit_number);
1089
}
1090
/* Announce caller's details if they've passed in. */
1091
if (announce_string != NULL)
1092
sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
1093
periph->unit_number, announce_string);
1094
}
1095
1096
void
1097
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
1098
{
1099
if (quirks != 0) {
1100
printf("%s%d: quirks=0x%b\n", periph->periph_name,
1101
periph->unit_number, quirks, bit_string);
1102
}
1103
}
1104
1105
void
1106
xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
1107
int quirks, char *bit_string)
1108
{
1109
if (quirks != 0) {
1110
sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
1111
periph->unit_number, quirks, bit_string);
1112
}
1113
}
1114
1115
void
1116
xpt_denounce_periph(struct cam_periph *periph)
1117
{
1118
char buf[128];
1119
struct sbuf sb;
1120
1121
(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1122
sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
1123
xpt_denounce_periph_sbuf(periph, &sb);
1124
(void)sbuf_finish(&sb);
1125
}
1126
1127
void
1128
xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
1129
{
1130
struct cam_path *path = periph->path;
1131
struct xpt_proto *proto;
1132
1133
cam_periph_assert(periph, MA_OWNED);
1134
1135
sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
1136
periph->periph_name, periph->unit_number,
1137
path->bus->sim->sim_name,
1138
path->bus->sim->unit_number,
1139
path->bus->sim->bus_id,
1140
path->bus->path_id,
1141
path->target->target_id,
1142
(uintmax_t)path->device->lun_id);
1143
sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
1144
proto = xpt_proto_find(path->device->protocol);
1145
if (proto)
1146
proto->ops->denounce_sbuf(path->device, sb);
1147
else
1148
sbuf_printf(sb, "Unknown protocol device %d",
1149
path->device->protocol);
1150
if (path->device->serial_num_len > 0)
1151
sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
1152
sbuf_cat(sb, " detached\n");
1153
}
1154
1155
int
1156
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
1157
{
1158
int ret = -1, l, o;
1159
struct ccb_dev_advinfo cdai;
1160
struct scsi_vpd_device_id *did;
1161
struct scsi_vpd_id_descriptor *idd;
1162
1163
xpt_path_assert(path, MA_OWNED);
1164
1165
memset(&cdai, 0, sizeof(cdai));
1166
xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
1167
cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
1168
cdai.flags = CDAI_FLAG_NONE;
1169
cdai.bufsiz = len;
1170
cdai.buf = buf;
1171
1172
if (!strcmp(attr, "GEOM::ident"))
1173
cdai.buftype = CDAI_TYPE_SERIAL_NUM;
1174
else if (!strcmp(attr, "GEOM::physpath"))
1175
cdai.buftype = CDAI_TYPE_PHYS_PATH;
1176
else if (strcmp(attr, "GEOM::lunid") == 0 ||
1177
strcmp(attr, "GEOM::lunname") == 0) {
1178
cdai.buftype = CDAI_TYPE_SCSI_DEVID;
1179
cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
1180
cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
1181
if (cdai.buf == NULL) {
1182
ret = ENOMEM;
1183
goto out;
1184
}
1185
} else
1186
goto out;
1187
1188
xpt_action((union ccb *)&cdai); /* can only be synchronous */
1189
if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
1190
cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
1191
if (cdai.provsiz == 0)
1192
goto out;
1193
switch(cdai.buftype) {
1194
case CDAI_TYPE_SCSI_DEVID:
1195
did = (struct scsi_vpd_device_id *)cdai.buf;
1196
if (strcmp(attr, "GEOM::lunid") == 0) {
1197
idd = scsi_get_devid(did, cdai.provsiz,
1198
scsi_devid_is_lun_naa);
1199
if (idd == NULL)
1200
idd = scsi_get_devid(did, cdai.provsiz,
1201
scsi_devid_is_lun_eui64);
1202
if (idd == NULL)
1203
idd = scsi_get_devid(did, cdai.provsiz,
1204
scsi_devid_is_lun_uuid);
1205
if (idd == NULL)
1206
idd = scsi_get_devid(did, cdai.provsiz,
1207
scsi_devid_is_lun_md5);
1208
} else
1209
idd = NULL;
1210
1211
if (idd == NULL)
1212
idd = scsi_get_devid(did, cdai.provsiz,
1213
scsi_devid_is_lun_t10);
1214
if (idd == NULL)
1215
idd = scsi_get_devid(did, cdai.provsiz,
1216
scsi_devid_is_lun_name);
1217
if (idd == NULL)
1218
break;
1219
1220
ret = 0;
1221
if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1222
SVPD_ID_CODESET_ASCII) {
1223
if (idd->length < len) {
1224
for (l = 0; l < idd->length; l++)
1225
buf[l] = idd->identifier[l] ?
1226
idd->identifier[l] : ' ';
1227
buf[l] = 0;
1228
} else
1229
ret = EFAULT;
1230
break;
1231
}
1232
if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
1233
SVPD_ID_CODESET_UTF8) {
1234
l = strnlen(idd->identifier, idd->length);
1235
if (l < len) {
1236
bcopy(idd->identifier, buf, l);
1237
buf[l] = 0;
1238
} else
1239
ret = EFAULT;
1240
break;
1241
}
1242
if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
1243
SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
1244
if ((idd->length - 2) * 2 + 4 >= len) {
1245
ret = EFAULT;
1246
break;
1247
}
1248
for (l = 2, o = 0; l < idd->length; l++) {
1249
if (l == 6 || l == 8 || l == 10 || l == 12)
1250
o += sprintf(buf + o, "-");
1251
o += sprintf(buf + o, "%02x",
1252
idd->identifier[l]);
1253
}
1254
break;
1255
}
1256
if (idd->length * 2 < len) {
1257
for (l = 0; l < idd->length; l++)
1258
sprintf(buf + l * 2, "%02x",
1259
idd->identifier[l]);
1260
} else
1261
ret = EFAULT;
1262
break;
1263
default:
1264
if (cdai.provsiz < len) {
1265
cdai.buf[cdai.provsiz] = 0;
1266
ret = 0;
1267
} else
1268
ret = EFAULT;
1269
break;
1270
}
1271
1272
out:
1273
if ((char *)cdai.buf != buf)
1274
free(cdai.buf, M_CAMXPT);
1275
return ret;
1276
}
1277
1278
static dev_match_ret
1279
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1280
struct cam_eb *bus)
1281
{
1282
dev_match_ret retval;
1283
u_int i;
1284
1285
retval = DM_RET_NONE;
1286
1287
/*
1288
* If we aren't given something to match against, that's an error.
1289
*/
1290
if (bus == NULL)
1291
return(DM_RET_ERROR);
1292
1293
/*
1294
* If there are no match entries, then this bus matches no
1295
* matter what.
1296
*/
1297
if ((patterns == NULL) || (num_patterns == 0))
1298
return(DM_RET_DESCEND | DM_RET_COPY);
1299
1300
for (i = 0; i < num_patterns; i++) {
1301
struct bus_match_pattern *cur_pattern;
1302
struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
1303
struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1304
1305
/*
1306
* If the pattern in question isn't for a bus node, we
1307
* aren't interested. However, we do indicate to the
1308
* calling routine that we should continue descending the
1309
* tree, since the user wants to match against lower-level
1310
* EDT elements.
1311
*/
1312
if (patterns[i].type == DEV_MATCH_DEVICE &&
1313
(dp->flags & DEV_MATCH_PATH) != 0 &&
1314
dp->path_id != bus->path_id)
1315
continue;
1316
if (patterns[i].type == DEV_MATCH_PERIPH &&
1317
(pp->flags & PERIPH_MATCH_PATH) != 0 &&
1318
pp->path_id != bus->path_id)
1319
continue;
1320
if (patterns[i].type != DEV_MATCH_BUS) {
1321
if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1322
retval |= DM_RET_DESCEND;
1323
continue;
1324
}
1325
1326
cur_pattern = &patterns[i].pattern.bus_pattern;
1327
1328
if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1329
&& (cur_pattern->path_id != bus->path_id))
1330
continue;
1331
1332
if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1333
&& (cur_pattern->bus_id != bus->sim->bus_id))
1334
continue;
1335
1336
if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1337
&& (cur_pattern->unit_number != bus->sim->unit_number))
1338
continue;
1339
1340
if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1341
&& (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1342
DEV_IDLEN) != 0))
1343
continue;
1344
1345
/*
1346
* If we get to this point, the user definitely wants
1347
* information on this bus. So tell the caller to copy the
1348
* data out.
1349
*/
1350
retval |= DM_RET_COPY;
1351
1352
/*
1353
* If the return action has been set to descend, then we
1354
* know that we've already seen a non-bus matching
1355
* expression, therefore we need to further descend the tree.
1356
* This won't change by continuing around the loop, so we
1357
* go ahead and return. If we haven't seen a non-bus
1358
* matching expression, we keep going around the loop until
1359
* we exhaust the matching expressions. We'll set the stop
1360
* flag once we fall out of the loop.
1361
*/
1362
if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1363
return(retval);
1364
}
1365
1366
/*
1367
* If the return action hasn't been set to descend yet, that means
1368
* we haven't seen anything other than bus matching patterns. So
1369
* tell the caller to stop descending the tree -- the user doesn't
1370
* want to match against lower level tree elements.
1371
*/
1372
if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1373
retval |= DM_RET_STOP;
1374
1375
return(retval);
1376
}
1377
1378
static dev_match_ret
1379
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1380
struct cam_ed *device)
1381
{
1382
dev_match_ret retval;
1383
u_int i;
1384
1385
retval = DM_RET_NONE;
1386
1387
/*
1388
* If we aren't given something to match against, that's an error.
1389
*/
1390
if (device == NULL)
1391
return(DM_RET_ERROR);
1392
1393
/*
1394
* If there are no match entries, then this device matches no
1395
* matter what.
1396
*/
1397
if ((patterns == NULL) || (num_patterns == 0))
1398
return(DM_RET_DESCEND | DM_RET_COPY);
1399
1400
for (i = 0; i < num_patterns; i++) {
1401
struct device_match_pattern *cur_pattern;
1402
struct scsi_vpd_device_id *device_id_page;
1403
struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;
1404
1405
/*
1406
* If the pattern in question isn't for a device node, we
1407
* aren't interested.
1408
*/
1409
if (patterns[i].type == DEV_MATCH_PERIPH &&
1410
(pp->flags & PERIPH_MATCH_TARGET) != 0 &&
1411
pp->target_id != device->target->target_id)
1412
continue;
1413
if (patterns[i].type == DEV_MATCH_PERIPH &&
1414
(pp->flags & PERIPH_MATCH_LUN) != 0 &&
1415
pp->target_lun != device->lun_id)
1416
continue;
1417
if (patterns[i].type != DEV_MATCH_DEVICE) {
1418
if ((patterns[i].type == DEV_MATCH_PERIPH)
1419
&& ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1420
retval |= DM_RET_DESCEND;
1421
continue;
1422
}
1423
1424
cur_pattern = &patterns[i].pattern.device_pattern;
1425
1426
/* Error out if mutually exclusive options are specified. */
1427
if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1428
== (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
1429
return(DM_RET_ERROR);
1430
1431
if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1432
&& (cur_pattern->path_id != device->target->bus->path_id))
1433
continue;
1434
1435
if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1436
&& (cur_pattern->target_id != device->target->target_id))
1437
continue;
1438
1439
if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1440
&& (cur_pattern->target_lun != device->lun_id))
1441
continue;
1442
1443
if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1444
&& (cam_quirkmatch((caddr_t)&device->inq_data,
1445
(caddr_t)&cur_pattern->data.inq_pat,
1446
1, sizeof(cur_pattern->data.inq_pat),
1447
scsi_static_inquiry_match) == NULL))
1448
continue;
1449
1450
device_id_page = (struct scsi_vpd_device_id *)device->device_id;
1451
if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
1452
&& (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
1453
|| scsi_devid_match((uint8_t *)device_id_page->desc_list,
1454
device->device_id_len
1455
- SVPD_DEVICE_ID_HDR_LEN,
1456
cur_pattern->data.devid_pat.id,
1457
cur_pattern->data.devid_pat.id_len) != 0))
1458
continue;
1459
1460
/*
1461
* If we get to this point, the user definitely wants
1462
* information on this device. So tell the caller to copy
1463
* the data out.
1464
*/
1465
retval |= DM_RET_COPY;
1466
1467
/*
1468
* If the return action has been set to descend, then we
1469
* know that we've already seen a peripheral matching
1470
* expression, therefore we need to further descend the tree.
1471
* This won't change by continuing around the loop, so we
1472
* go ahead and return. If we haven't seen a peripheral
1473
* matching expression, we keep going around the loop until
1474
* we exhaust the matching expressions. We'll set the stop
1475
* flag once we fall out of the loop.
1476
*/
1477
if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1478
return(retval);
1479
}
1480
1481
/*
1482
* If the return action hasn't been set to descend yet, that means
1483
* we haven't seen any peripheral matching patterns. So tell the
1484
* caller to stop descending the tree -- the user doesn't want to
1485
* match against lower level tree elements.
1486
*/
1487
if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1488
retval |= DM_RET_STOP;
1489
1490
return(retval);
1491
}
1492
1493
/*
1494
* Match a single peripheral against any number of match patterns.
1495
*/
1496
static dev_match_ret
1497
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1498
struct cam_periph *periph)
1499
{
1500
dev_match_ret retval;
1501
u_int i;
1502
1503
/*
1504
* If we aren't given something to match against, that's an error.
1505
*/
1506
if (periph == NULL)
1507
return(DM_RET_ERROR);
1508
1509
/*
1510
* If there are no match entries, then this peripheral matches no
1511
* matter what.
1512
*/
1513
if ((patterns == NULL) || (num_patterns == 0))
1514
return(DM_RET_STOP | DM_RET_COPY);
1515
1516
/*
1517
* There aren't any nodes below a peripheral node, so there's no
1518
* reason to descend the tree any further.
1519
*/
1520
retval = DM_RET_STOP;
1521
1522
for (i = 0; i < num_patterns; i++) {
1523
struct periph_match_pattern *cur_pattern;
1524
1525
/*
1526
* If the pattern in question isn't for a peripheral, we
1527
* aren't interested.
1528
*/
1529
if (patterns[i].type != DEV_MATCH_PERIPH)
1530
continue;
1531
1532
cur_pattern = &patterns[i].pattern.periph_pattern;
1533
1534
if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1535
&& (cur_pattern->path_id != periph->path->bus->path_id))
1536
continue;
1537
1538
/*
1539
* For the target and lun id's, we have to make sure the
1540
* target and lun pointers aren't NULL. The xpt peripheral
1541
* has a wildcard target and device.
1542
*/
1543
if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1544
&& ((periph->path->target == NULL)
1545
||(cur_pattern->target_id != periph->path->target->target_id)))
1546
continue;
1547
1548
if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1549
&& ((periph->path->device == NULL)
1550
|| (cur_pattern->target_lun != periph->path->device->lun_id)))
1551
continue;
1552
1553
if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1554
&& (cur_pattern->unit_number != periph->unit_number))
1555
continue;
1556
1557
if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1558
&& (strncmp(cur_pattern->periph_name, periph->periph_name,
1559
DEV_IDLEN) != 0))
1560
continue;
1561
1562
/*
1563
* If we get to this point, the user definitely wants
1564
* information on this peripheral. So tell the caller to
1565
* copy the data out.
1566
*/
1567
retval |= DM_RET_COPY;
1568
1569
/*
1570
* The return action has already been set to stop, since
1571
* peripherals don't have any nodes below them in the EDT.
1572
*/
1573
return(retval);
1574
}
1575
1576
/*
1577
* If we get to this point, the peripheral that was passed in
1578
* doesn't match any of the patterns.
1579
*/
1580
return(retval);
1581
}
1582
1583
static int
1584
xptedtbusfunc(struct cam_eb *bus, void *arg)
1585
{
1586
struct ccb_dev_match *cdm;
1587
struct cam_et *target;
1588
dev_match_ret retval;
1589
1590
cdm = (struct ccb_dev_match *)arg;
1591
1592
/*
1593
* If our position is for something deeper in the tree, that means
1594
* that we've already seen this node. So, we keep going down.
1595
*/
1596
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1597
&& (cdm->pos.cookie.bus == bus)
1598
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1599
&& (cdm->pos.cookie.target != NULL))
1600
retval = DM_RET_DESCEND;
1601
else
1602
retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1603
1604
/*
1605
* If we got an error, bail out of the search.
1606
*/
1607
if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1608
cdm->status = CAM_DEV_MATCH_ERROR;
1609
return(0);
1610
}
1611
1612
/*
1613
* If the copy flag is set, copy this bus out.
1614
*/
1615
if (retval & DM_RET_COPY) {
1616
int spaceleft, j;
1617
1618
spaceleft = cdm->match_buf_len - (cdm->num_matches *
1619
sizeof(struct dev_match_result));
1620
1621
/*
1622
* If we don't have enough space to put in another
1623
* match result, save our position and tell the
1624
* user there are more devices to check.
1625
*/
1626
if (spaceleft < sizeof(struct dev_match_result)) {
1627
bzero(&cdm->pos, sizeof(cdm->pos));
1628
cdm->pos.position_type =
1629
CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1630
1631
cdm->pos.cookie.bus = bus;
1632
cdm->pos.generations[CAM_BUS_GENERATION]=
1633
xsoftc.bus_generation;
1634
cdm->status = CAM_DEV_MATCH_MORE;
1635
return(0);
1636
}
1637
j = cdm->num_matches;
1638
cdm->num_matches++;
1639
cdm->matches[j].type = DEV_MATCH_BUS;
1640
cdm->matches[j].result.bus_result.path_id = bus->path_id;
1641
cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1642
cdm->matches[j].result.bus_result.unit_number =
1643
bus->sim->unit_number;
1644
strlcpy(cdm->matches[j].result.bus_result.dev_name,
1645
bus->sim->sim_name,
1646
sizeof(cdm->matches[j].result.bus_result.dev_name));
1647
}
1648
1649
/*
1650
* If the user is only interested in buses, there's no
1651
* reason to descend to the next level in the tree.
1652
*/
1653
if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1654
return(1);
1655
1656
/*
1657
* If there is a target generation recorded, check it to
1658
* make sure the target list hasn't changed.
1659
*/
1660
mtx_lock(&bus->eb_mtx);
1661
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1662
&& (cdm->pos.cookie.bus == bus)
1663
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1664
&& (cdm->pos.cookie.target != NULL)) {
1665
if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
1666
bus->generation)) {
1667
mtx_unlock(&bus->eb_mtx);
1668
cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1669
return (0);
1670
}
1671
target = (struct cam_et *)cdm->pos.cookie.target;
1672
target->refcount++;
1673
} else
1674
target = NULL;
1675
mtx_unlock(&bus->eb_mtx);
1676
1677
return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
1678
}
1679
1680
static int
1681
xptedttargetfunc(struct cam_et *target, void *arg)
1682
{
1683
struct ccb_dev_match *cdm;
1684
struct cam_eb *bus;
1685
struct cam_ed *device;
1686
1687
cdm = (struct ccb_dev_match *)arg;
1688
bus = target->bus;
1689
1690
/*
1691
* If there is a device list generation recorded, check it to
1692
* make sure the device list hasn't changed.
1693
*/
1694
mtx_lock(&bus->eb_mtx);
1695
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1696
&& (cdm->pos.cookie.bus == bus)
1697
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1698
&& (cdm->pos.cookie.target == target)
1699
&& (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1700
&& (cdm->pos.cookie.device != NULL)) {
1701
if (cdm->pos.generations[CAM_DEV_GENERATION] !=
1702
target->generation) {
1703
mtx_unlock(&bus->eb_mtx);
1704
cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1705
return(0);
1706
}
1707
device = (struct cam_ed *)cdm->pos.cookie.device;
1708
device->refcount++;
1709
} else
1710
device = NULL;
1711
mtx_unlock(&bus->eb_mtx);
1712
1713
return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
1714
}
1715
1716
static int
1717
xptedtdevicefunc(struct cam_ed *device, void *arg)
1718
{
1719
struct cam_eb *bus;
1720
struct cam_periph *periph;
1721
struct ccb_dev_match *cdm;
1722
dev_match_ret retval;
1723
1724
cdm = (struct ccb_dev_match *)arg;
1725
bus = device->target->bus;
1726
1727
/*
1728
* If our position is for something deeper in the tree, that means
1729
* that we've already seen this node. So, we keep going down.
1730
*/
1731
if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1732
&& (cdm->pos.cookie.device == device)
1733
&& (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1734
&& (cdm->pos.cookie.periph != NULL))
1735
retval = DM_RET_DESCEND;
1736
else
1737
retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
1738
device);
1739
1740
if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1741
cdm->status = CAM_DEV_MATCH_ERROR;
1742
return(0);
1743
}
1744
1745
/*
1746
* If the copy flag is set, copy this device out.
1747
*/
1748
if (retval & DM_RET_COPY) {
1749
int spaceleft, j;
1750
1751
spaceleft = cdm->match_buf_len - (cdm->num_matches *
1752
sizeof(struct dev_match_result));
1753
1754
/*
1755
* If we don't have enough space to put in another
1756
* match result, save our position and tell the
1757
* user there are more devices to check.
1758
*/
1759
if (spaceleft < sizeof(struct dev_match_result)) {
1760
bzero(&cdm->pos, sizeof(cdm->pos));
1761
cdm->pos.position_type =
1762
CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1763
CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
1764
1765
cdm->pos.cookie.bus = device->target->bus;
1766
cdm->pos.generations[CAM_BUS_GENERATION]=
1767
xsoftc.bus_generation;
1768
cdm->pos.cookie.target = device->target;
1769
cdm->pos.generations[CAM_TARGET_GENERATION] =
1770
device->target->bus->generation;
1771
cdm->pos.cookie.device = device;
1772
cdm->pos.generations[CAM_DEV_GENERATION] =
1773
device->target->generation;
1774
cdm->status = CAM_DEV_MATCH_MORE;
1775
return(0);
1776
}
1777
j = cdm->num_matches;
1778
cdm->num_matches++;
1779
cdm->matches[j].type = DEV_MATCH_DEVICE;
1780
cdm->matches[j].result.device_result.path_id =
1781
device->target->bus->path_id;
1782
cdm->matches[j].result.device_result.target_id =
1783
device->target->target_id;
1784
cdm->matches[j].result.device_result.target_lun =
1785
device->lun_id;
1786
cdm->matches[j].result.device_result.protocol =
1787
device->protocol;
1788
bcopy(&device->inq_data,
1789
&cdm->matches[j].result.device_result.inq_data,
1790
sizeof(struct scsi_inquiry_data));
1791
bcopy(&device->ident_data,
1792
&cdm->matches[j].result.device_result.ident_data,
1793
sizeof(struct ata_params));
1794
1795
/* Let the user know whether this device is unconfigured */
1796
if (device->flags & CAM_DEV_UNCONFIGURED)
1797
cdm->matches[j].result.device_result.flags =
1798
DEV_RESULT_UNCONFIGURED;
1799
else
1800
cdm->matches[j].result.device_result.flags =
1801
DEV_RESULT_NOFLAG;
1802
}
1803
1804
/*
1805
* If the user isn't interested in peripherals, don't descend
1806
* the tree any further.
1807
*/
1808
if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1809
return(1);
1810
1811
/*
1812
* If there is a peripheral list generation recorded, make sure
1813
* it hasn't changed.
1814
*/
1815
xpt_lock_buses();
1816
mtx_lock(&bus->eb_mtx);
1817
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1818
&& (cdm->pos.cookie.bus == bus)
1819
&& (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1820
&& (cdm->pos.cookie.target == device->target)
1821
&& (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1822
&& (cdm->pos.cookie.device == device)
1823
&& (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1824
&& (cdm->pos.cookie.periph != NULL)) {
1825
if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1826
device->generation) {
1827
mtx_unlock(&bus->eb_mtx);
1828
xpt_unlock_buses();
1829
cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1830
return(0);
1831
}
1832
periph = (struct cam_periph *)cdm->pos.cookie.periph;
1833
periph->refcount++;
1834
} else
1835
periph = NULL;
1836
mtx_unlock(&bus->eb_mtx);
1837
xpt_unlock_buses();
1838
1839
return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
1840
}
1841
1842
static int
1843
xptedtperiphfunc(struct cam_periph *periph, void *arg)
1844
{
1845
struct ccb_dev_match *cdm;
1846
dev_match_ret retval;
1847
1848
cdm = (struct ccb_dev_match *)arg;
1849
1850
retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1851
1852
if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1853
cdm->status = CAM_DEV_MATCH_ERROR;
1854
return(0);
1855
}
1856
1857
/*
1858
* If the copy flag is set, copy this peripheral out.
1859
*/
1860
if (retval & DM_RET_COPY) {
1861
int spaceleft, j;
1862
size_t l;
1863
1864
spaceleft = cdm->match_buf_len - (cdm->num_matches *
1865
sizeof(struct dev_match_result));
1866
1867
/*
1868
* If we don't have enough space to put in another
1869
* match result, save our position and tell the
1870
* user there are more devices to check.
1871
*/
1872
if (spaceleft < sizeof(struct dev_match_result)) {
1873
bzero(&cdm->pos, sizeof(cdm->pos));
1874
cdm->pos.position_type =
1875
CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
1876
CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
1877
CAM_DEV_POS_PERIPH;
1878
1879
cdm->pos.cookie.bus = periph->path->bus;
1880
cdm->pos.generations[CAM_BUS_GENERATION]=
1881
xsoftc.bus_generation;
1882
cdm->pos.cookie.target = periph->path->target;
1883
cdm->pos.generations[CAM_TARGET_GENERATION] =
1884
periph->path->bus->generation;
1885
cdm->pos.cookie.device = periph->path->device;
1886
cdm->pos.generations[CAM_DEV_GENERATION] =
1887
periph->path->target->generation;
1888
cdm->pos.cookie.periph = periph;
1889
cdm->pos.generations[CAM_PERIPH_GENERATION] =
1890
periph->path->device->generation;
1891
cdm->status = CAM_DEV_MATCH_MORE;
1892
return(0);
1893
}
1894
1895
j = cdm->num_matches;
1896
cdm->num_matches++;
1897
cdm->matches[j].type = DEV_MATCH_PERIPH;
1898
cdm->matches[j].result.periph_result.path_id =
1899
periph->path->bus->path_id;
1900
cdm->matches[j].result.periph_result.target_id =
1901
periph->path->target->target_id;
1902
cdm->matches[j].result.periph_result.target_lun =
1903
periph->path->device->lun_id;
1904
cdm->matches[j].result.periph_result.unit_number =
1905
periph->unit_number;
1906
l = sizeof(cdm->matches[j].result.periph_result.periph_name);
1907
strlcpy(cdm->matches[j].result.periph_result.periph_name,
1908
periph->periph_name, l);
1909
}
1910
1911
return(1);
1912
}
1913
1914
static int
1915
xptedtmatch(struct ccb_dev_match *cdm)
1916
{
1917
struct cam_eb *bus;
1918
int ret;
1919
1920
cdm->num_matches = 0;
1921
1922
/*
1923
* Check the bus list generation. If it has changed, the user
1924
* needs to reset everything and start over.
1925
*/
1926
xpt_lock_buses();
1927
if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1928
&& (cdm->pos.cookie.bus != NULL)) {
1929
if (cdm->pos.generations[CAM_BUS_GENERATION] !=
1930
xsoftc.bus_generation) {
1931
xpt_unlock_buses();
1932
cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1933
return(0);
1934
}
1935
bus = (struct cam_eb *)cdm->pos.cookie.bus;
1936
bus->refcount++;
1937
} else
1938
bus = NULL;
1939
xpt_unlock_buses();
1940
1941
ret = xptbustraverse(bus, xptedtbusfunc, cdm);
1942
1943
/*
1944
* If we get back 0, that means that we had to stop before fully
1945
* traversing the EDT. It also means that one of the subroutines
1946
* has set the status field to the proper value. If we get back 1,
1947
* we've fully traversed the EDT and copied out any matching entries.
1948
*/
1949
if (ret == 1)
1950
cdm->status = CAM_DEV_MATCH_LAST;
1951
1952
return(ret);
1953
}
1954
1955
static int
1956
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
1957
{
1958
struct cam_periph *periph;
1959
struct ccb_dev_match *cdm;
1960
1961
cdm = (struct ccb_dev_match *)arg;
1962
1963
xpt_lock_buses();
1964
if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
1965
&& (cdm->pos.cookie.pdrv == pdrv)
1966
&& (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
1967
&& (cdm->pos.cookie.periph != NULL)) {
1968
if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
1969
(*pdrv)->generation) {
1970
xpt_unlock_buses();
1971
cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1972
return(0);
1973
}
1974
periph = (struct cam_periph *)cdm->pos.cookie.periph;
1975
periph->refcount++;
1976
} else
1977
periph = NULL;
1978
xpt_unlock_buses();
1979
1980
return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg));
1981
}
1982
1983
static int
1984
xptplistperiphfunc(struct cam_periph *periph, void *arg)
1985
{
1986
struct ccb_dev_match *cdm;
1987
dev_match_ret retval;
1988
1989
cdm = (struct ccb_dev_match *)arg;
1990
1991
retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
1992
1993
if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1994
cdm->status = CAM_DEV_MATCH_ERROR;
1995
return(0);
1996
}
1997
1998
/*
1999
* If the copy flag is set, copy this peripheral out.
2000
*/
2001
if (retval & DM_RET_COPY) {
2002
int spaceleft, j;
2003
size_t l;
2004
2005
spaceleft = cdm->match_buf_len - (cdm->num_matches *
2006
sizeof(struct dev_match_result));
2007
2008
/*
2009
* If we don't have enough space to put in another
2010
* match result, save our position and tell the
2011
* user there are more devices to check.
2012
*/
2013
if (spaceleft < sizeof(struct dev_match_result)) {
2014
struct periph_driver **pdrv;
2015
2016
pdrv = NULL;
2017
bzero(&cdm->pos, sizeof(cdm->pos));
2018
cdm->pos.position_type =
2019
CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2020
CAM_DEV_POS_PERIPH;
2021
2022
/*
2023
* This may look a bit nonsensical, but it is
2024
* actually quite logical. There are very few
2025
* peripheral drivers, and bloating every peripheral
2026
* structure with a pointer back to its parent
2027
* peripheral driver linker set entry would cost
2028
* more in the long run than doing this quick lookup.
2029
*/
2030
for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2031
if (strcmp((*pdrv)->driver_name,
2032
periph->periph_name) == 0)
2033
break;
2034
}
2035
2036
if (*pdrv == NULL) {
2037
cdm->status = CAM_DEV_MATCH_ERROR;
2038
return(0);
2039
}
2040
2041
cdm->pos.cookie.pdrv = pdrv;
2042
/*
2043
* The periph generation slot does double duty, as
2044
* does the periph pointer slot. They are used for
2045
* both edt and pdrv lookups and positioning.
2046
*/
2047
cdm->pos.cookie.periph = periph;
2048
cdm->pos.generations[CAM_PERIPH_GENERATION] =
2049
(*pdrv)->generation;
2050
cdm->status = CAM_DEV_MATCH_MORE;
2051
return(0);
2052
}
2053
2054
j = cdm->num_matches;
2055
cdm->num_matches++;
2056
cdm->matches[j].type = DEV_MATCH_PERIPH;
2057
cdm->matches[j].result.periph_result.path_id =
2058
periph->path->bus->path_id;
2059
2060
/*
2061
* The transport layer peripheral doesn't have a target or
2062
* lun.
2063
*/
2064
if (periph->path->target)
2065
cdm->matches[j].result.periph_result.target_id =
2066
periph->path->target->target_id;
2067
else
2068
cdm->matches[j].result.periph_result.target_id =
2069
CAM_TARGET_WILDCARD;
2070
2071
if (periph->path->device)
2072
cdm->matches[j].result.periph_result.target_lun =
2073
periph->path->device->lun_id;
2074
else
2075
cdm->matches[j].result.periph_result.target_lun =
2076
CAM_LUN_WILDCARD;
2077
2078
cdm->matches[j].result.periph_result.unit_number =
2079
periph->unit_number;
2080
l = sizeof(cdm->matches[j].result.periph_result.periph_name);
2081
strlcpy(cdm->matches[j].result.periph_result.periph_name,
2082
periph->periph_name, l);
2083
}
2084
2085
return(1);
2086
}
2087
2088
static int
2089
xptperiphlistmatch(struct ccb_dev_match *cdm)
2090
{
2091
int ret;
2092
2093
cdm->num_matches = 0;
2094
2095
/*
2096
* At this point in the edt traversal function, we check the bus
2097
* list generation to make sure that no buses have been added or
2098
* removed since the user last sent a XPT_DEV_MATCH ccb through.
2099
* For the peripheral driver list traversal function, however, we
2100
* don't have to worry about new peripheral driver types coming or
2101
* going; they're in a linker set, and therefore can't change
2102
* without a recompile.
2103
*/
2104
2105
if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2106
&& (cdm->pos.cookie.pdrv != NULL))
2107
ret = xptpdrvtraverse(
2108
(struct periph_driver **)cdm->pos.cookie.pdrv,
2109
xptplistpdrvfunc, cdm);
2110
else
2111
ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2112
2113
/*
2114
* If we get back 0, that means that we had to stop before fully
2115
* traversing the peripheral driver tree. It also means that one of
2116
* the subroutines has set the status field to the proper value. If
2117
* we get back 1, we've fully traversed the EDT and copied out any
2118
* matching entries.
2119
*/
2120
if (ret == 1)
2121
cdm->status = CAM_DEV_MATCH_LAST;
2122
2123
return(ret);
2124
}
2125
2126
static int
2127
xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2128
{
2129
struct cam_eb *bus, *next_bus;
2130
int retval;
2131
2132
retval = 1;
2133
if (start_bus)
2134
bus = start_bus;
2135
else {
2136
xpt_lock_buses();
2137
bus = TAILQ_FIRST(&xsoftc.xpt_busses);
2138
if (bus == NULL) {
2139
xpt_unlock_buses();
2140
return (retval);
2141
}
2142
bus->refcount++;
2143
xpt_unlock_buses();
2144
}
2145
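/*
 * Walk the bus list. A reference is taken on the next bus before the
 * current one is released so the list linkage cannot be freed out from
 * under us.
 */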
for (; bus != NULL; bus = next_bus) {
2146
retval = tr_func(bus, arg);
2147
if (retval == 0) {
2148
xpt_release_bus(bus);
2149
break;
2150
}
2151
xpt_lock_buses();
2152
next_bus = TAILQ_NEXT(bus, links);
2153
if (next_bus)
2154
next_bus->refcount++;
2155
xpt_unlock_buses();
2156
xpt_release_bus(bus);
2157
}
2158
return(retval);
2159
}
2160
2161
static int
2162
xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2163
xpt_targetfunc_t *tr_func, void *arg)
2164
{
2165
struct cam_et *target, *next_target;
2166
int retval;
2167
2168
retval = 1;
2169
if (start_target)
2170
target = start_target;
2171
else {
2172
mtx_lock(&bus->eb_mtx);
2173
target = TAILQ_FIRST(&bus->et_entries);
2174
if (target == NULL) {
2175
mtx_unlock(&bus->eb_mtx);
2176
return (retval);
2177
}
2178
target->refcount++;
2179
mtx_unlock(&bus->eb_mtx);
2180
}
2181
for (; target != NULL; target = next_target) {
2182
retval = tr_func(target, arg);
2183
if (retval == 0) {
2184
xpt_release_target(target);
2185
break;
2186
}
2187
mtx_lock(&bus->eb_mtx);
2188
next_target = TAILQ_NEXT(target, links);
2189
if (next_target)
2190
next_target->refcount++;
2191
mtx_unlock(&bus->eb_mtx);
2192
xpt_release_target(target);
2193
}
2194
return(retval);
2195
}
2196
2197
static int
2198
xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2199
xpt_devicefunc_t *tr_func, void *arg)
2200
{
2201
struct cam_eb *bus;
2202
struct cam_ed *device, *next_device;
2203
int retval;
2204
2205
retval = 1;
2206
bus = target->bus;
2207
if (start_device)
2208
device = start_device;
2209
else {
2210
mtx_lock(&bus->eb_mtx);
2211
device = TAILQ_FIRST(&target->ed_entries);
2212
if (device == NULL) {
2213
mtx_unlock(&bus->eb_mtx);
2214
return (retval);
2215
}
2216
device->refcount++;
2217
mtx_unlock(&bus->eb_mtx);
2218
}
2219
for (; device != NULL; device = next_device) {
2220
mtx_lock(&device->device_mtx);
2221
retval = tr_func(device, arg);
2222
mtx_unlock(&device->device_mtx);
2223
if (retval == 0) {
2224
xpt_release_device(device);
2225
break;
2226
}
2227
mtx_lock(&bus->eb_mtx);
2228
next_device = TAILQ_NEXT(device, links);
2229
if (next_device)
2230
next_device->refcount++;
2231
mtx_unlock(&bus->eb_mtx);
2232
xpt_release_device(device);
2233
}
2234
return(retval);
2235
}
2236
2237
static int
2238
xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2239
xpt_periphfunc_t *tr_func, void *arg)
2240
{
2241
struct cam_eb *bus;
2242
struct cam_periph *periph, *next_periph;
2243
int retval;
2244
2245
retval = 1;
2246
2247
bus = device->target->bus;
2248
if (start_periph)
2249
periph = start_periph;
2250
else {
2251
xpt_lock_buses();
2252
mtx_lock(&bus->eb_mtx);
2253
periph = SLIST_FIRST(&device->periphs);
2254
while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2255
periph = SLIST_NEXT(periph, periph_links);
2256
if (periph == NULL) {
2257
mtx_unlock(&bus->eb_mtx);
2258
xpt_unlock_buses();
2259
return (retval);
2260
}
2261
periph->refcount++;
2262
mtx_unlock(&bus->eb_mtx);
2263
xpt_unlock_buses();
2264
}
2265
for (; periph != NULL; periph = next_periph) {
2266
retval = tr_func(periph, arg);
2267
if (retval == 0) {
2268
cam_periph_release_locked(periph);
2269
break;
2270
}
2271
xpt_lock_buses();
2272
mtx_lock(&bus->eb_mtx);
2273
next_periph = SLIST_NEXT(periph, periph_links);
2274
while (next_periph != NULL &&
2275
(next_periph->flags & CAM_PERIPH_FREE) != 0)
2276
next_periph = SLIST_NEXT(next_periph, periph_links);
2277
if (next_periph)
2278
next_periph->refcount++;
2279
mtx_unlock(&bus->eb_mtx);
2280
xpt_unlock_buses();
2281
cam_periph_release_locked(periph);
2282
}
2283
return(retval);
2284
}
2285
2286
static int
2287
xptpdrvtraverse(struct periph_driver **start_pdrv,
2288
xpt_pdrvfunc_t *tr_func, void *arg)
2289
{
2290
struct periph_driver **pdrv;
2291
int retval;
2292
2293
retval = 1;
2294
2295
/*
2296
* We don't traverse the peripheral driver list like we do the
2297
* other lists, because it is a linker set, and therefore cannot be
2298
* changed during runtime. If the peripheral driver list is ever
2299
* re-done to be something other than a linker set (i.e. it can
2300
* change while the system is running), the list traversal should
2301
* be modified to work like the other traversal functions.
2302
*/
2303
for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2304
*pdrv != NULL; pdrv++) {
2305
retval = tr_func(pdrv, arg);
2306
2307
if (retval == 0)
2308
return(retval);
2309
}
2310
2311
return(retval);
2312
}
2313
2314
static int
2315
xptpdperiphtraverse(struct periph_driver **pdrv,
2316
struct cam_periph *start_periph,
2317
xpt_periphfunc_t *tr_func, void *arg)
2318
{
2319
struct cam_periph *periph, *next_periph;
2320
int retval;
2321
2322
retval = 1;
2323
2324
if (start_periph)
2325
periph = start_periph;
2326
else {
2327
xpt_lock_buses();
2328
periph = TAILQ_FIRST(&(*pdrv)->units);
2329
while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0)
2330
periph = TAILQ_NEXT(periph, unit_links);
2331
if (periph == NULL) {
2332
xpt_unlock_buses();
2333
return (retval);
2334
}
2335
periph->refcount++;
2336
xpt_unlock_buses();
2337
}
2338
for (; periph != NULL; periph = next_periph) {
2339
cam_periph_lock(periph);
2340
retval = tr_func(periph, arg);
2341
cam_periph_unlock(periph);
2342
if (retval == 0) {
2343
cam_periph_release(periph);
2344
break;
2345
}
2346
xpt_lock_buses();
2347
next_periph = TAILQ_NEXT(periph, unit_links);
2348
while (next_periph != NULL &&
2349
(next_periph->flags & CAM_PERIPH_FREE) != 0)
2350
next_periph = TAILQ_NEXT(next_periph, unit_links);
2351
if (next_periph)
2352
next_periph->refcount++;
2353
xpt_unlock_buses();
2354
cam_periph_release(periph);
2355
}
2356
return(retval);
2357
}
2358
2359
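/*
 * The xptdef*func() callbacks descend the EDT until the depth requested
 * in the xpt_traverse_config is reached, at which point the caller's
 * function is invoked on the matching node.
 */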
static int
2360
xptdefbusfunc(struct cam_eb *bus, void *arg)
2361
{
2362
struct xpt_traverse_config *tr_config;
2363
2364
tr_config = (struct xpt_traverse_config *)arg;
2365
2366
if (tr_config->depth == XPT_DEPTH_BUS) {
2367
xpt_busfunc_t *tr_func;
2368
2369
tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2370
2371
return(tr_func(bus, tr_config->tr_arg));
2372
} else
2373
return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2374
}
2375
2376
static int
2377
xptdeftargetfunc(struct cam_et *target, void *arg)
2378
{
2379
struct xpt_traverse_config *tr_config;
2380
2381
tr_config = (struct xpt_traverse_config *)arg;
2382
2383
if (tr_config->depth == XPT_DEPTH_TARGET) {
2384
xpt_targetfunc_t *tr_func;
2385
2386
tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2387
2388
return(tr_func(target, tr_config->tr_arg));
2389
} else
2390
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2391
}
2392
2393
static int
2394
xptdefdevicefunc(struct cam_ed *device, void *arg)
2395
{
2396
struct xpt_traverse_config *tr_config;
2397
2398
tr_config = (struct xpt_traverse_config *)arg;
2399
2400
if (tr_config->depth == XPT_DEPTH_DEVICE) {
2401
xpt_devicefunc_t *tr_func;
2402
2403
tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2404
2405
return(tr_func(device, tr_config->tr_arg));
2406
} else
2407
return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2408
}
2409
2410
static int
2411
xptdefperiphfunc(struct cam_periph *periph, void *arg)
2412
{
2413
struct xpt_traverse_config *tr_config;
2414
xpt_periphfunc_t *tr_func;
2415
2416
tr_config = (struct xpt_traverse_config *)arg;
2417
2418
tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2419
2420
/*
2421
* Unlike the other default functions, we don't check for depth
2422
* here. The peripheral driver level is the last level in the EDT,
2423
* so if we're here, we should execute the function in question.
2424
*/
2425
return(tr_func(periph, tr_config->tr_arg));
2426
}
2427
2428
/*
2429
* Execute the given function for every bus in the EDT.
2430
*/
2431
static int
2432
xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2433
{
2434
struct xpt_traverse_config tr_config;
2435
2436
tr_config.depth = XPT_DEPTH_BUS;
2437
tr_config.tr_func = tr_func;
2438
tr_config.tr_arg = arg;
2439
2440
return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2441
}
2442
2443
/*
2444
* Execute the given function for every device in the EDT.
2445
*/
2446
static int
2447
xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2448
{
2449
struct xpt_traverse_config tr_config;
2450
2451
tr_config.depth = XPT_DEPTH_DEVICE;
2452
tr_config.tr_func = tr_func;
2453
tr_config.tr_arg = arg;
2454
2455
return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2456
}
2457
2458
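/*
 * Report an existing, configured device to an async callback via an
 * AC_FOUND_DEVICE event carrying the device's getdev information.
 */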
static int
2459
xptsetasyncfunc(struct cam_ed *device, void *arg)
2460
{
2461
struct cam_path path;
2462
struct ccb_getdev cgd;
2463
struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2464
2465
/*
2466
* Don't report unconfigured devices (Wildcard devs,
2467
* devices only for target mode, device instances
2468
* that have been invalidated but are waiting for
2469
* their last reference count to be released).
2470
*/
2471
if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2472
return (1);
2473
2474
xpt_compile_path(&path,
2475
NULL,
2476
device->target->bus->path_id,
2477
device->target->target_id,
2478
device->lun_id);
2479
xpt_gdev_type(&cgd, &path);
2480
csa->callback(csa->callback_arg,
2481
AC_FOUND_DEVICE,
2482
&path, &cgd);
2483
xpt_release_path(&path);
2484
2485
return(1);
2486
}
2487
2488
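/*
 * Report an existing bus to an async callback via an AC_PATH_REGISTERED
 * event carrying the bus's path inquiry data.
 */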
static int
2489
xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2490
{
2491
struct cam_path path;
2492
struct ccb_pathinq cpi;
2493
struct ccb_setasync *csa = (struct ccb_setasync *)arg;
2494
2495
xpt_compile_path(&path, /*periph*/NULL,
2496
bus->path_id,
2497
CAM_TARGET_WILDCARD,
2498
CAM_LUN_WILDCARD);
2499
xpt_path_lock(&path);
2500
xpt_path_inq(&cpi, &path);
2501
csa->callback(csa->callback_arg,
2502
AC_PATH_REGISTERED,
2503
&path, &cpi);
2504
xpt_path_unlock(&path);
2505
xpt_release_path(&path);
2506
2507
return(1);
2508
}
2509
2510
void
2511
xpt_action(union ccb *start_ccb)
2512
{
2513
2514
CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE,
2515
("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code,
2516
xpt_action_name(start_ccb->ccb_h.func_code)));
2517
2518
/*
2519
* Either it isn't queued, or it has a real priority. There are still too
2520
* many places that reuse CCBs with a real priority for immediate
2521
* queries for us to assert the other side as well.
2522
*/
2523
KASSERT((start_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
2524
start_ccb->ccb_h.pinfo.priority != CAM_PRIORITY_NONE,
2525
("%s: queued ccb and CAM_PRIORITY_NONE illegal.", __func__));
2526
2527
start_ccb->ccb_h.status = CAM_REQ_INPROG;
2528
(*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb);
2529
}
2530
2531
void
2532
xpt_action_default(union ccb *start_ccb)
2533
{
2534
struct cam_path *path;
2535
struct cam_sim *sim;
2536
struct mtx *mtx;
2537
2538
path = start_ccb->ccb_h.path;
2539
CAM_DEBUG(path, CAM_DEBUG_TRACE,
2540
("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code,
2541
xpt_action_name(start_ccb->ccb_h.func_code)));
2542
2543
switch (start_ccb->ccb_h.func_code) {
2544
case XPT_SCSI_IO:
2545
{
2546
struct cam_ed *device;
2547
2548
/*
2549
* For the sake of compatibility with SCSI-1
2550
* devices that may not understand the identify
2551
* message, we include lun information in the
2552
* second byte of all commands. SCSI-1 specifies
2553
* that luns are a 3 bit value and reserves only 3
2554
* bits for lun information in the CDB. Later
2555
* revisions of the SCSI spec allow for more than 8
2556
* luns, but have deprecated lun information in the
2557
* CDB. So, if the lun won't fit, we must omit it.
2558
*
2559
* Also be aware that during initial probing for devices,
2560
* the inquiry information is unknown but initialized to 0.
2561
* This means that this code will be exercised while probing
2562
* devices with an ANSI revision greater than 2.
2563
*/
2564
device = path->device;
2565
if (device->protocol_version <= SCSI_REV_2
2566
&& start_ccb->ccb_h.target_lun < 8
2567
&& (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2568
start_ccb->csio.cdb_io.cdb_bytes[1] |=
2569
start_ccb->ccb_h.target_lun << 5;
2570
}
2571
start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2572
}
2573
/* FALLTHROUGH */
2574
case XPT_TARGET_IO:
2575
case XPT_CONT_TARGET_IO:
2576
start_ccb->csio.sense_resid = 0;
2577
start_ccb->csio.resid = 0;
2578
/* FALLTHROUGH */
2579
case XPT_ATA_IO:
2580
if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2581
start_ccb->ataio.resid = 0;
2582
/* FALLTHROUGH */
2583
case XPT_NVME_IO:
2584
case XPT_NVME_ADMIN:
2585
case XPT_MMC_IO:
2586
case XPT_MMC_GET_TRAN_SETTINGS:
2587
case XPT_MMC_SET_TRAN_SETTINGS:
2588
case XPT_RESET_DEV:
2589
case XPT_ENG_EXEC:
2590
case XPT_SMP_IO:
2591
{
2592
struct cam_devq *devq;
2593
2594
devq = path->bus->sim->devq;
2595
mtx_lock(&devq->send_mtx);
2596
cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2597
if (xpt_schedule_devq(devq, path->device) != 0)
2598
xpt_run_devq(devq);
2599
mtx_unlock(&devq->send_mtx);
2600
break;
2601
}
2602
case XPT_CALC_GEOMETRY:
2603
/* Filter out garbage */
2604
if (start_ccb->ccg.block_size == 0
2605
|| start_ccb->ccg.volume_size == 0) {
2606
start_ccb->ccg.cylinders = 0;
2607
start_ccb->ccg.heads = 0;
2608
start_ccb->ccg.secs_per_track = 0;
2609
start_ccb->ccb_h.status = CAM_REQ_CMP;
2610
break;
2611
}
2612
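/* Otherwise let the SIM calculate the geometry. */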
goto call_sim;
2613
case XPT_ABORT:
2614
{
2615
union ccb* abort_ccb;
2616
2617
abort_ccb = start_ccb->cab.abort_ccb;
2618
if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2619
struct cam_ed *device;
2620
struct cam_devq *devq;
2621
2622
device = abort_ccb->ccb_h.path->device;
2623
devq = device->sim->devq;
2624
2625
mtx_lock(&devq->send_mtx);
2626
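/*
 * The CCB is still sitting on the device queue: remove it, mark
 * it aborted with the device queue frozen, and complete it
 * immediately via xpt_done().
 */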
if (abort_ccb->ccb_h.pinfo.index > 0) {
2627
cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2628
abort_ccb->ccb_h.status =
2629
CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2630
xpt_freeze_devq_device(device, 1);
2631
mtx_unlock(&devq->send_mtx);
2632
xpt_done(abort_ccb);
2633
start_ccb->ccb_h.status = CAM_REQ_CMP;
2634
break;
2635
}
2636
mtx_unlock(&devq->send_mtx);
2637
2638
if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2639
&& (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2640
/*
2641
* We've caught this ccb en route to
2642
* the SIM. Flag it for abort and the
2643
* SIM will do so just before starting
2644
* real work on the CCB.
2645
*/
2646
abort_ccb->ccb_h.status =
2647
CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2648
xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2649
start_ccb->ccb_h.status = CAM_REQ_CMP;
2650
break;
2651
}
2652
}
2653
if (XPT_FC_IS_QUEUED(abort_ccb)
2654
&& (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2655
/*
2656
* It's already completed but waiting
2657
* for our SWI to get to it.
2658
*/
2659
start_ccb->ccb_h.status = CAM_UA_ABORT;
2660
break;
2661
}
2662
/*
2663
* If we weren't able to take care of the abort request
2664
* in the XPT, pass the request down to the SIM for processing.
2665
*/
2666
}
2667
/* FALLTHROUGH */
2668
case XPT_ACCEPT_TARGET_IO:
2669
case XPT_EN_LUN:
2670
case XPT_IMMED_NOTIFY:
2671
case XPT_NOTIFY_ACK:
2672
case XPT_RESET_BUS:
2673
case XPT_IMMEDIATE_NOTIFY:
2674
case XPT_NOTIFY_ACKNOWLEDGE:
2675
case XPT_GET_SIM_KNOB_OLD:
2676
case XPT_GET_SIM_KNOB:
2677
case XPT_SET_SIM_KNOB:
2678
case XPT_GET_TRAN_SETTINGS:
2679
case XPT_SET_TRAN_SETTINGS:
2680
case XPT_PATH_INQ:
2681
call_sim:
2682
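/*
 * If the SIM still uses its own mutex and we do not already hold it,
 * take it around the sim_action() call; mtx is left NULL when no
 * unlock is required afterwards.
 */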
sim = path->bus->sim;
2683
mtx = sim->mtx;
2684
if (mtx && !mtx_owned(mtx))
2685
mtx_lock(mtx);
2686
else
2687
mtx = NULL;
2688
2689
CAM_DEBUG(path, CAM_DEBUG_TRACE,
2690
("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code));
2691
(*(sim->sim_action))(sim, start_ccb);
2692
CAM_DEBUG(path, CAM_DEBUG_TRACE,
2693
("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status));
2694
if (mtx)
2695
mtx_unlock(mtx);
2696
break;
2697
case XPT_PATH_STATS:
2698
start_ccb->cpis.last_reset = path->bus->last_reset;
2699
start_ccb->ccb_h.status = CAM_REQ_CMP;
2700
break;
2701
case XPT_GDEV_TYPE:
2702
{
2703
struct cam_ed *dev;
2704
2705
dev = path->device;
2706
if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2707
start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2708
} else {
2709
struct ccb_getdev *cgd;
2710
2711
cgd = &start_ccb->cgd;
2712
cgd->protocol = dev->protocol;
2713
cgd->inq_data = dev->inq_data;
2714
cgd->ident_data = dev->ident_data;
2715
cgd->inq_flags = dev->inq_flags;
2716
cgd->ccb_h.status = CAM_REQ_CMP;
2717
cgd->serial_num_len = dev->serial_num_len;
2718
if ((dev->serial_num_len > 0)
2719
&& (dev->serial_num != NULL))
2720
bcopy(dev->serial_num, cgd->serial_num,
2721
dev->serial_num_len);
2722
}
2723
break;
2724
}
2725
case XPT_GDEV_STATS:
2726
{
2727
struct ccb_getdevstats *cgds = &start_ccb->cgds;
2728
struct cam_ed *dev = path->device;
2729
struct cam_eb *bus = path->bus;
2730
struct cam_et *tar = path->target;
2731
struct cam_devq *devq = bus->sim->devq;
2732
2733
mtx_lock(&devq->send_mtx);
2734
cgds->dev_openings = dev->ccbq.dev_openings;
2735
cgds->dev_active = dev->ccbq.dev_active;
2736
cgds->allocated = dev->ccbq.allocated;
2737
cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq);
2738
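/* held: CCBs allocated to the device but neither active nor queued. */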
cgds->held = cgds->allocated - cgds->dev_active - cgds->queued;
2739
cgds->last_reset = tar->last_reset;
2740
cgds->maxtags = dev->maxtags;
2741
cgds->mintags = dev->mintags;
2742
if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2743
cgds->last_reset = bus->last_reset;
2744
mtx_unlock(&devq->send_mtx);
2745
cgds->ccb_h.status = CAM_REQ_CMP;
2746
break;
2747
}
2748
case XPT_GDEVLIST:
2749
{
2750
struct cam_periph *nperiph;
2751
struct periph_list *periph_head;
2752
struct ccb_getdevlist *cgdl;
2753
u_int i;
2754
struct cam_ed *device;
2755
bool found;
2756
2757
found = false;
2758
2759
/*
2760
* Don't want anyone mucking with our data.
2761
*/
2762
device = path->device;
2763
periph_head = &device->periphs;
2764
cgdl = &start_ccb->cgdl;
2765
2766
/*
2767
* Check and see if the list has changed since the user
2768
* last requested a list member. If so, tell them that the
2769
* list has changed, and therefore they need to start over
2770
* from the beginning.
2771
*/
2772
if ((cgdl->index != 0) &&
2773
(cgdl->generation != device->generation)) {
2774
cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2775
break;
2776
}
2777
2778
/*
2779
* Traverse the list of peripherals and attempt to find
2780
* the requested peripheral.
2781
*/
2782
for (nperiph = SLIST_FIRST(periph_head), i = 0;
2783
(nperiph != NULL) && (i <= cgdl->index);
2784
nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
2785
if (i == cgdl->index) {
2786
strlcpy(cgdl->periph_name,
2787
nperiph->periph_name,
2788
sizeof(cgdl->periph_name));
2789
cgdl->unit_number = nperiph->unit_number;
2790
found = true;
2791
}
2792
}
2793
if (!found) {
2794
cgdl->status = CAM_GDEVLIST_ERROR;
2795
break;
2796
}
2797
2798
if (nperiph == NULL)
2799
cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
2800
else
2801
cgdl->status = CAM_GDEVLIST_MORE_DEVS;
2802
2803
cgdl->index++;
2804
cgdl->generation = device->generation;
2805
2806
cgdl->ccb_h.status = CAM_REQ_CMP;
2807
break;
2808
}
2809
case XPT_DEV_MATCH:
2810
{
2811
dev_pos_type position_type;
2812
struct ccb_dev_match *cdm;
2813
2814
cdm = &start_ccb->cdm;
2815
2816
/*
2817
* There are two ways of getting at information in the EDT.
2818
* The first way is via the primary EDT tree. It starts
2819
* with a list of buses, then a list of targets on a bus,
2820
* then devices/luns on a target, and then peripherals on a
2821
* device/lun. The "other" way is by the peripheral driver
2822
* lists. The peripheral driver lists are organized by
2823
* peripheral driver (obviously), so it makes sense to
2824
* use the peripheral driver list if the user is looking
2825
* for something like "da1", or all "da" devices. If the
2826
* user is looking for something on a particular bus/target
2827
* or lun, it's generally better to go through the EDT tree.
2828
*/
2829
2830
if (cdm->pos.position_type != CAM_DEV_POS_NONE)
2831
position_type = cdm->pos.position_type;
2832
else {
2833
u_int i;
2834
2835
position_type = CAM_DEV_POS_NONE;
2836
2837
for (i = 0; i < cdm->num_patterns; i++) {
2838
if ((cdm->patterns[i].type == DEV_MATCH_BUS)
2839
||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
2840
position_type = CAM_DEV_POS_EDT;
2841
break;
2842
}
2843
}
2844
2845
if (cdm->num_patterns == 0)
2846
position_type = CAM_DEV_POS_EDT;
2847
else if (position_type == CAM_DEV_POS_NONE)
2848
position_type = CAM_DEV_POS_PDRV;
2849
}
2850
2851
switch(position_type & CAM_DEV_POS_TYPEMASK) {
2852
case CAM_DEV_POS_EDT:
2853
xptedtmatch(cdm);
2854
break;
2855
case CAM_DEV_POS_PDRV:
2856
xptperiphlistmatch(cdm);
2857
break;
2858
default:
2859
cdm->status = CAM_DEV_MATCH_ERROR;
2860
break;
2861
}
2862
2863
if (cdm->status == CAM_DEV_MATCH_ERROR)
2864
start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2865
else
2866
start_ccb->ccb_h.status = CAM_REQ_CMP;
2867
2868
break;
2869
}
2870
case XPT_SASYNC_CB:
2871
{
2872
struct ccb_setasync *csa;
2873
struct async_node *cur_entry;
2874
struct async_list *async_head;
2875
uint32_t added;
2876
2877
csa = &start_ccb->csa;
2878
added = csa->event_enable;
2879
async_head = &path->device->asyncs;
2880
2881
/*
2882
* If there is already an entry for us, simply
2883
* update it.
2884
*/
2885
cur_entry = SLIST_FIRST(async_head);
2886
while (cur_entry != NULL) {
2887
if ((cur_entry->callback_arg == csa->callback_arg)
2888
&& (cur_entry->callback == csa->callback))
2889
break;
2890
cur_entry = SLIST_NEXT(cur_entry, links);
2891
}
2892
2893
if (cur_entry != NULL) {
2894
/*
2895
* If the request has no flags set,
2896
* remove the entry.
2897
*/
2898
added &= ~cur_entry->event_enable;
2899
if (csa->event_enable == 0) {
2900
SLIST_REMOVE(async_head, cur_entry,
2901
async_node, links);
2902
xpt_release_device(path->device);
2903
free(cur_entry, M_CAMXPT);
2904
} else {
2905
cur_entry->event_enable = csa->event_enable;
2906
}
2907
csa->event_enable = added;
2908
} else {
2909
cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT,
2910
M_NOWAIT);
2911
if (cur_entry == NULL) {
2912
csa->ccb_h.status = CAM_RESRC_UNAVAIL;
2913
break;
2914
}
2915
cur_entry->event_enable = csa->event_enable;
2916
cur_entry->event_lock = (path->bus->sim->mtx &&
2917
mtx_owned(path->bus->sim->mtx)) ? 1 : 0;
2918
cur_entry->callback_arg = csa->callback_arg;
2919
cur_entry->callback = csa->callback;
2920
SLIST_INSERT_HEAD(async_head, cur_entry, links);
2921
xpt_acquire_device(path->device);
2922
}
2923
start_ccb->ccb_h.status = CAM_REQ_CMP;
2924
break;
2925
}
2926
case XPT_REL_SIMQ:
2927
{
2928
struct ccb_relsim *crs;
2929
struct cam_ed *dev;
2930
2931
crs = &start_ccb->crs;
2932
dev = path->device;
2933
if (dev == NULL) {
2934
crs->ccb_h.status = CAM_DEV_NOT_THERE;
2935
break;
2936
}
2937
2938
if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
2939
/* Don't ever go below one opening */
2940
if (crs->openings > 0) {
2941
xpt_dev_ccbq_resize(path, crs->openings);
2942
if (bootverbose) {
2943
xpt_print(path,
2944
"number of openings is now %d\n",
2945
crs->openings);
2946
}
2947
}
2948
}
2949
2950
mtx_lock(&dev->sim->devq->send_mtx);
2951
if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
2952
if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
2953
/*
2954
* Just extend the old timeout and decrement
2955
* the freeze count so that a single timeout
2956
* is sufficient for releasing the queue.
2957
*/
2958
start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2959
callout_stop(&dev->callout);
2960
} else {
2961
start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2962
}
2963
2964
callout_reset_sbt(&dev->callout,
2965
SBT_1MS * crs->release_timeout, SBT_1MS,
2966
xpt_release_devq_timeout, dev, 0);
2967
2968
dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
2969
}
2970
2971
if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
2972
if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
2973
/*
2974
* Decrement the freeze count so that a single
2975
* completion is still sufficient to unfreeze
2976
* the queue.
2977
*/
2978
start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2979
} else {
2980
dev->flags |= CAM_DEV_REL_ON_COMPLETE;
2981
start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2982
}
2983
}
2984
2985
if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
2986
if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
2987
|| (dev->ccbq.dev_active == 0)) {
2988
start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
2989
} else {
2990
dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
2991
start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
2992
}
2993
}
2994
mtx_unlock(&dev->sim->devq->send_mtx);
2995
2996
if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0)
2997
xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE);
2998
start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
2999
start_ccb->ccb_h.status = CAM_REQ_CMP;
3000
break;
3001
}
3002
case XPT_DEBUG: {
3003
struct cam_path *oldpath;
3004
3005
/* Check that all request bits are supported. */
3006
if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) {
3007
start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3008
break;
3009
}
3010
3011
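/* Tear down any existing debug path before applying the new settings. */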
cam_dflags = CAM_DEBUG_NONE;
3012
if (cam_dpath != NULL) {
3013
oldpath = cam_dpath;
3014
cam_dpath = NULL;
3015
xpt_free_path(oldpath);
3016
}
3017
if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) {
3018
if (xpt_create_path(&cam_dpath, NULL,
3019
start_ccb->ccb_h.path_id,
3020
start_ccb->ccb_h.target_id,
3021
start_ccb->ccb_h.target_lun) !=
3022
CAM_REQ_CMP) {
3023
start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3024
} else {
3025
cam_dflags = start_ccb->cdbg.flags;
3026
start_ccb->ccb_h.status = CAM_REQ_CMP;
3027
xpt_print(cam_dpath, "debugging flags now %x\n",
3028
cam_dflags);
3029
}
3030
} else
3031
start_ccb->ccb_h.status = CAM_REQ_CMP;
3032
break;
3033
}
3034
case XPT_NOOP:
3035
if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3036
xpt_freeze_devq(path, 1);
3037
start_ccb->ccb_h.status = CAM_REQ_CMP;
3038
break;
3039
case XPT_REPROBE_LUN:
3040
xpt_async(AC_INQ_CHANGED, path, NULL);
3041
start_ccb->ccb_h.status = CAM_REQ_CMP;
3042
xpt_done(start_ccb);
3043
break;
3044
case XPT_ASYNC:
3045
/*
3046
* Queue the async operation so it can be run from a sleepable
3047
* context.
3048
*/
3049
start_ccb->ccb_h.status = CAM_REQ_CMP;
3050
mtx_lock(&cam_async.cam_doneq_mtx);
3051
STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe);
3052
start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX;
3053
mtx_unlock(&cam_async.cam_doneq_mtx);
3054
wakeup(&cam_async.cam_doneq);
3055
break;
3056
default:
3057
case XPT_SDEV_TYPE:
3058
case XPT_TERM_IO:
3059
case XPT_ENG_INQ:
3060
/* XXX Implement */
3061
xpt_print(start_ccb->ccb_h.path,
3062
"%s: CCB type %#x %s not supported\n", __func__,
3063
start_ccb->ccb_h.func_code,
3064
xpt_action_name(start_ccb->ccb_h.func_code));
3065
start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3066
if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) {
3067
xpt_done(start_ccb);
3068
}
3069
break;
3070
}
3071
CAM_DEBUG(path, CAM_DEBUG_TRACE,
3072
("xpt_action_default: func= %#x %s status %#x\n",
3073
start_ccb->ccb_h.func_code,
3074
xpt_action_name(start_ccb->ccb_h.func_code),
3075
start_ccb->ccb_h.status));
3076
}
3077
3078
/*
3079
* Call the sim poll routine to allow the sim to complete
3080
* any in-flight requests, then call camisr_runqueue() to
3081
* finish processing any CCBs that the polling completed.
3082
*/
3083
void
3084
xpt_sim_poll(struct cam_sim *sim)
3085
{
3086
struct mtx *mtx;
3087
3088
KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3089
mtx = sim->mtx;
3090
if (mtx)
3091
mtx_lock(mtx);
3092
(*(sim->sim_poll))(sim);
3093
if (mtx)
3094
mtx_unlock(mtx);
3095
camisr_runqueue();
3096
}
3097
3098
uint32_t
3099
xpt_poll_setup(union ccb *start_ccb)
3100
{
3101
uint32_t timeout;
3102
struct cam_sim *sim;
3103
struct cam_devq *devq;
3104
struct cam_ed *dev;
3105
3106
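/*
 * The CCB timeout is given in milliseconds while the polling loops
 * below delay in 100us steps, hence the factor of 10.
 */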
timeout = start_ccb->ccb_h.timeout * 10;
3107
sim = start_ccb->ccb_h.path->bus->sim;
3108
devq = sim->devq;
3109
dev = start_ccb->ccb_h.path->device;
3110
3111
KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__));
3112
3113
/*
3114
* Steal an opening so that no other queued requests
3115
* can get it before us while we simulate interrupts.
3116
*/
3117
mtx_lock(&devq->send_mtx);
3118
dev->ccbq.dev_openings--;
3119
while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) &&
3120
(--timeout > 0)) {
3121
mtx_unlock(&devq->send_mtx);
3122
DELAY(100);
3123
xpt_sim_poll(sim);
3124
mtx_lock(&devq->send_mtx);
3125
}
3126
dev->ccbq.dev_openings++;
3127
mtx_unlock(&devq->send_mtx);
3128
3129
return (timeout);
3130
}
3131
3132
void
3133
xpt_pollwait(union ccb *start_ccb, uint32_t timeout)
3134
{
3135
3136
KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim),
3137
("%s: non-pollable sim", __func__));
3138
while (--timeout > 0) {
3139
xpt_sim_poll(start_ccb->ccb_h.path->bus->sim);
3140
if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3141
!= CAM_REQ_INPROG)
3142
break;
3143
DELAY(100);
3144
}
3145
3146
if (timeout == 0) {
3147
/*
3148
* XXX Is it worth adding a sim_timeout entry
3149
* point so we can attempt recovery? If
3150
* this is only used for dumps, I don't think
3151
* it is.
3152
*/
3153
start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3154
}
3155
}
3156
3157
/*
3158
* Schedule a peripheral driver to receive a ccb when its
3159
* target device has space for more transactions.
3160
*/
3161
void
3162
xpt_schedule(struct cam_periph *periph, uint32_t new_priority)
3163
{
3164
3165
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3166
cam_periph_assert(periph, MA_OWNED);
3167
if (new_priority < periph->scheduled_priority) {
3168
periph->scheduled_priority = new_priority;
3169
xpt_run_allocq(periph, 0);
3170
}
3171
}
3172
3173
/*
3174
* Schedule a device to run on a given queue.
3175
* If the device was inserted as a new entry on the queue,
3176
* return 1 meaning the device queue should be run. If we
3177
* were already queued, implying someone else has already
3178
* started the queue, return 0 so the caller doesn't attempt
3179
* to run the queue.
3180
*/
3181
static int
3182
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3183
uint32_t new_priority)
3184
{
3185
int retval;
3186
uint32_t old_priority;
3187
3188
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3189
3190
old_priority = pinfo->priority;
3191
3192
/*
3193
* Are we already queued?
3194
*/
3195
if (pinfo->index != CAM_UNQUEUED_INDEX) {
3196
/* Simply reorder based on new priority */
3197
if (new_priority < old_priority) {
3198
camq_change_priority(queue, pinfo->index,
3199
new_priority);
3200
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3201
("changed priority to %d\n",
3202
new_priority));
3203
retval = 1;
3204
} else
3205
retval = 0;
3206
} else {
3207
/* New entry on the queue */
3208
if (new_priority < old_priority)
3209
pinfo->priority = new_priority;
3210
3211
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3212
("Inserting onto queue\n"));
3213
pinfo->generation = ++queue->generation;
3214
camq_insert(queue, pinfo);
3215
retval = 1;
3216
}
3217
return (retval);
3218
}
3219
3220
static void
3221
xpt_run_allocq_task(void *context, int pending)
3222
{
3223
struct cam_periph *periph = context;
3224
3225
cam_periph_lock(periph);
3226
periph->flags &= ~CAM_PERIPH_RUN_TASK;
3227
xpt_run_allocq(periph, 1);
3228
cam_periph_unlock(periph);
3229
cam_periph_release(periph);
3230
}
3231
3232
static void
3233
xpt_run_allocq(struct cam_periph *periph, int sleep)
3234
{
3235
struct cam_ed *device;
3236
union ccb *ccb;
3237
uint32_t prio;
3238
3239
cam_periph_assert(periph, MA_OWNED);
3240
if (periph->periph_allocating)
3241
return;
3242
cam_periph_doacquire(periph);
3243
periph->periph_allocating = 1;
3244
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph));
3245
device = periph->path->device;
3246
ccb = NULL;
3247
restart:
3248
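/*
 * Keep handing out CCBs while the periph has work scheduled and
 * either it still has openings left or the request is out-of-band
 * (prio <= CAM_PRIORITY_OOB).
 */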
while ((prio = min(periph->scheduled_priority,
3249
periph->immediate_priority)) != CAM_PRIORITY_NONE &&
3250
(periph->periph_allocated - (ccb != NULL ? 1 : 0) <
3251
device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) {
3252
if (ccb == NULL &&
3253
(ccb = xpt_get_ccb_nowait(periph)) == NULL) {
3254
if (sleep) {
3255
ccb = xpt_get_ccb(periph);
3256
goto restart;
3257
}
3258
if (periph->flags & CAM_PERIPH_RUN_TASK)
3259
break;
3260
cam_periph_doacquire(periph);
3261
periph->flags |= CAM_PERIPH_RUN_TASK;
3262
taskqueue_enqueue(xsoftc.xpt_taskq,
3263
&periph->periph_run_task);
3264
break;
3265
}
3266
xpt_setup_ccb(&ccb->ccb_h, periph->path, prio);
3267
if (prio == periph->immediate_priority) {
3268
periph->immediate_priority = CAM_PRIORITY_NONE;
3269
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3270
("waking cam_periph_getccb()\n"));
3271
SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h,
3272
periph_links.sle);
3273
wakeup(&periph->ccb_list);
3274
} else {
3275
periph->scheduled_priority = CAM_PRIORITY_NONE;
3276
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3277
("calling periph_start()\n"));
3278
periph->periph_start(periph, ccb);
3279
}
3280
ccb = NULL;
3281
}
3282
if (ccb != NULL)
3283
xpt_release_ccb(ccb);
3284
periph->periph_allocating = 0;
3285
cam_periph_release_locked(periph);
3286
}
3287
3288
static void
3289
xpt_run_devq(struct cam_devq *devq)
3290
{
3291
struct mtx *mtx;
3292
3293
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n"));
3294
3295
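/*
 * Take a temporary freeze reference; the loop below only runs while
 * qfrozen_cnt <= 1, so concurrent or recursive callers will not also
 * try to drain the queue while we are working on it.
 */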
devq->send_queue.qfrozen_cnt++;
3296
while ((devq->send_queue.entries > 0)
3297
&& (devq->send_openings > 0)
3298
&& (devq->send_queue.qfrozen_cnt <= 1)) {
3299
struct cam_ed *device;
3300
union ccb *work_ccb;
3301
struct cam_sim *sim;
3302
struct xpt_proto *proto;
3303
3304
device = (struct cam_ed *)camq_remove(&devq->send_queue,
3305
CAMQ_HEAD);
3306
CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3307
("running device %p\n", device));
3308
3309
work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3310
if (work_ccb == NULL) {
3311
printf("device on run queue with no ccbs???\n");
3312
continue;
3313
}
3314
3315
if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3316
mtx_lock(&xsoftc.xpt_highpower_lock);
3317
if (xsoftc.num_highpower <= 0) {
3318
/*
3319
* We got a high power command, but we
3320
* don't have any available slots. Freeze
3321
* the device queue until we have a slot
3322
* available.
3323
*/
3324
xpt_freeze_devq_device(device, 1);
3325
STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3326
highpowerq_entry);
3327
3328
mtx_unlock(&xsoftc.xpt_highpower_lock);
3329
continue;
3330
} else {
3331
/*
3332
* Consume a high power slot while
3333
* this ccb runs.
3334
*/
3335
xsoftc.num_highpower--;
3336
}
3337
mtx_unlock(&xsoftc.xpt_highpower_lock);
3338
}
3339
cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3340
cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3341
devq->send_openings--;
3342
devq->send_active++;
3343
xpt_schedule_devq(devq, device);
3344
mtx_unlock(&devq->send_mtx);
3345
3346
if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3347
/*
3348
* The client wants to freeze the queue
3349
* after this CCB is sent.
3350
*/
3351
xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3352
}
3353
3354
/* In Target mode, the peripheral driver knows best... */
3355
if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3356
if ((device->inq_flags & SID_CmdQue) != 0
3357
&& work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3358
work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3359
else
3360
/*
3361
* Clear this in case of a retried CCB that
3362
* failed due to a rejected tag.
3363
*/
3364
work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3365
}
3366
3367
KASSERT(device == work_ccb->ccb_h.path->device,
3368
("device (%p) / path->device (%p) mismatch",
3369
device, work_ccb->ccb_h.path->device));
3370
proto = xpt_proto_find(device->protocol);
3371
if (proto && proto->ops->debug_out)
3372
proto->ops->debug_out(work_ccb);
3373
3374
/*
3375
* Device queues can be shared among multiple SIM instances
3376
* that reside on different buses. Use the SIM from the
3377
* queued device, rather than the one from the calling bus.
3378
*/
3379
sim = device->sim;
3380
mtx = sim->mtx;
3381
if (mtx && !mtx_owned(mtx))
3382
mtx_lock(mtx);
3383
else
3384
mtx = NULL;
3385
work_ccb->ccb_h.qos.periph_data = cam_iosched_now();
3386
(*(sim->sim_action))(sim, work_ccb);
3387
if (mtx)
3388
mtx_unlock(mtx);
3389
mtx_lock(&devq->send_mtx);
3390
}
3391
devq->send_queue.qfrozen_cnt--;
3392
}
3393
3394
/*
3395
* This function merges fields from the src ccb into the dst ccb, while keeping
3396
* important fields in the dst ccb constant.
3397
*/
3398
void
3399
xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb)
3400
{
3401
3402
/*
3403
* Pull fields that are valid for peripheral drivers to set
3404
* into the dst CCB along with the CCB "payload".
3405
*/
3406
dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count;
3407
dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code;
3408
dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout;
3409
dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags;
3410
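/*
 * &(&ccb->ccb_h)[1] is the first byte past the CCB header, so only
 * the payload portion of the union is copied here.
 */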
bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1],
3411
sizeof(union ccb) - sizeof(struct ccb_hdr));
3412
}
3413
3414
void
3415
xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path,
3416
uint32_t priority, uint32_t flags)
3417
{
3418
3419
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3420
ccb_h->pinfo.priority = priority;
3421
ccb_h->path = path;
3422
ccb_h->path_id = path->bus->path_id;
3423
if (path->target)
3424
ccb_h->target_id = path->target->target_id;
3425
else
3426
ccb_h->target_id = CAM_TARGET_WILDCARD;
3427
if (path->device) {
3428
ccb_h->target_lun = path->device->lun_id;
3429
ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3430
} else {
3431
ccb_h->target_lun = CAM_TARGET_WILDCARD;
3432
}
3433
ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3434
ccb_h->flags = flags;
3435
ccb_h->xflags = 0;
3436
}
3437
3438
void
3439
xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority)
3440
{
3441
xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0);
3442
}
3443
3444
/* Path manipulation functions */
3445
cam_status
3446
xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3447
path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3448
{
3449
struct cam_path *path;
3450
cam_status status;
3451
3452
path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3453
3454
if (path == NULL) {
3455
status = CAM_RESRC_UNAVAIL;
3456
return(status);
3457
}
3458
status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3459
if (status != CAM_REQ_CMP) {
3460
free(path, M_CAMPATH);
3461
path = NULL;
3462
}
3463
*new_path_ptr = path;
3464
return (status);
3465
}
3466
3467
cam_status
3468
xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3469
struct cam_periph *periph, path_id_t path_id,
3470
target_id_t target_id, lun_id_t lun_id)
3471
{
3472
3473
return (xpt_create_path(new_path_ptr, periph, path_id, target_id,
3474
lun_id));
3475
}
3476
3477
cam_status
3478
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3479
path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3480
{
3481
struct cam_eb *bus;
3482
struct cam_et *target;
3483
struct cam_ed *device;
3484
cam_status status;
3485
3486
status = CAM_REQ_CMP; /* Completed without error */
3487
target = NULL; /* Wildcarded */
3488
device = NULL; /* Wildcarded */
3489
3490
/*
3491
* We will potentially modify the EDT, so block interrupts
3492
* that may attempt to create cam paths.
3493
*/
3494
bus = xpt_find_bus(path_id);
3495
if (bus == NULL) {
3496
status = CAM_PATH_INVALID;
3497
} else {
3498
xpt_lock_buses();
3499
mtx_lock(&bus->eb_mtx);
3500
target = xpt_find_target(bus, target_id);
3501
if (target == NULL) {
3502
/* Create one */
3503
struct cam_et *new_target;
3504
3505
new_target = xpt_alloc_target(bus, target_id);
3506
if (new_target == NULL) {
3507
status = CAM_RESRC_UNAVAIL;
3508
} else {
3509
target = new_target;
3510
}
3511
}
3512
xpt_unlock_buses();
3513
if (target != NULL) {
3514
device = xpt_find_device(target, lun_id);
3515
if (device == NULL) {
3516
/* Create one */
3517
struct cam_ed *new_device;
3518
3519
new_device =
3520
(*(bus->xport->ops->alloc_device))(bus,
3521
target,
3522
lun_id);
3523
if (new_device == NULL) {
3524
status = CAM_RESRC_UNAVAIL;
3525
} else {
3526
device = new_device;
3527
}
3528
}
3529
}
3530
mtx_unlock(&bus->eb_mtx);
3531
}
3532
3533
/*
3534
* Only touch the user's data if we are successful.
3535
*/
3536
if (status == CAM_REQ_CMP) {
3537
new_path->periph = perph;
3538
new_path->bus = bus;
3539
new_path->target = target;
3540
new_path->device = device;
3541
CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3542
} else {
3543
if (device != NULL)
3544
xpt_release_device(device);
3545
if (target != NULL)
3546
xpt_release_target(target);
3547
if (bus != NULL)
3548
xpt_release_bus(bus);
3549
}
3550
return (status);
3551
}
3552
3553
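/*
 * Duplicate an existing path, taking an additional reference on the bus,
 * target and device it points to. Returns ENOMEM if no memory is
 * available for the copy.
 */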
int
3554
xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path)
3555
{
3556
struct cam_path *new_path;
3557
3558
new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT);
3559
if (new_path == NULL)
3560
return (ENOMEM);
3561
*new_path = *path;
3562
if (path->bus != NULL)
3563
xpt_acquire_bus(path->bus);
3564
if (path->target != NULL)
3565
xpt_acquire_target(path->target);
3566
if (path->device != NULL)
3567
xpt_acquire_device(path->device);
3568
*new_path_ptr = new_path;
3569
return (0);
3570
}
3571
3572
void
3573
xpt_release_path(struct cam_path *path)
3574
{
3575
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3576
if (path->device != NULL) {
3577
xpt_release_device(path->device);
3578
path->device = NULL;
3579
}
3580
if (path->target != NULL) {
3581
xpt_release_target(path->target);
3582
path->target = NULL;
3583
}
3584
if (path->bus != NULL) {
3585
xpt_release_bus(path->bus);
3586
path->bus = NULL;
3587
}
3588
}
3589
3590
void
3591
xpt_free_path(struct cam_path *path)
3592
{
3593
3594
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3595
xpt_release_path(path);
3596
free(path, M_CAMPATH);
3597
}
3598
3599
void
3600
xpt_path_counts(struct cam_path *path, uint32_t *bus_ref,
3601
uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref)
3602
{
3603
3604
xpt_lock_buses();
3605
if (bus_ref) {
3606
if (path->bus)
3607
*bus_ref = path->bus->refcount;
3608
else
3609
*bus_ref = 0;
3610
}
3611
if (periph_ref) {
3612
if (path->periph)
3613
*periph_ref = path->periph->refcount;
3614
else
3615
*periph_ref = 0;
3616
}
3617
xpt_unlock_buses();
3618
if (target_ref) {
3619
if (path->target)
3620
*target_ref = path->target->refcount;
3621
else
3622
*target_ref = 0;
3623
}
3624
if (device_ref) {
3625
if (path->device)
3626
*device_ref = path->device->refcount;
3627
else
3628
*device_ref = 0;
3629
}
3630
}
3631
3632
/*
3633
* Return -1 for failure, 0 for exact match, 1 for match with wildcards
3634
* in path1, 2 for match with wildcards in path2.
3635
*/
3636
int
3637
xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3638
{
3639
int retval = 0;
3640
3641
if (path1->bus != path2->bus) {
3642
if (path1->bus->path_id == CAM_BUS_WILDCARD)
3643
retval = 1;
3644
else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3645
retval = 2;
3646
else
3647
return (-1);
3648
}
3649
if (path1->target != path2->target) {
3650
if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3651
if (retval == 0)
3652
retval = 1;
3653
} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3654
retval = 2;
3655
else
3656
return (-1);
3657
}
3658
if (path1->device != path2->device) {
3659
if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3660
if (retval == 0)
3661
retval = 1;
3662
} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3663
retval = 2;
3664
else
3665
return (-1);
3666
}
3667
return (retval);
3668
}
3669
3670
int
3671
xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev)
3672
{
3673
int retval = 0;
3674
3675
if (path->bus != dev->target->bus) {
3676
if (path->bus->path_id == CAM_BUS_WILDCARD)
3677
retval = 1;
3678
else if (dev->target->bus->path_id == CAM_BUS_WILDCARD)
3679
retval = 2;
3680
else
3681
return (-1);
3682
}
3683
if (path->target != dev->target) {
3684
if (path->target->target_id == CAM_TARGET_WILDCARD) {
3685
if (retval == 0)
3686
retval = 1;
3687
} else if (dev->target->target_id == CAM_TARGET_WILDCARD)
3688
retval = 2;
3689
else
3690
return (-1);
3691
}
3692
if (path->device != dev) {
3693
if (path->device->lun_id == CAM_LUN_WILDCARD) {
3694
if (retval == 0)
3695
retval = 1;
3696
} else if (dev->lun_id == CAM_LUN_WILDCARD)
3697
retval = 2;
3698
else
3699
return (-1);
3700
}
3701
return (retval);
3702
}
3703
3704
void
3705
xpt_print_path(struct cam_path *path)
3706
{
3707
struct sbuf sb;
3708
char buffer[XPT_PRINT_LEN];
3709
3710
sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3711
xpt_path_sbuf(path, &sb);
3712
sbuf_finish(&sb);
3713
printf("%s", sbuf_data(&sb));
3714
sbuf_delete(&sb);
3715
}
3716
3717
static void
3718
xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb)
3719
{
3720
if (device == NULL)
3721
sbuf_cat(sb, "(nopath): ");
3722
else {
3723
sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ",
3724
device->sim->sim_name,
3725
device->sim->unit_number,
3726
device->sim->bus_id,
3727
device->target->target_id,
3728
(uintmax_t)device->lun_id);
3729
}
3730
}
3731
3732
void
3733
xpt_print(struct cam_path *path, const char *fmt, ...)
3734
{
3735
va_list ap;
3736
struct sbuf sb;
3737
char buffer[XPT_PRINT_LEN];
3738
3739
sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN);
3740
3741
xpt_path_sbuf(path, &sb);
3742
va_start(ap, fmt);
3743
sbuf_vprintf(&sb, fmt, ap);
3744
va_end(ap);
3745
3746
sbuf_finish(&sb);
3747
printf("%s", sbuf_data(&sb));
3748
sbuf_delete(&sb);
3749
}
3750
3751
char *
3752
xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3753
{
3754
struct sbuf sb;
3755
3756
sbuf_new(&sb, str, str_len, 0);
3757
xpt_path_sbuf(path, &sb);
3758
sbuf_finish(&sb);
3759
return (str);
3760
}
3761
3762
void
3763
xpt_path_sbuf(struct cam_path *path, struct sbuf *sb)
3764
{
3765
3766
if (path == NULL)
3767
sbuf_cat(sb, "(nopath): ");
3768
else {
3769
if (path->periph != NULL)
3770
sbuf_printf(sb, "(%s%d:", path->periph->periph_name,
3771
path->periph->unit_number);
3772
else
3773
sbuf_cat(sb, "(noperiph:");
3774
3775
if (path->bus != NULL)
3776
sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name,
3777
path->bus->sim->unit_number,
3778
path->bus->sim->bus_id);
3779
else
3780
sbuf_cat(sb, "nobus:");
3781
3782
if (path->target != NULL)
3783
sbuf_printf(sb, "%d:", path->target->target_id);
3784
else
3785
sbuf_cat(sb, "X:");
3786
3787
if (path->device != NULL)
3788
sbuf_printf(sb, "%jx): ",
3789
(uintmax_t)path->device->lun_id);
3790
else
3791
sbuf_cat(sb, "X): ");
3792
}
3793
}
3794
3795
path_id_t
3796
xpt_path_path_id(struct cam_path *path)
3797
{
3798
return(path->bus->path_id);
3799
}
3800
3801
target_id_t
3802
xpt_path_target_id(struct cam_path *path)
3803
{
3804
if (path->target != NULL)
3805
return (path->target->target_id);
3806
else
3807
return (CAM_TARGET_WILDCARD);
3808
}
3809
3810
lun_id_t
3811
xpt_path_lun_id(struct cam_path *path)
3812
{
3813
if (path->device != NULL)
3814
return (path->device->lun_id);
3815
else
3816
return (CAM_LUN_WILDCARD);
3817
}
3818
3819
struct cam_sim *
3820
xpt_path_sim(struct cam_path *path)
3821
{
3822
3823
return (path->bus->sim);
3824
}
3825
3826
struct cam_periph*
3827
xpt_path_periph(struct cam_path *path)
3828
{
3829
3830
return (path->periph);
3831
}
3832
3833
/*
3834
* Release a CAM control block for the caller. Remit the cost of the structure
3835
* to the device referenced by the path. If this device had no 'credits'
3836
* and peripheral drivers have registered async callbacks for this notification,
3837
* call them now.
3838
*/
3839
void
3840
xpt_release_ccb(union ccb *free_ccb)
3841
{
3842
struct cam_ed *device;
3843
struct cam_periph *periph;
3844
3845
CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3846
xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3847
device = free_ccb->ccb_h.path->device;
3848
periph = free_ccb->ccb_h.path->periph;
3849
3850
xpt_free_ccb(free_ccb);
3851
periph->periph_allocated--;
3852
cam_ccbq_release_opening(&device->ccbq);
3853
xpt_run_allocq(periph, 0);
3854
}

/* Functions accessed by SIM drivers */

static struct xpt_xport_ops xport_default_ops = {
	.alloc_device = xpt_alloc_device_default,
	.action = xpt_action_default,
	.async = xpt_dev_async_default,
};
static struct xpt_xport xport_default = {
	.xport = XPORT_UNKNOWN,
	.name = "unknown",
	.ops = &xport_default_ops,
};

CAM_XPT_XPORT(xport_default);

/*
 * A sim structure, listing the SIM entry points and instance
 * identification info, is passed to xpt_bus_register to hook the SIM
 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
 * for this new bus, places it in the list of buses, and assigns
 * it a path_id.  The path_id may be influenced by "hard wiring"
 * information specified by the user.  Once interrupt services are
 * available, the bus will be probed.
 */
3880
int
3881
xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus)
3882
{
3883
struct cam_eb *new_bus;
3884
struct cam_eb *old_bus;
3885
struct ccb_pathinq cpi;
3886
struct cam_path *path;
3887
cam_status status;
3888
3889
sim->bus_id = bus;
3890
new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
3891
M_CAMXPT, M_NOWAIT|M_ZERO);
3892
if (new_bus == NULL) {
3893
/* Couldn't satisfy request */
3894
return (ENOMEM);
3895
}
3896
3897
mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF);
3898
TAILQ_INIT(&new_bus->et_entries);
3899
cam_sim_hold(sim);
3900
new_bus->sim = sim;
3901
timevalclear(&new_bus->last_reset);
3902
new_bus->flags = 0;
3903
new_bus->refcount = 1; /* Held until a bus_deregister event */
3904
new_bus->generation = 0;
3905
new_bus->parent_dev = parent;
3906
3907
xpt_lock_buses();
3908
sim->path_id = new_bus->path_id =
3909
xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3910
old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
3911
while (old_bus != NULL
3912
&& old_bus->path_id < new_bus->path_id)
3913
old_bus = TAILQ_NEXT(old_bus, links);
3914
if (old_bus != NULL)
3915
TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3916
else
3917
TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
3918
xsoftc.bus_generation++;
3919
xpt_unlock_buses();
3920
3921
/*
3922
* Set a default transport so that a PATH_INQ can be issued to
3923
* the SIM. This will then allow for probing and attaching of
3924
* a more appropriate transport.
3925
*/
3926
new_bus->xport = &xport_default;
3927
3928
status = xpt_create_path(&path, /*periph*/NULL, sim->path_id,
3929
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3930
if (status != CAM_REQ_CMP) {
3931
xpt_release_bus(new_bus);
3932
return (ENOMEM);
3933
}
3934
3935
xpt_path_inq(&cpi, path);
3936
3937
/*
3938
* Use the results of PATH_INQ to pick a transport. Note that
3939
* the xpt bus (which uses XPORT_UNSPECIFIED) always uses
3940
* xport_default instead of a transport from
3941
* cam_xpt_port_set.
3942
*/
3943
if (cam_ccb_success((union ccb *)&cpi) &&
3944
cpi.transport != XPORT_UNSPECIFIED) {
3945
struct xpt_xport **xpt;
3946
3947
SET_FOREACH(xpt, cam_xpt_xport_set) {
3948
if ((*xpt)->xport == cpi.transport) {
3949
new_bus->xport = *xpt;
3950
break;
3951
}
3952
}
3953
if (new_bus->xport == &xport_default) {
3954
xpt_print(path,
3955
"No transport found for %d\n", cpi.transport);
3956
xpt_release_bus(new_bus);
3957
xpt_free_path(path);
3958
return (EINVAL);
3959
}
3960
}
3961
3962
/* Notify interested parties */
3963
if (sim->path_id != CAM_XPT_PATH_ID) {
3964
xpt_async(AC_PATH_REGISTERED, path, &cpi);
3965
if ((cpi.hba_misc & PIM_NOSCAN) == 0) {
3966
union ccb *scan_ccb;
3967
3968
/* Initiate bus rescan. */
3969
scan_ccb = xpt_alloc_ccb_nowait();
3970
if (scan_ccb != NULL) {
3971
scan_ccb->ccb_h.path = path;
3972
scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
3973
scan_ccb->crcn.flags = 0;
3974
xpt_rescan(scan_ccb);
3975
} else {
3976
xpt_print(path,
3977
"Can't allocate CCB to scan bus\n");
3978
xpt_free_path(path);
3979
}
3980
} else
3981
xpt_free_path(path);
3982
} else
3983
xpt_free_path(path);
3984
return (CAM_SUCCESS);
3985
}
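
/*
 * Undo an earlier xpt_bus_register().  AC_LOST_DEVICE and
 * AC_PATH_DEREGISTERED async events are broadcast on the bus's wildcard
 * path, and the reference held since registration is dropped so the bus
 * is freed once its last user releases it.
 */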
int
xpt_bus_deregister(path_id_t pathid)
{
	struct cam_path bus_path;
	cam_status status;

	status = xpt_compile_path(&bus_path, NULL, pathid,
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP)
		return (ENOMEM);

	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);

	/* Release the reference count held while registered. */
	xpt_release_bus(bus_path.bus);
	xpt_release_path(&bus_path);

	return (CAM_SUCCESS);
}
4007
4008
static path_id_t
4009
xptnextfreepathid(void)
4010
{
4011
struct cam_eb *bus;
4012
path_id_t pathid;
4013
const char *strval;
4014
4015
mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4016
pathid = 0;
4017
bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4018
retry:
4019
/* Find an unoccupied pathid */
4020
while (bus != NULL && bus->path_id <= pathid) {
4021
if (bus->path_id == pathid)
4022
pathid++;
4023
bus = TAILQ_NEXT(bus, links);
4024
}
4025
4026
/*
4027
* Ensure that this pathid is not reserved for
4028
* a bus that may be registered in the future.
4029
*/
4030
if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4031
++pathid;
4032
/* Start the search over */
4033
goto retry;
4034
}
4035
return (pathid);
4036
}
4037
4038
static path_id_t
4039
xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4040
{
4041
path_id_t pathid;
4042
int i, dunit, val;
4043
char buf[32];
4044
const char *dname;
4045
4046
pathid = CAM_XPT_PATH_ID;
4047
snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4048
if (strcmp(buf, "xpt0") == 0 && sim_bus == 0)
4049
return (pathid);
4050
i = 0;
4051
while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) {
4052
if (strcmp(dname, "scbus")) {
4053
/* Avoid a bit of foot shooting. */
4054
continue;
4055
}
4056
if (dunit < 0) /* unwired?! */
4057
continue;
4058
if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4059
if (sim_bus == val) {
4060
pathid = dunit;
4061
break;
4062
}
4063
} else if (sim_bus == 0) {
4064
/* Unspecified matches bus 0 */
4065
pathid = dunit;
4066
break;
4067
} else {
4068
printf(
4069
"Ambiguous scbus configuration for %s%d bus %d, cannot wire down. The kernel\n"
4070
"config entry for scbus%d should specify a controller bus.\n"
4071
"Scbus will be assigned dynamically.\n",
4072
sim_name, sim_unit, sim_bus, dunit);
4073
break;
4074
}
4075
}
4076
4077
if (pathid == CAM_XPT_PATH_ID)
4078
pathid = xptnextfreepathid();
4079
return (pathid);
4080
}
4081
4082
static const char *
4083
xpt_async_string(uint32_t async_code)
4084
{
4085
4086
switch (async_code) {
4087
case AC_BUS_RESET: return ("AC_BUS_RESET");
4088
case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL");
4089
case AC_SCSI_AEN: return ("AC_SCSI_AEN");
4090
case AC_SENT_BDR: return ("AC_SENT_BDR");
4091
case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED");
4092
case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED");
4093
case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE");
4094
case AC_LOST_DEVICE: return ("AC_LOST_DEVICE");
4095
case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG");
4096
case AC_INQ_CHANGED: return ("AC_INQ_CHANGED");
4097
case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED");
4098
case AC_CONTRACT: return ("AC_CONTRACT");
4099
case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED");
4100
case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION");
4101
}
4102
return ("AC_UNKNOWN");
4103
}
4104
4105
static int
4106
xpt_async_size(uint32_t async_code)
4107
{
4108
4109
switch (async_code) {
4110
case AC_BUS_RESET: return (0);
4111
case AC_UNSOL_RESEL: return (0);
4112
case AC_SCSI_AEN: return (0);
4113
case AC_SENT_BDR: return (0);
4114
case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq));
4115
case AC_PATH_DEREGISTERED: return (0);
4116
case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev));
4117
case AC_LOST_DEVICE: return (0);
4118
case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings));
4119
case AC_INQ_CHANGED: return (0);
4120
case AC_GETDEV_CHANGED: return (0);
4121
case AC_CONTRACT: return (sizeof(struct ac_contract));
4122
case AC_ADVINFO_CHANGED: return (-1);
4123
case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio));
4124
}
4125
return (0);
4126
}
4127
4128
static int
4129
xpt_async_process_dev(struct cam_ed *device, void *arg)
4130
{
4131
union ccb *ccb = arg;
4132
struct cam_path *path = ccb->ccb_h.path;
4133
void *async_arg = ccb->casync.async_arg_ptr;
4134
uint32_t async_code = ccb->casync.async_code;
4135
bool relock;
4136
4137
if (path->device != device
4138
&& path->device->lun_id != CAM_LUN_WILDCARD
4139
&& device->lun_id != CAM_LUN_WILDCARD)
4140
return (1);
4141
4142
/*
4143
* The async callback could free the device.
4144
* If it is a broadcast async, it doesn't hold
4145
* device reference, so take our own reference.
4146
*/
4147
xpt_acquire_device(device);
4148
4149
/*
4150
* If async for specific device is to be delivered to
4151
* the wildcard client, take the specific device lock.
4152
* XXX: We may need a way for client to specify it.
4153
*/
4154
if ((device->lun_id == CAM_LUN_WILDCARD &&
4155
path->device->lun_id != CAM_LUN_WILDCARD) ||
4156
(device->target->target_id == CAM_TARGET_WILDCARD &&
4157
path->target->target_id != CAM_TARGET_WILDCARD) ||
4158
(device->target->bus->path_id == CAM_BUS_WILDCARD &&
4159
path->target->bus->path_id != CAM_BUS_WILDCARD)) {
4160
mtx_unlock(&device->device_mtx);
4161
xpt_path_lock(path);
4162
relock = true;
4163
} else
4164
relock = false;
4165
4166
(*(device->target->bus->xport->ops->async))(async_code,
4167
device->target->bus, device->target, device, async_arg);
4168
xpt_async_bcast(&device->asyncs, async_code, path, async_arg);
4169
4170
if (relock) {
4171
xpt_path_unlock(path);
4172
mtx_lock(&device->device_mtx);
4173
}
4174
xpt_release_device(device);
4175
return (1);
4176
}
4177
4178
static int
4179
xpt_async_process_tgt(struct cam_et *target, void *arg)
4180
{
4181
union ccb *ccb = arg;
4182
struct cam_path *path = ccb->ccb_h.path;
4183
4184
if (path->target != target
4185
&& path->target->target_id != CAM_TARGET_WILDCARD
4186
&& target->target_id != CAM_TARGET_WILDCARD)
4187
return (1);
4188
4189
if (ccb->casync.async_code == AC_SENT_BDR) {
4190
/* Update our notion of when the last reset occurred */
4191
microtime(&target->last_reset);
4192
}
4193
4194
return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb));
4195
}
4196
4197
static void
4198
xpt_async_process(struct cam_periph *periph, union ccb *ccb)
4199
{
4200
struct cam_eb *bus;
4201
struct cam_path *path;
4202
void *async_arg;
4203
uint32_t async_code;
4204
4205
path = ccb->ccb_h.path;
4206
async_code = ccb->casync.async_code;
4207
async_arg = ccb->casync.async_arg_ptr;
4208
CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO,
4209
("xpt_async(%s)\n", xpt_async_string(async_code)));
4210
bus = path->bus;
4211
4212
if (async_code == AC_BUS_RESET) {
4213
/* Update our notion of when the last reset occurred */
4214
microtime(&bus->last_reset);
4215
}
4216
4217
xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb);
4218
4219
/*
4220
* If this wasn't a fully wildcarded async, tell all
4221
* clients that want all async events.
4222
*/
4223
if (bus != xpt_periph->path->bus) {
4224
xpt_path_lock(xpt_periph->path);
4225
xpt_async_process_dev(xpt_periph->path->device, ccb);
4226
xpt_path_unlock(xpt_periph->path);
4227
}
4228
4229
if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4230
xpt_release_devq(path, 1, TRUE);
4231
else
4232
xpt_release_simq(path->bus->sim, TRUE);
4233
if (ccb->casync.async_arg_size > 0)
4234
free(async_arg, M_CAMXPT);
4235
xpt_free_path(path);
4236
xpt_free_ccb(ccb);
4237
}
4238
4239
static void
4240
xpt_async_bcast(struct async_list *async_head,
4241
uint32_t async_code,
4242
struct cam_path *path, void *async_arg)
4243
{
4244
struct async_node *cur_entry;
4245
struct mtx *mtx;
4246
4247
cur_entry = SLIST_FIRST(async_head);
4248
while (cur_entry != NULL) {
4249
struct async_node *next_entry;
4250
/*
4251
* Grab the next list entry before we call the current
4252
* entry's callback. This is because the callback function
4253
* can delete its async callback entry.
4254
*/
4255
next_entry = SLIST_NEXT(cur_entry, links);
4256
if ((cur_entry->event_enable & async_code) != 0) {
4257
mtx = cur_entry->event_lock ?
4258
path->device->sim->mtx : NULL;
4259
if (mtx)
4260
mtx_lock(mtx);
4261
cur_entry->callback(cur_entry->callback_arg,
4262
async_code, path,
4263
async_arg);
4264
if (mtx)
4265
mtx_unlock(mtx);
4266
}
4267
cur_entry = next_entry;
4268
}
4269
}
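
/*
 * Broadcast an asynchronous event to all interested consumers.  Delivery is
 * deferred: a CCB is allocated, the path is cloned, the device (or SIM)
 * queue is frozen, and an XPT_ASYNC request is queued so the callbacks run
 * later from xpt_async_process().
 *
 * Illustrative use (hypothetical caller):
 *	xpt_async(AC_LOST_DEVICE, periph->path, NULL);
 */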
void
xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
{
	union ccb *ccb;
	int size;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		xpt_print(path, "Can't allocate CCB to send %s\n",
		    xpt_async_string(async_code));
		return;
	}

	if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
		xpt_print(path, "Can't allocate path to send %s\n",
		    xpt_async_string(async_code));
		xpt_free_ccb(ccb);
		return;
	}
	ccb->ccb_h.path->periph = NULL;
	ccb->ccb_h.func_code = XPT_ASYNC;
	ccb->ccb_h.cbfcnp = xpt_async_process;
	ccb->ccb_h.flags |= CAM_UNLOCKED;
	ccb->casync.async_code = async_code;
	ccb->casync.async_arg_size = 0;
	size = xpt_async_size(async_code);
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_async: func %#x %s async_code %d %s\n",
	    ccb->ccb_h.func_code,
	    xpt_action_name(ccb->ccb_h.func_code),
	    async_code,
	    xpt_async_string(async_code)));
	if (size > 0 && async_arg != NULL) {
		ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
		if (ccb->casync.async_arg_ptr == NULL) {
			xpt_print(path, "Can't allocate argument to send %s\n",
			    xpt_async_string(async_code));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			return;
		}
		memcpy(ccb->casync.async_arg_ptr, async_arg, size);
		ccb->casync.async_arg_size = size;
	} else if (size < 0) {
		ccb->casync.async_arg_ptr = async_arg;
		ccb->casync.async_arg_size = size;
	}
	if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
		xpt_freeze_devq(path, 1);
	else
		xpt_freeze_simq(path->bus->sim, 1);
	xpt_action(ccb);
}
4324
4325
static void
4326
xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4327
struct cam_et *target, struct cam_ed *device,
4328
void *async_arg)
4329
{
4330
4331
/*
4332
* We only need to handle events for real devices.
4333
*/
4334
if (target->target_id == CAM_TARGET_WILDCARD
4335
|| device->lun_id == CAM_LUN_WILDCARD)
4336
return;
4337
4338
printf("%s called\n", __func__);
4339
}
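
/*
 * Freeze a device queue by bumping its freeze count.  A frozen device is
 * removed from the SIM's send queue so no further CCBs are dispatched to it
 * until matching xpt_release_devq() calls bring the count back to zero.
 * The caller must hold the devq send_mtx.
 */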
static uint32_t
xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
{
	struct cam_devq *devq;
	uint32_t freeze;

	devq = dev->sim->devq;
	mtx_assert(&devq->send_mtx, MA_OWNED);
	CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
	    ("xpt_freeze_devq_device(%d) %u->%u\n", count,
	    dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
	freeze = (dev->ccbq.queue.qfrozen_cnt += count);
	/* Remove frozen device from sendq. */
	if (device_is_queued(dev))
		camq_remove(&devq->send_queue, dev->devq_entry.index);
	return (freeze);
}
4358
4359
uint32_t
4360
xpt_freeze_devq(struct cam_path *path, u_int count)
4361
{
4362
struct cam_ed *dev = path->device;
4363
struct cam_devq *devq;
4364
uint32_t freeze;
4365
4366
devq = dev->sim->devq;
4367
mtx_lock(&devq->send_mtx);
4368
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count));
4369
freeze = xpt_freeze_devq_device(dev, count);
4370
mtx_unlock(&devq->send_mtx);
4371
return (freeze);
4372
}
4373
4374
uint32_t
4375
xpt_freeze_simq(struct cam_sim *sim, u_int count)
4376
{
4377
struct cam_devq *devq;
4378
uint32_t freeze;
4379
4380
devq = sim->devq;
4381
mtx_lock(&devq->send_mtx);
4382
freeze = (devq->send_queue.qfrozen_cnt += count);
4383
mtx_unlock(&devq->send_mtx);
4384
return (freeze);
4385
}
4386
4387
static void
4388
xpt_release_devq_timeout(void *arg)
4389
{
4390
struct cam_ed *dev;
4391
struct cam_devq *devq;
4392
4393
dev = (struct cam_ed *)arg;
4394
CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n"));
4395
devq = dev->sim->devq;
4396
mtx_assert(&devq->send_mtx, MA_OWNED);
4397
if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE))
4398
xpt_run_devq(devq);
4399
}
4400
4401
void
4402
xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4403
{
4404
struct cam_ed *dev;
4405
struct cam_devq *devq;
4406
4407
CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n",
4408
count, run_queue));
4409
dev = path->device;
4410
devq = dev->sim->devq;
4411
mtx_lock(&devq->send_mtx);
4412
if (xpt_release_devq_device(dev, count, run_queue))
4413
xpt_run_devq(dev->sim->devq);
4414
mtx_unlock(&devq->send_mtx);
4415
}
4416
4417
static int
4418
xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4419
{
4420
4421
mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED);
4422
CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4423
("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue,
4424
dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
4425
if (count > dev->ccbq.queue.qfrozen_cnt) {
4426
#ifdef INVARIANTS
4427
printf("xpt_release_devq(): requested %u > present %u\n",
4428
count, dev->ccbq.queue.qfrozen_cnt);
4429
#endif
4430
count = dev->ccbq.queue.qfrozen_cnt;
4431
}
4432
dev->ccbq.queue.qfrozen_cnt -= count;
4433
if (dev->ccbq.queue.qfrozen_cnt == 0) {
4434
/*
4435
* No longer need to wait for a successful
4436
* command completion.
4437
*/
4438
dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4439
/*
4440
* Remove any timeouts that might be scheduled
4441
* to release this queue.
4442
*/
4443
if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4444
callout_stop(&dev->callout);
4445
dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4446
}
4447
/*
4448
* Now that we are unfrozen schedule the
4449
* device so any pending transactions are
4450
* run.
4451
*/
4452
xpt_schedule_devq(dev->sim->devq, dev);
4453
} else
4454
run_queue = 0;
4455
return (run_queue);
4456
}
4457
4458
void
4459
xpt_release_simq(struct cam_sim *sim, int run_queue)
4460
{
4461
struct cam_devq *devq;
4462
4463
devq = sim->devq;
4464
mtx_lock(&devq->send_mtx);
4465
if (devq->send_queue.qfrozen_cnt <= 0) {
4466
#ifdef INVARIANTS
4467
printf("xpt_release_simq: requested 1 > present %u\n",
4468
devq->send_queue.qfrozen_cnt);
4469
#endif
4470
} else
4471
devq->send_queue.qfrozen_cnt--;
4472
if (devq->send_queue.qfrozen_cnt == 0) {
4473
if (run_queue) {
4474
/*
4475
* Now that we are unfrozen run the send queue.
4476
*/
4477
xpt_run_devq(sim->devq);
4478
}
4479
}
4480
mtx_unlock(&devq->send_mtx);
4481
}
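
/*
 * Standard completion entry point for queued CCBs.  Records the time the
 * CCB spent in the SIM, hashes the CCB onto one of the global done queues
 * by bus/target/lun, and wakes the matching completion thread (unless the
 * system is dumping) to finish processing in xpt_done_process().
 */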
void
xpt_done(union ccb *done_ccb)
{
	struct cam_doneq *queue;
	int run, hash;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
	    done_ccb->csio.bio != NULL)
		biotrack(done_ccb->csio.bio, __func__);
#endif

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_done: func= %#x %s status %#x\n",
	    done_ccb->ccb_h.func_code,
	    xpt_action_name(done_ccb->ccb_h.func_code),
	    done_ccb->ccb_h.status));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
		return;

	/* Store the time the ccb was in the sim */
	done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
	done_ccb->ccb_h.status |= CAM_QOS_VALID;
	hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id +
	    done_ccb->ccb_h.target_lun) % cam_num_doneqs;
	queue = &cam_doneqs[hash];
	mtx_lock(&queue->cam_doneq_mtx);
	run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
	STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
	done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
	mtx_unlock(&queue->cam_doneq_mtx);
	if (run && !dumping)
		wakeup(&queue->cam_doneq);
}

void
xpt_done_direct(union ccb *done_ccb)
{

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status));
	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0)
		return;

	/* Store the time the ccb was in the sim */
	done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data);
	done_ccb->ccb_h.status |= CAM_QOS_VALID;
	xpt_done_process(&done_ccb->ccb_h);
}
4532
4533
union ccb *
4534
xpt_alloc_ccb(void)
4535
{
4536
union ccb *new_ccb;
4537
4538
new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
4539
return (new_ccb);
4540
}
4541
4542
union ccb *
4543
xpt_alloc_ccb_nowait(void)
4544
{
4545
union ccb *new_ccb;
4546
4547
new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4548
return (new_ccb);
4549
}
4550
4551
void
4552
xpt_free_ccb(union ccb *free_ccb)
4553
{
4554
struct cam_periph *periph;
4555
4556
if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) {
4557
/*
4558
* Looks like a CCB allocated from a periph UMA zone.
4559
*/
4560
periph = free_ccb->ccb_h.path->periph;
4561
uma_zfree(periph->ccb_zone, free_ccb);
4562
} else {
4563
free(free_ccb, M_CAMCCB);
4564
}
4565
}
4566
4567
/* Private XPT functions */
4568
4569
/*
4570
* Get a CAM control block for the caller. Charge the structure to the device
4571
* referenced by the path. If we don't have sufficient resources to allocate
4572
* more ccbs, we return NULL.
4573
*/
4574
static union ccb *
4575
xpt_get_ccb_nowait(struct cam_periph *periph)
4576
{
4577
union ccb *new_ccb;
4578
int alloc_flags;
4579
4580
if (periph->ccb_zone != NULL) {
4581
alloc_flags = CAM_CCB_FROM_UMA;
4582
new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT);
4583
} else {
4584
alloc_flags = 0;
4585
new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT);
4586
}
4587
if (new_ccb == NULL)
4588
return (NULL);
4589
new_ccb->ccb_h.alloc_flags = alloc_flags;
4590
periph->periph_allocated++;
4591
cam_ccbq_take_opening(&periph->path->device->ccbq);
4592
return (new_ccb);
4593
}
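
/*
 * Sleeping variant of xpt_get_ccb_nowait().  The periph lock is dropped
 * around the potentially sleeping allocation and re-acquired before the
 * new CCB is charged against the device's ccb queue.
 */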
static union ccb *
xpt_get_ccb(struct cam_periph *periph)
{
	union ccb *new_ccb;
	int alloc_flags;

	cam_periph_unlock(periph);
	if (periph->ccb_zone != NULL) {
		alloc_flags = CAM_CCB_FROM_UMA;
		new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK);
	} else {
		alloc_flags = 0;
		new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK);
	}
	new_ccb->ccb_h.alloc_flags = alloc_flags;
	cam_periph_lock(periph);
	periph->periph_allocated++;
	cam_ccbq_take_opening(&periph->path->device->ccbq);
	return (new_ccb);
}
4615
4616
union ccb *
4617
cam_periph_getccb(struct cam_periph *periph, uint32_t priority)
4618
{
4619
struct ccb_hdr *ccb_h;
4620
4621
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n"));
4622
cam_periph_assert(periph, MA_OWNED);
4623
while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL ||
4624
ccb_h->pinfo.priority != priority) {
4625
if (priority < periph->immediate_priority) {
4626
periph->immediate_priority = priority;
4627
xpt_run_allocq(periph, 0);
4628
} else
4629
cam_periph_sleep(periph, &periph->ccb_list, PRIBIO,
4630
"cgticb", 0);
4631
}
4632
SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
4633
return ((union ccb *)ccb_h);
4634
}
4635
4636
static void
4637
xpt_acquire_bus(struct cam_eb *bus)
4638
{
4639
4640
xpt_lock_buses();
4641
bus->refcount++;
4642
xpt_unlock_buses();
4643
}
4644
4645
static void
4646
xpt_release_bus(struct cam_eb *bus)
4647
{
4648
4649
xpt_lock_buses();
4650
KASSERT(bus->refcount >= 1, ("bus->refcount >= 1"));
4651
if (--bus->refcount > 0) {
4652
xpt_unlock_buses();
4653
return;
4654
}
4655
TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4656
xsoftc.bus_generation++;
4657
xpt_unlock_buses();
4658
KASSERT(TAILQ_EMPTY(&bus->et_entries),
4659
("destroying bus, but target list is not empty"));
4660
cam_sim_release(bus->sim);
4661
mtx_destroy(&bus->eb_mtx);
4662
free(bus, M_CAMXPT);
4663
}
4664
4665
static struct cam_et *
4666
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4667
{
4668
struct cam_et *cur_target, *target;
4669
4670
mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED);
4671
mtx_assert(&bus->eb_mtx, MA_OWNED);
4672
target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT,
4673
M_NOWAIT|M_ZERO);
4674
if (target == NULL)
4675
return (NULL);
4676
4677
TAILQ_INIT(&target->ed_entries);
4678
target->bus = bus;
4679
target->target_id = target_id;
4680
target->refcount = 1;
4681
target->generation = 0;
4682
target->luns = NULL;
4683
mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF);
4684
timevalclear(&target->last_reset);
4685
/*
4686
* Hold a reference to our parent bus so it
4687
* will not go away before we do.
4688
*/
4689
bus->refcount++;
4690
4691
/* Insertion sort into our bus's target list */
4692
cur_target = TAILQ_FIRST(&bus->et_entries);
4693
while (cur_target != NULL && cur_target->target_id < target_id)
4694
cur_target = TAILQ_NEXT(cur_target, links);
4695
if (cur_target != NULL) {
4696
TAILQ_INSERT_BEFORE(cur_target, target, links);
4697
} else {
4698
TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4699
}
4700
bus->generation++;
4701
return (target);
4702
}
4703
4704
static void
4705
xpt_acquire_target(struct cam_et *target)
4706
{
4707
struct cam_eb *bus = target->bus;
4708
4709
mtx_lock(&bus->eb_mtx);
4710
target->refcount++;
4711
mtx_unlock(&bus->eb_mtx);
4712
}
4713
4714
static void
4715
xpt_release_target(struct cam_et *target)
4716
{
4717
struct cam_eb *bus = target->bus;
4718
4719
mtx_lock(&bus->eb_mtx);
4720
if (--target->refcount > 0) {
4721
mtx_unlock(&bus->eb_mtx);
4722
return;
4723
}
4724
TAILQ_REMOVE(&bus->et_entries, target, links);
4725
bus->generation++;
4726
mtx_unlock(&bus->eb_mtx);
4727
KASSERT(TAILQ_EMPTY(&target->ed_entries),
4728
("destroying target, but device list is not empty"));
4729
xpt_release_bus(bus);
4730
mtx_destroy(&target->luns_mtx);
4731
if (target->luns)
4732
free(target->luns, M_CAMXPT);
4733
free(target, M_CAMXPT);
4734
}
4735
4736
static struct cam_ed *
4737
xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target,
4738
lun_id_t lun_id)
4739
{
4740
struct cam_ed *device;
4741
4742
device = xpt_alloc_device(bus, target, lun_id);
4743
if (device == NULL)
4744
return (NULL);
4745
4746
device->mintags = 1;
4747
device->maxtags = 1;
4748
return (device);
4749
}
4750
4751
static void
4752
xpt_destroy_device(void *context, int pending)
4753
{
4754
struct cam_ed *device = context;
4755
4756
mtx_lock(&device->device_mtx);
4757
mtx_destroy(&device->device_mtx);
4758
free(device, M_CAMDEV);
4759
}
4760
4761
struct cam_ed *
4762
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4763
{
4764
struct cam_ed *cur_device, *device;
4765
struct cam_devq *devq;
4766
cam_status status;
4767
4768
mtx_assert(&bus->eb_mtx, MA_OWNED);
4769
/* Make space for us in the device queue on our bus */
4770
devq = bus->sim->devq;
4771
mtx_lock(&devq->send_mtx);
4772
status = cam_devq_resize(devq, devq->send_queue.array_size + 1);
4773
mtx_unlock(&devq->send_mtx);
4774
if (status != CAM_REQ_CMP)
4775
return (NULL);
4776
4777
device = (struct cam_ed *)malloc(sizeof(*device),
4778
M_CAMDEV, M_NOWAIT|M_ZERO);
4779
if (device == NULL)
4780
return (NULL);
4781
4782
cam_init_pinfo(&device->devq_entry);
4783
device->target = target;
4784
device->lun_id = lun_id;
4785
device->sim = bus->sim;
4786
if (cam_ccbq_init(&device->ccbq,
4787
bus->sim->max_dev_openings) != 0) {
4788
free(device, M_CAMDEV);
4789
return (NULL);
4790
}
4791
SLIST_INIT(&device->asyncs);
4792
SLIST_INIT(&device->periphs);
4793
device->generation = 0;
4794
device->flags = CAM_DEV_UNCONFIGURED;
4795
device->tag_delay_count = 0;
4796
device->tag_saved_openings = 0;
4797
device->refcount = 1;
4798
mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF);
4799
callout_init_mtx(&device->callout, &devq->send_mtx, 0);
4800
TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device);
4801
/*
4802
* Hold a reference to our parent bus so it
4803
* will not go away before we do.
4804
*/
4805
target->refcount++;
4806
4807
cur_device = TAILQ_FIRST(&target->ed_entries);
4808
while (cur_device != NULL && cur_device->lun_id < lun_id)
4809
cur_device = TAILQ_NEXT(cur_device, links);
4810
if (cur_device != NULL)
4811
TAILQ_INSERT_BEFORE(cur_device, device, links);
4812
else
4813
TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4814
target->generation++;
4815
return (device);
4816
}
4817
4818
void
4819
xpt_acquire_device(struct cam_ed *device)
4820
{
4821
struct cam_eb *bus = device->target->bus;
4822
4823
mtx_lock(&bus->eb_mtx);
4824
device->refcount++;
4825
mtx_unlock(&bus->eb_mtx);
4826
}
4827
4828
void
4829
xpt_release_device(struct cam_ed *device)
4830
{
4831
struct cam_eb *bus = device->target->bus;
4832
struct cam_devq *devq;
4833
4834
mtx_lock(&bus->eb_mtx);
4835
if (--device->refcount > 0) {
4836
mtx_unlock(&bus->eb_mtx);
4837
return;
4838
}
4839
4840
TAILQ_REMOVE(&device->target->ed_entries, device,links);
4841
device->target->generation++;
4842
mtx_unlock(&bus->eb_mtx);
4843
4844
/* Release our slot in the devq */
4845
devq = bus->sim->devq;
4846
mtx_lock(&devq->send_mtx);
4847
cam_devq_resize(devq, devq->send_queue.array_size - 1);
4848
4849
KASSERT(SLIST_EMPTY(&device->periphs),
4850
("destroying device, but periphs list is not empty"));
4851
KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX,
4852
("destroying device while still queued for ccbs"));
4853
4854
/* The send_mtx must be held when accessing the callout */
4855
if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0)
4856
callout_stop(&device->callout);
4857
4858
mtx_unlock(&devq->send_mtx);
4859
4860
xpt_release_target(device->target);
4861
4862
cam_ccbq_fini(&device->ccbq);
4863
/*
4864
* Free allocated memory. free(9) does nothing if the
4865
* supplied pointer is NULL, so it is safe to call without
4866
* checking.
4867
*/
4868
free(device->supported_vpds, M_CAMXPT);
4869
free(device->device_id, M_CAMXPT);
4870
free(device->ext_inq, M_CAMXPT);
4871
free(device->physpath, M_CAMXPT);
4872
free(device->rcap_buf, M_CAMXPT);
4873
free(device->serial_num, M_CAMXPT);
4874
free(device->nvme_data, M_CAMXPT);
4875
free(device->nvme_cdata, M_CAMXPT);
4876
taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task);
4877
}
4878
4879
uint32_t
4880
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4881
{
4882
int result;
4883
struct cam_ed *dev;
4884
4885
dev = path->device;
4886
mtx_lock(&dev->sim->devq->send_mtx);
4887
result = cam_ccbq_resize(&dev->ccbq, newopenings);
4888
mtx_unlock(&dev->sim->devq->send_mtx);
4889
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
4890
|| (dev->inq_flags & SID_CmdQue) != 0)
4891
dev->tag_saved_openings = newopenings;
4892
return (result);
4893
}
4894
4895
static struct cam_eb *
4896
xpt_find_bus(path_id_t path_id)
4897
{
4898
struct cam_eb *bus;
4899
4900
xpt_lock_buses();
4901
for (bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4902
bus != NULL;
4903
bus = TAILQ_NEXT(bus, links)) {
4904
if (bus->path_id == path_id) {
4905
bus->refcount++;
4906
break;
4907
}
4908
}
4909
xpt_unlock_buses();
4910
return (bus);
4911
}
4912
4913
static struct cam_et *
4914
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
4915
{
4916
struct cam_et *target;
4917
4918
mtx_assert(&bus->eb_mtx, MA_OWNED);
4919
for (target = TAILQ_FIRST(&bus->et_entries);
4920
target != NULL;
4921
target = TAILQ_NEXT(target, links)) {
4922
if (target->target_id == target_id) {
4923
target->refcount++;
4924
break;
4925
}
4926
}
4927
return (target);
4928
}
4929
4930
static struct cam_ed *
4931
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4932
{
4933
struct cam_ed *device;
4934
4935
mtx_assert(&target->bus->eb_mtx, MA_OWNED);
4936
for (device = TAILQ_FIRST(&target->ed_entries);
4937
device != NULL;
4938
device = TAILQ_NEXT(device, links)) {
4939
if (device->lun_id == lun_id) {
4940
device->refcount++;
4941
break;
4942
}
4943
}
4944
return (device);
4945
}
4946
4947
void
4948
xpt_start_tags(struct cam_path *path)
4949
{
4950
struct ccb_relsim crs;
4951
struct cam_ed *device;
4952
struct cam_sim *sim;
4953
int newopenings;
4954
4955
device = path->device;
4956
sim = path->bus->sim;
4957
device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4958
xpt_freeze_devq(path, /*count*/1);
4959
device->inq_flags |= SID_CmdQue;
4960
if (device->tag_saved_openings != 0)
4961
newopenings = device->tag_saved_openings;
4962
else
4963
newopenings = min(device->maxtags,
4964
sim->max_tagged_dev_openings);
4965
xpt_dev_ccbq_resize(path, newopenings);
4966
xpt_async(AC_GETDEV_CHANGED, path, NULL);
4967
memset(&crs, 0, sizeof(crs));
4968
xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4969
crs.ccb_h.func_code = XPT_REL_SIMQ;
4970
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4971
crs.openings
4972
= crs.release_timeout
4973
= crs.qfrozen_cnt
4974
= 0;
4975
xpt_action((union ccb *)&crs);
4976
}
4977
4978
void
4979
xpt_stop_tags(struct cam_path *path)
4980
{
4981
struct ccb_relsim crs;
4982
struct cam_ed *device;
4983
struct cam_sim *sim;
4984
4985
device = path->device;
4986
sim = path->bus->sim;
4987
device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
4988
device->tag_delay_count = 0;
4989
xpt_freeze_devq(path, /*count*/1);
4990
device->inq_flags &= ~SID_CmdQue;
4991
xpt_dev_ccbq_resize(path, sim->max_dev_openings);
4992
xpt_async(AC_GETDEV_CHANGED, path, NULL);
4993
memset(&crs, 0, sizeof(crs));
4994
xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
4995
crs.ccb_h.func_code = XPT_REL_SIMQ;
4996
crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4997
crs.openings
4998
= crs.release_timeout
4999
= crs.qfrozen_cnt
5000
= 0;
5001
xpt_action((union ccb *)&crs);
5002
}
5003
5004
/*
5005
* Assume all possible buses are detected by this time, so allow boot
5006
* as soon as they all are scanned.
5007
*/
5008
static void
5009
xpt_boot_delay(void *arg)
5010
{
5011
5012
xpt_release_boot();
5013
}
5014
5015
/*
5016
* Now that all config hooks have completed, start boot_delay timer,
5017
* waiting for possibly still undetected buses (USB) to appear.
5018
*/
5019
static void
5020
xpt_ch_done(void *arg)
5021
{
5022
5023
callout_init(&xsoftc.boot_callout, 1);
5024
callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay,
5025
SBT_1MS, xpt_boot_delay, NULL, 0);
5026
}
5027
SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5028
5029
/*
5030
* Now that interrupts are enabled, go find our devices
5031
*/
5032
static void
5033
xpt_config(void *arg)
5034
{
5035
if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5036
printf("xpt_config: failed to create taskqueue thread.\n");
5037
5038
/* Setup debugging path */
5039
if (cam_dflags != CAM_DEBUG_NONE) {
5040
if (xpt_create_path(&cam_dpath, NULL,
5041
CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5042
CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5043
printf(
5044
"xpt_config: xpt_create_path() failed for debug target %d:%d:%d, debugging disabled\n",
5045
CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5046
cam_dflags = CAM_DEBUG_NONE;
5047
}
5048
} else
5049
cam_dpath = NULL;
5050
5051
periphdriver_init(1);
5052
xpt_hold_boot();
5053
5054
/* Fire up rescan thread. */
5055
if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5056
"cam", "scanner")) {
5057
printf("xpt_config: failed to create rescan thread.\n");
5058
}
5059
}
5060
5061
void
5062
xpt_hold_boot_locked(void)
5063
{
5064
5065
if (xsoftc.buses_to_config++ == 0)
5066
root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5067
}
5068
5069
void
5070
xpt_hold_boot(void)
5071
{
5072
5073
xpt_lock_buses();
5074
xpt_hold_boot_locked();
5075
xpt_unlock_buses();
5076
}
5077
5078
void
5079
xpt_release_boot(void)
5080
{
5081
5082
xpt_lock_buses();
5083
if (--xsoftc.buses_to_config == 0) {
5084
if (xsoftc.buses_config_done == 0) {
5085
xsoftc.buses_config_done = 1;
5086
xsoftc.buses_to_config++;
5087
TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5088
NULL);
5089
taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5090
} else
5091
root_mount_rel(&xsoftc.xpt_rootmount);
5092
}
5093
xpt_unlock_buses();
5094
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++);

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}
5117
5118
static void
5119
xpt_finishconfig_task(void *context, int pending)
5120
{
5121
5122
periphdriver_init(2);
5123
/*
5124
* Check for devices with no "standard" peripheral driver
5125
* attached. For any devices like that, announce the
5126
* passthrough driver so the user will see something.
5127
*/
5128
if (!bootverbose)
5129
xpt_for_all_devices(xptpassannouncefunc, NULL);
5130
5131
xpt_release_boot();
5132
}
5133
5134
cam_status
5135
xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
5136
struct cam_path *path)
5137
{
5138
struct ccb_setasync csa;
5139
cam_status status;
5140
bool xptpath = false;
5141
5142
if (path == NULL) {
5143
status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
5144
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5145
if (status != CAM_REQ_CMP)
5146
return (status);
5147
xpt_path_lock(path);
5148
xptpath = true;
5149
}
5150
5151
memset(&csa, 0, sizeof(csa));
5152
xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
5153
csa.ccb_h.func_code = XPT_SASYNC_CB;
5154
csa.event_enable = event;
5155
csa.callback = cbfunc;
5156
csa.callback_arg = cbarg;
5157
xpt_action((union ccb *)&csa);
5158
status = csa.ccb_h.status;
5159
5160
CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE,
5161
("xpt_register_async: func %p\n", cbfunc));
5162
5163
if (xptpath) {
5164
xpt_path_unlock(path);
5165
xpt_free_path(path);
5166
}
5167
5168
if ((status == CAM_REQ_CMP) &&
5169
(csa.event_enable & AC_FOUND_DEVICE)) {
5170
/*
5171
* Get this peripheral up to date with all
5172
* the currently existing devices.
5173
*/
5174
xpt_for_all_devices(xptsetasyncfunc, &csa);
5175
}
5176
if ((status == CAM_REQ_CMP) &&
5177
(csa.event_enable & AC_PATH_REGISTERED)) {
5178
/*
5179
* Get this peripheral up to date with all
5180
* the currently existing buses.
5181
*/
5182
xpt_for_all_busses(xptsetasyncbusfunc, &csa);
5183
}
5184
5185
return (status);
5186
}
5187
5188
static void
5189
xptaction(struct cam_sim *sim, union ccb *work_ccb)
5190
{
5191
CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
5192
5193
switch (work_ccb->ccb_h.func_code) {
5194
/* Common cases first */
5195
case XPT_PATH_INQ: /* Path routing inquiry */
5196
{
5197
struct ccb_pathinq *cpi;
5198
5199
cpi = &work_ccb->cpi;
5200
cpi->version_num = 1; /* XXX??? */
5201
cpi->hba_inquiry = 0;
5202
cpi->target_sprt = 0;
5203
cpi->hba_misc = 0;
5204
cpi->hba_eng_cnt = 0;
5205
cpi->max_target = 0;
5206
cpi->max_lun = 0;
5207
cpi->initiator_id = 0;
5208
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5209
strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5210
strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5211
cpi->unit_number = sim->unit_number;
5212
cpi->bus_id = sim->bus_id;
5213
cpi->base_transfer_speed = 0;
5214
cpi->protocol = PROTO_UNSPECIFIED;
5215
cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5216
cpi->transport = XPORT_UNSPECIFIED;
5217
cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5218
cpi->ccb_h.status = CAM_REQ_CMP;
5219
break;
5220
}
5221
default:
5222
work_ccb->ccb_h.status = CAM_REQ_INVALID;
5223
break;
5224
}
5225
xpt_done(work_ccb);
5226
}
5227
5228
/*
5229
* The xpt as a "controller" has no interrupt sources, so polling
5230
* is a no-op.
5231
*/
5232
static void
5233
xptpoll(struct cam_sim *sim)
5234
{
5235
}
5236
5237
void
5238
xpt_lock_buses(void)
5239
{
5240
mtx_lock(&xsoftc.xpt_topo_lock);
5241
}
5242
5243
void
5244
xpt_unlock_buses(void)
5245
{
5246
mtx_unlock(&xsoftc.xpt_topo_lock);
5247
}
5248
5249
struct mtx *
5250
xpt_path_mtx(struct cam_path *path)
5251
{
5252
5253
return (&path->device->device_mtx);
5254
}
5255
5256
static void
5257
xpt_done_process(struct ccb_hdr *ccb_h)
5258
{
5259
struct cam_sim *sim = NULL;
5260
struct cam_devq *devq = NULL;
5261
struct mtx *mtx = NULL;
5262
5263
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5264
struct ccb_scsiio *csio;
5265
5266
if (ccb_h->func_code == XPT_SCSI_IO) {
5267
csio = &((union ccb *)ccb_h)->csio;
5268
if (csio->bio != NULL)
5269
biotrack(csio->bio, __func__);
5270
}
5271
#endif
5272
5273
if (ccb_h->flags & CAM_HIGH_POWER) {
5274
struct highpowerlist *hphead;
5275
struct cam_ed *device;
5276
5277
mtx_lock(&xsoftc.xpt_highpower_lock);
5278
hphead = &xsoftc.highpowerq;
5279
5280
device = STAILQ_FIRST(hphead);
5281
5282
/*
5283
* Increment the count since this command is done.
5284
*/
5285
xsoftc.num_highpower++;
5286
5287
/*
5288
* Any high powered commands queued up?
5289
*/
5290
if (device != NULL) {
5291
STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5292
mtx_unlock(&xsoftc.xpt_highpower_lock);
5293
5294
mtx_lock(&device->sim->devq->send_mtx);
5295
xpt_release_devq_device(device,
5296
/*count*/1, /*runqueue*/TRUE);
5297
mtx_unlock(&device->sim->devq->send_mtx);
5298
} else
5299
mtx_unlock(&xsoftc.xpt_highpower_lock);
5300
}

	/*
	 * Insulate against a race where the periph is destroyed but CCBs are
	 * still not all processed.  This shouldn't happen, but allows for
	 * better bug diagnostics when it does.
	 */
5307
if (ccb_h->path->bus)
5308
sim = ccb_h->path->bus->sim;
5309
5310
if (ccb_h->status & CAM_RELEASE_SIMQ) {
5311
KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request"));
5312
xpt_release_simq(sim, /*run_queue*/FALSE);
5313
ccb_h->status &= ~CAM_RELEASE_SIMQ;
5314
}
5315
5316
if ((ccb_h->flags & CAM_DEV_QFRZDIS)
5317
&& (ccb_h->status & CAM_DEV_QFRZN)) {
5318
xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE);
5319
ccb_h->status &= ~CAM_DEV_QFRZN;
5320
}
5321
5322
if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
5323
struct cam_ed *dev = ccb_h->path->device;
5324
5325
if (sim)
5326
devq = sim->devq;
5327
KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.",
5328
ccb_h, xpt_action_name(ccb_h->func_code)));
5329
5330
mtx_lock(&devq->send_mtx);
5331
devq->send_active--;
5332
devq->send_openings++;
5333
cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
5334
5335
if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
5336
&& (dev->ccbq.dev_active == 0))) {
5337
dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY;
5338
xpt_release_devq_device(dev, /*count*/1,
5339
/*run_queue*/FALSE);
5340
}
5341
5342
if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
5343
&& (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) {
5344
dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
5345
xpt_release_devq_device(dev, /*count*/1,
5346
/*run_queue*/FALSE);
5347
}
5348
5349
if (!device_is_queued(dev))
5350
(void)xpt_schedule_devq(devq, dev);
5351
xpt_run_devq(devq);
5352
mtx_unlock(&devq->send_mtx);
5353
5354
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) {
5355
mtx = xpt_path_mtx(ccb_h->path);
5356
mtx_lock(mtx);
5357
5358
if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5359
&& (--dev->tag_delay_count == 0))
5360
xpt_start_tags(ccb_h->path);
5361
}
5362
}
5363
5364
if ((ccb_h->flags & CAM_UNLOCKED) == 0) {
5365
if (mtx == NULL) {
5366
mtx = xpt_path_mtx(ccb_h->path);
5367
mtx_lock(mtx);
5368
}
5369
} else {
5370
if (mtx != NULL) {
5371
mtx_unlock(mtx);
5372
mtx = NULL;
5373
}
5374
}
5375
5376
/* Call the peripheral driver's callback */
5377
ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
5378
(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
5379
if (mtx != NULL)
5380
mtx_unlock(mtx);
5381
}
5382
5383
/*
5384
* Parameterize instead and use xpt_done_td?
5385
*/
5386
static void
5387
xpt_async_td(void *arg)
5388
{
5389
struct cam_doneq *queue = arg;
5390
struct ccb_hdr *ccb_h;
5391
STAILQ_HEAD(, ccb_hdr) doneq;
5392
5393
STAILQ_INIT(&doneq);
5394
mtx_lock(&queue->cam_doneq_mtx);
5395
while (1) {
5396
while (STAILQ_EMPTY(&queue->cam_doneq))
5397
msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5398
PRIBIO, "-", 0);
5399
STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5400
mtx_unlock(&queue->cam_doneq_mtx);
5401
5402
while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5403
STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5404
xpt_done_process(ccb_h);
5405
}
5406
5407
mtx_lock(&queue->cam_doneq_mtx);
5408
}
5409
}
5410
5411
void
5412
xpt_done_td(void *arg)
5413
{
5414
struct cam_doneq *queue = arg;
5415
struct ccb_hdr *ccb_h;
5416
STAILQ_HEAD(, ccb_hdr) doneq;
5417
5418
STAILQ_INIT(&doneq);
5419
mtx_lock(&queue->cam_doneq_mtx);
5420
while (1) {
5421
while (STAILQ_EMPTY(&queue->cam_doneq)) {
5422
queue->cam_doneq_sleep = 1;
5423
msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5424
PRIBIO, "-", 0);
5425
queue->cam_doneq_sleep = 0;
5426
}
5427
STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5428
mtx_unlock(&queue->cam_doneq_mtx);
5429
5430
THREAD_NO_SLEEPING();
5431
while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5432
STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5433
xpt_done_process(ccb_h);
5434
}
5435
THREAD_SLEEPING_OK();
5436
5437
mtx_lock(&queue->cam_doneq_mtx);
5438
}
5439
}
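
/*
 * Drain every global completion queue in the calling context, handing each
 * completed CCB to xpt_done_process().  This processes completions
 * synchronously rather than waiting for the completion threads.
 */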
static void
camisr_runqueue(void)
{
	struct ccb_hdr *ccb_h;
	struct cam_doneq *queue;
	int i;

	/* Process global queues. */
	for (i = 0; i < cam_num_doneqs; i++) {
		queue = &cam_doneqs[i];
		mtx_lock(&queue->cam_doneq_mtx);
		while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
			STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
			mtx_unlock(&queue->cam_doneq_mtx);
			xpt_done_process(ccb_h);
			mtx_lock(&queue->cam_doneq_mtx);
		}
		mtx_unlock(&queue->cam_doneq_mtx);
	}
}

/**
 * @brief Return the device_t associated with the path
 *
 * When a SIM is created, it registers a bus with a NEWBUS device_t.  This is
 * stored in the internal cam_eb bus structure.  There is no guarantee any
 * given path will have a @c device_t associated with it (it's legal to call
 * @c xpt_bus_register with a @c NULL @c device_t).
 *
 * @param path Path to return the device_t for.
 */
device_t
xpt_path_sim_device(const struct cam_path *path)
{
	return (path->bus->parent_dev);
}
5477
5478
struct kv
5479
{
5480
uint32_t v;
5481
const char *name;
5482
};
5483
5484
static struct kv map[] = {
5485
{ XPT_NOOP, "XPT_NOOP" },
5486
{ XPT_SCSI_IO, "XPT_SCSI_IO" },
5487
{ XPT_GDEV_TYPE, "XPT_GDEV_TYPE" },
5488
{ XPT_GDEVLIST, "XPT_GDEVLIST" },
5489
{ XPT_PATH_INQ, "XPT_PATH_INQ" },
5490
{ XPT_REL_SIMQ, "XPT_REL_SIMQ" },
5491
{ XPT_SASYNC_CB, "XPT_SASYNC_CB" },
5492
{ XPT_SDEV_TYPE, "XPT_SDEV_TYPE" },
5493
{ XPT_SCAN_BUS, "XPT_SCAN_BUS" },
5494
{ XPT_DEV_MATCH, "XPT_DEV_MATCH" },
5495
{ XPT_DEBUG, "XPT_DEBUG" },
5496
{ XPT_PATH_STATS, "XPT_PATH_STATS" },
5497
{ XPT_GDEV_STATS, "XPT_GDEV_STATS" },
5498
{ XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" },
5499
{ XPT_ASYNC, "XPT_ASYNC" },
5500
{ XPT_ABORT, "XPT_ABORT" },
5501
{ XPT_RESET_BUS, "XPT_RESET_BUS" },
5502
{ XPT_RESET_DEV, "XPT_RESET_DEV" },
5503
{ XPT_TERM_IO, "XPT_TERM_IO" },
5504
{ XPT_SCAN_LUN, "XPT_SCAN_LUN" },
5505
{ XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" },
5506
{ XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" },
5507
{ XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" },
5508
{ XPT_ATA_IO, "XPT_ATA_IO" },
5509
{ XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" },
5510
{ XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" },
5511
{ XPT_NVME_IO, "XPT_NVME_IO" },
5512
{ XPT_MMC_IO, "XPT_MMC_IO" },
5513
{ XPT_SMP_IO, "XPT_SMP_IO" },
5514
{ XPT_SCAN_TGT, "XPT_SCAN_TGT" },
5515
{ XPT_NVME_ADMIN, "XPT_NVME_ADMIN" },
5516
{ XPT_ENG_INQ, "XPT_ENG_INQ" },
5517
{ XPT_ENG_EXEC, "XPT_ENG_EXEC" },
5518
{ XPT_EN_LUN, "XPT_EN_LUN" },
5519
{ XPT_TARGET_IO, "XPT_TARGET_IO" },
5520
{ XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" },
5521
{ XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" },
5522
{ XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" },
5523
{ XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" },
5524
{ XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" },
5525
{ XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" },
5526
{ 0, 0 }
5527
};
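
/*
 * Map an XPT function code to its symbolic name for debug output.  Unknown
 * codes are formatted into a shared static buffer, so the returned string
 * for an unknown code is only suitable for best-effort debugging.
 */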
const char *
xpt_action_name(uint32_t action)
{
	static char buffer[32];	/* Only for unknown messages -- racy */
	struct kv *walker = map;

	while (walker->name != NULL) {
		if (walker->v == action)
			return (walker->name);
		walker++;
	}

	snprintf(buffer, sizeof(buffer), "%#x", action);
	return (buffer);
}
5544
5545
void
5546
xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...)
5547
{
5548
struct sbuf sbuf;
5549
char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5550
struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5551
va_list ap;
5552
5553
sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5554
xpt_path_sbuf(path, sb);
5555
va_start(ap, fmt);
5556
sbuf_vprintf(sb, fmt, ap);
5557
va_end(ap);
5558
sbuf_finish(sb);
5559
sbuf_delete(sb);
5560
if (cam_debug_delay != 0)
5561
DELAY(cam_debug_delay);
5562
}
5563
5564
void
5565
xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...)
5566
{
5567
struct sbuf sbuf;
5568
char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5569
struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5570
va_list ap;
5571
5572
sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5573
xpt_device_sbuf(dev, sb);
5574
va_start(ap, fmt);
5575
sbuf_vprintf(sb, fmt, ap);
5576
va_end(ap);
5577
sbuf_finish(sb);
5578
sbuf_delete(sb);
5579
if (cam_debug_delay != 0)
5580
DELAY(cam_debug_delay);
5581
}
5582
5583
void
5584
xpt_cam_debug(const char *fmt, ...)
5585
{
5586
struct sbuf sbuf;
5587
char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */
5588
struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN);
5589
va_list ap;
5590
5591
sbuf_set_drain(sb, sbuf_printf_drain, NULL);
5592
sbuf_cat(sb, "cam_debug: ");
5593
va_start(ap, fmt);
5594
sbuf_vprintf(sb, fmt, ap);
5595
va_end(ap);
5596
sbuf_finish(sb);
5597
sbuf_delete(sb);
5598
if (cam_debug_delay != 0)
5599
DELAY(cam_debug_delay);
5600
}
5601
5602