GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cam/ctl/ctl_frontend_ioctl.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <[email protected]>
 * Copyright (c) 2017 Jakub Wojciech Klama <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

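/*
 * State of a single user I/O as it passes through this frontend.  A
 * ctl_fe_ioctl_params structure lives on the stack of the thread sleeping
 * in cfi_submit_wait(); cfi_datamove() and cfi_done() advance the state
 * and broadcast on the condition variable to wake that thread.
 */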
typedef enum {
	CTL_IOCTL_INPROG,
	CTL_IOCTL_DATAMOVE,
	CTL_IOCTL_DONE
} ctl_fe_ioctl_state;

struct ctl_fe_ioctl_params {
	struct cv		sem;
	struct mtx		ioctl_mtx;
	ctl_fe_ioctl_state	state;
};

struct cfi_port {
	TAILQ_ENTRY(cfi_port)	link;
	u_int			cur_tag_num;
	struct cdev *		dev;
	struct ctl_port		port;
};

struct cfi_softc {
	TAILQ_HEAD(, cfi_port)	ports;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static void cfi_ioctl_port_create(struct ctl_req *req);
static void cfi_ioctl_port_remove(struct ctl_req *req);

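/*
 * Character device switch for the per-port /dev/cam/ctl<pp>.<vp> nodes
 * created by cfi_ioctl_port_create().  Each node's si_drv2 points at its
 * owning cfi_port; ctl_ioctl_io() falls back to the first (default) port
 * when si_drv2 is NULL.
 */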
static struct cdevsw cfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_ioctl = ctl_ioctl_io
};

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.ioctl = cfi_ioctl,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

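/*
 * Frontend init: register and bring online the default ioctl port
 * (physical port 0, virtual port 0).  cfi_ioctl_port_remove() refuses to
 * destroy this port.
 */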
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	int error = 0;

	memset(isoftc, 0, sizeof(*isoftc));
	TAILQ_INIT(&isoftc->ports);

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = 0;
	port->targ_port = -1;

	if ((error = ctl_port_register(port)) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		return (error);
	}

	ctl_port_online(port);
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
	return (0);
}

static int
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi, *temp;
	struct ctl_port *port;
	int error;

	TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) {
		port = &cfi->port;
		ctl_port_offline(port);
		error = ctl_port_deregister(port);
		if (error != 0) {
			printf("%s: ctl_frontend_deregister() failed\n",
			    __func__);
			return (error);
		}

		TAILQ_REMOVE(&isoftc->ports, cfi, link);
		free(cfi, M_CTL);
	}

	return (0);
}

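/*
 * Create an additional ioctl port.  The request nvlist may carry "pp"
 * (physical port) and "vp" (virtual port) as strings; when "pp" is not
 * given, the next free physical port number is used.  On success the new
 * port is brought online, a /dev/cam/ctl<pp>.<vp> node is created, and
 * the assigned CTL port ID is returned in the "port_id" result.
 */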
static void
cfi_ioctl_port_create(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	struct make_dev_args args;
	const char *val;
	int retval;
	int pp = -1, vp = 0;

	val = dnvlist_get_string(req->args_nvl, "pp", NULL);
	if (val != NULL)
		pp = strtol(val, NULL, 10);

	val = dnvlist_get_string(req->args_nvl, "vp", NULL);
	if (val != NULL)
		vp = strtol(val, NULL, 10);

	if (pp != -1) {
		/* Check for duplicates */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			if (pp == cfi->port.physical_port &&
			    vp == cfi->port.virtual_port) {
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str, sizeof(req->error_str),
				    "port %d already exists", pp);

				return;
			}
		}
	} else {
		/* Find free port number */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			pp = MAX(pp, cfi->port.physical_port);
		}

		pp++;
	}

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = pp;
	port->virtual_port = vp;
	port->targ_port = -1;

	retval = ctl_port_register(port);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "ctl_port_register() failed with error %d", retval);
		free(cfi, M_CTL);
		return;
	}

	req->result_nvl = nvlist_create(0);
	nvlist_add_number(req->result_nvl, "port_id", port->targ_port);
	ctl_port_online(port);

	make_dev_args_init(&args);
	args.mda_devsw = &cfi_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = NULL;
	args.mda_si_drv2 = cfi;

	retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "make_dev_s() failed with error %d", retval);
		ctl_port_offline(port);
		ctl_port_deregister(port);
		free(cfi, M_CTL);
		return;
	}

	req->status = CTL_LUN_OK;
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
}

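/*
 * Remove an ioctl port previously created by cfi_ioctl_port_create().
 * The port is identified by the required "port_id" argument (the CTL
 * target port ID); the default port created by cfi_init() cannot be
 * removed.
 */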
static void
cfi_ioctl_port_remove(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi = NULL;
	const char *val;
	int port_id = -1;

	val = dnvlist_get_string(req->args_nvl, "port_id", NULL);
	if (val != NULL)
		port_id = strtol(val, NULL, 10);

	if (port_id == -1) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "Missing required argument: port_id");
		return;
	}

	TAILQ_FOREACH(cfi, &isoftc->ports, link) {
		if (cfi->port.targ_port == port_id)
			break;
	}

	if (cfi == NULL) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot find port %d", port_id);

		return;
	}

	if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot destroy default ioctl port");

		return;
	}

	ctl_port_offline(&cfi->port);
	ctl_port_deregister(&cfi->port);
	TAILQ_REMOVE(&isoftc->ports, cfi, link);
	destroy_dev(cfi->dev);
	free(cfi, M_CTL);
	req->status = CTL_LUN_OK;
}

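/*
 * Frontend ioctl entry point: dispatch CTL_PORT_REQ create/remove requests
 * (typically issued through ctladm(8)) to the helpers above.  Everything
 * else is declined with ENOTTY.
 */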
static int
cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ctl_req *req;

	if (cmd == CTL_PORT_REQ) {
		req = (struct ctl_req *)addr;
		switch (req->reqtype) {
		case CTL_REQ_CREATE:
			cfi_ioctl_port_create(req);
			break;
		case CTL_REQ_REMOVE:
			cfi_ioctl_port_remove(req);
			break;
		default:
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Unsupported request type %d", req->reqtype);
		}
		return (0);
	}

	return (ENOTTY);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
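/*
 * Runs in the context of the thread blocked in cfi_submit_wait() and copies
 * data between the kernel S/G list prepared by the backend and the
 * caller-supplied ext_data buffer (or external S/G list), using
 * ext_data_filled to resume partially completed transfers.
 */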
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ext_sglist_malloced = 0;
		ctsio->ext_data_filled += ctsio->kern_data_len;
		ctsio->kern_data_resid = 0;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
		    M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		ext_sg_start = ext_sg_entries;
		ext_offset = 0;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			    ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist_malloced = 0;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	for (i = ext_sg_start, j = 0;
	    i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
		    kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
			    "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
			    "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
			    "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
			    "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		ctsio->ext_data_filled += len_to_copy;
		ctsio->kern_data_resid -= len_to_copy;

		ext_watermark += len_to_copy;
		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		kern_watermark += len_to_copy;
		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
	    "kern_sg_entries: %d\n", ext_sg_entries,
	    kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
	    "kern_data_len = %d\n", ctsio->ext_data_len,
	    ctsio->kern_data_len));

bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

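/*
 * fe_datamove/fe_done callbacks for the ioctl port.  They may be called
 * from CTL's context, so they only record the new state and wake the
 * thread sleeping in cfi_submit_wait(), which performs the actual copy
 * via ctl_ioctl_do_datamove().
 */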
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
	    io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
	    io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

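/*
 * Submit one ctl_io and wait for it to complete, servicing DATAMOVE
 * requests inline.  The per-I/O state lives on this thread's stack and is
 * published to the callbacks through CTL_PRIV_FRONTEND.
 */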
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	bzero(&params, sizeof(params));
	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			ctl_datamove_done(io, false);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

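/*
 * d_ioctl handler for CTL_IO: copy the caller's union ctl_io into an I/O
 * allocated from the port's pool (preserving the pool and softc pointers),
 * mark it as a user request, run it synchronously through
 * cfi_submit_wait(), and copy the completed I/O back to the caller.
 */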
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct cfi_port *cfi;
	union ctl_io *io;
	void *pool_tmp, *sc_tmp;
	int retval = 0;

	if (cmd != CTL_IO)
		return (ENOTTY);

	cfi = dev->si_drv2 == NULL
	    ? TAILQ_FIRST(&cfi_softc.ports)
	    : dev->si_drv2;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi->port.ctl_pool_ref);

	/*
	 * Need to save the pool reference so it doesn't get
	 * spammed by the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	sc_tmp = CTL_SOFTC(io);
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;
	CTL_SOFTC(io) = sc_tmp;
	TAILQ_INIT(&io->io_hdr.blocked_queue);

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator ID, target and LUN IDs.
	 */
	io->io_hdr.nexus.targ_port = cfi->port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.flags & CTL_FLAG_USER_TAG) == 0 &&
	    io->io_hdr.io_type == CTL_IO_SCSI &&
	    io->scsiio.tag_type != CTL_TAG_UNTAGGED)
		io->scsiio.tag_num = atomic_fetchadd_int(&cfi->cur_tag_num, 1);

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));

	ctl_free_io(io);
	return (retval);
}
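
/*
 * Illustrative sketch (not part of the driver) of how a userland program
 * might drive this interface with a SCSI INQUIRY; the exact field
 * initialization below is an assumption, and the authoritative examples
 * are ctladm(8) and cam/ctl/ctl_util.c.  Roughly:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <cam/scsi/scsi_all.h>
 *	#include <cam/ctl/ctl_io.h>
 *	#include <cam/ctl/ctl_ioctl.h>
 *
 *	union ctl_io io;
 *	uint8_t buf[96];
 *	int fd = open("/dev/cam/ctl", O_RDWR);	// or a cam/ctl<pp>.<vp> node
 *
 *	memset(&io, 0, sizeof(io));
 *	io.io_hdr.io_type = CTL_IO_SCSI;	// SCSI command, not task mgmt
 *	io.io_hdr.nexus.initid = 7;		// arbitrary initiator ID
 *	io.io_hdr.nexus.targ_lun = 0;		// LUN to address
 *	io.scsiio.tag_type = CTL_TAG_SIMPLE;	// tag_num is assigned above
 *	io.scsiio.cdb_len = 6;
 *	io.scsiio.cdb[0] = INQUIRY;		// 0x12
 *	io.scsiio.cdb[4] = sizeof(buf);		// allocation length
 *	io.scsiio.ext_data_ptr = buf;		// single flat buffer ...
 *	io.scsiio.ext_data_len = sizeof(buf);
 *	io.scsiio.ext_sg_entries = 0;		// ... not an external S/G list
 *
 *	if (ioctl(fd, CTL_IO, &io) == 0)
 *		;	// inspect io.io_hdr.status and io.scsiio.ext_data_filled
 */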