Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cam/scsi/scsi_da.c
105687 views
1
/*-
2
* Implementation of SCSI Direct Access Peripheral driver for CAM.
3
*
4
* SPDX-License-Identifier: BSD-2-Clause
5
*
6
* Copyright (c) 1997 Justin T. Gibbs.
7
* All rights reserved.
8
*
9
* Redistribution and use in source and binary forms, with or without
10
* modification, are permitted provided that the following conditions
11
* are met:
12
* 1. Redistributions of source code must retain the above copyright
13
* notice, this list of conditions, and the following disclaimer,
14
* without modification, immediately at the beginning of the file.
15
* 2. The name of the author may not be used to endorse or promote products
16
* derived from this software without specific prior written permission.
17
*
18
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
* SUCH DAMAGE.
29
*/
30
31
#include <sys/param.h>
32
33
#ifdef _KERNEL
34
#include "opt_da.h"
35
#include <sys/systm.h>
36
#include <sys/kernel.h>
37
#include <sys/bio.h>
38
#include <sys/sysctl.h>
39
#include <sys/taskqueue.h>
40
#include <sys/lock.h>
41
#include <sys/mutex.h>
42
#include <sys/conf.h>
43
#include <sys/devicestat.h>
44
#include <sys/eventhandler.h>
45
#include <sys/malloc.h>
46
#include <sys/cons.h>
47
#include <sys/endian.h>
48
#include <sys/proc.h>
49
#include <sys/reboot.h>
50
#include <sys/sbuf.h>
51
#include <geom/geom.h>
52
#include <geom/geom_disk.h>
53
#include <machine/atomic.h>
54
#endif /* _KERNEL */
55
56
#ifndef _KERNEL
57
#include <stdio.h>
58
#include <string.h>
59
#endif /* _KERNEL */
60
61
#include <cam/cam.h>
62
#include <cam/cam_ccb.h>
63
#include <cam/cam_periph.h>
64
#include <cam/cam_xpt_periph.h>
65
#ifdef _KERNEL
66
#include <cam/cam_xpt_internal.h>
67
#endif /* _KERNEL */
68
#include <cam/cam_sim.h>
69
#include <cam/cam_iosched.h>
70
71
#include <cam/scsi/scsi_message.h>
72
#include <cam/scsi/scsi_da.h>
73
74
#ifdef _KERNEL
75
/*
 * Probe state machine states.
 *
 * Note that there are probe ordering dependencies here. The order isn't
 * controlled by this enumeration, but by explicit state transitions in
 * dastart() and dadone(). Some of the dependencies:
 *
 * 1. RC should come first, before RC16, unless there is evidence that RC16
 *    is supported.
 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
 * 3. The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
typedef enum {
	DA_STATE_PROBE_WP,
	DA_STATE_PROBE_RC,
	DA_STATE_PROBE_RC16,
	DA_STATE_PROBE_CACHE,
	DA_STATE_PROBE_LBP,
	DA_STATE_PROBE_BLK_LIMITS,
	DA_STATE_PROBE_BDC,
	DA_STATE_PROBE_ATA,
	DA_STATE_PROBE_ATA_LOGDIR,
	DA_STATE_PROBE_ATA_IDDIR,
	DA_STATE_PROBE_ATA_SUP,
	DA_STATE_PROBE_ATA_ZONE,
	DA_STATE_PROBE_ZONE,
	DA_STATE_NORMAL		/* probing complete; normal I/O */
} da_state;
102
103
/*
 * Softc state/capability flags.  Bit positions must stay in sync with
 * DA_FLAG_STRING below.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,
	DA_FLAG_NEW_PACK	= 0x000002,
	DA_FLAG_PACK_LOCKED	= 0x000004,
	DA_FLAG_PACK_REMOVABLE	= 0x000008,
	DA_FLAG_ROTATING	= 0x000010,
	DA_FLAG_NEED_OTAG	= 0x000020,
	DA_FLAG_WAS_OTAG	= 0x000040,
	DA_FLAG_RETRY_UA	= 0x000080,
	DA_FLAG_OPEN		= 0x000100,
	DA_FLAG_SCTX_INIT	= 0x000200,
	DA_FLAG_CAN_RC16	= 0x000400,
	DA_FLAG_PROBED		= 0x000800,
	DA_FLAG_DIRTY		= 0x001000,
	DA_FLAG_ANNOUNCED	= 0x002000,
	DA_FLAG_CAN_ATA_DMA	= 0x004000,
	DA_FLAG_CAN_ATA_LOG	= 0x008000,
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
	DA_FLAG_TUR_PENDING	= 0x080000,
	DA_FLAG_UNMAPPEDIO	= 0x100000,
	DA_FLAG_LBP		= 0x200000,
} da_flags;
127
/*
 * %b-style bit description string for da_flags.  The leading "\020"
 * selects hex output; each "\NNN<name>" pairs a 1-based bit number with
 * its name.  Keep in sync with the da_flags enumeration.
 *
 * Fixes: bit \022 read "CAN_ATA_SUPACP", a transposition typo for the
 * flag DA_FLAG_CAN_ATA_SUPCAP; the final entry also carried a dangling
 * line continuation that silently spliced the following source line
 * into the macro.
 */
#define DA_FLAG_STRING		\
	"\020"			\
	"\001PACK_INVALID"	\
	"\002NEW_PACK"		\
	"\003PACK_LOCKED"	\
	"\004PACK_REMOVABLE"	\
	"\005ROTATING"		\
	"\006NEED_OTAG"		\
	"\007WAS_OTAG"		\
	"\010RETRY_UA"		\
	"\011OPEN"		\
	"\012SCTX_INIT"		\
	"\013CAN_RC16"		\
	"\014PROBED"		\
	"\015DIRTY"		\
	"\016ANNOUNCED"		\
	"\017CAN_ATA_DMA"	\
	"\020CAN_ATA_LOG"	\
	"\021CAN_ATA_IDLOG"	\
	"\022CAN_ATA_SUPCAP"	\
	"\023CAN_ATA_ZONE"	\
	"\024TUR_PENDING"	\
	"\025UNMAPPEDIO"	\
	"\026LBP"
151
152
/*
 * Per-device quirk bits, applied from da_quirk_table[] when a device's
 * inquiry data matches an entry's pattern.
 */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,	/* device mishandles SYNCHRONIZE CACHE */
	DA_Q_NO_6_BYTE		= 0x02,	/* never issue 6-byte CDBs */
	DA_Q_NO_PREVENT		= 0x04,
	DA_Q_4K			= 0x08,	/* 4k-sector "Advanced Format" media */
	DA_Q_NO_RC16		= 0x10,	/* READ CAPACITY(16) is unreliable */
	DA_Q_NO_UNMAP		= 0x20,
	DA_Q_RETRY_BUSY		= 0x40,
	DA_Q_SMR_DM		= 0x80,
	DA_Q_STRICT_UNMAP	= 0x100,
	DA_Q_128KB		= 0x200
} da_quirks;

/* %b-style bit description string for da_quirks. */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"
178
179
/*
 * Per-CCB state stashed in ccb_state (ppriv_field0).  The low bits
 * (DA_CCB_TYPE_MASK) identify which probe or I/O the completing CCB
 * belongs to; DA_CCB_RETRY_UA marks a command being retried after a
 * unit attention.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_PROBE_CACHE	= 0x13,
	DA_CCB_TYPE_MASK	= 0x1F,
	DA_CCB_RETRY_UA		= 0x20
} da_ccb_state;
200
201
/*
 * BIO_DELETE methods, in preference order.
 *
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
 * import taking 5mins.
 */
typedef enum {
	DA_DELETE_NONE,		/* no delete support detected */
	DA_DELETE_DISABLE,	/* delete support disabled */
	DA_DELETE_ATA_TRIM,
	DA_DELETE_UNMAP,
	DA_DELETE_WS16,
	DA_DELETE_WS10,
	DA_DELETE_ZERO,
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,	/* first real method */
	DA_DELETE_MAX = DA_DELETE_ZERO		/* last real method */
} da_delete_methods;
221
222
/*
 * Zoned (SMR) drive classification.  For SCSI, host managed drives show
 * up as a separate device type.  For ATA, host managed drives also have
 * a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,
	DA_ZONE_DRIVE_MANAGED	= 0x01,
	DA_ZONE_HOST_AWARE	= 0x02,
	DA_ZONE_HOST_MANAGED	= 0x03
} da_zone_mode;

/*
 * We distinguish between these interface cases in addition to the drive
 * type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,
	DA_ZONE_IF_ATA_PASS,
	DA_ZONE_IF_ATA_SAT,
} da_zone_interface;
247
248
/*
 * Zone capability/state flags discovered during probing.  The *_SUP bits
 * record which zone commands the device supports; the *_SET bits record
 * which optimal/max zone counts have been learned.
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;

/* Human-readable names for the zone command support bits above. */
static struct da_zone_desc {
	da_zone_flags value;	/* support bit */
	const char *desc;	/* command name for announcement */
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};
278
279
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
280
struct bio *bp);
281
static da_delete_func_t da_delete_trim;
282
static da_delete_func_t da_delete_unmap;
283
static da_delete_func_t da_delete_ws;
284
285
static const void * da_delete_functions[] = {
286
NULL,
287
NULL,
288
da_delete_trim,
289
da_delete_unmap,
290
da_delete_ws,
291
da_delete_ws,
292
da_delete_ws
293
};
294
295
/*
 * Short names and human-readable descriptions for delete methods;
 * both arrays are indexed by da_delete_methods.
 */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };
300
301
/* Offsets into our private area for storing information */
302
#define ccb_state ppriv_field0
303
#define ccb_bp ppriv_ptr1
304
305
struct disk_params {
306
uint8_t heads;
307
uint32_t cylinders;
308
uint8_t secs_per_track;
309
uint32_t secsize; /* Number of bytes/sector */
310
uint64_t sectors; /* total number sectors */
311
u_int stripesize;
312
u_int stripeoffset;
313
};
314
315
/* Sizing for the SCSI UNMAP payload built in softc->unmap_buf[]. */
#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048	/* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

/* Largest block counts accepted by WRITE SAME(10)/(16). */
#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff

/* ATA DSM (TRIM) range capacity of the same unmap_buf[] space. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

#define DA_WORK_TUR		(1 << 16)
328
329
/*
 * Reference classes for periph hold/release bookkeeping; per-class
 * counts live in softc->ref_flags[].
 */
typedef enum {
	DA_REF_OPEN = 1,
	DA_REF_OPEN_HOLD,
	DA_REF_CLOSE_HOLD,
	DA_REF_TUR,
	DA_REF_GEOM,
	DA_REF_SYSCTL,
	DA_REF_REPROBE,
	DA_REF_MAX		/* KEEP LAST */
} da_ref_token;
339
340
struct da_softc {
341
struct cam_iosched_softc *cam_iosched;
342
struct bio_queue_head delete_run_queue;
343
LIST_HEAD(, ccb_hdr) pending_ccbs;
344
int refcount; /* Active xpt_action() calls */
345
da_state state;
346
da_flags flags;
347
da_quirks quirks;
348
int minimum_cmd_size;
349
int mode_page;
350
int error_inject;
351
int trim_max_ranges;
352
int delete_available; /* Delete methods possibly available */
353
da_zone_mode zone_mode;
354
da_zone_interface zone_interface;
355
da_zone_flags zone_flags;
356
struct ata_gp_log_dir ata_logdir;
357
int valid_logdir_len;
358
struct ata_identify_log_pages ata_iddir;
359
int valid_iddir_len;
360
uint64_t optimal_seq_zones;
361
uint64_t optimal_nonseq_zones;
362
uint64_t max_seq_zones;
363
u_int maxio;
364
uint32_t unmap_max_ranges;
365
uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */
366
uint32_t unmap_gran;
367
uint32_t unmap_gran_align;
368
uint64_t ws_max_blks;
369
uint64_t trim_count;
370
uint64_t trim_ranges;
371
uint64_t trim_lbas;
372
da_delete_methods delete_method_pref;
373
da_delete_methods delete_method;
374
da_delete_func_t *delete_func;
375
int p_type;
376
struct disk_params params;
377
struct disk *disk;
378
struct task sysctl_task;
379
struct sysctl_ctx_list sysctl_ctx;
380
struct sysctl_oid *sysctl_tree;
381
struct callout sendordered_c;
382
uint64_t wwpn;
383
uint8_t unmap_buf[UNMAP_BUF_SIZE];
384
struct scsi_read_capacity_data_long rcaplong;
385
struct callout mediapoll_c;
386
int ref_flags[DA_REF_MAX];
387
#ifdef CAM_IO_STATS
388
struct sysctl_ctx_list sysctl_stats_ctx;
389
struct sysctl_oid *sysctl_stats_tree;
390
u_int errors;
391
u_int timeouts;
392
u_int invalidations;
393
#endif
394
#define DA_ANNOUNCETMP_SZ 160
395
char announce_temp[DA_ANNOUNCETMP_SZ];
396
#define DA_ANNOUNCE_SZ 400
397
char announcebuf[DA_ANNOUNCE_SZ];
398
};
399
400
/*
 * Set or clear the bit for 'delete_method' in softc->delete_available.
 * Wrapped in do { } while (0) so the multi-statement body expands as a
 * single statement and composes safely with an unbraced if/else at the
 * call site (the previous bare if/else form had a dangling-else hazard).
 */
#define dadeleteflag(softc, delete_method, enable)			\
do {									\
	if (enable) {							\
		softc->delete_available |= (1 << delete_method);	\
	} else {							\
		softc->delete_available &= ~(1 << delete_method);	\
	}								\
} while (0)
406
407
static uma_zone_t da_ccb_zone;
408
409
struct da_quirk_entry {
410
struct scsi_inquiry_pattern inq_pat;
411
da_quirks quirks;
412
};
413
414
static const char quantum[] = "QUANTUM";
415
static const char microp[] = "MICROP";
416
417
static struct da_quirk_entry da_quirk_table[] =
418
{
419
/* SPI, FC devices */
420
{
421
/*
422
* Fujitsu M2513A MO drives.
423
* Tested devices: M2513A2 firmware versions 1200 & 1300.
424
* (dip switch selects whether T_DIRECT or T_OPTICAL device)
425
* Reported by: W.Scholten <[email protected]>
426
*/
427
{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
428
/*quirks*/ DA_Q_NO_SYNC_CACHE
429
},
430
{
431
/* See above. */
432
{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
433
/*quirks*/ DA_Q_NO_SYNC_CACHE
434
},
435
{
436
/*
437
* This particular Fujitsu drive doesn't like the
438
* synchronize cache command.
439
* Reported by: Tom Jackson <[email protected]>
440
*/
441
{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
442
/*quirks*/ DA_Q_NO_SYNC_CACHE
443
},
444
{
445
/*
446
* This drive doesn't like the synchronize cache command
447
* either. Reported by: Matthew Jacob <[email protected]>
448
* in NetBSD PR kern/6027, August 24, 1998.
449
*/
450
{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
451
/*quirks*/ DA_Q_NO_SYNC_CACHE
452
},
453
{
454
/*
455
* This drive doesn't like the synchronize cache command
456
* either. Reported by: Hellmuth Michaelis ([email protected])
457
* (PR 8882).
458
*/
459
{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
460
/*quirks*/ DA_Q_NO_SYNC_CACHE
461
},
462
{
463
/*
464
* Doesn't like the synchronize cache command.
465
* Reported by: Blaz Zupan <[email protected]>
466
*/
467
{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
468
/*quirks*/ DA_Q_NO_SYNC_CACHE
469
},
470
{
471
/*
472
* Doesn't like the synchronize cache command.
473
* Reported by: Blaz Zupan <[email protected]>
474
*/
475
{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
476
/*quirks*/ DA_Q_NO_SYNC_CACHE
477
},
478
{
479
/*
480
* Doesn't like the synchronize cache command.
481
*/
482
{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
483
/*quirks*/ DA_Q_NO_SYNC_CACHE
484
},
485
{
486
/*
487
* Doesn't like the synchronize cache command.
488
* Reported by: [email protected]
489
*/
490
{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
491
/*quirks*/ DA_Q_NO_SYNC_CACHE
492
},
493
{
494
/*
495
* Doesn't work correctly with 6 byte reads/writes.
496
* Returns illegal request, and points to byte 9 of the
497
* 6-byte CDB.
498
* Reported by: Adam McDougall <[email protected]>
499
*/
500
{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
501
/*quirks*/ DA_Q_NO_6_BYTE
502
},
503
{
504
/* See above. */
505
{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
506
/*quirks*/ DA_Q_NO_6_BYTE
507
},
508
{
509
/*
510
* Doesn't like the synchronize cache command.
511
* Reported by: [email protected]
512
*/
513
{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
514
/*quirks*/ DA_Q_NO_SYNC_CACHE
515
},
516
{
517
/*
518
* The CISS RAID controllers do not support SYNC_CACHE
519
*/
520
{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
521
/*quirks*/ DA_Q_NO_SYNC_CACHE
522
},
523
{
524
/*
525
* The STEC SSDs sometimes hang on UNMAP.
526
*/
527
{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
528
/*quirks*/ DA_Q_NO_UNMAP
529
},
530
{
531
/*
532
* VMware returns BUSY status when storage has transient
533
* connectivity problems, so better wait.
534
* Also VMware returns odd errors on misaligned UNMAPs.
535
*/
536
{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
537
/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
538
},
539
/* USB mass storage devices supported by umass(4) */
540
{
541
/*
542
* EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
543
* PR: kern/51675
544
*/
545
{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
546
/*quirks*/ DA_Q_NO_SYNC_CACHE
547
},
548
{
549
/*
550
* Power Quotient Int. (PQI) USB flash key
551
* PR: kern/53067
552
*/
553
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
554
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
555
},
556
{
557
/*
558
* Creative Nomad MUVO mp3 player (USB)
559
* PR: kern/53094
560
*/
561
{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
562
/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
563
},
564
{
565
/*
566
* Jungsoft NEXDISK USB flash key
567
* PR: kern/54737
568
*/
569
{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
570
/*quirks*/ DA_Q_NO_SYNC_CACHE
571
},
572
{
573
/*
574
* FreeDik USB Mini Data Drive
575
* PR: kern/54786
576
*/
577
{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
578
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
579
},
580
{
581
/*
582
* Sigmatel USB Flash MP3 Player
583
* PR: kern/57046
584
*/
585
{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
586
/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
587
},
588
{
589
/*
590
* Neuros USB Digital Audio Computer
591
* PR: kern/63645
592
*/
593
{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
594
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
595
},
596
{
597
/*
598
* SEAGRAND NP-900 MP3 Player
599
* PR: kern/64563
600
*/
601
{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
602
/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
603
},
604
{
605
/*
606
* iRiver iFP MP3 player (with UMS Firmware)
607
* PR: kern/54881, i386/63941, kern/66124
608
*/
609
{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
610
/*quirks*/ DA_Q_NO_SYNC_CACHE
611
},
612
{
613
/*
614
* Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
615
* PR: kern/70158
616
*/
617
{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
618
/*quirks*/ DA_Q_NO_SYNC_CACHE
619
},
620
{
621
/*
622
* ZICPlay USB MP3 Player with FM
623
* PR: kern/75057
624
*/
625
{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
626
/*quirks*/ DA_Q_NO_SYNC_CACHE
627
},
628
{
629
/*
630
* TEAC USB floppy mechanisms
631
*/
632
{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
633
/*quirks*/ DA_Q_NO_SYNC_CACHE
634
},
635
{
636
/*
637
* Kingston DataTraveler II+ USB Pen-Drive.
638
* Reported by: Pawel Jakub Dawidek <[email protected]>
639
*/
640
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
641
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
642
},
643
{
644
/*
645
* USB DISK Pro PMAP
646
* Reported by: jhs
647
* PR: usb/96381
648
*/
649
{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
650
/*quirks*/ DA_Q_NO_SYNC_CACHE
651
},
652
{
653
/*
654
* Motorola E398 Mobile Phone (TransFlash memory card).
655
* Reported by: Wojciech A. Koszek <[email protected]>
656
* PR: usb/89889
657
*/
658
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
659
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
660
},
661
{
662
/*
663
* Qware BeatZkey! Pro
664
* PR: usb/79164
665
*/
666
{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
667
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
668
},
669
{
670
/*
671
* Time DPA20B 1GB MP3 Player
672
* PR: usb/81846
673
*/
674
{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
675
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
676
},
677
{
678
/*
679
* Samsung USB key 128Mb
680
* PR: usb/90081
681
*/
682
{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
683
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
684
},
685
{
686
/*
687
* Kingston DataTraveler 2.0 USB Flash memory.
688
* PR: usb/89196
689
*/
690
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
691
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
692
},
693
{
694
/*
695
* Creative MUVO Slim mp3 player (USB)
696
* PR: usb/86131
697
*/
698
{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
699
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
700
},
701
{
702
/*
703
* United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
704
* PR: usb/80487
705
*/
706
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
707
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
708
},
709
{
710
/*
711
* SanDisk Micro Cruzer 128MB
712
* PR: usb/75970
713
*/
714
{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
715
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
716
},
717
{
718
/*
719
* TOSHIBA TransMemory USB sticks
720
* PR: kern/94660
721
*/
722
{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
723
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
724
},
725
{
726
/*
727
* PNY USB 3.0 Flash Drives
728
*/
729
{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
730
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
731
},
732
{
733
/*
734
* PNY USB Flash keys
735
* PR: usb/75578, usb/72344, usb/65436
736
*/
737
{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
738
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
739
},
740
{
741
/*
742
* Genesys GL3224
743
*/
744
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
745
"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
746
},
747
{
748
/*
749
* Genesys 6-in-1 Card Reader
750
* PR: usb/94647
751
*/
752
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
753
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
754
},
755
{
756
/*
757
* Rekam Digital CAMERA
758
* PR: usb/98713
759
*/
760
{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
761
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
762
},
763
{
764
/*
765
* iRiver H10 MP3 player
766
* PR: usb/102547
767
*/
768
{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
769
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
770
},
771
{
772
/*
773
* iRiver U10 MP3 player
774
* PR: usb/92306
775
*/
776
{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
777
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
778
},
779
{
780
/*
781
* X-Micro Flash Disk
782
* PR: usb/96901
783
*/
784
{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
785
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
786
},
787
{
788
/*
789
* EasyMP3 EM732X USB 2.0 Flash MP3 Player
790
* PR: usb/96546
791
*/
792
{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
793
"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
794
},
795
{
796
/*
797
* Denver MP3 player
798
* PR: usb/107101
799
*/
800
{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
801
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
802
},
803
{
804
/*
805
* Philips USB Key Audio KEY013
806
* PR: usb/68412
807
*/
808
{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
809
/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
810
},
811
{
812
/*
813
* JNC MP3 Player
814
* PR: usb/94439
815
*/
816
{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
817
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
818
},
819
{
820
/*
821
* SAMSUNG MP0402H
822
* PR: usb/108427
823
*/
824
{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
825
/*quirks*/ DA_Q_NO_SYNC_CACHE
826
},
827
{
828
/*
829
* I/O Magic USB flash - Giga Bank
830
* PR: usb/108810
831
*/
832
{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
833
/*quirks*/ DA_Q_NO_SYNC_CACHE
834
},
835
{
836
/*
837
* JoyFly 128mb USB Flash Drive
838
* PR: 96133
839
*/
840
{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
841
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
842
},
843
{
844
/*
845
* ChipsBnk usb stick
846
* PR: 103702
847
*/
848
{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
849
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
850
},
851
{
852
/*
853
* Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
854
* PR: 129858
855
*/
856
{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
857
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
858
},
859
{
860
/*
861
* Samsung YP-U3 mp3-player
862
* PR: 125398
863
*/
864
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
865
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
866
},
867
{
868
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
869
"2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
870
},
871
{
872
/*
873
* Sony Cyber-Shot DSC cameras
874
* PR: usb/137035
875
*/
876
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
877
/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
878
},
879
{
880
{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
881
"1.00"}, /*quirks*/ DA_Q_NO_PREVENT
882
},
883
{
884
/* At least several Transcent USB sticks lie on RC16. */
885
{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
886
"*"}, /*quirks*/ DA_Q_NO_RC16
887
},
888
{
889
/* ADATA USB sticks lie on RC16. */
890
{T_DIRECT, SIP_MEDIA_REMOVABLE, "ADATA", "USB Flash Drive*",
891
"*"}, /*quirks*/ DA_Q_NO_RC16
892
},
893
{
894
/*
895
* I-O Data USB Flash Disk
896
* PR: usb/211716
897
*/
898
{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
899
"*"}, /*quirks*/ DA_Q_NO_RC16
900
},
901
{
902
/*
903
* SLC CHIPFANCIER USB drives
904
* PR: usb/234503 (RC10 right, RC16 wrong)
905
* 16GB, 32GB and 128GB confirmed to have same issue
906
*/
907
{T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
908
"*"}, /*quirks*/ DA_Q_NO_RC16
909
},
910
/* ATA/SATA devices over SAS/USB/... */
911
{
912
/* Sandisk X400 */
913
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
914
/*quirks*/DA_Q_128KB
915
},
916
{
917
/* Hitachi Advanced Format (4k) drives */
918
{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
919
/*quirks*/DA_Q_4K
920
},
921
{
922
/* Micron Advanced Format (4k) drives */
923
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
924
/*quirks*/DA_Q_4K
925
},
926
{
927
/* Samsung Advanced Format (4k) drives */
928
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
929
/*quirks*/DA_Q_4K
930
},
931
{
932
/* Samsung Advanced Format (4k) drives */
933
{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
934
/*quirks*/DA_Q_4K
935
},
936
{
937
/* Samsung Advanced Format (4k) drives */
938
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
939
/*quirks*/DA_Q_4K
940
},
941
{
942
/* Samsung Advanced Format (4k) drives */
943
{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
944
/*quirks*/DA_Q_4K
945
},
946
{
947
/* Seagate Barracuda Green Advanced Format (4k) drives */
948
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
949
/*quirks*/DA_Q_4K
950
},
951
{
952
/* Seagate Barracuda Green Advanced Format (4k) drives */
953
{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
954
/*quirks*/DA_Q_4K
955
},
956
{
957
/* Seagate Barracuda Green Advanced Format (4k) drives */
958
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
959
/*quirks*/DA_Q_4K
960
},
961
{
962
/* Seagate Barracuda Green Advanced Format (4k) drives */
963
{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
964
/*quirks*/DA_Q_4K
965
},
966
{
967
/* Seagate Barracuda Green Advanced Format (4k) drives */
968
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
969
/*quirks*/DA_Q_4K
970
},
971
{
972
/* Seagate Barracuda Green Advanced Format (4k) drives */
973
{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
974
/*quirks*/DA_Q_4K
975
},
976
{
977
/* Seagate Momentus Advanced Format (4k) drives */
978
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
979
/*quirks*/DA_Q_4K
980
},
981
{
982
/* Seagate Momentus Advanced Format (4k) drives */
983
{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
984
/*quirks*/DA_Q_4K
985
},
986
{
987
/* Seagate Momentus Advanced Format (4k) drives */
988
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
989
/*quirks*/DA_Q_4K
990
},
991
{
992
/* Seagate Momentus Advanced Format (4k) drives */
993
{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
994
/*quirks*/DA_Q_4K
995
},
996
{
997
/* Seagate Momentus Advanced Format (4k) drives */
998
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
999
/*quirks*/DA_Q_4K
1000
},
1001
{
1002
/* Seagate Momentus Advanced Format (4k) drives */
1003
{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
1004
/*quirks*/DA_Q_4K
1005
},
1006
{
1007
/* Seagate Momentus Advanced Format (4k) drives */
1008
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
1009
/*quirks*/DA_Q_4K
1010
},
1011
{
1012
/* Seagate Momentus Advanced Format (4k) drives */
1013
{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
1014
/*quirks*/DA_Q_4K
1015
},
1016
{
1017
/* Seagate Momentus Advanced Format (4k) drives */
1018
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
1019
/*quirks*/DA_Q_4K
1020
},
1021
{
1022
/* Seagate Momentus Advanced Format (4k) drives */
1023
{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
1024
/*quirks*/DA_Q_4K
1025
},
1026
{
1027
/* Seagate Momentus Advanced Format (4k) drives */
1028
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
1029
/*quirks*/DA_Q_4K
1030
},
1031
{
1032
/* Seagate Momentus Advanced Format (4k) drives */
1033
{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
1034
/*quirks*/DA_Q_4K
1035
},
1036
{
1037
/* Seagate Momentus Advanced Format (4k) drives */
1038
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
1039
/*quirks*/DA_Q_4K
1040
},
1041
{
1042
/* Seagate Momentus Advanced Format (4k) drives */
1043
{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
1044
/*quirks*/DA_Q_4K
1045
},
1046
{
1047
/* Seagate Momentus Thin Advanced Format (4k) drives */
1048
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
1049
/*quirks*/DA_Q_4K
1050
},
1051
{
1052
/* Seagate Momentus Thin Advanced Format (4k) drives */
1053
{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
1054
/*quirks*/DA_Q_4K
1055
},
1056
{
1057
/* WDC Caviar Green Advanced Format (4k) drives */
1058
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
1059
/*quirks*/DA_Q_4K
1060
},
1061
{
1062
/* WDC Caviar Green Advanced Format (4k) drives */
1063
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1064
/*quirks*/DA_Q_4K
1065
},
1066
{
1067
/* WDC Caviar Green Advanced Format (4k) drives */
1068
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1069
/*quirks*/DA_Q_4K
1070
},
1071
{
1072
/* WDC Caviar Green Advanced Format (4k) drives */
1073
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1074
/*quirks*/DA_Q_4K
1075
},
1076
{
1077
/* WDC Caviar Green Advanced Format (4k) drives */
1078
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1079
/*quirks*/DA_Q_4K
1080
},
1081
{
1082
/* WDC Caviar Green Advanced Format (4k) drives */
1083
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1084
/*quirks*/DA_Q_4K
1085
},
1086
{
1087
/* WDC Caviar Green Advanced Format (4k) drives */
1088
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1089
/*quirks*/DA_Q_4K
1090
},
1091
{
1092
/* WDC Caviar Green Advanced Format (4k) drives */
1093
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1094
/*quirks*/DA_Q_4K
1095
},
1096
{
1097
/* WDC Scorpio Black Advanced Format (4k) drives */
1098
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1099
/*quirks*/DA_Q_4K
1100
},
1101
{
1102
/* WDC Scorpio Black Advanced Format (4k) drives */
1103
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1104
/*quirks*/DA_Q_4K
1105
},
1106
{
1107
/* WDC Scorpio Black Advanced Format (4k) drives */
1108
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1109
/*quirks*/DA_Q_4K
1110
},
1111
{
1112
/* WDC Scorpio Black Advanced Format (4k) drives */
1113
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1114
/*quirks*/DA_Q_4K
1115
},
1116
{
1117
/* WDC Scorpio Blue Advanced Format (4k) drives */
1118
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1119
/*quirks*/DA_Q_4K
1120
},
1121
{
1122
/* WDC Scorpio Blue Advanced Format (4k) drives */
1123
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1124
/*quirks*/DA_Q_4K
1125
},
1126
{
1127
/* WDC Scorpio Blue Advanced Format (4k) drives */
1128
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1129
/*quirks*/DA_Q_4K
1130
},
1131
{
1132
/* WDC Scorpio Blue Advanced Format (4k) drives */
1133
{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1134
/*quirks*/DA_Q_4K
1135
},
1136
{
1137
/*
1138
* Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1139
* PR: usb/97472
1140
*/
1141
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1142
/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1143
},
1144
{
1145
/*
1146
* Olympus digital cameras (D-370)
1147
* PR: usb/97472
1148
*/
1149
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1150
/*quirks*/ DA_Q_NO_6_BYTE
1151
},
1152
{
1153
/*
1154
* Olympus digital cameras (E-100RS, E-10).
1155
* PR: usb/97472
1156
*/
1157
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1158
/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1159
},
1160
{
1161
/*
1162
* Olympus FE-210 camera
1163
*/
1164
{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1165
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1166
},
1167
{
1168
/*
1169
* Pentax Digital Camera
1170
* PR: usb/93389
1171
*/
1172
{T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA",
1173
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1174
},
1175
{
1176
/*
1177
* LG UP3S MP3 player
1178
*/
1179
{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1180
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1181
},
1182
{
1183
/*
1184
* Laser MP3-2GA13 MP3 player
1185
*/
1186
{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1187
"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1188
},
1189
{
1190
/*
1191
* LaCie external 250GB Hard drive des by Porsche
1192
* Submitted by: Ben Stuyts <[email protected]>
1193
* PR: 121474
1194
*/
1195
{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1196
/*quirks*/ DA_Q_NO_SYNC_CACHE
1197
},
1198
/* SATA SSDs */
1199
{
1200
/*
1201
* Corsair Force 2 SSDs
1202
* 4k optimised & trim only works in 4k requests + 4k aligned
1203
*/
1204
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1205
/*quirks*/DA_Q_4K
1206
},
1207
{
1208
/*
1209
* Corsair Force 3 SSDs
1210
* 4k optimised & trim only works in 4k requests + 4k aligned
1211
*/
1212
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1213
/*quirks*/DA_Q_4K
1214
},
1215
{
1216
/*
1217
* Corsair Neutron GTX SSDs
1218
* 4k optimised & trim only works in 4k requests + 4k aligned
1219
*/
1220
{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1221
/*quirks*/DA_Q_4K
1222
},
1223
{
1224
/*
1225
* Corsair Force GT & GS SSDs
1226
* 4k optimised & trim only works in 4k requests + 4k aligned
1227
*/
1228
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1229
/*quirks*/DA_Q_4K
1230
},
1231
{
1232
/*
1233
* Crucial M4 SSDs
1234
* 4k optimised & trim only works in 4k requests + 4k aligned
1235
*/
1236
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1237
/*quirks*/DA_Q_4K
1238
},
1239
{
1240
/*
1241
* Crucial RealSSD C300 SSDs
1242
* 4k optimised
1243
*/
1244
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1245
"*" }, /*quirks*/DA_Q_4K
1246
},
1247
{
1248
/*
1249
* Intel 320 Series SSDs
1250
* 4k optimised & trim only works in 4k requests + 4k aligned
1251
*/
1252
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1253
/*quirks*/DA_Q_4K
1254
},
1255
{
1256
/*
1257
* Intel 330 Series SSDs
1258
* 4k optimised & trim only works in 4k requests + 4k aligned
1259
*/
1260
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1261
/*quirks*/DA_Q_4K
1262
},
1263
{
1264
/*
1265
* Intel 510 Series SSDs
1266
* 4k optimised & trim only works in 4k requests + 4k aligned
1267
*/
1268
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1269
/*quirks*/DA_Q_4K
1270
},
1271
{
1272
/*
1273
* Intel 520 Series SSDs
1274
* 4k optimised & trim only works in 4k requests + 4k aligned
1275
*/
1276
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1277
/*quirks*/DA_Q_4K
1278
},
1279
{
1280
/*
1281
* Intel S3610 Series SSDs
1282
* 4k optimised & trim only works in 4k requests + 4k aligned
1283
*/
1284
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1285
/*quirks*/DA_Q_4K
1286
},
1287
{
1288
/*
1289
* Intel X25-M Series SSDs
1290
* 4k optimised & trim only works in 4k requests + 4k aligned
1291
*/
1292
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1293
/*quirks*/DA_Q_4K
1294
},
1295
{
1296
/*
1297
* Kingston E100 Series SSDs
1298
* 4k optimised & trim only works in 4k requests + 4k aligned
1299
*/
1300
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1301
/*quirks*/DA_Q_4K
1302
},
1303
{
1304
/*
1305
* Kingston HyperX 3k SSDs
1306
* 4k optimised & trim only works in 4k requests + 4k aligned
1307
*/
1308
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1309
/*quirks*/DA_Q_4K
1310
},
1311
{
1312
/*
1313
* Marvell SSDs (entry taken from OpenSolaris)
1314
* 4k optimised & trim only works in 4k requests + 4k aligned
1315
*/
1316
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1317
/*quirks*/DA_Q_4K
1318
},
1319
{
1320
/*
1321
* OCZ Agility 2 SSDs
1322
* 4k optimised & trim only works in 4k requests + 4k aligned
1323
*/
1324
{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1325
/*quirks*/DA_Q_4K
1326
},
1327
{
1328
/*
1329
* OCZ Agility 3 SSDs
1330
* 4k optimised & trim only works in 4k requests + 4k aligned
1331
*/
1332
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1333
/*quirks*/DA_Q_4K
1334
},
1335
{
1336
/*
1337
* OCZ Deneva R Series SSDs
1338
* 4k optimised & trim only works in 4k requests + 4k aligned
1339
*/
1340
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1341
/*quirks*/DA_Q_4K
1342
},
1343
{
1344
/*
1345
* OCZ Vertex 2 SSDs (inc pro series)
1346
* 4k optimised & trim only works in 4k requests + 4k aligned
1347
*/
1348
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1349
/*quirks*/DA_Q_4K
1350
},
1351
{
1352
/*
1353
* OCZ Vertex 3 SSDs
1354
* 4k optimised & trim only works in 4k requests + 4k aligned
1355
*/
1356
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1357
/*quirks*/DA_Q_4K
1358
},
1359
{
1360
/*
1361
* OCZ Vertex 4 SSDs
1362
* 4k optimised & trim only works in 4k requests + 4k aligned
1363
*/
1364
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1365
/*quirks*/DA_Q_4K
1366
},
1367
{
1368
/*
1369
* Samsung 750 Series SSDs
1370
* 4k optimised & trim only works in 4k requests + 4k aligned
1371
*/
1372
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1373
/*quirks*/DA_Q_4K
1374
},
1375
{
1376
/*
1377
* Samsung 830 Series SSDs
1378
* 4k optimised & trim only works in 4k requests + 4k aligned
1379
*/
1380
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1381
/*quirks*/DA_Q_4K
1382
},
1383
{
1384
/*
1385
* Samsung 840 SSDs
1386
* 4k optimised & trim only works in 4k requests + 4k aligned
1387
*/
1388
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1389
/*quirks*/DA_Q_4K
1390
},
1391
{
1392
/*
1393
* Samsung 845 SSDs
1394
* 4k optimised & trim only works in 4k requests + 4k aligned
1395
*/
1396
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1397
/*quirks*/DA_Q_4K
1398
},
1399
{
1400
/*
1401
* Samsung 850 SSDs
1402
* 4k optimised & trim only works in 4k requests + 4k aligned
1403
*/
1404
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1405
/*quirks*/DA_Q_4K
1406
},
1407
{
1408
/*
1409
* Samsung 860 SSDs
1410
* 4k optimised & trim only works in 4k requests + 4k aligned
1411
*/
1412
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 860*", "*" },
1413
/*quirks*/DA_Q_4K
1414
},
1415
{
1416
/*
1417
* Samsung 870 SSDs
1418
* 4k optimised & trim only works in 4k requests + 4k aligned
1419
*/
1420
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 870*", "*" },
1421
/*quirks*/DA_Q_4K
1422
},
1423
{
1424
/*
1425
* Samsung 843T Series SSDs (MZ7WD*)
1426
* Samsung PM851 Series SSDs (MZ7TE*)
1427
* Samsung PM853T Series SSDs (MZ7GE*)
1428
* Samsung SM863 Series SSDs (MZ7KM*)
1429
* 4k optimised
1430
*/
1431
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1432
/*quirks*/DA_Q_4K
1433
},
1434
{
1435
/*
1436
* Same as for SAMSUNG MZ7* but enable the quirks for SSD
1437
* starting with MZ7* too
1438
*/
1439
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1440
/*quirks*/DA_Q_4K
1441
},
1442
{
1443
/*
1444
* Same as above but enable the quirks for SSD SAMSUNG MZ7*
1445
* connected via SATA-to-SAS interposer and because of this
1446
* starting without "ATA"
1447
*/
1448
{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MZ7*", "*" },
1449
/*quirks*/DA_Q_4K
1450
},
1451
{
1452
/*
1453
* SuperTalent TeraDrive CT SSDs
1454
* 4k optimised & trim only works in 4k requests + 4k aligned
1455
*/
1456
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1457
/*quirks*/DA_Q_4K
1458
},
1459
{
1460
/*
1461
* XceedIOPS SATA SSDs
1462
* 4k optimised
1463
*/
1464
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1465
/*quirks*/DA_Q_4K
1466
},
1467
{
1468
/*
1469
* Hama Innostor USB-Stick
1470
*/
1471
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1472
/*quirks*/DA_Q_NO_RC16
1473
},
1474
{
1475
/*
1476
* Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1477
* Drive Managed SATA hard drive. This drive doesn't report
1478
* in firmware that it is a drive managed SMR drive.
1479
*/
1480
{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1481
/*quirks*/DA_Q_SMR_DM
1482
},
1483
{
1484
/*
1485
* MX-ES USB Drive by Mach Xtreme
1486
*/
1487
{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1488
/*quirks*/DA_Q_NO_RC16
1489
},
1490
};
1491
1492
static disk_strategy_t dastrategy;
1493
static dumper_t dadump;
1494
static periph_init_t dainit;
1495
static void daasync(void *callback_arg, uint32_t code,
1496
struct cam_path *path, void *arg);
1497
static void dasysctlinit(void *context, int pending);
1498
static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1499
static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1500
static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1501
static int dabitsysctl(SYSCTL_HANDLER_ARGS);
1502
static int daflagssysctl(SYSCTL_HANDLER_ARGS);
1503
static int daquirkssysctl(SYSCTL_HANDLER_ARGS);
1504
static int dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1505
static int dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1506
static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1507
static void dadeletemethodset(struct da_softc *softc,
1508
da_delete_methods delete_method);
1509
static off_t dadeletemaxsize(struct da_softc *softc,
1510
da_delete_methods delete_method);
1511
static void dadeletemethodchoose(struct da_softc *softc,
1512
da_delete_methods default_method);
1513
static void daprobedone(struct cam_periph *periph, union ccb *ccb);
1514
1515
static periph_ctor_t daregister;
1516
static periph_dtor_t dacleanup;
1517
static periph_start_t dastart;
1518
static periph_oninv_t daoninvalidate;
1519
static void dazonedone(struct cam_periph *periph, union ccb *ccb);
1520
static void dadone(struct cam_periph *periph,
1521
union ccb *done_ccb);
1522
static void dadone_probewp(struct cam_periph *periph,
1523
union ccb *done_ccb);
1524
static void dadone_proberc(struct cam_periph *periph,
1525
union ccb *done_ccb);
1526
static void dadone_probelbp(struct cam_periph *periph,
1527
union ccb *done_ccb);
1528
static void dadone_probeblklimits(struct cam_periph *periph,
1529
union ccb *done_ccb);
1530
static void dadone_probebdc(struct cam_periph *periph,
1531
union ccb *done_ccb);
1532
static void dadone_probecache(struct cam_periph *periph,
1533
union ccb *done_ccb);
1534
static void dadone_probeata(struct cam_periph *periph,
1535
union ccb *done_ccb);
1536
static void dadone_probeatalogdir(struct cam_periph *periph,
1537
union ccb *done_ccb);
1538
static void dadone_probeataiddir(struct cam_periph *periph,
1539
union ccb *done_ccb);
1540
static void dadone_probeatasup(struct cam_periph *periph,
1541
union ccb *done_ccb);
1542
static void dadone_probeatazone(struct cam_periph *periph,
1543
union ccb *done_ccb);
1544
static void dadone_probezone(struct cam_periph *periph,
1545
union ccb *done_ccb);
1546
static void dadone_tur(struct cam_periph *periph,
1547
union ccb *done_ccb);
1548
static int daerror(union ccb *ccb, uint32_t cam_flags,
1549
uint32_t sense_flags);
1550
static void daprevent(struct cam_periph *periph, int action);
1551
static void dareprobe(struct cam_periph *periph);
1552
static void dasetgeom(struct cam_periph *periph, uint32_t block_len,
1553
uint64_t maxsector,
1554
struct scsi_read_capacity_data_long *rcaplong,
1555
size_t rcap_size);
1556
static callout_func_t dasendorderedtag;
1557
static void dashutdown(void *arg, int howto);
1558
static callout_func_t damediapoll;
1559
1560
#ifndef DA_DEFAULT_POLL_PERIOD
1561
#define DA_DEFAULT_POLL_PERIOD 3
1562
#endif
1563
1564
#ifndef DA_DEFAULT_TIMEOUT
1565
#define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */
1566
#endif
1567
1568
#ifndef DA_DEFAULT_SOFTTIMEOUT
1569
#define DA_DEFAULT_SOFTTIMEOUT 0
1570
#endif
1571
1572
#ifndef DA_DEFAULT_RETRY
1573
#define DA_DEFAULT_RETRY 4
1574
#endif
1575
1576
#ifndef DA_DEFAULT_SEND_ORDERED
1577
#define DA_DEFAULT_SEND_ORDERED 1
1578
#endif
1579
1580
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
1581
static int da_retry_count = DA_DEFAULT_RETRY;
1582
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
1583
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
1584
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
1585
static int da_disable_wp_detection = 0;
1586
static int da_enable_biospeedup = 1;
1587
static int da_enable_uma_ccbs = 1;
1588
1589
static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1590
"CAM Direct Access Disk driver");
1591
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1592
&da_poll_period, 0, "Media polling period in seconds");
1593
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1594
&da_retry_count, 0, "Normal I/O retry count");
1595
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1596
&da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1597
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1598
&da_send_ordered, 0, "Send Ordered Tags");
1599
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
1600
&da_disable_wp_detection, 0,
1601
"Disable detection of write-protected disks");
1602
SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
1603
&da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
1604
SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_uma_ccbs, CTLFLAG_RWTUN,
1605
&da_enable_uma_ccbs, 0, "Use UMA for CCBs");
1606
1607
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1608
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
1609
dasysctlsofttimeout, "I",
1610
"Soft I/O timeout (ms)");
1611
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
1612
1613
/*
1614
* DA_ORDEREDTAG_INTERVAL determines how often, relative
1615
* to the default timeout, we check to see whether an ordered
1616
* tagged transaction is appropriate to prevent simple tag
1617
* starvation. Since we'd like to ensure that there is at least
1618
* 1/2 of the timeout length left for a starved transaction to
1619
* complete after we've sent an ordered tag, we must poll at least
1620
* four times in every timeout period. This takes care of the worst
1621
* case where a starved transaction starts during an interval that
1622
* meets the requirement "don't send an ordered tag" test so it takes
1623
* us two intervals to determine that a tag must be sent.
1624
*/
1625
#ifndef DA_ORDEREDTAG_INTERVAL
1626
#define DA_ORDEREDTAG_INTERVAL 4
1627
#endif
1628
1629
static struct periph_driver dadriver =
1630
{
1631
dainit, "da",
1632
TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1633
};
1634
1635
PERIPHDRIVER_DECLARE(da, dadriver);
1636
1637
static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1638
1639
/*
1640
* This driver takes out references / holds in well defined pairs, never
1641
* recursively. These macros / inline functions enforce those rules. They
1642
* are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
1643
* defined to be 2 or larger, the tracking also includes debug printfs.
1644
*/
1645
#if defined(DA_TRACK_REFS) || defined(INVARIANTS)
1646
1647
#ifndef DA_TRACK_REFS
1648
#define DA_TRACK_REFS 1
1649
#endif
1650
1651
#if DA_TRACK_REFS > 1
1652
static const char *da_ref_text[] = {
1653
"bogus",
1654
"open",
1655
"open hold",
1656
"close hold",
1657
"reprobe hold",
1658
"Test Unit Ready",
1659
"Geom",
1660
"sysctl",
1661
"reprobe",
1662
"max -- also bogus"
1663
};
1664
1665
#define DA_PERIPH_PRINT(periph, msg, args...) \
1666
CAM_PERIPH_PRINT(periph, msg, ##args)
1667
#else
1668
#define DA_PERIPH_PRINT(periph, msg, args...)
1669
#endif
1670
1671
/*
 * Validate a reference-tracking token before it is used to index
 * softc->ref_flags[]; an out-of-range value indicates a driver bug.
 */
static inline void
token_sanity(da_ref_token token)
{
	/* The unsigned cast also catches negative token values. */
	if ((unsigned)token >= DA_REF_MAX)
		panic("Bad token value passed in %d\n", token);
}
1677
1678
/*
 * Tracking wrapper around cam_periph_hold().  On a successful hold, the
 * per-reason counter in softc->ref_flags[] is incremented; holding twice
 * for the same reason is a driver bug and panics.  Returns the result of
 * cam_periph_hold() (0 on success).  Only compiled in when DA_TRACK_REFS
 * or INVARIANTS is defined (see the #if surrounding this region).
 */
static inline int
da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
{
	int err = cam_periph_hold(periph, priority);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		/* atomic_fetchadd returns the pre-increment value. */
		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-holding for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}
1696
1697
/*
 * Tracking wrapper around cam_periph_unhold().  Decrements the per-reason
 * counter and panics if the counter was not exactly 1, i.e. if the unhold
 * is not paired with a prior da_periph_hold() for the same reason.
 */
static inline void
da_periph_unhold(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
	    da_ref_text[token], token);
	/* atomic_fetchadd returns the pre-decrement value. */
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Unholding %d with cnt = %d", token, cnt);
	cam_periph_unhold(periph);
}
1711
1712
/*
 * Tracking wrapper around cam_periph_acquire().  On success, records the
 * reference in softc->ref_flags[] by reason token; acquiring twice for
 * the same reason panics.  Returns the result of cam_periph_acquire()
 * (0 on success).
 */
static inline int
da_periph_acquire(struct cam_periph *periph, da_ref_token token)
{
	int err = cam_periph_acquire(periph);

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
	    da_ref_text[token], token, err);
	if (err == 0) {
		int cnt;
		struct da_softc *softc = periph->softc;

		/* atomic_fetchadd returns the pre-increment value. */
		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
		if (cnt != 0)
			panic("Re-refing for reason %d, cnt = %d", token, cnt);
	}
	return (err);
}
1730
1731
/*
 * Tracking wrapper around cam_periph_release().  Decrements the
 * per-reason counter and panics if the release is not paired with a
 * prior da_periph_acquire() for the same reason.
 */
static inline void
da_periph_release(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
	    da_ref_text[token], token);
	/* atomic_fetchadd returns the pre-decrement value. */
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("Releasing %d with cnt = %d", token, cnt);
	cam_periph_release(periph);
}
1745
1746
/*
 * Same as da_periph_release(), but for callers that already hold the
 * periph lock: forwards to cam_periph_release_locked() after updating
 * the per-reason reference counter.
 */
static inline void
da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
{
	int cnt;
	struct da_softc *softc = periph->softc;

	token_sanity(token);
	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
	    da_ref_text[token], token);
	/* atomic_fetchadd returns the pre-decrement value. */
	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
	if (cnt != 1)
		panic("releasing (locked) %d with cnt = %d", token, cnt);
	cam_periph_release_locked(periph);
}
1760
1761
#define cam_periph_hold POISON
1762
#define cam_periph_unhold POISON
1763
#define cam_periph_acquire POISON
1764
#define cam_periph_release POISON
1765
#define cam_periph_release_locked POISON
1766
1767
#else
1768
#define da_periph_hold(periph, prio, token) cam_periph_hold((periph), (prio))
1769
#define da_periph_unhold(periph, token) cam_periph_unhold((periph))
1770
#define da_periph_acquire(periph, token) cam_periph_acquire((periph))
1771
#define da_periph_release(periph, token) cam_periph_release((periph))
1772
#define da_periph_release_locked(periph, token) cam_periph_release_locked((periph))
1773
#endif
1774
1775
/*
 * Disk open entry point.  Takes an OPEN reference and a hold on the
 * periph, triggers a reprobe, and waits for the probe state machine to
 * post the updated media size before deciding whether the pack is valid.
 * The OPEN reference is kept across a successful open and dropped by
 * daclose(); on failure it is released here.  Returns 0 or an errno.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
		cam_periph_unlock(periph);
		da_periph_release(periph, DA_REF_OPEN);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update. */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The periph may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Lock removable media in place, unless a quirk forbids it. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	/*
	 * Only 'validate' the pack if the media size is non-zero and the
	 * underlying peripheral isn't invalid (the only error != 0 path). Once
	 * the periph is marked invalid, we only get here on lost races with its
	 * teardown, so keeping the pack invalid also keeps more I/O from
	 * starting.
	 */
	if (error == 0 && softc->params.sectors != 0)
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	else
		softc->flags |= DA_FLAG_PACK_INVALID;

	if (error == 0)
		softc->flags |= DA_FLAG_OPEN;

	/* The hold was only needed for the duration of open processing. */
	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
	cam_periph_unlock(periph);

	/* On failure, also drop the OPEN reference taken above. */
	if (error != 0)
		da_periph_release(periph, DA_REF_OPEN);

	return (error);
}
1836
1837
/*
 * Disk close entry point.  Flushes the device write cache if we dirtied
 * it, re-allows medium removal on removable packs, waits for outstanding
 * softc references to drain, and drops the OPEN reference taken by
 * daopen().  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	/*
	 * Best effort: if we cannot get a hold (e.g. the periph is going
	 * away), skip the cache flush and PREVENT/ALLOW handling entirely.
	 */
	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {
		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/*
	 * Wait for outstanding softc->refcount users to drain.
	 * NOTE(review): refcount is manipulated outside this view; confirm
	 * exactly which activities it tracks before relying on this wait.
	 */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	da_periph_release(periph, DA_REF_OPEN);
	return (0);
}
1890
1891
static void
1892
daschedule(struct cam_periph *periph)
1893
{
1894
struct da_softc *softc = (struct da_softc *)periph->softc;
1895
1896
if (softc->state != DA_STATE_NORMAL)
1897
return;
1898
1899
cam_iosched_schedule(softc->cam_iosched, periph);
1900
}
1901
1902
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand. The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	softc = (struct da_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the pack has been invalidated, fail all I/O. The medium is not
	 * suitable for normal I/O, because one or more is true:
	 *  - the medium is missing
	 *  - its size is unknown
	 *  - it differs from the medium present at daopen
	 *  - we're tearing the cam periph device down
	 * Since we have the cam periph lock, we don't need to check it for
	 * the last condition since PACK_INVALID is set when we invalidate
	 * the device.
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));

	/*
	 * Zone commands must be ordered, because they can depend on the
	 * effects of previously issued commands, and they may affect
	 * commands after them.
	 */
	if (bp->bio_cmd == BIO_ZONE)
		bp->bio_flags |= BIO_ORDERED;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	cam_iosched_queue_work(softc->cam_iosched, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	daschedule(periph);
	cam_periph_unlock(periph);

	return;
}
1958
1959
/*
 * Kernel crash dump entry point (dumper_t).  Writes 'length' bytes from
 * 'virtual' to disk at byte offset 'offset', synchronously via
 * cam_periph_runccb() with no retries and no recovery actions; a
 * zero-length call flushes the device cache to media instead.  Returns
 * 0 on success or an errno value.
 */
static int
dadump(void *arg, void *virtual, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;
	int error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	secsize = softc->params.secsize;

	/*
	 * Can't dump to a disk that's not there or changed, for whatever
	 * reason.
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	memset(&csio, 0, sizeof(csio));
	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				/*cbfcnp*/NULL,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(uint8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	return (error);
}
2026
2027
static int
2028
dagetattr(struct bio *bp)
2029
{
2030
int ret;
2031
struct cam_periph *periph;
2032
2033
if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup))
2034
return (EJUSTRETURN);
2035
2036
periph = (struct cam_periph *)bp->bio_disk->d_drv1;
2037
cam_periph_lock(periph);
2038
ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
2039
periph->path);
2040
cam_periph_unlock(periph);
2041
if (ret == 0)
2042
bp->bio_completed = bp->bio_length;
2043
return ret;
2044
}
2045
2046
/*
 * Periph driver initialization (periph_init_t).  Creates the UMA zone
 * used for per-I/O CCBs, registers the AC_FOUND_DEVICE async callback
 * that creates da periphs for new devices, and — when ordered tags are
 * enabled — registers the shutdown handler that flushes caches.
 */
static void
dainit(void)
{
	cam_status status;

	da_ccb_zone = uma_zcreate("da_ccb",
	    sizeof(struct ccb_scsiio), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf(
	"da: Failed to attach master async callback due to status 0x%x!\n",
		    status);
	} else if (da_send_ordered) {
		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}
2072
2073
/*
2074
* Callback from GEOM, called when it has finished cleaning up its
2075
* resources.
2076
*/
2077
static void
2078
dadiskgonecb(struct disk *dp)
2079
{
2080
struct cam_periph *periph;
2081
2082
periph = (struct cam_periph *)dp->d_drv1;
2083
da_periph_release(periph, DA_REF_GEOM);
2084
}
2085
2086
/*
 * Periph invalidation callback (periph_oninv_t), called with the periph
 * lock held.  Stops async callbacks, marks the pack invalid so new I/O
 * fails, flushes everything queued in the I/O scheduler with ENXIO, and
 * notifies GEOM; dadiskgonecb() runs when GEOM is done.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO. Transactions may be queued up here
	 * for retry (since we are called while there's other transactions
	 * pending). Any requests in the hardware will drain before dacleanup
	 * is called.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}
2118
2119
/*
 * Periph destructor (periph_dtor_t).  Tears down the I/O scheduler,
 * sysctl trees, callouts, and GEOM disk, then frees the softc.  The
 * periph lock is dropped across the teardown (callout_drain() and
 * sysctl_ctx_free() may sleep) and re-taken before returning.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
2150
2151
/*
 * Asynchronous event callback, registered in daregister().  The locking
 * context differs per event code (noted on each case): AC_FOUND_DEVICE and
 * AC_ADVINFO_CHANGED arrive without the periph lock; the remaining cases
 * are delivered for our own path with the periph lock held.  Always ends by
 * forwarding the event to the generic cam_periph_async() handler.
 */
static void
daasync(void *callback_arg, uint32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to connected SCSI direct-access-like LUNs. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
			printf(
	"daasync: Unable to attach to new device due to status 0x%x\n",
			    status);
		return;
	}
	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			softc = periph->softc;
			/* Propagate physical-path changes to GEOM. */
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:		/* Called for this path: periph locked */
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Unit attentions are broadcast to all the LUNs of the device
		 * so handle all UNIT ATTENTIONs except our own, as they will be
		 * handled by daerror().
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			if (asc == 0x2A && ascq == 0x09) {
				/* 2a/9: CAPACITY DATA HAS CHANGED */
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				/* 28/0: NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				/* 3f/3: INQUIRY DATA HAS CHANGED */
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			}
		}
		break;
	}
	case AC_SCSI_AEN:		/* Called for this path: periph locked */
		/*
		 * Appears to be currently unused for SCSI devices, only ata SIMs
		 * generate this.
		 */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/* Schedule a TEST UNIT READY unless one is already in flight. */
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:		/* Called for this path: periph locked */
	case AC_BUS_RESET:		/* Called for this path: periph locked */
	{
		struct ccb_hdr *ccbh;

		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/* Inquiry data changed: force a full reprobe of the device. */
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	cam_periph_async(periph, code, path, arg);
}
2291
2292
/*
 * Taskqueue handler that builds the per-unit sysctl tree
 * (kern.cam.da.<unit>.*).  Runs from a task context so the (potentially
 * sleeping) sysctl allocations happen outside the periph lock.  A
 * DA_REF_SYSCTL reference was taken when the task was enqueued; every
 * return path below must release it.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	/* Flag that dacleanup() must tear the sysctl context down. */
	cam_periph_lock(periph);
	softc->flags |= DA_FLAG_SCTX_INIT;
	cam_periph_unlock(periph);
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method",
		CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max",
		CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_count", CTLFLAG_RD, &softc->trim_count,
		"Total number of unmap/dsm commands sent");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
		"Total number of ranges in unmap/dsm commands");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
		"Total lbas in the unmap/dsm commands sent");

	/* Zoned (SMR) device attributes. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode",
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support",
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "p_type",
		       CTLFLAG_RD,
		       &softc->p_type,
		       0,
		       "DIF protection type");

	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    softc, 0, daflagssysctl, "A",
	    "Flags for drive");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "quirks", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    softc, 0, daquirkssysctl, "A",
	    "Active quirks for drive");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "rotating", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &softc->flags, (u_int)DA_FLAG_ROTATING, dabitsysctl, "I",
	    "Rotating media *DEPRECATED* gone in FreeBSD 16");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "unmapped_io", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &softc->flags, (u_int)DA_FLAG_UNMAPPEDIO, dabitsysctl, "I",
	    "Unmapped I/O support *DEPRECATED* gone in FreeBSD 16");

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}
	/* For Fibre Channel transports, expose the WWPN. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	da_periph_release(periph, DA_REF_SYSCTL);
}
2488
2489
static int
2490
dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2491
{
2492
int error;
2493
uint64_t value;
2494
struct da_softc *softc;
2495
2496
softc = (struct da_softc *)arg1;
2497
2498
value = softc->disk->d_delmaxsize;
2499
error = sysctl_handle_64(oidp, &value, 0, req);
2500
if ((error != 0) || (req->newptr == NULL))
2501
return (error);
2502
2503
/* only accept values smaller than the calculated value */
2504
if (value > dadeletemaxsize(softc, softc->delete_method)) {
2505
return (EINVAL);
2506
}
2507
softc->disk->d_delmaxsize = value;
2508
2509
return (0);
2510
}
2511
2512
static int
2513
dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2514
{
2515
int error, value;
2516
2517
value = *(int *)arg1;
2518
2519
error = sysctl_handle_int(oidp, &value, 0, req);
2520
2521
if ((error != 0)
2522
|| (req->newptr == NULL))
2523
return (error);
2524
2525
/*
2526
* Acceptable values here are 6, 10, 12 or 16.
2527
*/
2528
if (value < 6)
2529
value = 6;
2530
else if ((value > 6)
2531
&& (value <= 10))
2532
value = 10;
2533
else if ((value > 10)
2534
&& (value <= 12))
2535
value = 12;
2536
else if (value > 12)
2537
value = 16;
2538
2539
*(int *)arg1 = value;
2540
2541
return (0);
2542
}
2543
2544
static int
2545
dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2546
{
2547
sbintime_t value;
2548
int error;
2549
2550
value = da_default_softtimeout / SBT_1MS;
2551
2552
error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2553
if ((error != 0) || (req->newptr == NULL))
2554
return (error);
2555
2556
/* XXX Should clip this to a reasonable level */
2557
if (value > da_default_timeout * 1000)
2558
return (EINVAL);
2559
2560
da_default_softtimeout = value * SBT_1MS;
2561
return (0);
2562
}
2563
2564
static void
2565
dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2566
{
2567
2568
softc->delete_method = delete_method;
2569
softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2570
softc->delete_func = da_delete_functions[delete_method];
2571
2572
if (softc->delete_method > DA_DELETE_DISABLE)
2573
softc->disk->d_flags |= DISKFLAG_CANDELETE;
2574
else
2575
softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2576
}
2577
2578
static off_t
2579
dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2580
{
2581
off_t sectors;
2582
2583
switch(delete_method) {
2584
case DA_DELETE_UNMAP:
2585
sectors = (off_t)softc->unmap_max_lba;
2586
break;
2587
case DA_DELETE_ATA_TRIM:
2588
sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2589
break;
2590
case DA_DELETE_WS16:
2591
sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2592
break;
2593
case DA_DELETE_ZERO:
2594
case DA_DELETE_WS10:
2595
sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2596
break;
2597
default:
2598
return 0;
2599
}
2600
2601
return (off_t)softc->params.secsize *
2602
omin(sectors, softc->params.sectors);
2603
}
2604
2605
/*
 * Complete the probe state machine: pick a delete method, announce the
 * device (once), publish the disk to GEOM, and drop the probe-related
 * references.  Called with the periph lock held.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	dadeletemethodchoose(softc, DA_DELETE_NONE);

	/* One-time verbose announcement of the available delete methods. */
	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			/* "(*)" marks the method actually selected. */
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake any thread sleeping on the media size becoming valid. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;

		/*
		 * We'll release this reference once GEOM calls us back via
		 * dadiskgonecb(), telling us that our provider has been freed.
		 */
		if (da_periph_acquire(periph, DA_REF_GEOM) == 0)
			disk_create(softc->disk, DISK_VERSION);

		cam_periph_release_boot(periph);
	}
	da_periph_release_locked(periph, DA_REF_REPROBE);
}
2671
2672
static void
2673
dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2674
{
2675
int i, methods;
2676
2677
/* If available, prefer the method requested by user. */
2678
i = softc->delete_method_pref;
2679
methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2680
if (methods & (1 << i)) {
2681
dadeletemethodset(softc, i);
2682
return;
2683
}
2684
2685
/* Use the pre-defined order to choose the best performing delete. */
2686
for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2687
if (i == DA_DELETE_ZERO)
2688
continue;
2689
if (softc->delete_available & (1 << i)) {
2690
dadeletemethodset(softc, i);
2691
return;
2692
}
2693
}
2694
2695
/* Fallback to default. */
2696
dadeletemethodset(softc, default_method);
2697
}
2698
2699
static int
2700
dabitsysctl(SYSCTL_HANDLER_ARGS)
2701
{
2702
u_int *flags = arg1;
2703
u_int test = arg2;
2704
int tmpout, error;
2705
2706
tmpout = !!(*flags & test);
2707
error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
2708
if (error || !req->newptr)
2709
return (error);
2710
2711
return (EPERM);
2712
}
2713
2714
static int
2715
daflagssysctl(SYSCTL_HANDLER_ARGS)
2716
{
2717
struct sbuf sbuf;
2718
struct da_softc *softc = arg1;
2719
int error;
2720
2721
sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
2722
if (softc->flags != 0)
2723
sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, DA_FLAG_STRING);
2724
else
2725
sbuf_putc(&sbuf, '0');
2726
error = sbuf_finish(&sbuf);
2727
sbuf_delete(&sbuf);
2728
2729
return (error);
2730
}
2731
2732
static int
2733
daquirkssysctl(SYSCTL_HANDLER_ARGS)
2734
{
2735
struct sbuf sbuf;
2736
struct da_softc *softc = arg1;
2737
int error;
2738
2739
sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
2740
if (softc->quirks != 0)
2741
sbuf_printf(&sbuf, "0x%b", (unsigned)softc->quirks, DA_Q_BIT_STRING);
2742
else
2743
sbuf_putc(&sbuf, '0');
2744
error = sbuf_finish(&sbuf);
2745
sbuf_delete(&sbuf);
2746
2747
return (error);
2748
}
2749
2750
static int
2751
dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2752
{
2753
char buf[16];
2754
const char *p;
2755
struct da_softc *softc;
2756
int i, error, value;
2757
2758
softc = (struct da_softc *)arg1;
2759
2760
value = softc->delete_method;
2761
if (value < 0 || value > DA_DELETE_MAX)
2762
p = "UNKNOWN";
2763
else
2764
p = da_delete_method_names[value];
2765
strncpy(buf, p, sizeof(buf));
2766
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2767
if (error != 0 || req->newptr == NULL)
2768
return (error);
2769
for (i = 0; i <= DA_DELETE_MAX; i++) {
2770
if (strcmp(buf, da_delete_method_names[i]) == 0)
2771
break;
2772
}
2773
if (i > DA_DELETE_MAX)
2774
return (EINVAL);
2775
softc->delete_method_pref = i;
2776
dadeletemethodchoose(softc, DA_DELETE_NONE);
2777
return (0);
2778
}
2779
2780
static int
2781
dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2782
{
2783
char tmpbuf[40];
2784
struct da_softc *softc;
2785
int error;
2786
2787
softc = (struct da_softc *)arg1;
2788
2789
switch (softc->zone_mode) {
2790
case DA_ZONE_DRIVE_MANAGED:
2791
snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2792
break;
2793
case DA_ZONE_HOST_AWARE:
2794
snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2795
break;
2796
case DA_ZONE_HOST_MANAGED:
2797
snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2798
break;
2799
case DA_ZONE_NONE:
2800
default:
2801
snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2802
break;
2803
}
2804
2805
error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2806
2807
return (error);
2808
}
2809
2810
static int
2811
dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2812
{
2813
struct da_softc *softc;
2814
struct sbuf sb;
2815
int error, first;
2816
unsigned int i;
2817
2818
softc = (struct da_softc *)arg1;
2819
2820
first = 1;
2821
sbuf_new_for_sysctl(&sb, NULL, 0, req);
2822
2823
for (i = 0; i < sizeof(da_zone_desc_table) /
2824
sizeof(da_zone_desc_table[0]); i++) {
2825
if (softc->zone_flags & da_zone_desc_table[i].value) {
2826
if (first == 0)
2827
sbuf_cat(&sb, ", ");
2828
else
2829
first = 0;
2830
sbuf_cat(&sb, da_zone_desc_table[i].desc);
2831
}
2832
}
2833
2834
if (first == 1)
2835
sbuf_cat(&sb, "None");
2836
2837
error = sbuf_finish(&sb);
2838
sbuf_delete(&sb);
2839
return (error);
2840
}
2841
2842
/*
 * CAM registration callback for a new direct-access device.  Allocates and
 * initializes the softc, applies quirks and tunables, creates the GEOM
 * disk object, registers async callbacks, and kicks off the probe state
 * machine.  Called with the periph lock held; note the unlock/lock window
 * in the middle around operations that must not hold it.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;
	int quirks;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	/* M_NOWAIT: we may be called from a context that cannot sleep. */
	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	/* Probe starts by reading the write-protect state. */
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	/* Conservative defaults; refined later from the device's VPD data. */
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->flags |= DA_FLAG_ROTATING;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Override quirks if tunable is set */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
	    periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(tmpstr, &quirks);
	softc->quirks = quirks;

	/* Host-managed ZBC drives report a distinct device type. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		/* Decide how to talk zones: SAT translation, raw ATA, or SCSI. */
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Let XPT know we can use UMA-allocated CCBs.
	 */
	if (da_enable_uma_ccbs) {
		KASSERT(da_ccb_zone != NULL,
		    ("%s: NULL da_ccb_zone", __func__));
		periph->ccb_zone = da_ccb_zone;
	}

	/*
	 * Take a reference on the periph while dastart is called to finish the
	 * probe.  The reference will be dropped in dadone at the end of probe.
	 */
	(void)da_periph_acquire(periph, DA_REF_REPROBE);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset_sbt(&softc->sendordered_c,
	    SBT_1S / DA_ORDEREDTAG_INTERVAL * da_default_timeout, 0,
	    dasendorderedtag, periph, C_PREL(1));

	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
	    periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* On first PROBE_WP request all more pages, then adjust. */
	softc->mode_page = SMS_ALL_PAGES_PAGE;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	/* Dumping requires a SIM that can be polled without interrupts. */
	if (cam_sim_pollable(periph->sim))
		softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > maxphys)
		softc->maxio = maxphys;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->flags |= DA_FLAG_UNMAPPEDIO;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Description: "<vendor> <product>" from the INQUIRY data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
	    "%s%d", cpi.dev_name, cpi.unit_number);

	if (cam_iosched_init(&softc->cam_iosched, periph, softc->disk,
	    daschedule) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	cam_periph_lock(periph);
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Schedule a periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0) {
		callout_reset_sbt(&softc->mediapoll_c, da_poll_period * SBT_1S,
		    0, damediapoll, periph, C_PREL(1));
	}

	/* Released after probe when disk_create() call pass it to GEOM. */
	cam_periph_hold_boot(periph);

	xpt_schedule(periph, CAM_PRIORITY_DEV);
	return(CAM_REQ_CMP);
}
3073
3074
static int
3075
da_zone_bio_to_scsi(int disk_zone_cmd)
3076
{
3077
switch (disk_zone_cmd) {
3078
case DISK_ZONE_OPEN:
3079
return ZBC_OUT_SA_OPEN;
3080
case DISK_ZONE_CLOSE:
3081
return ZBC_OUT_SA_CLOSE;
3082
case DISK_ZONE_FINISH:
3083
return ZBC_OUT_SA_FINISH;
3084
case DISK_ZONE_RWP:
3085
return ZBC_OUT_SA_RWP;
3086
}
3087
3088
return -1;
3089
}
3090
3091
static int
3092
da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
3093
int *queue_ccb)
3094
{
3095
struct da_softc *softc;
3096
int error;
3097
3098
error = 0;
3099
3100
if (bp->bio_cmd != BIO_ZONE) {
3101
error = EINVAL;
3102
goto bailout;
3103
}
3104
3105
softc = periph->softc;
3106
3107
switch (bp->bio_zone.zone_cmd) {
3108
case DISK_ZONE_OPEN:
3109
case DISK_ZONE_CLOSE:
3110
case DISK_ZONE_FINISH:
3111
case DISK_ZONE_RWP: {
3112
int zone_flags;
3113
int zone_sa;
3114
uint64_t lba;
3115
3116
zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
3117
if (zone_sa == -1) {
3118
xpt_print(periph->path,
3119
"Cannot translate zone cmd %#x to SCSI\n",
3120
bp->bio_zone.zone_cmd);
3121
error = EINVAL;
3122
goto bailout;
3123
}
3124
3125
zone_flags = 0;
3126
lba = bp->bio_zone.zone_params.rwp.id;
3127
3128
if (bp->bio_zone.zone_params.rwp.flags &
3129
DISK_ZONE_RWP_FLAG_ALL)
3130
zone_flags |= ZBC_OUT_ALL;
3131
3132
if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3133
scsi_zbc_out(&ccb->csio,
3134
/*retries*/ da_retry_count,
3135
/*cbfcnp*/ dadone,
3136
/*tag_action*/ MSG_SIMPLE_Q_TAG,
3137
/*service_action*/ zone_sa,
3138
/*zone_id*/ lba,
3139
/*zone_flags*/ zone_flags,
3140
/*data_ptr*/ NULL,
3141
/*dxfer_len*/ 0,
3142
/*sense_len*/ SSD_FULL_SIZE,
3143
/*timeout*/ da_default_timeout * 1000);
3144
} else {
3145
/*
3146
* Note that in this case, even though we can
3147
* technically use NCQ, we don't bother for several
3148
* reasons:
3149
* 1. It hasn't been tested on a SAT layer that
3150
* supports it. This is new as of SAT-4.
3151
* 2. Even when there is a SAT layer that supports
3152
* it, that SAT layer will also probably support
3153
* ZBC -> ZAC translation, since they are both
3154
* in the SAT-4 spec.
3155
* 3. Translation will likely be preferable to ATA
3156
* passthrough. LSI / Avago at least single
3157
* steps ATA passthrough commands in the HBA,
3158
* regardless of protocol, so unless that
3159
* changes, there is a performance penalty for
3160
* doing ATA passthrough no matter whether
3161
* you're using NCQ/FPDMA, DMA or PIO.
3162
* 4. It requires a 32-byte CDB, which at least at
3163
* this point in CAM requires a CDB pointer, which
3164
* would require us to allocate an additional bit
3165
* of storage separate from the CCB.
3166
*/
3167
error = scsi_ata_zac_mgmt_out(&ccb->csio,
3168
/*retries*/ da_retry_count,
3169
/*cbfcnp*/ dadone,
3170
/*tag_action*/ MSG_SIMPLE_Q_TAG,
3171
/*use_ncq*/ 0,
3172
/*zm_action*/ zone_sa,
3173
/*zone_id*/ lba,
3174
/*zone_flags*/ zone_flags,
3175
/*data_ptr*/ NULL,
3176
/*dxfer_len*/ 0,
3177
/*cdb_storage*/ NULL,
3178
/*cdb_storage_len*/ 0,
3179
/*sense_len*/ SSD_FULL_SIZE,
3180
/*timeout*/ da_default_timeout * 1000);
3181
if (error != 0) {
3182
error = EINVAL;
3183
xpt_print(periph->path,
3184
"scsi_ata_zac_mgmt_out() returned an error!");
3185
goto bailout;
3186
}
3187
}
3188
*queue_ccb = 1;
3189
3190
break;
3191
}
3192
case DISK_ZONE_REPORT_ZONES: {
3193
uint8_t *rz_ptr;
3194
uint32_t num_entries, alloc_size;
3195
struct disk_zone_report *rep;
3196
3197
rep = &bp->bio_zone.zone_params.report;
3198
3199
num_entries = rep->entries_allocated;
3200
if (num_entries == 0) {
3201
xpt_print(periph->path,
3202
"No entries allocated for Report Zones request\n");
3203
error = EINVAL;
3204
goto bailout;
3205
}
3206
alloc_size = sizeof(struct scsi_report_zones_hdr) +
3207
(sizeof(struct scsi_report_zones_desc) * num_entries);
3208
alloc_size = min(alloc_size, softc->disk->d_maxsize);
3209
rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
3210
if (rz_ptr == NULL) {
3211
xpt_print(periph->path,
3212
"Unable to allocate memory for Report Zones request\n");
3213
error = ENOMEM;
3214
goto bailout;
3215
}
3216
3217
if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3218
scsi_zbc_in(&ccb->csio,
3219
/*retries*/ da_retry_count,
3220
/*cbcfnp*/ dadone,
3221
/*tag_action*/ MSG_SIMPLE_Q_TAG,
3222
/*service_action*/ ZBC_IN_SA_REPORT_ZONES,
3223
/*zone_start_lba*/ rep->starting_id,
3224
/*zone_options*/ rep->rep_options,
3225
/*data_ptr*/ rz_ptr,
3226
/*dxfer_len*/ alloc_size,
3227
/*sense_len*/ SSD_FULL_SIZE,
3228
/*timeout*/ da_default_timeout * 1000);
3229
} else {
3230
/*
3231
* Note that in this case, even though we can
3232
* technically use NCQ, we don't bother for several
3233
* reasons:
3234
* 1. It hasn't been tested on a SAT layer that
3235
* supports it. This is new as of SAT-4.
3236
* 2. Even when there is a SAT layer that supports
3237
* it, that SAT layer will also probably support
3238
* ZBC -> ZAC translation, since they are both
3239
* in the SAT-4 spec.
3240
* 3. Translation will likely be preferable to ATA
3241
* passthrough. LSI / Avago at least single
3242
* steps ATA passthrough commands in the HBA,
3243
* regardless of protocol, so unless that
3244
* changes, there is a performance penalty for
3245
* doing ATA passthrough no matter whether
3246
* you're using NCQ/FPDMA, DMA or PIO.
3247
* 4. It requires a 32-byte CDB, which at least at
3248
* this point in CAM requires a CDB pointer, which
3249
* would require us to allocate an additional bit
3250
* of storage separate from the CCB.
3251
*/
3252
error = scsi_ata_zac_mgmt_in(&ccb->csio,
3253
/*retries*/ da_retry_count,
3254
/*cbcfnp*/ dadone,
3255
/*tag_action*/ MSG_SIMPLE_Q_TAG,
3256
/*use_ncq*/ 0,
3257
/*zm_action*/ ATA_ZM_REPORT_ZONES,
3258
/*zone_id*/ rep->starting_id,
3259
/*zone_flags*/ rep->rep_options,
3260
/*data_ptr*/ rz_ptr,
3261
/*dxfer_len*/ alloc_size,
3262
/*cdb_storage*/ NULL,
3263
/*cdb_storage_len*/ 0,
3264
/*sense_len*/ SSD_FULL_SIZE,
3265
/*timeout*/ da_default_timeout * 1000);
3266
if (error != 0) {
3267
error = EINVAL;
3268
xpt_print(periph->path,
3269
"scsi_ata_zac_mgmt_in() returned an error!");
3270
goto bailout;
3271
}
3272
}
3273
3274
/*
3275
* For BIO_ZONE, this isn't normally needed. However, it
3276
* is used by devstat_end_transaction_bio() to determine
3277
* how much data was transferred.
3278
*/
3279
/*
3280
* XXX KDM we have a problem. But I'm not sure how to fix
3281
* it. devstat uses bio_bcount - bio_resid to calculate
3282
* the amount of data transferred. The GEOM disk code
3283
* uses bio_length - bio_resid to calculate the amount of
3284
* data in bio_completed. We have different structure
3285
* sizes above and below the ada(4) driver. So, if we
3286
* use the sizes above, the amount transferred won't be
3287
* quite accurate for devstat. If we use different sizes
3288
* for bio_bcount and bio_length (above and below
3289
* respectively), then the residual needs to match one or
3290
* the other. Everything is calculated after the bio
3291
* leaves the driver, so changing the values around isn't
3292
* really an option. For now, just set the count to the
3293
* passed in length. This means that the calculations
3294
* above (e.g. bio_completed) will be correct, but the
3295
* amount of data reported to devstat will be slightly
3296
* under or overstated.
3297
*/
3298
bp->bio_bcount = bp->bio_length;
3299
3300
*queue_ccb = 1;
3301
3302
break;
3303
}
3304
case DISK_ZONE_GET_PARAMS: {
3305
struct disk_zone_disk_params *params;
3306
3307
params = &bp->bio_zone.zone_params.disk_params;
3308
bzero(params, sizeof(*params));
3309
3310
switch (softc->zone_mode) {
3311
case DA_ZONE_DRIVE_MANAGED:
3312
params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
3313
break;
3314
case DA_ZONE_HOST_AWARE:
3315
params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
3316
break;
3317
case DA_ZONE_HOST_MANAGED:
3318
params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
3319
break;
3320
default:
3321
case DA_ZONE_NONE:
3322
params->zone_mode = DISK_ZONE_MODE_NONE;
3323
break;
3324
}
3325
3326
if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
3327
params->flags |= DISK_ZONE_DISK_URSWRZ;
3328
3329
if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
3330
params->optimal_seq_zones = softc->optimal_seq_zones;
3331
params->flags |= DISK_ZONE_OPT_SEQ_SET;
3332
}
3333
3334
if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
3335
params->optimal_nonseq_zones =
3336
softc->optimal_nonseq_zones;
3337
params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
3338
}
3339
3340
if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
3341
params->max_seq_zones = softc->max_seq_zones;
3342
params->flags |= DISK_ZONE_MAX_SEQ_SET;
3343
}
3344
if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
3345
params->flags |= DISK_ZONE_RZ_SUP;
3346
3347
if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
3348
params->flags |= DISK_ZONE_OPEN_SUP;
3349
3350
if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
3351
params->flags |= DISK_ZONE_CLOSE_SUP;
3352
3353
if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
3354
params->flags |= DISK_ZONE_FINISH_SUP;
3355
3356
if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
3357
params->flags |= DISK_ZONE_RWP_SUP;
3358
break;
3359
}
3360
default:
3361
break;
3362
}
3363
bailout:
3364
return (error);
3365
}
3366
3367
/*
 * Periph start routine, invoked by the CAM framework (via xpt_schedule())
 * when a CCB is available for this periph.  Behaves as a state machine:
 * in DA_STATE_NORMAL it pulls bios from the I/O scheduler and turns them
 * into SCSI commands; in each DA_STATE_PROBE_* state it issues one probe
 * command (mode sense, read capacity, VPD inquiry, ATA log read) and the
 * matching dadone_*() completion callback advances softc->state.
 *
 * Called with the periph lock held (asserted below); the lock is dropped
 * only around xpt_action() for normal I/O CCBs.
 */
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;
	uint32_t priority = start_ccb->ccb_h.pinfo.priority;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));

	/*
	 * When we're running the state machine, we should only accept DEV CCBs.
	 * When we're doing normal I/O we should only accept NORMAL CCBs.
	 *
	 * While in the state machine, we carefully single step the queue, but
	 * there's no protection for 'extra' calls to xpt_schedule() at the
	 * wrong priority. Guard against that so that we filter any CCBs that
	 * are offered at the wrong priority. This avoids generating requests
	 * that are at normal priority. In addition, though we can't easily
	 * enforce it, one must not transition to the NORMAL state via the
	 * skipstate mechanism.
	 */
	if ((softc->state != DA_STATE_NORMAL && priority != CAM_PRIORITY_DEV) ||
	    (softc->state == DA_STATE_NORMAL && priority != CAM_PRIORITY_NORMAL)) {
		xpt_print(periph->path, "Bad priority for state %d prio %d\n",
		    softc->state, priority);
		xpt_release_ccb(start_ccb);
		return;
	}

skipstate:
	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		struct bio *bp;
		uint8_t tag_code;

more:
		bp = cam_iosched_next_bio(softc->cam_iosched);
		if (bp == NULL) {
			/*
			 * No bio pending: use the CCB for a deferred TEST
			 * UNIT READY if one was requested, else give it back.
			 */
			if (cam_iosched_has_work_flags(softc->cam_iosched,
			    DA_WORK_TUR)) {
				softc->flags |= DA_FLAG_TUR_PENDING;
				cam_iosched_clr_work_flags(softc->cam_iosched,
				    DA_WORK_TUR);
				scsi_test_unit_ready(&start_ccb->csio,
				    /*retries*/ da_retry_count,
				    dadone_tur,
				    MSG_SIMPLE_Q_TAG,
				    SSD_FULL_SIZE,
				    da_default_timeout * 1000);
				start_ccb->ccb_h.ccb_bp = NULL;
				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
				xpt_action(start_ccb);
			} else
				xpt_release_ccb(start_ccb);
			break;
		}

		if (bp->bio_cmd == BIO_DELETE) {
			if (softc->delete_func != NULL) {
				/* Delete backend builds the CCB itself. */
				softc->delete_func(periph, start_ccb, bp);
				goto out;
			} else {
				/*
				 * Not sure this is possible, but failsafe by
				 * lying and saying "sure, done."
				 */
				biofinish(bp, NULL, 0);
				goto more;
			}
		}

		/*
		 * Real I/O is arriving, so any pending TUR request is moot;
		 * drop it and its periph reference.
		 */
		if (cam_iosched_has_work_flags(softc->cam_iosched,
		    DA_WORK_TUR)) {
			cam_iosched_clr_work_flags(softc->cam_iosched,
			    DA_WORK_TUR);
			da_periph_release_locked(periph, DA_REF_TUR);
		}

		/* Ordered bios (or a pending barrier) use an ordered tag. */
		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~DA_FLAG_NEED_OTAG;
			softc->flags |= DA_FLAG_WAS_OTAG;
			tag_code = MSG_ORDERED_Q_TAG;
		} else {
			tag_code = MSG_SIMPLE_Q_TAG;
		}

		switch (bp->bio_cmd) {
		case BIO_WRITE:
		case BIO_READ:
		{
			void *data_ptr;
			int rw_op;

			biotrack(bp, __func__);

			if (bp->bio_cmd == BIO_WRITE) {
				/* Track dirtiness so BIO_FLUSH can short-cut. */
				softc->flags |= DA_FLAG_DIRTY;
				rw_op = SCSI_RW_WRITE;
			} else {
				rw_op = SCSI_RW_READ;
			}

			data_ptr = bp->bio_data;
			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
				/* Pass the bio itself for unmapped/vlist I/O. */
				rw_op |= SCSI_RW_BIO;
				data_ptr = bp;
			}

			scsi_read_write(&start_ccb->csio,
					/*retries*/da_retry_count,
					/*cbfcnp*/dadone,
					/*tag_action*/tag_code,
					rw_op,
					/*byte2*/0,
					softc->minimum_cmd_size,
					/*lba*/bp->bio_pblkno,
					/*block_count*/bp->bio_bcount /
					softc->params.secsize,
					data_ptr,
					/*dxfer_len*/ bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					da_default_timeout * 1000);
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
			start_ccb->csio.bio = bp;
#endif
			break;
		}
		case BIO_FLUSH:
			/*
			 * If we don't support sync cache, or the disk
			 * isn't dirty, FLUSH is a no-op. Use the
			 * allocated CCB for the next bio if one is
			 * available.
			 */
			if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
			    (softc->flags & DA_FLAG_DIRTY) == 0) {
				biodone(bp);
				goto skipstate;
			}

			/*
			 * BIO_FLUSH doesn't currently communicate
			 * range data, so we synchronize the cache
			 * over the whole disk.
			 */
			scsi_synchronize_cache(&start_ccb->csio,
					       /*retries*/1,
					       /*cbfcnp*/dadone,
					       /*tag_action*/tag_code,
					       /*begin_lba*/0,
					       /*lb_count*/0,
					       SSD_FULL_SIZE,
					       da_default_timeout*1000);
			/*
			 * Clear the dirty flag before sending the command.
			 * Either this sync cache will be successful, or it
			 * will fail after a retry. If it fails, it is
			 * unlikely to be successful if retried later, so
			 * we'll save ourselves time by just marking the
			 * device clean.
			 */
			softc->flags &= ~DA_FLAG_DIRTY;
			break;
		case BIO_ZONE: {
			int error, queue_ccb;

			queue_ccb = 0;

			error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb);
			if ((error != 0)
			 || (queue_ccb == 0)) {
				/*
				 * g_io_deliver will recursively call start
				 * routine for ENOMEM, so drop the periph
				 * lock to allow that recursion.
				 */
				if (error == ENOMEM)
					cam_periph_unlock(periph);
				biofinish(bp, NULL, error);
				if (error == ENOMEM)
					cam_periph_lock(periph);
				xpt_release_ccb(start_ccb);
				return;
			}
			break;
		}
		default:
			biofinish(bp, NULL, EOPNOTSUPP);
			xpt_release_ccb(start_ccb);
			return;
		}
		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);

out:
		LIST_INSERT_HEAD(&softc->pending_ccbs,
				 &start_ccb->ccb_h, periph_links.le);

		/* We expect a unit attention from this device */
		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
			softc->flags &= ~DA_FLAG_RETRY_UA;
		}

		/*
		 * Hold a softc reference across the unlocked xpt_action()
		 * call; dadone() side drops it.  NOTE(review): refcount
		 * release is outside this view — confirm in dadone paths.
		 */
		start_ccb->ccb_h.ccb_bp = bp;
		softc->refcount++;
		cam_periph_unlock(periph);
		xpt_action(start_ccb);
		cam_periph_lock(periph);

		/* May have more work to do, so ensure we stay scheduled */
		daschedule(periph);
		break;
	}
	case DA_STATE_PROBE_WP:
	{
		void *mode_buf;
		int mode_buf_len;

		/*
		 * Probe write-protect via MODE SENSE unless disabled or no
		 * usable mode page; otherwise fall through to read capacity.
		 */
		if (da_disable_wp_detection || softc->mode_page < 0) {
			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
				softc->state = DA_STATE_PROBE_RC16;
			else
				softc->state = DA_STATE_PROBE_RC;
			goto skipstate;
		}
		mode_buf_len = 192;
		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
		if (mode_buf == NULL) {
			xpt_print(periph->path,
			    "Unable to send mode sense - malloc failure\n");
			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
				softc->state = DA_STATE_PROBE_RC16;
			else
				softc->state = DA_STATE_PROBE_RC;
			goto skipstate;
		}
		scsi_mode_sense_len(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probewp,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*dbd*/ FALSE,
		    /*pc*/ SMS_PAGE_CTRL_CURRENT,
		    /*page*/ softc->mode_page,
		    /*param_buf*/ mode_buf,
		    /*param_len*/ mode_buf_len,
		    /*minimum_cmd_size*/ softc->minimum_cmd_size,
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_RC:
	{
		struct scsi_read_capacity_data *rcap;

		/* 10-byte READ CAPACITY for devices without RC16 support. */
		rcap = (struct scsi_read_capacity_data *)
		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		scsi_read_capacity(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    dadone_proberc,
		    MSG_SIMPLE_Q_TAG,
		    rcap,
		    SSD_FULL_SIZE,
		    /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_RC16:
	{
		struct scsi_read_capacity_data_long *rcaplong;

		/* READ CAPACITY(16): needed for >2TB and block-size info. */
		rcaplong = (struct scsi_read_capacity_data_long *)
		    malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (rcaplong == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		scsi_read_capacity_16(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_proberc,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*lba*/ 0,
		    /*reladr*/ 0,
		    /*pmi*/ 0,
		    /*rcap_buf*/ (uint8_t *)rcaplong,
		    /*rcap_buf_len*/ sizeof(*rcaplong),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_LBP:
	{
		struct scsi_vpd_logical_block_prov *lbp;

		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
			/*
			 * If we get here we don't support any SBC-3 delete
			 * methods with UNMAP as the Logical Block Provisioning
			 * VPD page support is required for devices which
			 * support it according to T10/1799-D Revision 31
			 * however older revisions of the spec don't mandate
			 * this so we currently don't remove these methods
			 * from the available set.
			 */
			softc->state = DA_STATE_PROBE_BLK_LIMITS;
			goto skipstate;
		}

		lbp = (struct scsi_vpd_logical_block_prov *)
		    malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (lbp == NULL) {
			printf("dastart: Couldn't malloc lbp data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    /*cbfcnp*/dadone_probelbp,
		    /*tag_action*/MSG_SIMPLE_Q_TAG,
		    /*inq_buf*/(uint8_t *)lbp,
		    /*inq_len*/sizeof(*lbp),
		    /*evpd*/TRUE,
		    /*page_code*/SVPD_LBP,
		    /*sense_len*/SSD_MIN_SIZE,
		    /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_BLK_LIMITS:
	{
		struct scsi_vpd_block_limits *block_limits;

		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
			/* Not supported skip to next probe */
			softc->state = DA_STATE_PROBE_BDC;
			goto skipstate;
		}

		block_limits = (struct scsi_vpd_block_limits *)
		    malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (block_limits == NULL) {
			printf("dastart: Couldn't malloc block_limits data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    /*cbfcnp*/dadone_probeblklimits,
		    /*tag_action*/MSG_SIMPLE_Q_TAG,
		    /*inq_buf*/(uint8_t *)block_limits,
		    /*inq_len*/sizeof(*block_limits),
		    /*evpd*/TRUE,
		    /*page_code*/SVPD_BLOCK_LIMITS,
		    /*sense_len*/SSD_MIN_SIZE,
		    /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_BDC:
	{
		struct scsi_vpd_block_device_characteristics *bdc;

		/* Block Device Characteristics VPD (rotation rate, zoning). */
		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
			softc->state = DA_STATE_PROBE_ATA;
			goto skipstate;
		}

		bdc = (struct scsi_vpd_block_device_characteristics *)
		    malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (bdc == NULL) {
			printf("dastart: Couldn't malloc bdc data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    /*cbfcnp*/dadone_probebdc,
		    /*tag_action*/MSG_SIMPLE_Q_TAG,
		    /*inq_buf*/(uint8_t *)bdc,
		    /*inq_len*/sizeof(*bdc),
		    /*evpd*/TRUE,
		    /*page_code*/SVPD_BDC,
		    /*sense_len*/SSD_MIN_SIZE,
		    /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_CACHE:
	{
		void *mode_buf;
		int mode_buf_len;

		/* XXX Future: skip if already not doing SYNC CACHE */

		/*
		 * Probe the CACHE mode page to see if we need to do a
		 * SYNCHRONIZE CACHE command or not. If there's no
		 * caching page, or we get back garbage when we ask
		 * for the caching page or MODE SENSE isn't supported,
		 * we set DA_Q_NO_SYNC_CACHE.
		 */
		mode_buf_len = sizeof(struct scsi_mode_header_6) +
		    sizeof(struct scsi_mode_blk_desc) +
		    sizeof(struct scsi_caching_page);
		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
		if (mode_buf == NULL) {
			printf("dastart: Couldn't malloc mode_buf data\n");
			/* da_free_periph??? */
			break;
		}
		scsi_mode_sense(&start_ccb->csio,
		    /*retries*/4,
		    dadone_probecache,
		    MSG_SIMPLE_Q_TAG,
		    /*dbd*/FALSE,
		    SMS_PAGE_CTRL_CURRENT,
		    SMS_CACHE_PAGE,
		    mode_buf,
		    mode_buf_len,
		    SSD_FULL_SIZE,
		    /*timeout*/60000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_CACHE;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA:
	{
		struct ata_params *ata_params;

		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
				/*
				 * Note that if the ATA VPD page isn't
				 * supported, we aren't talking to an ATA
				 * device anyway. Support for that VPD
				 * page is mandatory for SCSI to ATA (SAT)
				 * translation layers.
				 */
				softc->state = DA_STATE_PROBE_ZONE;
				goto skipstate;
			}
			daprobedone(periph, start_ccb);
			break;
		}

		ata_params = &periph->path->device->ident_data;

		scsi_ata_identify(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    /*cbfcnp*/dadone_probeata,
		    /*tag_action*/MSG_SIMPLE_Q_TAG,
		    /*data_ptr*/(uint8_t *)ata_params,
		    /*dxfer_len*/sizeof(*ata_params),
		    /*sense_len*/SSD_FULL_SIZE,
		    /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_LOGDIR:
	{
		struct ata_gp_log_dir *log_dir;
		int retval;

		retval = 0;

		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
			/*
			 * If we don't have log support, not much point in
			 * trying to probe zone support.
			 */
			daprobedone(periph, start_ccb);
			break;
		}

		/*
		 * If we have an ATA device (the SCSI ATA Information VPD
		 * page should be present and the ATA identify should have
		 * succeeded) and it supports logs, ask for the log directory.
		 */

		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (log_dir == NULL) {
			xpt_print(periph->path, "Couldn't malloc log_dir data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatalogdir,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_LOG_DIRECTORY,
		    /*page_number*/ 0,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)log_dir,
		    /*dxfer_len*/ sizeof(*log_dir),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(log_dir, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_IDDIR:
	{
		struct ata_identify_log_pages *id_dir;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the Identify Device log is
		 * supported in the directory of logs. If so, continue
		 * with requesting the log of identify device pages.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}

		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
		if (id_dir == NULL) {
			xpt_print(periph->path, "Couldn't malloc id_dir data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeataiddir,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_PAGE_LIST,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)id_dir,
		    /*dxfer_len*/ sizeof(*id_dir),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(id_dir, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_SUP:
	{
		struct ata_identify_log_sup_cap *sup_cap;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the Supported Capabilities log
		 * is in the list of Identify Device logs.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}

		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (sup_cap == NULL) {
			xpt_print(periph->path, "Couldn't malloc sup_cap data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatasup,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_SUP_CAP,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)sup_cap,
		    /*dxfer_len*/ sizeof(*sup_cap),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(sup_cap, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}

		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_ZONE:
	{
		struct ata_zoned_info_log *ata_zone;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the zoned device information
		 * page is supported. If so, continue on to request it.
		 * If not, skip to DA_STATE_PROBE_LOG or done.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}
		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
				  M_NOWAIT|M_ZERO);
		if (ata_zone == NULL) {
			xpt_print(periph->path, "Couldn't malloc ata_zone data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatazone,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_ZDI,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)ata_zone,
		    /*dxfer_len*/ sizeof(*ata_zone),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(ata_zone, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
		xpt_action(start_ccb);

		break;
	}
	case DA_STATE_PROBE_ZONE:
	{
		struct scsi_vpd_zoned_bdc *bdc;

		/*
		 * Note that this page will be supported for SCSI protocol
		 * devices that support ZBC (SMR devices), as well as ATA
		 * protocol devices that are behind a SAT (SCSI to ATA
		 * Translation) layer that supports converting ZBC commands
		 * to their ZAC equivalents.
		 */
		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
			daprobedone(periph, start_ccb);
			break;
		}
		bdc = (struct scsi_vpd_zoned_bdc *)
		    malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (bdc == NULL) {
			xpt_release_ccb(start_ccb);
			xpt_print(periph->path, "Couldn't malloc zone VPD data\n");
			break;
		}
		scsi_inquiry(&start_ccb->csio,
		    /*retries*/da_retry_count,
		    /*cbfcnp*/dadone_probezone,
		    /*tag_action*/MSG_SIMPLE_Q_TAG,
		    /*inq_buf*/(uint8_t *)bdc,
		    /*inq_len*/sizeof(*bdc),
		    /*evpd*/TRUE,
		    /*page_code*/SVPD_ZONED_BDC,
		    /*sense_len*/SSD_FULL_SIZE,
		    /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
		xpt_action(start_ccb);
		break;
	}
	}
}
4102
4103
/*
 * In each of the methods below, while it is the caller's
 * responsibility to ensure the request will fit into a
 * single device request, we might have changed the delete
 * method due to the device incorrectly advertising either
 * its supported methods or limits.
 *
 * To prevent this causing further issues, we validate the
 * request against the method's limits, and warn, which would
 * otherwise be unnecessary.
 */
4114
static void
4115
da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4116
{
4117
struct da_softc *softc = (struct da_softc *)periph->softc;
4118
struct bio *bp1;
4119
uint8_t *buf = softc->unmap_buf;
4120
struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
4121
uint64_t lba, lastlba = (uint64_t)-1;
4122
uint64_t totalcount = 0;
4123
uint64_t count;
4124
uint32_t c, lastcount = 0, ranges = 0;
4125
4126
/*
4127
* Currently this doesn't take the UNMAP
4128
* Granularity and Granularity Alignment
4129
* fields into account.
4130
*
4131
* This could result in both unoptimal unmap
4132
* requests as as well as UNMAP calls unmapping
4133
* fewer LBA's than requested.
4134
*/
4135
4136
bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4137
bp1 = bp;
4138
do {
4139
/*
4140
* Note: ada and da are different in how they store the
4141
* pending bp's in a trim. ada stores all of them in the
4142
* trim_req.bps. da stores all but the first one in the
4143
* delete_run_queue. ada then completes all the bps in
4144
* its adadone() loop. da completes all the bps in the
4145
* delete_run_queue in dadone, and relies on the biodone
4146
* after to complete. This should be reconciled since there's
4147
* no real reason to do it differently. XXX
4148
*/
4149
if (bp1 != bp)
4150
bioq_insert_tail(&softc->delete_run_queue, bp1);
4151
lba = bp1->bio_pblkno;
4152
count = bp1->bio_bcount / softc->params.secsize;
4153
4154
/* Try to extend the previous range. */
4155
if (lba == lastlba) {
4156
c = omin(count, UNMAP_RANGE_MAX - lastcount);
4157
lastlba += c;
4158
lastcount += c;
4159
scsi_ulto4b(lastcount, d[ranges - 1].length);
4160
count -= c;
4161
lba += c;
4162
totalcount += c;
4163
} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
4164
softc->unmap_gran != 0) {
4165
/* Align length of the previous range. */
4166
if ((c = lastcount % softc->unmap_gran) != 0) {
4167
if (lastcount <= c) {
4168
totalcount -= lastcount;
4169
lastlba = (uint64_t)-1;
4170
lastcount = 0;
4171
ranges--;
4172
} else {
4173
totalcount -= c;
4174
lastlba -= c;
4175
lastcount -= c;
4176
scsi_ulto4b(lastcount,
4177
d[ranges - 1].length);
4178
}
4179
}
4180
/* Align beginning of the new range. */
4181
c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
4182
if (c != 0) {
4183
c = softc->unmap_gran - c;
4184
if (count <= c) {
4185
count = 0;
4186
} else {
4187
lba += c;
4188
count -= c;
4189
}
4190
}
4191
}
4192
4193
while (count > 0) {
4194
c = omin(count, UNMAP_RANGE_MAX);
4195
if (totalcount + c > softc->unmap_max_lba ||
4196
ranges >= softc->unmap_max_ranges) {
4197
xpt_print(periph->path,
4198
"%s issuing short delete %ld > %ld || %d >= %d",
4199
da_delete_method_desc[softc->delete_method],
4200
totalcount + c, softc->unmap_max_lba,
4201
ranges, softc->unmap_max_ranges);
4202
break;
4203
}
4204
scsi_u64to8b(lba, d[ranges].lba);
4205
scsi_ulto4b(c, d[ranges].length);
4206
lba += c;
4207
totalcount += c;
4208
ranges++;
4209
count -= c;
4210
lastlba = lba;
4211
lastcount = c;
4212
}
4213
bp1 = cam_iosched_next_trim(softc->cam_iosched);
4214
if (bp1 == NULL)
4215
break;
4216
if (ranges >= softc->unmap_max_ranges ||
4217
totalcount + bp1->bio_bcount /
4218
softc->params.secsize > softc->unmap_max_lba) {
4219
cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4220
break;
4221
}
4222
} while (1);
4223
4224
/* Align length of the last range. */
4225
if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
4226
(c = lastcount % softc->unmap_gran) != 0) {
4227
if (lastcount <= c)
4228
ranges--;
4229
else
4230
scsi_ulto4b(lastcount - c, d[ranges - 1].length);
4231
}
4232
4233
scsi_ulto2b(ranges * 16 + 6, &buf[0]);
4234
scsi_ulto2b(ranges * 16, &buf[2]);
4235
4236
scsi_unmap(&ccb->csio,
4237
/*retries*/da_retry_count,
4238
/*cbfcnp*/dadone,
4239
/*tag_action*/MSG_SIMPLE_Q_TAG,
4240
/*byte2*/0,
4241
/*data_ptr*/ buf,
4242
/*dxfer_len*/ ranges * 16 + 8,
4243
/*sense_len*/SSD_FULL_SIZE,
4244
da_default_timeout * 1000);
4245
ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4246
ccb->ccb_h.flags |= CAM_UNLOCKED;
4247
softc->trim_count++;
4248
softc->trim_ranges += ranges;
4249
softc->trim_lbas += totalcount;
4250
cam_iosched_submit_trim(softc->cam_iosched);
4251
}
4252
4253
/*
 * Implement BIO_DELETE using the ATA DATA SET MANAGEMENT (TRIM) command
 * tunneled through SCSI via scsi_ata_trim().  Coalesces as many queued
 * trim bios as fit into softc->unmap_buf, encoding each extent as an
 * 8-byte little-endian DSM range entry (48-bit LBA + 16-bit sector count).
 * Bios beyond the first are parked on delete_run_queue so the completion
 * path can finish them together.
 */
static void
da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;
	struct bio *bp1;
	uint8_t *buf = softc->unmap_buf;
	uint64_t lastlba = (uint64_t)-1;	/* end LBA of previous extent */
	uint64_t count;
	uint64_t lba;
	uint32_t lastcount = 0, c, requestcount;
	int ranges = 0, off, block_count;

	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
	bp1 = bp;
	do {
		if (bp1 != bp)//XXX imp XXX
			bioq_insert_tail(&softc->delete_run_queue, bp1);
		lba = bp1->bio_pblkno;
		count = bp1->bio_bcount / softc->params.secsize;
		requestcount = count;

		/* Try to extend the previous range. */
		if (lba == lastlba) {
			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
			lastcount += c;
			off = (ranges - 1) * 8;
			buf[off + 6] = lastcount & 0xff;
			buf[off + 7] = (lastcount >> 8) & 0xff;
			count -= c;
			lba += c;
		}

		while (count > 0) {
			c = omin(count, ATA_DSM_RANGE_MAX);
			off = ranges * 8;

			/* 48-bit starting LBA, little endian. */
			buf[off + 0] = lba & 0xff;
			buf[off + 1] = (lba >> 8) & 0xff;
			buf[off + 2] = (lba >> 16) & 0xff;
			buf[off + 3] = (lba >> 24) & 0xff;
			buf[off + 4] = (lba >> 32) & 0xff;
			buf[off + 5] = (lba >> 40) & 0xff;
			/* 16-bit range length in sectors, little endian. */
			buf[off + 6] = c & 0xff;
			buf[off + 7] = (c >> 8) & 0xff;
			lba += c;
			ranges++;
			count -= c;
			lastcount = c;
			/*
			 * Out of ranges with sectors still pending: issue a
			 * short delete; the remainder is lost to this pass.
			 * NOTE(review): %ld vs. the uint32_t/int arguments is
			 * a format-width mismatch on some arches — confirm.
			 */
			if (count != 0 && ranges == softc->trim_max_ranges) {
				xpt_print(periph->path,
				    "%s issuing short delete %ld > %ld\n",
				    da_delete_method_desc[softc->delete_method],
				    requestcount,
				    (softc->trim_max_ranges - ranges) *
				    ATA_DSM_RANGE_MAX);
				break;
			}
		}
		lastlba = lba;
		bp1 = cam_iosched_next_trim(softc->cam_iosched);
		if (bp1 == NULL)
			break;
		/* Put back a bio that can't fit in the remaining ranges. */
		if (bp1->bio_bcount / softc->params.secsize >
		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
			break;
		}
	} while (1);

	/* DSM payload is sized in 512-byte blocks of range entries. */
	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
	scsi_ata_trim(&ccb->csio,
	    /*retries*/da_retry_count,
	    /*cbfcnp*/dadone,
	    /*tag_action*/MSG_SIMPLE_Q_TAG,
	    block_count,
	    /*data_ptr*/buf,
	    /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
	    /*sense_len*/SSD_FULL_SIZE,
	    da_default_timeout * 1000);
	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
	ccb->ccb_h.flags |= CAM_UNLOCKED;
	softc->trim_count++;
	softc->trim_ranges += ranges;
	softc->trim_lbas += block_count;
	cam_iosched_submit_trim(softc->cam_iosched);
}
4339
4340
/*
4341
* We calculate ws_max_blks here based off d_delmaxsize instead
4342
* of using softc->ws_max_blks as it is absolute max for the
4343
* device not the protocol max which may well be lower.
4344
*/
4345
static void
4346
da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4347
{
4348
struct da_softc *softc;
4349
struct bio *bp1;
4350
uint64_t ws_max_blks;
4351
uint64_t lba;
4352
uint64_t count; /* forward compat with WS32 */
4353
4354
softc = (struct da_softc *)periph->softc;
4355
ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
4356
lba = bp->bio_pblkno;
4357
count = 0;
4358
bp1 = bp;
4359
do {
4360
if (bp1 != bp)//XXX imp XXX
4361
bioq_insert_tail(&softc->delete_run_queue, bp1);
4362
count += bp1->bio_bcount / softc->params.secsize;
4363
if (count > ws_max_blks) {
4364
xpt_print(periph->path,
4365
"%s issuing short delete %ld > %ld\n",
4366
da_delete_method_desc[softc->delete_method],
4367
count, ws_max_blks);
4368
count = omin(count, ws_max_blks);
4369
break;
4370
}
4371
bp1 = cam_iosched_next_trim(softc->cam_iosched);
4372
if (bp1 == NULL)
4373
break;
4374
if (lba + count != bp1->bio_pblkno ||
4375
count + bp1->bio_bcount /
4376
softc->params.secsize > ws_max_blks) {
4377
cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4378
break;
4379
}
4380
} while (1);
4381
4382
scsi_write_same(&ccb->csio,
4383
/*retries*/da_retry_count,
4384
/*cbfcnp*/dadone,
4385
/*tag_action*/MSG_SIMPLE_Q_TAG,
4386
/*byte2*/softc->delete_method ==
4387
DA_DELETE_ZERO ? 0 : SWS_UNMAP,
4388
softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
4389
/*lba*/lba,
4390
/*block_count*/count,
4391
/*data_ptr*/ __DECONST(void *, zero_region),
4392
/*dxfer_len*/ softc->params.secsize,
4393
/*sense_len*/SSD_FULL_SIZE,
4394
da_default_timeout * 1000);
4395
ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4396
ccb->ccb_h.flags |= CAM_UNLOCKED;
4397
softc->trim_count++;
4398
softc->trim_ranges++;
4399
softc->trim_lbas += count;
4400
cam_iosched_submit_trim(softc->cam_iosched);
4401
}
4402
4403
/*
 * Work around command failures by downgrading the peripheral's
 * capabilities:
 *  - a failed BIO_DELETE switches to the next delete method and requeues
 *    all coalesced bios;
 *  - an unsupported PREVENT ALLOW MEDIUM REMOVAL or SYNCHRONIZE CACHE(10)
 *    sets the corresponding quirk so we stop issuing it;
 *  - an unsupported READ(6)/WRITE(6) is rewritten in place as the 10-byte
 *    form and the ccb requeued.
 *
 * Returns 0 when the error was consumed (caller completes the ccb) or
 * ERESTART when the ccb was rewritten and requeued.
 */
static int
cmd6workaround(union ccb *ccb)
{
	struct scsi_rw_6 cmd6;
	struct scsi_rw_10 *cmd10;
	struct da_softc *softc;
	uint8_t *cdb;
	struct bio *bp;
	int frozen;

	cdb = ccb->csio.cdb_io.cdb_bytes;
	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;

	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
		da_delete_methods old_method = softc->delete_method;

		/*
		 * Typically there are two reasons for failure here
		 * 1. Delete method was detected as supported but isn't
		 * 2. Delete failed due to invalid params e.g. too big
		 *
		 * While we will attempt to choose an alternative delete method
		 * this may result in short deletes if the existing delete
		 * requests from geom are big for the new method chosen.
		 *
		 * This method assumes that the error which triggered this
		 * will not retry the io otherwise a panic will occur
		 */
		dadeleteflag(softc, old_method, 0);
		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
		if (softc->delete_method == DA_DELETE_DISABLE)
			xpt_print(ccb->ccb_h.path,
			    "%s failed, disabling BIO_DELETE\n",
			    da_delete_method_desc[old_method]);
		else
			xpt_print(ccb->ccb_h.path,
			    "%s failed, switching to %s BIO_DELETE\n",
			    da_delete_method_desc[old_method],
			    da_delete_method_desc[softc->delete_method]);

		/*
		 * Push every coalesced bio (and the primary one) back onto
		 * the work queue so they are re-split for the new method.
		 */
		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
			cam_iosched_queue_work(softc->cam_iosched, bp);
		cam_iosched_queue_work(softc->cam_iosched,
		    (struct bio *)ccb->ccb_h.ccb_bp);
		ccb->ccb_h.ccb_bp = NULL;
		return (0);
	}

	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
	    (*cdb == PREVENT_ALLOW) &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
		if (bootverbose)
			xpt_print(ccb->ccb_h.path,
			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
		softc->quirks |= DA_Q_NO_PREVENT;
		return (0);
	}

	/* Detect unsupported SYNCHRONIZE CACHE(10). */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
	    (*cdb == SYNCHRONIZE_CACHE) &&
	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		if (bootverbose)
			xpt_print(ccb->ccb_h.path,
			    "SYNCHRONIZE CACHE(10) not supported.\n");
		softc->quirks |= DA_Q_NO_SYNC_CACHE;
		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
		return (0);
	}

	/* Translation only possible if CDB is an array and cmd is R/W6 */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
	    (*cdb != READ_6 && *cdb != WRITE_6))
		return 0;

	xpt_print(ccb->ccb_h.path,
	    "READ(6)/WRITE(6) not supported, increasing minimum_cmd_size to 10.\n");
	softc->minimum_cmd_size = 10;

	/*
	 * Rewrite the 6-byte CDB as a 10-byte one in place: save a copy
	 * first since cmd10 aliases the same bytes.
	 */
	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
	cmd10 = (struct scsi_rw_10 *)cdb;
	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
	cmd10->byte2 = 0;
	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
	cmd10->reserved = 0;
	scsi_ulto2b(cmd6.length, cmd10->length);
	cmd10->control = cmd6.control;
	ccb->csio.cdb_len = sizeof(*cmd10);

	/* Requeue request, unfreezing queue if necessary */
	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_action(ccb);
	if (frozen) {
		cam_release_devq(ccb->ccb_h.path,
		    /*relsim_flags*/0,
		    /*reduction*/0,
		    /*timeout*/0,
		    /*getcount_only*/0);
	}
	return (ERESTART);
}
4506
4507
/*
 * Completion post-processing for BIO_ZONE commands.  For REPORT ZONES,
 * translate the SCSI/ATA zone descriptors returned by the drive into the
 * disk_zone_rep_entry format consumed by the bio layer; other zone
 * commands need no translation.
 */
static void
dazonedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;
	struct bio *bp;

	softc = periph->softc;
	bp = (struct bio *)ccb->ccb_h.ccb_bp;

	switch (bp->bio_zone.zone_cmd) {
	case DISK_ZONE_OPEN:
	case DISK_ZONE_CLOSE:
	case DISK_ZONE_FINISH:
	case DISK_ZONE_RWP:
		break;
	case DISK_ZONE_REPORT_ZONES: {
		uint32_t avail_len;
		struct disk_zone_report *rep;
		struct scsi_report_zones_hdr *hdr;
		struct scsi_report_zones_desc *desc;
		struct disk_zone_rep_entry *entry;
		uint32_t hdr_len, num_avail;
		uint32_t num_to_fill, i;
		int ata;	/* nonzero: fields are ATA little-endian */

		rep = &bp->bio_zone.zone_params.report;
		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
		/*
		 * Note that bio_resid isn't normally used for zone
		 * commands, but it is used by devstat_end_transaction_bio()
		 * to determine how much data was transferred.  Because
		 * the size of the SCSI/ATA data structures is different
		 * than the size of the BIO interface structures, the
		 * amount of data actually transferred from the drive will
		 * be different than the amount of data transferred to
		 * the user.
		 */
		bp->bio_resid = ccb->csio.resid;
		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
		if (avail_len < sizeof(*hdr)) {
			/*
			 * Is there a better error than EIO here?  We asked
			 * for at least the header, and we got less than
			 * that.
			 */
			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			break;
		}

		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
			ata = 1;
		else
			ata = 0;

		hdr_len = ata ? le32dec(hdr->length) :
		    scsi_4btoul(hdr->length);
		if (hdr_len > 0)
			rep->entries_available = hdr_len / sizeof(*desc);
		else
			rep->entries_available = 0;
		/*
		 * NOTE: using the same values for the BIO version of the
		 * same field as the SCSI/ATA values.  This means we could
		 * get some additional values that aren't defined in bio.h
		 * if more values of the same field are defined later.
		 */
		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
		rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) :
		    scsi_8btou64(hdr->maximum_lba);
		/*
		 * If the drive reports no entries that match the query,
		 * we're done.
		 */
		if (hdr_len == 0) {
			rep->entries_filled = 0;
			break;
		}

		/* Clamp to what the drive actually transferred. */
		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
		    hdr_len / sizeof(*desc));
		/*
		 * If the drive didn't return any data, then we're done.
		 */
		if (num_avail == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_to_fill = min(num_avail, rep->entries_allocated);
		/*
		 * If the user didn't allocate any entries for us to fill,
		 * we're done.
		 */
		if (num_to_fill == 0) {
			rep->entries_filled = 0;
			break;
		}

		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
		     i < num_to_fill; i++, desc++, entry++) {
			/*
			 * NOTE: we're mapping the values here directly
			 * from the SCSI/ATA bit definitions to the bio.h
			 * definitions.  There is also a warning in
			 * disk_zone.h, but the impact is that if
			 * additional values are added in the SCSI/ATA
			 * specs these will be visible to consumers of
			 * this interface.
			 */
			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
			entry->zone_condition =
			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
			    SRZ_ZONE_COND_SHIFT;
			entry->zone_flags |= desc->zone_flags &
			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
			entry->zone_length =
			    ata ? le64dec(desc->zone_length) :
				  scsi_8btou64(desc->zone_length);
			entry->zone_start_lba =
			    ata ? le64dec(desc->zone_start_lba) :
				  scsi_8btou64(desc->zone_start_lba);
			entry->write_pointer_lba =
			    ata ? le64dec(desc->write_pointer_lba) :
				  scsi_8btou64(desc->write_pointer_lba);
		}
		rep->entries_filled = num_to_fill;
		break;
	}
	case DISK_ZONE_GET_PARAMS:
	default:
		/*
		 * In theory we should not get a GET_PARAMS bio, since it
		 * should be handled without queueing the command to the
		 * drive.
		 */
		panic("%s: Invalid zone command %d", __func__,
		    bp->bio_zone.zone_cmd);
		break;
	}

	/* REPORT ZONES allocated a transfer buffer in dastart(); release it. */
	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
		free(ccb->csio.data_ptr, M_SCSIDA);
}
4652
4653
/*
 * Main I/O completion handler.  Maps the CCB result onto the originating
 * bio (error, residual, flags), releases the devq freeze if any, retires
 * the CCB, and for BIO_DELETE also completes every bio that was coalesced
 * into the command via delete_run_queue.  Statement order here is
 * deliberate: iosched accounting must precede biodone(), and biodone()
 * must run without the periph lock held.
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct bio *bp, *bp1;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	da_ccb_state state;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

	cam_periph_lock(periph);
	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		int error;
		int sf;

		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
			sf = SF_RETRY_UA;
		else
			sf = 0;

		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
		if (error == ERESTART) {
			/* A retry was scheduled, so just return. */
			cam_periph_unlock(periph);
			return;
		}
		/*
		 * refresh bp, since cmd6workaround may set it to NULL when
		 * there's no delete methods available since it pushes the bp
		 * back onto the work queue to reschedule it (since different
		 * delete methods have different size limitations).
		 */
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if (error != 0) {
			bool pack_invalid =
			    (softc->flags & DA_FLAG_PACK_INVALID) != 0;

			if (error == ENXIO && !pack_invalid) {
				/*
				 * ENXIO flags ASC/ASCQ codes for either media
				 * missing, or the drive being extremely
				 * unhealthy.  Invalidate peripheral on this
				 * catastrophic error when the pack is valid
				 * since we set the pack invalid bit only for
				 * the few ASC/ASCQ codes indicating missing
				 * media.  The invalidation will flush any
				 * queued I/O and short-circuit retries for
				 * other I/O.  We only invalidate the da device
				 * so the passX device remains for recovery and
				 * diagnostics.
				 *
				 * While we do also set the pack invalid bit
				 * after invalidating the peripheral, the
				 * pending I/O will have been flushed then with
				 * no new I/O starting, so this 'edge' case
				 * doesn't matter.
				 */
				xpt_print(periph->path, "Invalidating pack\n");
				cam_periph_invalidate(periph);
			} else {
				/*
				 * Return all queued I/O with EIO, so that the
				 * client can retry these I/Os in the proper
				 * order should it attempt to recover.  When the
				 * pack is invalid, fail all I/O with ENXIO
				 * since we can't assume when the media returns
				 * it's the same media and we force a trip
				 * through daclose / daopen and the client won't
				 * retry.
				 */
				cam_iosched_flush(softc->cam_iosched, NULL,
				    pack_invalid ? ENXIO : EIO);
			}
			if (bp != NULL) {
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			}
		} else if (bp != NULL) {
			if (state == DA_CCB_DELETE)
				bp->bio_resid = 0;
			else
				bp->bio_resid = csio->resid;
			bp->bio_error = 0;
			if (bp->bio_resid != 0)
				bp->bio_flags |= BIO_ERROR;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(done_ccb->ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
	} else if (bp != NULL) {
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			panic("REQ_CMP with QFRZN");
		if (bp->bio_cmd == BIO_ZONE)
			dazonedone(periph, done_ccb);
		else if (state == DA_CCB_DELETE)
			bp->bio_resid = 0;
		else
			bp->bio_resid = csio->resid;
		if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
			bp->bio_flags |= BIO_ERROR;
		if (softc->error_inject != 0) {
			bp->bio_error = softc->error_inject;
			bp->bio_resid = bp->bio_bcount;
			bp->bio_flags |= BIO_ERROR;
			softc->error_inject = 0;
		}
	}

	if (bp != NULL)
		biotrack(bp, __func__);
	LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
	if (LIST_EMPTY(&softc->pending_ccbs))
		softc->flags |= DA_FLAG_WAS_OTAG;

	/*
	 * We need to call cam_iosched before we call biodone so that we don't
	 * measure any activity that happens in the completion routine, which in
	 * the case of sendfile can be quite extensive.  Release the periph
	 * refcount taken in dastart() for each CCB.
	 */
	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
	xpt_release_ccb(done_ccb);
	KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
	softc->refcount--;
	if (state == DA_CCB_DELETE) {
		TAILQ_HEAD(, bio) queue;

		/* Detach the coalesced bios so they can be completed unlocked. */
		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
		softc->delete_run_queue.insert_point = NULL;
		/*
		 * Normally, the xpt_release_ccb() above would make sure
		 * that when we have more work to do, that work would
		 * get kicked off.  However, we specifically keep
		 * delete_running set to 0 before the call above to
		 * allow other I/O to progress when many BIO_DELETE
		 * requests are pushed down.  We set delete_running to 0
		 * and call daschedule again so that we don't stall if
		 * there are no other I/Os pending apart from BIO_DELETEs.
		 */
		cam_iosched_trim_done(softc->cam_iosched);
		daschedule(periph);
		cam_periph_unlock(periph);
		/* Propagate the primary bio's outcome to every coalesced bio. */
		while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, bp1, bio_queue);
			bp1->bio_error = bp->bio_error;
			if (bp->bio_flags & BIO_ERROR) {
				bp1->bio_flags |= BIO_ERROR;
				bp1->bio_resid = bp1->bio_bcount;
			} else
				bp1->bio_resid = 0;
			biodone(bp1);
		}
	} else {
		daschedule(periph);
		cam_periph_unlock(periph);
	}
	if (bp != NULL)
		biodone(bp);
	return;
}
4828
4829
/*
 * Completion handler for the write-protect probe (MODE SENSE).  Decodes
 * the 6- or 10-byte mode header, latches the WP bit (bit 7 of the
 * device-specific byte) into DISKFLAG_WRITE_PROTECT, then advances the
 * probe state machine to READ CAPACITY(16) or (10).
 */
static void
dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	KASSERT(softc->state == DA_STATE_PROBE_WP,
	    ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p",
		softc->state, periph, done_ccb));
	KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP,
	    ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p",
		(unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph,
		done_ccb));

	if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
		int len, off;
		uint8_t dev_spec;

		/* The CDB length tells us which mode header format came back. */
		if (csio->cdb_len > 6) {
			struct scsi_mode_header_10 *mh =
			    (struct scsi_mode_header_10 *)csio->data_ptr;
			len = 2 + scsi_2btoul(mh->data_length);
			off = sizeof(*mh) + scsi_2btoul(mh->blk_desc_len);
			dev_spec = mh->dev_spec;
		} else {
			struct scsi_mode_header_6 *mh =
			    (struct scsi_mode_header_6 *)csio->data_ptr;
			len = 1 + mh->data_length;
			off = sizeof(*mh) + mh->blk_desc_len;
			dev_spec = mh->dev_spec;
		}
		/* Bit 7 of the device-specific parameter is write-protect. */
		if ((dev_spec & 0x80) != 0)
			softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
		else
			softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;

		/* Next time request only the first of returned mode pages. */
		if (off < len && off < csio->dxfer_len - csio->resid)
			softc->mode_page = csio->data_ptr[off] & SMPH_PC_MASK;
	} else {
		int error;

		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}

			/* We don't depend on it, so don't try again. */
			softc->mode_page = -1;
		}
	}

	free(csio->data_ptr, M_SCSIDA);
	if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
		softc->state = DA_STATE_PROBE_RC16;
	else
		softc->state = DA_STATE_PROBE_RC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
4908
4909
static void
4910
dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
4911
{
4912
struct scsi_read_capacity_data *rdcap;
4913
struct scsi_read_capacity_data_long *rcaplong;
4914
struct da_softc *softc;
4915
struct ccb_scsiio *csio;
4916
da_ccb_state state;
4917
char *announce_buf;
4918
uint32_t priority;
4919
int n;
4920
4921
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));
4922
4923
softc = (struct da_softc *)periph->softc;
4924
priority = done_ccb->ccb_h.pinfo.priority;
4925
csio = &done_ccb->csio;
4926
state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4927
4928
KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16,
4929
("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
4930
softc->state, periph, done_ccb));
4931
KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16,
4932
("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p",
4933
(unsigned long)state, periph, done_ccb));
4934
4935
rdcap = NULL;
4936
rcaplong = NULL;
4937
/* XXX TODO: can this be a malloc? */
4938
announce_buf = softc->announce_temp;
4939
bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4940
4941
if (state == DA_CCB_PROBE_RC)
4942
rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4943
else
4944
rcaplong = (struct scsi_read_capacity_data_long *)
4945
csio->data_ptr;
4946
4947
cam_periph_assert(periph, MA_OWNED);
4948
4949
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4950
struct disk_params *dp;
4951
uint32_t block_size;
4952
uint64_t maxsector;
4953
u_int lalba; /* Lowest aligned LBA. */
4954
4955
if (state == DA_CCB_PROBE_RC) {
4956
block_size = scsi_4btoul(rdcap->length);
4957
maxsector = scsi_4btoul(rdcap->addr);
4958
lalba = 0;
4959
4960
/*
4961
* According to SBC-2, if the standard 10
4962
* byte READ CAPACITY command returns 2^32,
4963
* we should issue the 16 byte version of
4964
* the command, since the device in question
4965
* has more sectors than can be represented
4966
* with the short version of the command.
4967
*/
4968
if (maxsector == 0xffffffff) {
4969
free(rdcap, M_SCSIDA);
4970
softc->state = DA_STATE_PROBE_RC16;
4971
xpt_release_ccb(done_ccb);
4972
xpt_schedule(periph, priority);
4973
return;
4974
}
4975
} else {
4976
block_size = scsi_4btoul(rcaplong->length);
4977
maxsector = scsi_8btou64(rcaplong->addr);
4978
lalba = scsi_2btoul(rcaplong->lalba_lbp);
4979
}
4980
4981
/*
4982
* Because GEOM code just will panic us if we
4983
* give them an 'illegal' value we'll avoid that
4984
* here.
4985
*/
4986
if (block_size == 0) {
4987
block_size = 512;
4988
if (maxsector == 0)
4989
maxsector = -1;
4990
}
4991
if (block_size >= maxphys) {
4992
xpt_print(periph->path,
4993
"unsupportable block size %ju\n",
4994
(uintmax_t) block_size);
4995
announce_buf = NULL;
4996
cam_periph_invalidate(periph);
4997
} else {
4998
/*
4999
* We pass rcaplong into dasetgeom(),
5000
* because it will only use it if it is
5001
* non-NULL.
5002
*/
5003
dasetgeom(periph, block_size, maxsector,
5004
rcaplong, sizeof(*rcaplong));
5005
if ((lalba & SRC16_LBPME_A) != 0 &&
5006
(softc->quirks & DA_Q_NO_UNMAP) == 0)
5007
softc->flags |= DA_FLAG_LBP;
5008
dp = &softc->params;
5009
n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
5010
"%juMB (%ju %u byte sectors",
5011
((uintmax_t)dp->secsize * dp->sectors) /
5012
(1024 * 1024),
5013
(uintmax_t)dp->sectors, dp->secsize);
5014
if (softc->p_type != 0) {
5015
n += snprintf(announce_buf + n,
5016
DA_ANNOUNCETMP_SZ - n,
5017
", DIF type %d", softc->p_type);
5018
}
5019
snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
5020
}
5021
} else {
5022
int error;
5023
5024
/*
5025
* Retry any UNIT ATTENTION type errors. They
5026
* are expected at boot.
5027
*/
5028
error = daerror(done_ccb, CAM_RETRY_SELTO,
5029
SF_RETRY_UA|SF_NO_PRINT);
5030
if (error == ERESTART) {
5031
/*
5032
* A retry was scheuled, so
5033
* just return.
5034
*/
5035
return;
5036
} else if (error != 0) {
5037
int asc, ascq;
5038
int sense_key, error_code;
5039
int have_sense;
5040
cam_status status;
5041
struct ccb_getdev cgd;
5042
5043
/* Don't wedge this device's queue */
5044
status = done_ccb->ccb_h.status;
5045
if ((status & CAM_DEV_QFRZN) != 0)
5046
cam_release_devq(done_ccb->ccb_h.path,
5047
/*relsim_flags*/0,
5048
/*reduction*/0,
5049
/*timeout*/0,
5050
/*getcount_only*/0);
5051
5052
xpt_gdev_type(&cgd, done_ccb->ccb_h.path);
5053
5054
if (scsi_extract_sense_ccb(done_ccb,
5055
&error_code, &sense_key, &asc, &ascq))
5056
have_sense = TRUE;
5057
else
5058
have_sense = FALSE;
5059
5060
/*
5061
* If we tried READ CAPACITY(16) and failed,
5062
* fallback to READ CAPACITY(10).
5063
*/
5064
if ((state == DA_CCB_PROBE_RC16) &&
5065
(softc->flags & DA_FLAG_CAN_RC16) &&
5066
(((csio->ccb_h.status & CAM_STATUS_MASK) ==
5067
CAM_REQ_INVALID) ||
5068
((have_sense) &&
5069
(error_code == SSD_CURRENT_ERROR ||
5070
error_code == SSD_DESC_CURRENT_ERROR) &&
5071
(sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
5072
cam_periph_assert(periph, MA_OWNED);
5073
softc->flags &= ~DA_FLAG_CAN_RC16;
5074
free(rdcap, M_SCSIDA);
5075
softc->state = DA_STATE_PROBE_RC;
5076
xpt_release_ccb(done_ccb);
5077
xpt_schedule(periph, priority);
5078
return;
5079
}
5080
5081
/*
5082
* Attach to anything that claims to be a direct access
5083
* or optical disk device, as long as it doesn't return
5084
* a "Logical unit not supported" (25/0) error.
5085
* "Internal Target Failure" (44/0) is also special and
5086
* typically means that the device is a SATA drive
5087
* behind a SATL translation that's fallen into a
5088
* terminally fatal state.
5089
*
5090
* 4/2 happens on some HGST drives that are quite
5091
* ill. We've already sent the start unit command (for
5092
* which we ignore a 44/0 asc/ascq, which I'm hesitant
5093
* to change since it's so basic and there's other error
5094
* conditions to the START UNIT we should ignore). So to
5095
* require initialization at this point when it should
5096
* be fine implies to me, at least, that we should
5097
* invalidate. Since we do read capacity in geom tasting
5098
* a lot, and since this timeout is long, this leads to
5099
* up to a 10 minute delay in booting.
5100
*
5101
* 4/2: LOGICAL UNIT NOT READY, INITIALIZING COMMAND REQUIRED
5102
* 25/0: LOGICAL UNIT NOT SUPPORTED
5103
* 44/0: INTERNAL TARGET FAILURE
5104
* 44/1: PERSISTENT RESERVATION INFORMATION LOST
5105
* 44/71: ATA DEVICE FAILED SET FEATURES
5106
*/
5107
if ((have_sense)
5108
&& (asc != 0x25) && (asc != 0x44)
5109
&& (asc != 0x04 && ascq != 0x02)
5110
&& (error_code == SSD_CURRENT_ERROR
5111
|| error_code == SSD_DESC_CURRENT_ERROR)) {
5112
const char *sense_key_desc;
5113
const char *asc_desc;
5114
5115
dasetgeom(periph, 512, -1, NULL, 0);
5116
scsi_sense_desc(sense_key, asc, ascq,
5117
&cgd.inq_data, &sense_key_desc,
5118
&asc_desc);
5119
snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
5120
"Attempt to query device size failed: %s, %s",
5121
sense_key_desc, asc_desc);
5122
} else {
5123
if (have_sense)
5124
scsi_sense_print(&done_ccb->csio);
5125
else {
5126
xpt_print(periph->path,
5127
"got CAM status %#x\n",
5128
done_ccb->ccb_h.status);
5129
}
5130
xpt_print(periph->path,
5131
"fatal error, failed to attach to device\n");
5132
5133
announce_buf = NULL;
5134
5135
/*
5136
* Free up resources.
5137
*/
5138
cam_periph_invalidate(periph);
5139
}
5140
}
5141
}
5142
free(csio->data_ptr, M_SCSIDA);
5143
if (announce_buf != NULL &&
5144
((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
5145
struct sbuf sb;
5146
5147
sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
5148
SBUF_FIXEDLEN);
5149
xpt_announce_periph_sbuf(periph, &sb, announce_buf);
5150
xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
5151
DA_Q_BIT_STRING);
5152
sbuf_finish(&sb);
5153
sbuf_putbuf(&sb);
5154
5155
/*
5156
* Create our sysctl variables, now that we know
5157
* we have successfully attached.
5158
*/
5159
/* increase the refcount */
5160
if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
5161
taskqueue_enqueue(taskqueue_thread,
5162
&softc->sysctl_task);
5163
} else {
5164
/* XXX This message is useless! */
5165
xpt_print(periph->path,
5166
"fatal error, could not acquire reference count\n");
5167
}
5168
}
5169
5170
/* We already probed the device. */
5171
if (softc->flags & DA_FLAG_PROBED) {
5172
daprobedone(periph, done_ccb);
5173
return;
5174
}
5175
5176
softc->state = DA_STATE_PROBE_CACHE;
5177
xpt_release_ccb(done_ccb);
5178
xpt_schedule(periph, priority);
5179
return;
5180
}
5181
5182
static void
5183
dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
5184
{
5185
struct scsi_vpd_logical_block_prov *lbp;
5186
struct da_softc *softc;
5187
struct ccb_scsiio *csio;
5188
uint32_t priority;
5189
5190
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));
5191
5192
softc = (struct da_softc *)periph->softc;
5193
priority = done_ccb->ccb_h.pinfo.priority;
5194
csio = &done_ccb->csio;
5195
lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
5196
5197
cam_periph_assert(periph, MA_OWNED);
5198
5199
if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5200
/*
5201
* T10/1799-D Revision 31 states at least one of these
5202
* must be supported but we don't currently enforce this.
5203
*/
5204
dadeleteflag(softc, DA_DELETE_WS16,
5205
(lbp->flags & SVPD_LBP_WS16));
5206
dadeleteflag(softc, DA_DELETE_WS10,
5207
(lbp->flags & SVPD_LBP_WS10));
5208
dadeleteflag(softc, DA_DELETE_UNMAP,
5209
(lbp->flags & SVPD_LBP_UNMAP));
5210
} else {
5211
int error;
5212
error = daerror(done_ccb, CAM_RETRY_SELTO,
5213
SF_RETRY_UA|SF_NO_PRINT);
5214
if (error == ERESTART)
5215
return;
5216
else if (error != 0) {
5217
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5218
/* Don't wedge this device's queue */
5219
cam_release_devq(done_ccb->ccb_h.path,
5220
/*relsim_flags*/0,
5221
/*reduction*/0,
5222
/*timeout*/0,
5223
/*getcount_only*/0);
5224
}
5225
5226
/*
5227
* Failure indicates we don't support any SBC-3
5228
* delete methods with UNMAP
5229
*/
5230
}
5231
}
5232
5233
free(lbp, M_SCSIDA);
5234
softc->state = DA_STATE_PROBE_BLK_LIMITS;
5235
xpt_release_ccb(done_ccb);
5236
xpt_schedule(periph, priority);
5237
return;
5238
}
5239
5240
/*
 * Completion handler for the Block Limits VPD page (0xb0) probe.  On
 * success, clamp the disk's maximum transfer size to the device-reported
 * maximum, record the UNMAP LBA/descriptor limits and optimal unmap
 * granularity/alignment, and record the maximum WRITE SAME length.  On
 * unrecoverable error, fall back to conservative one-LBA/one-range UNMAP
 * limits (the page is optional, so failure does not mean UNMAP itself is
 * unsupported).  Always advances to the Block Device Characteristics probe.
 */
static void
dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_limits *block_limits;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t max_txfer_len = scsi_4btoul(
			block_limits->max_txfer_len);
		uint32_t max_unmap_lba_cnt = scsi_4btoul(
			block_limits->max_unmap_lba_cnt);
		uint32_t max_unmap_blk_cnt = scsi_4btoul(
			block_limits->max_unmap_blk_cnt);
		uint32_t unmap_gran = scsi_4btoul(
			block_limits->opt_unmap_grain);
		uint32_t unmap_gran_align = scsi_4btoul(
			block_limits->unmap_grain_align);
		uint64_t ws_max_blks = scsi_8btou64(
			block_limits->max_write_same_length);

		/* A zero MAXIMUM TRANSFER LENGTH means "no limit reported". */
		if (max_txfer_len != 0) {
			softc->disk->d_maxsize = MIN(softc->maxio,
			    (off_t)max_txfer_len * softc->params.secsize);
		}

		/*
		 * We should already support UNMAP but we check lba
		 * and block count to be sure
		 */
		if (max_unmap_lba_cnt != 0x00L &&
		    max_unmap_blk_cnt != 0x00L) {
			softc->unmap_max_lba = max_unmap_lba_cnt;
			softc->unmap_max_ranges = min(max_unmap_blk_cnt,
			    UNMAP_MAX_RANGES);
			if (unmap_gran > 1) {
				softc->unmap_gran = unmap_gran;
				/*
				 * Top bit is the UGAVALID flag; the low 31
				 * bits are the actual alignment.
				 */
				if (unmap_gran_align & 0x80000000) {
					softc->unmap_gran_align =
					    unmap_gran_align & 0x7fffffff;
				}
			}
		} else {
			/*
			 * Unexpected UNMAP limits which means the
			 * device doesn't actually support UNMAP
			 */
			dadeleteflag(softc, DA_DELETE_UNMAP, 0);
		}

		if (ws_max_blks != 0x00L)
			softc->ws_max_blks = ws_max_blks;
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}

			/*
			 * Failure here doesn't mean UNMAP is not
			 * supported as this is an optional page.
			 */
			softc->unmap_max_lba = 1;
			softc->unmap_max_ranges = 1;
		}
	}

	free(block_limits, M_SCSIDA);
	softc->state = DA_STATE_PROBE_BDC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
5333
5334
/*
 * Completion handler for the Block Device Characteristics VPD page (0xb1)
 * probe.  On success, record the medium rotation rate (disabling I/O
 * scheduler queue sorting for non-rotational media and notifying GEOM if
 * the rate changed) and, if the zone mode has not already been set by a
 * quirk, derive the zone mode (host-aware / drive-managed) and zone
 * interface from the ZONED field.  Always advances to the ATA probe state.
 */
static void
dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_device_characteristics *bdc;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;

		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		uint16_t old_rate = softc->disk->d_rotation_rate;

		/* Only trust fields the device actually transferred. */
		valid_len = csio->dxfer_len - csio->resid;
		if (SBDC_IS_PRESENT(bdc, valid_len,
		    medium_rotation_rate)) {
			softc->disk->d_rotation_rate =
				scsi_2btoul(bdc->medium_rotation_rate);
			if (softc->disk->d_rotation_rate == SVPD_NON_ROTATING) {
				cam_iosched_set_sort_queue(
				    softc->cam_iosched, 0);
				softc->flags &= ~DA_FLAG_ROTATING;
			}
			if (softc->disk->d_rotation_rate != old_rate) {
				disk_attr_changed(softc->disk,
				    "GEOM::rotation_rate", M_NOWAIT);
			}
		}
		if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
		 && (softc->zone_mode == DA_ZONE_NONE)) {
			int ata_proto;

			/*
			 * Presence of the ATA Information VPD page is used
			 * as the indicator that we're talking through a SAT
			 * layer to an ATA device.
			 */
			if (scsi_vpd_supported_page(periph,
			    SVPD_ATA_INFORMATION))
				ata_proto = 1;
			else
				ata_proto = 0;

			/*
			 * The Zoned field will only be set for
			 * Drive Managed and Host Aware drives. If
			 * they are Host Managed, the device type
			 * in the standard INQUIRY data should be
			 * set to T_ZBC_HM (0x14).
			 */
			if ((bdc->flags & SVPD_ZBC_MASK) ==
			     SVPD_HAW_ZBC) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
			     SVPD_DM_ZBC) {
				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
				  SVPD_ZBC_NR) {
				xpt_print(periph->path, "Unknown zoned type %#x",
				    bdc->flags & SVPD_ZBC_MASK);
			}
		}
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(bdc, M_SCSIDA);
	softc->state = DA_STATE_PROBE_ATA;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
5432
5433
/*
 * Completion handler for the MODE SENSE(6) probe of the caching page
 * (page 8).  Sanity-checks the returned mode data; a short, mis-numbered,
 * or wrong-length page (typical of cheap USB bridges) or a hard command
 * failure causes the DA_Q_NO_SYNC_CACHE quirk to be set, so we never send
 * SYNCHRONIZE CACHE to a device that may hang on it.  A MEDIUM NOT PRESENT
 * sense (3a/xx) is treated as inconclusive and does NOT set the quirk.
 * Afterwards, seed the delete-method flags and advance the probe state
 * machine to the LBP VPD page (if the device advertises LBP) or straight
 * to the Block Device Characteristics page.
 *
 * Note the cross-branch "goto bad": the validation failures in the success
 * arm jump into the mark_bad handling inside the error arm.
 */
static void
dadone_probecache(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;
	struct scsi_mode_header_6 *sense_hdr;
	struct scsi_caching_page *cache_page;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probecache\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	sense_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
	/* The caching page follows the mode header and block descriptors. */
	cache_page = (struct scsi_caching_page *)(csio->data_ptr +
	    sizeof(struct scsi_mode_header_6) + sense_hdr->blk_desc_len);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		/*
		 * Sanity check different fields of the data. We make sure
		 * there's enough data, in total, and that the page part of the
		 * data is long enough and that the page number is correct. Some
		 * devices will return sense data as if we'd requested page 0x3f
		 * always, for example, and those devices can't be trusted
		 * (which is why we don't walk the list of pages or try to
		 * request a bigger buffer). The devices that have problems are
		 * typically cheap USB thumb drives.
		 */
		if (sense_hdr->data_length + 1 <
		    sense_hdr->blk_desc_len + sizeof(*cache_page)) {
			xpt_print(done_ccb->ccb_h.path,
			    "CACHE PAGE TOO SHORT data len %d desc len %d\n",
			    sense_hdr->data_length,
			    sense_hdr->blk_desc_len);
			goto bad;
		}
		if ((cache_page->page_code & ~SMS_PAGE_CTRL_MASK) !=
		    SMS_CACHE_PAGE) {
			xpt_print(done_ccb->ccb_h.path,
			    "Bad cache page %#x\n",
			    cache_page->page_code);
			goto bad;
		}
		if (cache_page->page_length != sizeof(*cache_page) -
		    offsetof(struct scsi_caching_page, flags1)) {
			xpt_print(done_ccb->ccb_h.path,
			    "CACHE PAGE length bogus %#x\n",
			    cache_page->page_length);
			goto bad;
		}
		/*
		 * If there's a block descriptor header, we could save the block
		 * count to compare later against READ CAPACITY or READ CAPACITY
		 * (16), but the same devices that get those wrongs often don't
		 * provide a block descriptor header to store away for later.
		 */

		/*
		 * Warn about apparently unsafe quirking. A couple of
		 * my USB sticks have WCE enabled, but some quirk somewhere
		 * disables the necessary SYNCHRONIZE CACHE ops.
		 */
		if (softc->quirks & DA_Q_NO_SYNC_CACHE &&
		    cache_page->flags1 & SCP_WCE)
			xpt_print(done_ccb->ccb_h.path,
			    "Devices quirked NO_SYNC_CACHE, but WCE=1 enabling write cache.\n");
	} else {
		int error, error_code, sense_key, asc, ascq;
		bool mark_bad;

		/*
		 * Three types of errors observed here:
		 * 24h/00h DZTPROMAEBKVF INVALID FIELD IN CDB
		 * 26h/00h DZTPROMAEBKVF INVALID FIELD IN PARAMETER LIST
		 * 3Ah/00h         DZT ROM  BK    MEDIUM NOT PRESENT
		 *
		 * The first two are legit ways of saying page 8 doesn't exist
		 * and set the NO_SYNC_CACHE quirk. The third is a null result:
		 * At least some devices that report this when a slot is empty
		 * none-the-less have working SYNCHRONIZE CACHE. Take our
		 * chances and refrain from setting the quirk. The one device I
		 * have that does this, but doesn't support the command doesn't
		 * hang on the command either. I conjecture that the exact card
		 * that's inserted will determine if SYNC is supported which
		 * would make repeated probings hard.
		 */
		mark_bad = true;
		if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key,
		    &asc, &ascq)) {
			if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a)
				mark_bad = false;
		}
		error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT);
		if (error == ERESTART) {
			return;		/* CCB requeued; keep reference */
		} else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}
		}
		xpt_print(done_ccb->ccb_h.path,
		    "MODE SENSE for CACHE page command failed.\n");

		/*
		 * There's no cache page, the command wasn't
		 * supported, retries failed or the data returned was
		 * junk. Any one of these reasons is enough to
		 * conclude that the drive doesn't support caching, so
		 * SYNCHRONIZE CACHE isn't needed and may hang the
		 * drive!
		 */
		if (mark_bad) {
bad:
			xpt_print(done_ccb->ccb_h.path,
			    "Mode page 8 missing, disabling SYNCHRONIZE CACHE\n");
			if (softc->quirks & DA_Q_NO_SYNC_CACHE)
				xpt_print(done_ccb->ccb_h.path,
				    "Devices already quirked for NO_SYNC_CACHE, maybe remove quirk table\n");
			softc->quirks |= DA_Q_NO_SYNC_CACHE;
			softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
		}
	}
	free(sense_hdr, M_SCSIDA);

	/* Ensure re-probe doesn't see old delete. */
	softc->delete_available = 0;
	dadeleteflag(softc, DA_DELETE_ZERO, 1);
	if ((softc->flags & DA_FLAG_LBP) != 0) {
		/*
		 * Based on older SBC-3 spec revisions
		 * any of the UNMAP methods "may" be
		 * available via LBP given this flag so
		 * we flag all of them as available and
		 * then remove those which further
		 * probes confirm aren't available
		 * later.
		 *
		 * We could also check readcap(16) p_type
		 * flag to exclude one or more invalid
		 * write same (X) types here
		 */
		dadeleteflag(softc, DA_DELETE_WS16, 1);
		dadeleteflag(softc, DA_DELETE_WS10, 1);
		dadeleteflag(softc, DA_DELETE_UNMAP, 1);

		softc->state = DA_STATE_PROBE_LBP;
	} else {
		softc->state = DA_STATE_PROBE_BDC;
	}
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
5592
5593
/*
 * Completion handler for the ATA IDENTIFY DEVICE data fetched through the
 * SAT layer (via the ATA Information VPD page probe).  On success, record
 * ATA TRIM support and range limits, the media rotation rate (disabling
 * queue sorting for SSDs), DMA and general-purpose-log capability flags,
 * and — if not already set by a quirk or the BDC page — the zone mode
 * derived from the IDENTIFY zoned-capability bits.  For host-aware or
 * host-managed devices, continue the probe with either the ATA log
 * directory (ATA passthrough path) or the SCSI Zoned BDC page (SAT
 * ZBC->ZAC translation path); otherwise finish probing.
 */
static void
dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ata_params *ata_params;
	struct ccb_scsiio *csio;
	struct da_softc *softc;
	uint32_t priority;
	int continue_probe;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	ata_params = (struct ata_params *)csio->data_ptr;
	continue_probe = 0;
	error = 0;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint16_t old_rate;

		/* Byte-swap / sanitize the raw IDENTIFY data in place. */
		ata_param_fixup(ata_params);
		if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
		    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
			if (ata_params->max_dsm_blocks != 0)
				softc->trim_max_ranges = min(
				    softc->trim_max_ranges,
				    ata_params->max_dsm_blocks *
				    ATA_DSM_BLK_RANGES);
		}
		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		old_rate = softc->disk->d_rotation_rate;
		softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
		if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
			softc->flags &= ~DA_FLAG_ROTATING;
		}
		if (softc->disk->d_rotation_rate != old_rate) {
			disk_attr_changed(softc->disk,
			    "GEOM::rotation_rate", M_NOWAIT);
		}

		cam_periph_assert(periph, MA_OWNED);
		if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
			softc->flags |= DA_FLAG_CAN_ATA_DMA;

		if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
			softc->flags |= DA_FLAG_CAN_ATA_LOG;

		/*
		 * At this point, if we have a SATA host aware drive,
		 * we communicate via ATA passthrough unless the
		 * SAT layer supports ZBC -> ZAC translation. In
		 * that case,
		 *
		 * XXX KDM figure out how to detect a host managed
		 * SATA drive.
		 */
		if (softc->zone_mode == DA_ZONE_NONE) {
			/*
			 * Note that we don't override the zone
			 * mode or interface if it has already been
			 * set. This is because it has either been
			 * set as a quirk, or when we probed the
			 * SCSI Block Device Characteristics page,
			 * the zoned field was set. The latter
			 * means that the SAT layer supports ZBC to
			 * ZAC translation, and we would prefer to
			 * use that if it is available.
			 */
			if ((ata_params->support3 &
			    ATA_SUPPORT_ZONE_MASK) ==
			    ATA_SUPPORT_ZONE_HOST_AWARE) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface =
				    DA_ZONE_IF_ATA_PASS;
			} else if ((ata_params->support3 &
				    ATA_SUPPORT_ZONE_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED) {
				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
			}
		}

	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
	 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
		/*
		 * If the ATA IDENTIFY failed, we could be talking
		 * to a SCSI drive, although that seems unlikely,
		 * since the drive did report that it supported the
		 * ATA Information VPD page. If the ATA IDENTIFY
		 * succeeded, and the SAT layer doesn't support
		 * ZBC -> ZAC translation, continue on to get the
		 * directory of ATA logs, and complete the rest of
		 * the ZAC probe. If the SAT layer does support
		 * ZBC -> ZAC translation, we want to use that,
		 * and we'll probe the SCSI Zoned Block Device
		 * Characteristics VPD page next.
		 */
		if ((error == 0)
		 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
		 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
			softc->state = DA_STATE_PROBE_ATA_LOGDIR;
		else
			softc->state = DA_STATE_PROBE_ZONE;
		continue_probe = 1;
	}
	if (continue_probe != 0) {
		/*
		 * NOTE(review): this schedules before releasing the CCB,
		 * the reverse of the sibling handlers' order — confirm
		 * the ordering is intentional/benign here.
		 */
		xpt_schedule(periph, priority);
		xpt_release_ccb(done_ccb);
		return;
	} else
		daprobedone(periph, done_ccb);
	return;
}
5732
5733
/*
 * Completion handler for reading the ATA General Purpose log directory.
 * Copies the (possibly truncated) directory into the softc, then checks
 * the directory header version and the page count for the Identify Device
 * Data log to decide whether DA_FLAG_CAN_ATA_IDLOG can be set.  On success
 * with ID-log support, continues to the Identify Data log directory probe;
 * otherwise finishes the probe.
 */
static void
dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);
	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		error = 0;
		softc->valid_logdir_len = 0;
		bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
		softc->valid_logdir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_logdir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_logdir,
			    min(softc->valid_logdir_len,
				sizeof(softc->ata_logdir)));

		/*
		 * Figure out whether the Identify Device log is
		 * supported. The General Purpose log directory
		 * has a header, and lists the number of pages
		 * available for each GP log identified by the
		 * offset into the list.
		 */
		if ((softc->valid_logdir_len >=
		    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
		 && (le16dec(softc->ata_logdir.header) ==
		     ATA_GP_LOG_DIR_VERSION)
		 && (le16dec(&softc->ata_logdir.num_pages[
		     (ATA_IDENTIFY_DATA_LOG *
		     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
			softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
		} else {
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			/*
			 * If we can't get the ATA log directory,
			 * then ATA logs are effectively not
			 * supported even if the bit is set in the
			 * identify data.
			 */
			softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
					  DA_FLAG_CAN_ATA_IDLOG);
			if ((done_ccb->ccb_h.status &
			     CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0)
	 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
		softc->state = DA_STATE_PROBE_ATA_IDDIR;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
5813
5814
/*
 * Completion handler for reading the ATA Identify Device Data log
 * directory (log page list).  Copies the directory into the softc and
 * scans the entry list for the Supported Capabilities (SUP_CAP) and
 * Zoned Device Information (ZDI) pages, setting the corresponding
 * CAN_ATA_SUPCAP / CAN_ATA_ZONE flags.  On success with SUP_CAP support,
 * continues to the Supported Capabilities probe; otherwise finishes.
 */
static void
dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		off_t entries_offset, max_entries;
		error = 0;

		softc->valid_iddir_len = 0;
		bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
		softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
				  DA_FLAG_CAN_ATA_ZONE);
		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_iddir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_iddir,
			    min(softc->valid_iddir_len,
				sizeof(softc->ata_iddir)));

		entries_offset =
		    __offsetof(struct ata_identify_log_pages,entries);
		max_entries = softc->valid_iddir_len - entries_offset;
		if ((softc->valid_iddir_len > (entries_offset + 1))
		 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
		 && (softc->ata_iddir.entry_count > 0)) {
			int num_entries, i;

			num_entries = softc->ata_iddir.entry_count;
			/*
			 * Clamp to the data actually transferred; the loop
			 * below additionally bounds by max_entries (the
			 * same limit), which is redundant but harmless.
			 */
			num_entries = min(num_entries,
			   softc->valid_iddir_len - entries_offset);
			for (i = 0; i < num_entries && i < max_entries; i++) {
				if (softc->ata_iddir.entries[i] ==
				    ATA_IDL_SUP_CAP)
					softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
				else if (softc->ata_iddir.entries[i] ==
					 ATA_IDL_ZDI)
					softc->flags |= DA_FLAG_CAN_ATA_ZONE;

				if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
				 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
					break;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data log
			 * directory, then it effectively isn't
			 * supported even if the ATA Log directory
			 * a non-zero number of pages present for
			 * this log.
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
		softc->state = DA_STATE_PROBE_ATA_SUP;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
5904
5905
/*
 * Completion handler for reading the ATA Identify Device Data Supported
 * Capabilities page.  If enough data came back, refresh the zone mode
 * from the zoned-capabilities qword and translate the ZAC command-support
 * bits into DA_ZONE_FLAG_* values; drives predating ACS-4 r08 (no valid
 * ZAC capability field) are assumed to support all zone commands.  On
 * success with ZDI support, continues to the ATA zone-info probe;
 * otherwise finishes.
 */
static void
dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;
		size_t needed_size;
		struct ata_identify_log_sup_cap *sup_cap;
		error = 0;

		sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_identify_log_sup_cap,
		    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
		if (valid_len >= needed_size) {
			uint64_t zoned, zac_cap;

			zoned = le64dec(sup_cap->zoned_cap);
			if (zoned & ATA_ZONED_VALID) {
				/*
				 * This should have already been
				 * set, because this is also in the
				 * ATA identify data.
				 */
				if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_HOST_AWARE)
					softc->zone_mode = DA_ZONE_HOST_AWARE;
				else if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED)
					softc->zone_mode =
					    DA_ZONE_DRIVE_MANAGED;
			}

			zac_cap = le64dec(sup_cap->sup_zac_cap);
			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
				if (zac_cap & ATA_REPORT_ZONES_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RZ_SUP;
				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_OPEN_SUP;
				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_CLOSE_SUP;
				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_FINISH_SUP;
				if (zac_cap & ATA_ND_RWP_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RWP_SUP;
			} else {
				/*
				 * This field was introduced in
				 * ACS-4, r08 on April 28th, 2015.
				 * If the drive firmware was written
				 * to an earlier spec, it won't have
				 * the field. So, assume all
				 * commands are supported.
				 */
				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data
			 * Supported Capabilities page, clear the
			 * flag...
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
			/*
			 * And clear zone capabilities.
			 */
			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
		softc->state = DA_STATE_PROBE_ATA_ZONE;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
6017
6018
/*
 * Completion handler for reading the ATA Zoned Device Information log.
 * If enough data was returned, record the unrestricted-read (URSWRZ)
 * capability and the optimal-sequential, optimal-non-sequential, and
 * maximum-open-sequential zone counts, each gated by its own VALID bit.
 * On unrecoverable error, clear the ATA zone capability and settings.
 * This is the last step of the ATA zone probe path; always finishes via
 * daprobedone().
 */
static void
dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		struct ata_zoned_info_log *zi_log;
		uint32_t valid_len;
		size_t needed_size;

		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;

		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_zoned_info_log,
		    version_info) + 1 + sizeof(zi_log->version_info);
		if (valid_len >= needed_size) {
			uint64_t tmpvar;

			tmpvar = le64dec(zi_log->zoned_cap);
			if (tmpvar & ATA_ZDI_CAP_VALID) {
				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
					softc->zone_flags |=
					    DA_ZONE_FLAG_URSWRZ;
				else
					softc->zone_flags &=
					    ~DA_ZONE_FLAG_URSWRZ;
			}
			tmpvar = le64dec(zi_log->optimal_seq_zones);
			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
				softc->optimal_seq_zones = (tmpvar &
				    ATA_ZDI_OPT_SEQ_MASK);
			} else {
				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
				softc->optimal_seq_zones = 0;
			}

			tmpvar = le64dec(zi_log->optimal_nonseq_zones);
			if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
				softc->zone_flags |=
				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
				softc->optimal_nonseq_zones =
				    (tmpvar & ATA_ZDI_OPT_NS_MASK);
			} else {
				softc->zone_flags &=
				    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
				softc->optimal_nonseq_zones = 0;
			}

			tmpvar = le64dec(zi_log->max_seq_req_zones);
			if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
				softc->max_seq_zones =
				    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
			} else {
				softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET;
				softc->max_seq_zones = 0;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
			softc->flags &= ~DA_ZONE_FLAG_SET_MASK;

			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	daprobedone(periph, done_ccb);
	return;
}
6111
6112
/*
 * Completion handler for the SCSI Zoned Block Device Characteristics VPD
 * page (0xb6) probe.  If enough valid data came back, record the URSWRZ
 * bit and the optimal/maximum zone counts.  All ZBC zone commands are
 * treated as supported for SCSI devices (mandatory per ZBC as of the
 * XXX-dated note below).  This is the last step of the SCSI zone probe
 * path; always finishes via daprobedone().
 */
static void
dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;
		size_t needed_len;
		struct scsi_vpd_zoned_bdc *zoned_bdc;

		error = 0;
		zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr;
		valid_len = csio->dxfer_len - csio->resid;
		needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
		    max_seq_req_zones) + 1 +
		    sizeof(zoned_bdc->max_seq_req_zones);
		if ((valid_len >= needed_len)
		 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) {
			if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
				softc->zone_flags |= DA_ZONE_FLAG_URSWRZ;
			else
				softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ;
			softc->optimal_seq_zones =
			    scsi_4btoul(zoned_bdc->optimal_seq_zones);
			softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
			softc->optimal_nonseq_zones = scsi_4btoul(
			    zoned_bdc->optimal_nonseq_zones);
			softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET;
			softc->max_seq_zones =
			    scsi_4btoul(zoned_bdc->max_seq_req_zones);
			softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
		}
		/*
		 * All of the zone commands are mandatory for SCSI
		 * devices.
		 *
		 * XXX KDM this is valid as of September 2015.
		 * Re-check this assumption once the SAT spec is
		 * updated to support SCSI ZBC to ATA ZAC mapping.
		 * Since ATA allows zone commands to be reported
		 * as supported or not, this may not necessarily
		 * be true for an ATA device behind a SAT (SCSI to
		 * ATA Translation) layer.
		 */
		softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
		    SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;		/* CCB requeued; keep reference */
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	daprobedone(periph, done_ccb);
	return;
}
6188
6189
static void
6190
dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
6191
{
6192
struct da_softc *softc;
6193
6194
CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));
6195
6196
softc = (struct da_softc *)periph->softc;
6197
6198
cam_periph_assert(periph, MA_OWNED);
6199
6200
if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6201
if (daerror(done_ccb, CAM_RETRY_SELTO,
6202
SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
6203
return; /* Will complete again, keep reference */
6204
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
6205
cam_release_devq(done_ccb->ccb_h.path,
6206
/*relsim_flags*/0,
6207
/*reduction*/0,
6208
/*timeout*/0,
6209
/*getcount_only*/0);
6210
}
6211
softc->flags &= ~DA_FLAG_TUR_PENDING;
6212
xpt_release_ccb(done_ccb);
6213
da_periph_release_locked(periph, DA_REF_TUR);
6214
return;
6215
}
6216
6217
static void
6218
dareprobe(struct cam_periph *periph)
6219
{
6220
struct da_softc *softc;
6221
int status __diagused;
6222
6223
softc = (struct da_softc *)periph->softc;
6224
6225
cam_periph_assert(periph, MA_OWNED);
6226
6227
/* Probe in progress; don't interfere. */
6228
if (softc->state != DA_STATE_NORMAL)
6229
return;
6230
6231
status = da_periph_acquire(periph, DA_REF_REPROBE);
6232
KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));
6233
6234
softc->state = DA_STATE_PROBE_WP;
6235
xpt_schedule(periph, CAM_PRIORITY_DEV);
6236
}
6237
6238
/*
 * Central error-recovery hook for da(4) CCBs.  Inspects the CCB status
 * and sense data for conditions this driver handles specially (command
 * set downgrades, capacity/inquiry changes, media arrival/removal),
 * updates softc state accordingly, then hands the CCB to
 * cam_periph_error() for generic retry/recovery processing.  Returns
 * ERESTART when the command will be retried.
 */
static int
daerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			/* 2a/9: CAPACITY DATA HAS CHANGED */
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* 28/0: NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
			/*
			 * In an ideal world, we'd make sure that we have the
			 * same medium mounted (if we'd seen one already) but
			 * instead we don't invalidate the pack here and flag
			 * below to retry the UAs. If we exhaust retries, then
			 * we'll invalidate it in dadone for ENXIO errors (which
			 * 28/0 will fail with eventually). Usually, retrying
			 * just works and/or we get this before we've opened the
			 * device (which clears the invalid flag).
			 */
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			/* 3f/3: INQUIRY DATA HAS CHANGED */
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* 3a/0: MEDIUM NOT PRESENT */
			/* 3a/1: MEDIUM NOT PRESENT - TRAY CLOSED */
			/* 3a/2: MEDIUM NOT PRESENT - TRAY OPEN */
			/* 3a/3: MEDIUM NOT PRESENT - LOADABLE */
			/* 3a/4: MEDIUM NOT PRESENT - MEDIUM AUXILIARY MEMORY ACCESSIBLE */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	/* Account the failure in the per-periph I/O statistics. */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}
6343
6344
/*
 * Periodic callout that polls for media changes.  If the device is idle
 * (no pending CCBs, no TUR already queued or pending, normal state), it
 * takes a DA_REF_TUR reference and flags a TEST UNIT READY for dastart()
 * to issue; dadone_tur() drops the reference.  Reschedules itself while
 * da_poll_period is non-zero.
 */
static void
damediapoll(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
	    softc->state == DA_STATE_NORMAL &&
	    LIST_EMPTY(&softc->pending_ccbs)) {
		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
			daschedule(periph);
		}
	}

	/* Queue us up again */
	if (da_poll_period != 0) {
		callout_schedule_sbt(&softc->mediapoll_c,
		    da_poll_period * SBT_1S, 0, C_PREL(1));
	}
}
6366
6367
static void
6368
daprevent(struct cam_periph *periph, int action)
6369
{
6370
struct da_softc *softc;
6371
union ccb *ccb;
6372
int error;
6373
6374
cam_periph_assert(periph, MA_OWNED);
6375
softc = (struct da_softc *)periph->softc;
6376
6377
if (((action == PR_ALLOW)
6378
&& (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
6379
|| ((action == PR_PREVENT)
6380
&& (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
6381
return;
6382
}
6383
6384
ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6385
6386
scsi_prevent(&ccb->csio,
6387
/*retries*/1,
6388
/*cbcfp*/NULL,
6389
MSG_SIMPLE_Q_TAG,
6390
action,
6391
SSD_FULL_SIZE,
6392
5000);
6393
6394
error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
6395
SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
6396
6397
if (error == 0) {
6398
if (action == PR_ALLOW)
6399
softc->flags &= ~DA_FLAG_PACK_LOCKED;
6400
else
6401
softc->flags |= DA_FLAG_PACK_LOCKED;
6402
}
6403
6404
xpt_release_ccb(ccb);
6405
}
6406
6407
/*
 * Record the device's block size and capacity in the disk parameters,
 * derive stripe size/offset (from the long READ CAPACITY data, the 4K
 * quirk, or the UNMAP granularity), ask the SIM/controller for a boot
 * geometry, publish any changed long read-capacity data via the EDT
 * advinfo interface, and push the resulting values into the disk(9)
 * structure, resizing it if necessary.
 *
 * rcaplong may be NULL when only the short READ CAPACITY data was
 * available; rcap_len is the valid length of *rcaplong.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
    struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		/*
		 * Logical blocks per physical block exponent and the
		 * lowest aligned LBA come from the long capacity data;
		 * so does the protection type, when protection is enabled.
		 */
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
		if (rcaplong->prot & SRC16_PROT_EN)
			softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
			    SRC16_P_TYPE_SHIFT) + 1;
		else
			softc->p_type = 0;
	} else {
		lbppbe = 0;
		lalba = 0;
		softc->p_type = 0;
	}

	if (lbppbe > 0) {
		/* Physical block is 2^lbppbe logical blocks. */
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk. The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	memset(&ccg, 0, sizeof(ccg));
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother. This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		memset(&cdai, 0, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path,
			    "%s: failed to set read capacity advinfo\n",
			    __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Remember the data we stored for the next compare. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}
6535
6536
static void
6537
dasendorderedtag(void *arg)
6538
{
6539
struct cam_periph *periph = arg;
6540
struct da_softc *softc = periph->softc;
6541
6542
cam_periph_assert(periph, MA_OWNED);
6543
if (da_send_ordered) {
6544
if (!LIST_EMPTY(&softc->pending_ccbs)) {
6545
if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
6546
softc->flags |= DA_FLAG_NEED_OTAG;
6547
softc->flags &= ~DA_FLAG_WAS_OTAG;
6548
}
6549
}
6550
6551
/* Queue us up again */
6552
callout_schedule_sbt(&softc->sendordered_c,
6553
SBT_1S / DA_ORDEREDTAG_INTERVAL * da_default_timeout, 0,
6554
C_PREL(1));
6555
}
6556
6557
/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	/* Caller asked for no cache flushing on this shutdown. */
	if ((howto & RB_NOSYNC) != 0)
		return;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we paniced with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		/* Flush the whole disk; allow a generous one-hour timeout. */
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
		    softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}
6613
6614
#else /* !_KERNEL */
6615
6616
/*
6617
* XXX These are only left out of the kernel build to silence warnings. If,
6618
* for some reason these functions are used in the kernel, the ifdefs should
6619
* be moved so they are included both in the kernel and userland.
6620
*/
6621
void
6622
scsi_format_unit(struct ccb_scsiio *csio, uint32_t retries,
6623
void (*cbfcnp)(struct cam_periph *, union ccb *),
6624
uint8_t tag_action, uint8_t byte2, uint16_t ileave,
6625
uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
6626
uint32_t timeout)
6627
{
6628
struct scsi_format_unit *scsi_cmd;
6629
6630
scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
6631
scsi_cmd->opcode = FORMAT_UNIT;
6632
scsi_cmd->byte2 = byte2;
6633
scsi_ulto2b(ileave, scsi_cmd->interleave);
6634
6635
cam_fill_csio(csio,
6636
retries,
6637
cbfcnp,
6638
/*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6639
tag_action,
6640
data_ptr,
6641
dxfer_len,
6642
sense_len,
6643
sizeof(*scsi_cmd),
6644
timeout);
6645
}
6646
6647
void
6648
scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
6649
void (*cbfcnp)(struct cam_periph *, union ccb *),
6650
uint8_t tag_action, uint8_t list_format,
6651
uint32_t addr_desc_index, uint8_t *data_ptr,
6652
uint32_t dxfer_len, int minimum_cmd_size,
6653
uint8_t sense_len, uint32_t timeout)
6654
{
6655
uint8_t cdb_len;
6656
6657
/*
6658
* These conditions allow using the 10 byte command. Otherwise we
6659
* need to use the 12 byte command.
6660
*/
6661
if ((minimum_cmd_size <= 10)
6662
&& (addr_desc_index == 0)
6663
&& (dxfer_len <= SRDD10_MAX_LENGTH)) {
6664
struct scsi_read_defect_data_10 *cdb10;
6665
6666
cdb10 = (struct scsi_read_defect_data_10 *)
6667
&csio->cdb_io.cdb_bytes;
6668
6669
cdb_len = sizeof(*cdb10);
6670
bzero(cdb10, cdb_len);
6671
cdb10->opcode = READ_DEFECT_DATA_10;
6672
cdb10->format = list_format;
6673
scsi_ulto2b(dxfer_len, cdb10->alloc_length);
6674
} else {
6675
struct scsi_read_defect_data_12 *cdb12;
6676
6677
cdb12 = (struct scsi_read_defect_data_12 *)
6678
&csio->cdb_io.cdb_bytes;
6679
6680
cdb_len = sizeof(*cdb12);
6681
bzero(cdb12, cdb_len);
6682
cdb12->opcode = READ_DEFECT_DATA_12;
6683
cdb12->format = list_format;
6684
scsi_ulto4b(dxfer_len, cdb12->alloc_length);
6685
scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
6686
}
6687
6688
cam_fill_csio(csio,
6689
retries,
6690
cbfcnp,
6691
/*flags*/ CAM_DIR_IN,
6692
tag_action,
6693
data_ptr,
6694
dxfer_len,
6695
sense_len,
6696
cdb_len,
6697
timeout);
6698
}
6699
6700
void
6701
scsi_sanitize(struct ccb_scsiio *csio, uint32_t retries,
6702
void (*cbfcnp)(struct cam_periph *, union ccb *),
6703
uint8_t tag_action, uint8_t byte2, uint16_t control,
6704
uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
6705
uint32_t timeout)
6706
{
6707
struct scsi_sanitize *scsi_cmd;
6708
6709
scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
6710
scsi_cmd->opcode = SANITIZE;
6711
scsi_cmd->byte2 = byte2;
6712
scsi_cmd->control = control;
6713
scsi_ulto2b(dxfer_len, scsi_cmd->length);
6714
6715
cam_fill_csio(csio,
6716
retries,
6717
cbfcnp,
6718
/*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6719
tag_action,
6720
data_ptr,
6721
dxfer_len,
6722
sense_len,
6723
sizeof(*scsi_cmd),
6724
timeout);
6725
}
6726
6727
#endif /* _KERNEL */
6728
6729
void
6730
scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
6731
void (*cbfcnp)(struct cam_periph *, union ccb *),
6732
uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
6733
uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
6734
uint8_t sense_len, uint32_t timeout)
6735
{
6736
struct scsi_zbc_out *scsi_cmd;
6737
6738
scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
6739
scsi_cmd->opcode = ZBC_OUT;
6740
scsi_cmd->service_action = service_action;
6741
scsi_u64to8b(zone_id, scsi_cmd->zone_id);
6742
scsi_cmd->zone_flags = zone_flags;
6743
6744
cam_fill_csio(csio,
6745
retries,
6746
cbfcnp,
6747
/*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6748
tag_action,
6749
data_ptr,
6750
dxfer_len,
6751
sense_len,
6752
sizeof(*scsi_cmd),
6753
timeout);
6754
}
6755
6756
void
6757
scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
6758
void (*cbfcnp)(struct cam_periph *, union ccb *),
6759
uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
6760
uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
6761
uint8_t sense_len, uint32_t timeout)
6762
{
6763
struct scsi_zbc_in *scsi_cmd;
6764
6765
scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
6766
scsi_cmd->opcode = ZBC_IN;
6767
scsi_cmd->service_action = service_action;
6768
scsi_ulto4b(dxfer_len, scsi_cmd->length);
6769
scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
6770
scsi_cmd->zone_options = zone_options;
6771
6772
cam_fill_csio(csio,
6773
retries,
6774
cbfcnp,
6775
/*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
6776
tag_action,
6777
data_ptr,
6778
dxfer_len,
6779
sense_len,
6780
sizeof(*scsi_cmd),
6781
timeout);
6782
6783
}
6784
6785
/*
 * Build an ATA ZAC management "out" command wrapped in a SCSI ATA
 * PASS-THROUGH CDB.  When use_ncq is zero the non-queued ZAC MANAGEMENT
 * OUT command is used; otherwise the NCQ forms (NCQ NON DATA for
 * zero-length transfers, SEND FPDMA QUEUED when data accompanies the
 * command) are used.  Returns 0 on success or 1 if the requested
 * transfer length cannot be encoded.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
    void (*cbfcnp)(struct cam_periph *, union ccb *),
    uint8_t tag_action, int use_ncq,
    uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
    uint8_t *data_ptr, uint32_t dxfer_len,
    uint8_t *cdb_storage, size_t cdb_storage_len,
    uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		/* Zone action in the low nibble, zone flags in the high byte. */
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field. That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}
		}

		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action,
	    /*protocol*/ protocol,
	    /*ata_flags*/ ata_flags,
	    /*features*/ features_out,
	    /*sector_count*/ sectors_out,
	    /*lba*/ zone_id,
	    /*command*/ command_out,
	    /*device*/ 0,
	    /*icc*/ 0,
	    /*auxiliary*/ auxiliary,
	    /*control*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*cdb_storage*/ cdb_storage,
	    /*cdb_storage_len*/ cdb_storage_len,
	    /*minimum_cmd_size*/ 0,
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*timeout*/ timeout);

bailout:

	return (retval);
}
6895
6896
int
6897
scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6898
void (*cbfcnp)(struct cam_periph *, union ccb *),
6899
uint8_t tag_action, int use_ncq,
6900
uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6901
uint8_t *data_ptr, uint32_t dxfer_len,
6902
uint8_t *cdb_storage, size_t cdb_storage_len,
6903
uint8_t sense_len, uint32_t timeout)
6904
{
6905
uint8_t command_out, protocol;
6906
uint16_t features_out, sectors_out;
6907
uint32_t auxiliary;
6908
int ata_flags;
6909
int retval;
6910
6911
retval = 0;
6912
ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6913
6914
if (use_ncq == 0) {
6915
command_out = ATA_ZAC_MANAGEMENT_IN;
6916
/* XXX KDM put a macro here */
6917
features_out = (zm_action & 0xf) | (zone_flags << 8);
6918
sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6919
protocol = AP_PROTO_DMA;
6920
ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6921
auxiliary = 0;
6922
} else {
6923
ata_flags |= AP_FLAG_TLEN_FEAT;
6924
6925
command_out = ATA_RECV_FPDMA_QUEUED;
6926
sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6927
6928
/*
6929
* For RECEIVE FPDMA QUEUED, the transfer length is
6930
* encoded in the FEATURE register, and 0 means
6931
* that 65536 512 byte blocks are to be transferred.
6932
* In practice, it seems unlikely that we'll see
6933
* a transfer that large, and it may confuse the
6934
* the SAT layer, because generally that means that
6935
* 0 bytes should be transferred.
6936
*/
6937
if (dxfer_len == (65536 * 512)) {
6938
features_out = 0;
6939
} else if (dxfer_len <= (65535 * 512)) {
6940
features_out = ((dxfer_len >> 9) & 0xffff);
6941
} else {
6942
/* The transfer is too big. */
6943
retval = 1;
6944
goto bailout;
6945
}
6946
auxiliary = (zm_action & 0xf) | (zone_flags << 8),
6947
protocol = AP_PROTO_FPDMA;
6948
}
6949
6950
protocol |= AP_EXTEND;
6951
6952
retval = scsi_ata_pass(csio,
6953
retries,
6954
cbfcnp,
6955
/*flags*/ CAM_DIR_IN,
6956
tag_action,
6957
/*protocol*/ protocol,
6958
/*ata_flags*/ ata_flags,
6959
/*features*/ features_out,
6960
/*sector_count*/ sectors_out,
6961
/*lba*/ zone_id,
6962
/*command*/ command_out,
6963
/*device*/ 0,
6964
/*icc*/ 0,
6965
/*auxiliary*/ auxiliary,
6966
/*control*/ 0,
6967
/*data_ptr*/ data_ptr,
6968
/*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6969
/*cdb_storage*/ cdb_storage,
6970
/*cdb_storage_len*/ cdb_storage_len,
6971
/*minimum_cmd_size*/ 0,
6972
/*sense_len*/ SSD_FULL_SIZE,
6973
/*timeout*/ timeout);
6974
6975
bailout:
6976
return (retval);
6977
}
6978
6979