GitHub Repository: torvalds/linux
Path: blob/master/drivers/ata/libata-scsi.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-scsi.c - helper library for ATA
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from
 *  - http://www.t10.org/
 *  - http://www.t13.org/
 */

#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/unaligned.h>
#include <linux/ioprio.h>
#include <linux/of.h>

#include "libata.h"
#include "libata-transport.h"

#define ATA_SCSI_RBUF_SIZE	2048

static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];

typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);

#define RW_RECOVERY_MPAGE		0x1
#define RW_RECOVERY_MPAGE_LEN		12
#define CACHE_MPAGE			0x8
#define CACHE_MPAGE_LEN			20
#define CONTROL_MPAGE			0xa
#define CONTROL_MPAGE_LEN		12
#define ALL_MPAGES			0x3f
#define ALL_SUB_MPAGES			0xff
#define CDL_T2A_SUB_MPAGE		0x07
#define CDL_T2B_SUB_MPAGE		0x08
#define CDL_T2_SUB_MPAGE_LEN		232
#define ATA_FEATURE_SUB_MPAGE		0xf2
#define ATA_FEATURE_SUB_MPAGE_LEN	16

static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,
	(1 << 7),	/* AWRE */
	0,		/* read retry count */
	0, 0, 0, 0,
	0,		/* write retry count */
	0, 0, 0
};

static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* contains WCE, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* contains DRA, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0
};

static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,	/* DSENSE=0, GLTSD=1 */
	0,	/* [QAM+QERR may be 1, see 05-359r1] */
	0, 0, 0, 0, 0xff, 0xff,
	0, 30	/* extended self test time, see 05-359r1 */
};

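/*
 * Sysfs handlers for the "unload_heads" attribute: the show side reports
 * the remaining head unpark time in milliseconds, and the store side parks
 * the device heads for the requested duration (the special inputs -1 and
 * -2 clear and set ATA_DFLAG_NO_UNLOAD, respectively).
 */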
static ssize_t ata_scsi_park_show(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long now;
	unsigned int msecs;
	int rc = 0;

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irq(ap->lock);
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	link = dev->link;
	now = jiffies;
	if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
	    link->eh_context.unloaded_mask & (1 << dev->devno) &&
	    time_after(dev->unpark_deadline, now))
		msecs = jiffies_to_msecs(dev->unpark_deadline - now);
	else
		msecs = 0;

unlock:
	spin_unlock_irq(ap->lock);

	return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}

static ssize_t ata_scsi_park_store(struct device *device,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *ap;
	struct ata_device *dev;
	int input;
	unsigned long flags;
	int rc;

	rc = kstrtoint(buf, 10, &input);
	if (rc)
		return rc;
	if (input < -2)
		return -EINVAL;
	if (input > ATA_TMOUT_MAX_PARK) {
		rc = -EOVERFLOW;
		input = ATA_TMOUT_MAX_PARK;
	}

	ap = ata_shost_to_port(sdev->host);

	spin_lock_irqsave(ap->lock, flags);
	dev = ata_scsi_find_dev(ap, sdev);
	if (unlikely(!dev)) {
		rc = -ENODEV;
		goto unlock;
	}
	if (dev->class != ATA_DEV_ATA &&
	    dev->class != ATA_DEV_ZAC) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	if (input >= 0) {
		if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
			rc = -EOPNOTSUPP;
			goto unlock;
		}

		dev->unpark_deadline = ata_deadline(jiffies, input);
		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
		ata_port_schedule_eh(ap);
		complete(&ap->park_req_pending);
	} else {
		switch (input) {
		case -1:
			dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
			break;
		case -2:
			dev->flags |= ATA_DFLAG_NO_UNLOAD;
			break;
		}
	}
unlock:
	spin_unlock_irqrestore(ap->lock, flags);

	return rc ? rc : len;
}
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
	    ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);

bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq)
{
	/*
	 * If sk == NO_SENSE, and asc + ascq == NO ADDITIONAL SENSE INFORMATION,
	 * then there is no sense data to add.
	 */
	if (sk == 0 && asc == 0 && ascq == 0)
		return false;

	/* If sk > COMPLETED, sense data is bogus. */
	if (sk > COMPLETED)
		return false;

	return true;
}

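/*
 * ata_scsi_set_sense - build sense data for @cmd, honoring the device's
 * D_SENSE setting (descriptor versus fixed format sense data).
 */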
void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
			u8 sk, u8 asc, u8 ascq)
{
	bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);

	scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}

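/*
 * ata_scsi_set_sense_information - set the INFORMATION field of the sense
 * data to the block address read back from the result taskfile.
 */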
static void ata_scsi_set_sense_information(struct ata_queued_cmd *qc)
{
	u64 information;

	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
		ata_dev_dbg(qc->dev,
			    "missing result TF: can't set INFORMATION sense field\n");
		return;
	}

	information = ata_tf_read_block(&qc->result_tf, qc->dev);
	if (information == U64_MAX)
		return;

	scsi_set_sense_information(qc->scsicmd->sense_buffer,
				   SCSI_SENSE_BUFFERSIZE, information);
}

/**
 * ata_scsi_set_passthru_sense_fields - Set ATA fields in sense buffer
 * @qc: ATA PASS-THROUGH command.
 *
 * Populates "ATA Status Return sense data descriptor" / "Fixed format
 * sense data" with ATA taskfile fields.
 *
 * LOCKING:
 * None.
 */
static void ata_scsi_set_passthru_sense_fields(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;

	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
		ata_dev_dbg(dev,
			    "missing result TF: can't set ATA PT sense fields\n");
		return;
	}

	if ((sb[0] & 0x7f) >= 0x72) {
		unsigned char *desc;
		u8 len;

		/* descriptor format */
		len = sb[7];
		desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
		if (!desc) {
			if (SCSI_SENSE_BUFFERSIZE < len + 14)
				return;
			sb[7] = len + 14;
			desc = sb + 8 + len;
		}
		desc[0] = 9;
		desc[1] = 12;
		/*
		 * Copy registers into sense buffer.
		 */
		desc[2] = 0x00;
		desc[3] = tf->error;
		desc[5] = tf->nsect;
		desc[7] = tf->lbal;
		desc[9] = tf->lbam;
		desc[11] = tf->lbah;
		desc[12] = tf->device;
		desc[13] = tf->status;

		/*
		 * Fill in Extend bit, and the high order bytes
		 * if applicable.
		 */
		if (tf->flags & ATA_TFLAG_LBA48) {
			desc[2] |= 0x01;
			desc[4] = tf->hob_nsect;
			desc[6] = tf->hob_lbal;
			desc[8] = tf->hob_lbam;
			desc[10] = tf->hob_lbah;
		}
	} else {
		/* Fixed sense format */
		sb[0] |= 0x80;
		sb[3] = tf->error;
		sb[4] = tf->status;
		sb[5] = tf->device;
		sb[6] = tf->nsect;
		if (tf->flags & ATA_TFLAG_LBA48) {
			sb[8] |= 0x80;
			if (tf->hob_nsect)
				sb[8] |= 0x40;
			if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
				sb[8] |= 0x20;
		}
		sb[9] = tf->lbal;
		sb[10] = tf->lbam;
		sb[11] = tf->lbah;
	}
}

static void ata_scsi_set_invalid_field(struct ata_device *dev,
				       struct scsi_cmnd *cmd, u16 field, u8 bit)
{
	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in CDB" */
	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     field, bit, 1);
}

static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
					   struct scsi_cmnd *cmd, u16 field)
{
	/* "Invalid field in parameter list" */
	ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
	scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     field, 0xff, 0);
}

static struct attribute *ata_common_sdev_attrs[] = {
	&dev_attr_unload_heads.attr,
	NULL
};

static const struct attribute_group ata_common_sdev_attr_group = {
	.attrs = ata_common_sdev_attrs
};

const struct attribute_group *ata_common_sdev_groups[] = {
	&ata_common_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ata_common_sdev_groups);

/**
 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 * @sdev: SCSI device for which BIOS geometry is to be determined
 * @bdev: block device associated with @sdev
 * @capacity: capacity of SCSI device
 * @geom: location to which geometry will be output
 *
 * Generic bios head/sector/cylinder calculator
 * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
 * mapping. Some situations may arise where the disk is not
 * bootable if this is not used.
 *
 * LOCKING:
 * Defined by the SCSI layer. We don't really care.
 *
 * RETURNS:
 * Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		       sector_t capacity, int geom[])
{
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255*63);
	geom[2] = capacity;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_std_bios_param);

/**
 * ata_scsi_unlock_native_capacity - unlock native capacity
 * @sdev: SCSI device to adjust device capacity for
 *
 * This function is called if a partition on @sdev extends beyond
 * the end of the device.  It requests EH to unlock HPA.
 *
 * LOCKING:
 * Defined by the SCSI layer.  Might sleep.
 */
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	dev = ata_scsi_find_dev(ap, sdev);
	if (dev && dev->n_sectors < dev->n_native_sectors) {
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		dev->link->eh_info.action |= ATA_EH_RESET;
		ata_port_schedule_eh(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);
	ata_port_wait_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);

/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @ap: target port
 * @sdev: SCSI device to get identify data for
 * @arg: User buffer area for identify data
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
			    void __user *arg)
{
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];

	if (!dev)
		return -ENOMSG;

	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}

/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL;
	int argsize = 0;
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
		.sense = sensebuf,
		.sense_len = sizeof(sensebuf),
	};
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	memset(sensebuf, 0, sizeof(sensebuf));
	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		argsize = ATA_SECT_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1]  = (4 << 1);	/* PIO Data-in */
		scsi_cmd[2]  = 0x0e;		/* no off.line or cc, read from dev,
						   block count in sector count field */
	} else {
		scsi_cmd[1]  = (3 << 1);	/* Non-data */
		scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == ATA_CMD_SMART) {	/* hack -- ide driver does this too */
		scsi_cmd[6]  = args[3];
		scsi_cmd[8]  = args[1];
		scsi_cmd[10] = ATA_SMART_LBAM_PASS;
		scsi_cmd[12] = ATA_SMART_LBAH_PASS;
	} else {
		scsi_cmd[6]  = args[1];
	}
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf,
				      argsize, 10 * HZ, 5, &exec_args);
	if (cmd_result < 0) {
		rc = cmd_result;
		goto error;
	}
	if (scsi_sense_valid(&sshdr)) {	/* sense data available */
		u8 *desc = sensebuf + 8;

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that. */
		if (scsi_status_is_check_condition(cmd_result)) {
			if (sshdr.sense_key == RECOVERED_ERROR &&
			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace a few ATA registers (same as drivers/ide) */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(argbuf);
	return rc;
}

/**
 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 sensebuf[SCSI_SENSE_BUFFERSIZE];
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[7];
	struct scsi_sense_hdr sshdr;
	int cmd_result;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
		.sense = sensebuf,
		.sense_len = sizeof(sensebuf),
	};

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	memset(sensebuf, 0, sizeof(sensebuf));
	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[0]  = ATA_16;
	scsi_cmd[1]  = (3 << 1);	/* Non-data */
	scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
	scsi_cmd[4]  = args[1];
	scsi_cmd[6]  = args[2];
	scsi_cmd[8]  = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL,
				      0, 10 * HZ, 5, &exec_args);
	if (cmd_result < 0) {
		rc = cmd_result;
		goto error;
	}
	if (scsi_sense_valid(&sshdr)) {	/* sense data available */
		u8 *desc = sensebuf + 8;

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error. Filter that. */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			if (sshdr.sense_key == RECOVERED_ERROR &&
			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace ATA registers */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			args[3] = desc[7];	/* lbal */
			args[4] = desc[9];	/* lbam */
			args[5] = desc[11];	/* lbah */
			args[6] = desc[12];	/* select */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

error:
	return rc;
}

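/*
 * ata_ioc32 - report whether the port uses 32-bit PIO data transfers,
 * for the HDIO_GET_32BIT/HDIO_SET_32BIT ioctls handled below.
 */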
static bool ata_ioc32(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_PIO_DMA)
		return true;
	if (ap->pflags & ATA_PFLAG_PIO32)
		return true;
	return false;
}

/*
 * This handles both native and compat commands, so anything added
 * here must have a compatible argument, or check in_compat_syscall()
 */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
		       unsigned int cmd, void __user *arg)
{
	unsigned long val;
	int rc = -EINVAL;
	unsigned long flags;

	switch (cmd) {
	case HDIO_GET_32BIT:
		spin_lock_irqsave(ap->lock, flags);
		val = ata_ioc32(ap);
		spin_unlock_irqrestore(ap->lock, flags);
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			return put_user(val, (compat_ulong_t __user *)arg);
#endif
		return put_user(val, (unsigned long __user *)arg);

	case HDIO_SET_32BIT:
		val = (unsigned long) arg;
		rc = 0;
		spin_lock_irqsave(ap->lock, flags);
		if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
			if (val)
				ap->pflags |= ATA_PFLAG_PIO32;
			else
				ap->pflags &= ~ATA_PFLAG_PIO32;
		} else {
			if (val != ata_ioc32(ap))
				rc = -EINVAL;
		}
		spin_unlock_irqrestore(ap->lock, flags);
		return rc;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(ap, scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);

int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd,
		   void __user *arg)
{
	return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
				  scsidev, cmd, arg);
}
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);

/**
 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
 * @dev: ATA device to which the new command is attached
 * @cmd: SCSI command that originated this ATA command
 *
 * Obtain a reference to an unused ata_queued_cmd structure,
 * which is the basic libata structure representing a single
 * ATA command sent to the hardware.
 *
 * If a command was available, fill in the SCSI-specific
 * portions of the structure with information on the
 * current command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Command allocated, or %NULL if none available.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
					      struct scsi_cmnd *cmd)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int tag;

	if (unlikely(ata_port_is_frozen(ap)))
		goto fail;

	if (ap->flags & ATA_FLAG_SAS_HOST) {
		/*
		 * SAS hosts may queue > ATA_MAX_QUEUE commands so use
		 * unique per-device budget token as a tag.
		 */
		if (WARN_ON_ONCE(cmd->budget_token >= ATA_MAX_QUEUE))
			goto fail;
		tag = cmd->budget_token;
	} else {
		tag = scsi_cmd_to_rq(cmd)->tag;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	qc->scsicmd = cmd;
	qc->scsidone = scsi_done;

	qc->sg = scsi_sglist(cmd);
	qc->n_elem = scsi_sg_count(cmd);

	if (scsi_cmd_to_rq(cmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;

	return qc;

fail:
	set_host_byte(cmd, DID_OK);
	set_status_byte(cmd, SAM_STAT_TASK_SET_FULL);
	scsi_done(cmd);
	return NULL;
}

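/*
 * ata_qc_set_pc_nbytes - set the packet command transfer length from the
 * SCSI buffer length plus any extra bytes requested by the SCSI layer.
 */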
static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	qc->extrabytes = scmd->extra_len;
	qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
}

/**
 * ata_to_sense_error - convert ATA error to SCSI error
 * @drv_stat: value contained in ATA status register
 * @drv_err: value contained in ATA error register
 * @sk: the sense key we'll fill out
 * @asc: the additional sense code we'll fill out
 * @ascq: the additional sense code qualifier we'll fill out
 *
 * Converts an ATA error into a SCSI error.  Fill out pointers to
 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
 * format sense blocks.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_to_sense_error(u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
			       u8 *ascq)
{
	int i;

	/* Based on the 3ware driver translation table */
	static const unsigned char sense_table[][4] = {
		/* BBD|ECC|ID|MAR */
		{0xd1,		ABORTED_COMMAND, 0x00, 0x00},
			// Device busy                  Aborted command
		/* BBD|ECC|ID */
		{0xd0,		ABORTED_COMMAND, 0x00, 0x00},
			// Device busy                  Aborted command
		/* ECC|MC|MARK */
		{0x61,		HARDWARE_ERROR, 0x00, 0x00},
			// Device fault                 Hardware error
		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
		{0x84,		ABORTED_COMMAND, 0x47, 0x00},
			// Data CRC error               SCSI parity error
		/* MC|ID|ABRT|TRK0|MARK */
		{0x37,		NOT_READY, 0x04, 0x00},
			// Unit offline                 Not ready
		/* MCR|MARK */
		{0x09,		NOT_READY, 0x04, 0x00},
			// Unrecovered disk error       Not ready
		/* Bad address mark */
		{0x01,		MEDIUM_ERROR, 0x13, 0x00},
			// Address mark not found for data field
		/* TRK0 - Track 0 not found */
		{0x02,		HARDWARE_ERROR, 0x00, 0x00},
			// Hardware error
		/* Abort: 0x04 is not translated here, see below */
		/* Media change request */
		{0x08,		NOT_READY, 0x04, 0x00},
			// FIXME: faking offline
		/* SRV/IDNF - ID not found */
		{0x10,		ILLEGAL_REQUEST, 0x21, 0x00},
			// Logical address out of range
		/* MC - Media Changed */
		{0x20,		UNIT_ATTENTION, 0x28, 0x00},
			// Not ready to ready change, medium may have changed
		/* ECC - Uncorrectable ECC error */
		{0x40,		MEDIUM_ERROR, 0x11, 0x04},
			// Unrecovered read error
		/* BBD - block marked bad */
		{0x80,		MEDIUM_ERROR, 0x11, 0x04},
			// Block marked bad	Medium error, unrecovered read error
		{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
	};
	static const unsigned char stat_table[][4] = {
		/* Busy: must be first because BUSY means no other bits valid */
		{ ATA_BUSY,	ABORTED_COMMAND, 0x00, 0x00 },
		/* Device fault: INTERNAL TARGET FAILURE */
		{ ATA_DF,	HARDWARE_ERROR, 0x44, 0x00 },
		/* Corrected data error */
		{ ATA_CORR,	RECOVERED_ERROR, 0x00, 0x00 },

		{ 0xFF, 0xFF, 0xFF, 0xFF }, /* END mark */
	};

	/*
	 * Is this an error we can process/parse
	 */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;	/* Ignore the err bits, they're invalid */
	}

	if (drv_err) {
		/* Look for drv_err */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* Look for best matches first */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				return;
			}
		}
	}

	/*
	 * Fall back to interpreting status bits.  Note that if the drv_err
	 * has only the ABRT bit set, we decode drv_stat.  ABRT by itself
	 * is not descriptive enough.
	 */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			return;
		}
	}

	/*
	 * We need a sensible error return here, which is tricky, and one
	 * that won't cause people to do things like return a disk wrongly.
	 */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;
}

/*
 * ata_gen_passthru_sense - Generate check condition sense block.
 * @qc: Command that completed.
 *
 * This function is specific to the ATA pass through commands.
 * Regardless of whether the command errored or not, return a sense
 * block. If there was no error, we get the request from an ATA
 * passthrough command, so we use the following sense data:
 * sk = RECOVERED ERROR
 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
 *
 * LOCKING:
 * None.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	u8 sense_key, asc, ascq;

	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
		ata_dev_dbg(dev,
			    "missing result TF: can't generate ATA PT sense data\n");
		if (qc->err_mask)
			ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
		return;
	}

	/*
	 * Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(tf->status, tf->error,
				   &sense_key, &asc, &ascq);
		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
	} else {
		/*
		 * ATA PASS-THROUGH INFORMATION AVAILABLE
		 *
		 * Note: we are supposed to call ata_scsi_set_sense(), which
		 * respects the D_SENSE bit, instead of unconditionally
		 * generating the sense data in descriptor format. However,
		 * because hdparm, hddtemp, and udisks incorrectly assume sense
		 * data in descriptor format, without even looking at the
		 * RESPONSE CODE field in the returned sense data (to see which
		 * format the returned sense data is in), we are stuck with
		 * being bug compatible with older kernels.
		 */
		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
	}
}

/**
 * ata_gen_ata_sense - generate a SCSI fixed sense block
 * @qc: Command that we are erroring out
 *
 * Generate sense block for a failed ATA command @qc.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	u8 sense_key, asc, ascq;

	if (ata_dev_disabled(dev)) {
		/* Device disabled after error recovery */
		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
		ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
		ata_dev_dbg(dev,
			    "Missing result TF: reporting aborted command\n");
		goto aborted;
	}

	/* Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(tf->status, tf->error,
				   &sense_key, &asc, &ascq);
		ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
		return;
	}

	/* Could not decode error */
	ata_dev_warn(dev,
		     "Could not decode error 0x%x, status 0x%x (err_mask=0x%x)\n",
		     tf->error, tf->status, qc->err_mask);
aborted:
	ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
}

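/*
 * ata_scsi_sdev_config - apply SCSI device settings common to all ATA
 * devices, regardless of device class.
 */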
void ata_scsi_sdev_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	sdev->no_write_same = 1;

	/* Schedule policy is determined by ->qc_defer() callback and
	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
	 * prevent SCSI midlayer from automatically deferring
	 * requests.
	 */
	sdev->max_device_blocked = 1;
}

/**
 * ata_scsi_dma_need_drain - Check whether data transfer may overflow
 * @rq: request to be checked
 *
 * ATAPI commands which transfer variable length data to host
 * might overflow due to application error or hardware bug.  This
 * function checks whether overflow should be drained and ignored
 * for @rq.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * %true if draining is needed; %false otherwise.
 */
bool ata_scsi_dma_need_drain(struct request *rq)
{
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);

	return atapi_cmd_type(scmd->cmnd[0]) == ATAPI_MISC;
}
EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);

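/*
 * ata_scsi_dev_config - configure the SCSI device and its queue limits
 * (max sectors, DMA padding and alignment, queue depth, power management
 * behavior) from the ATA device's capabilities.
 */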
int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
			struct ata_device *dev)
{
	int depth = 1;

	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors);
	lim->max_hw_sectors = dev->max_sectors;

	if (dev->class == ATA_DEV_ATAPI) {
		sdev->sector_size = ATA_SECT_SIZE;

		/* set DMA padding */
		lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1;

		/* make room for appending the drain */
		lim->max_segments--;

		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
		if (!sdev->dma_drain_buf) {
			ata_dev_err(dev, "drain buffer allocation failed\n");
			return -ENOMEM;
		}
	} else {
		sdev->sector_size = ata_id_logical_sector_size(dev->id);

		/*
		 * Ask the sd driver to issue START STOP UNIT on runtime suspend
		 * and resume and shutdown only. For system level suspend/resume,
		 * devices power state is handled directly by libata EH.
		 * Given that disks are always spun up on system resume, also
		 * make sure that the sd driver forces runtime suspended disks
		 * to be resumed to correctly reflect the power state of the
		 * device.
		 */
		sdev->manage_runtime_start_stop = 1;
		sdev->manage_shutdown = 1;
		sdev->force_runtime_start_on_system_start = 1;
	}

	/*
	 * ata_pio_sectors() expects buffer for each sector to not cross
	 * page boundary.  Enforce it by requiring buffers to be sector
	 * aligned, which works iff sector_size is not larger than
	 * PAGE_SIZE.  ATAPI devices also need the alignment as
	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
	 */
	if (sdev->sector_size > PAGE_SIZE)
		ata_dev_warn(dev,
			     "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
			     sdev->sector_size);

	lim->dma_alignment = sdev->sector_size - 1;

	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	if (ata_ncq_supported(dev))
		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
	depth = min(ATA_MAX_QUEUE, depth);
	scsi_change_queue_depth(sdev, depth);

	if (dev->flags & ATA_DFLAG_TRUSTED)
		sdev->security_supported = 1;

	dev->sdev = sdev;
	return 0;
}

/**
 * ata_scsi_sdev_init - Early setup of SCSI device
 * @sdev: SCSI device to examine
 *
 * This is called from scsi_alloc_sdev() when the scsi device
 * associated with an ATA device is scanned on a port.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
int ata_scsi_sdev_init(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct device_link *link;

	ata_scsi_sdev_config(sdev);

	/*
	 * Create a link from the ata_port device to the scsi device to ensure
	 * that PM does suspend/resume in the correct order: the scsi device is
	 * consumer (child) and the ata port the supplier (parent).
	 */
	link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
			       DL_FLAG_STATELESS |
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link) {
		ata_port_err(ap, "Failed to create link to scsi device %s\n",
			     dev_name(&sdev->sdev_gendev));
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);

/**
 * ata_scsi_sdev_configure - Set SCSI device attributes
 * @sdev: SCSI device to examine
 * @lim: queue limits
 *
 * This is called before we actually start reading
 * and writing to the device, to configure certain
 * SCSI mid-layer behaviors.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);

	if (dev)
		return ata_scsi_dev_config(sdev, lim, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);

/**
 * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * @sdev is about to be destroyed for hot/warm unplugging.  If
 * this unplugging was initiated by libata as indicated by NULL
 * dev->sdev, this function doesn't have to do anything.
 * Otherwise, SCSI layer initiated warm-unplug is in progress.
 * Clear dev->sdev, schedule the device for ATA detach and invoke
 * EH.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	unsigned long flags;
	struct ata_device *dev;

	device_link_remove(&sdev->sdev_gendev, &ap->tdev);

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

	kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);

/**
 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 * (to start).  Perhaps these commands should be preceded by
 * CHECK POWER MODE to see what power mode the device is already in.
 * [See SAT revision 5 at www.t10.org]
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	u16 fp;
	u8 bp = 0xff;

	if (scmd->cmd_len < 5) {
		fp = 4;
		goto invalid_fld;
	}

	/* LOEJ bit set not supported */
	if (cdb[4] & 0x2) {
		fp = 4;
		bp = 1;
		goto invalid_fld;
	}

	/* Power conditions not supported */
	if (((cdb[4] >> 4) & 0xf) != 0) {
		fp = 4;
		bp = 3;
		goto invalid_fld;
	}

	/* Ignore IMMED bit (cdb[1] & 0x1), violates sat-r05 */
	if (!ata_dev_power_init_tf(qc->dev, &qc->tf, cdb[4] & 0x1)) {
		ata_scsi_set_sense(qc->dev, scmd, ABORTED_COMMAND, 0, 0);
		return 1;
	}

	/*
	 * Standby and Idle condition timers could be implemented but that
	 * would require libata to implement the Power condition mode page
	 * and allow the user to change it. Changing mode pages requires
	 * MODE SELECT to be implemented.
	 */

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;
}

/**
 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue FLUSH CACHE or
 * FLUSH CACHE EXT.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;

	tf->flags |= ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
		tf->command = ATA_CMD_FLUSH_EXT;
	else
		tf->command = ATA_CMD_FLUSH;

	/* flush is critical for IO integrity, consider it an IO command */
	qc->flags |= ATA_QCFLAG_IO;

	return 0;
}

/**
 * scsi_6_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 6-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
	*plen = cdb[4];
}

/**
 * scsi_10_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 10-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static inline void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be32(&cdb[2]);
	*plen = get_unaligned_be16(&cdb[7]);
}

/**
 * scsi_16_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 16-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	*plba = get_unaligned_be64(&cdb[2]);
	*plen = get_unaligned_be32(&cdb[10]);
}

/**
 * scsi_dld - Get duration limit descriptor index
 * @cdb: SCSI command to translate
 *
 * Returns the dld bits indicating the index of a command duration limit
 * descriptor.
 */
static inline int scsi_dld(const u8 *cdb)
{
	return ((cdb[1] & 0x01) << 2) | ((cdb[14] >> 6) & 0x03);
}

/**
 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;
	u16 fp;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	switch (cdb[0]) {
	case VERIFY:
		if (scmd->cmd_len < 10) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		break;
	case VERIFY_16:
		if (scmd->cmd_len < 16) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		break;
	default:
		fp = 0;
		goto invalid_fld;
	}

	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

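/*
 * ata_check_nblocks - for passthrough requests, check that the requested
 * number of blocks fits within the request's data buffer.
 */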
static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	u32 req_blocks;

	if (!blk_rq_is_passthrough(rq))
		return true;

	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
	if (n_blocks > req_blocks)
		return false;

	return true;
}

/**
 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts any of six SCSI read/write commands into the
 * ATA counterpart, including starting sector (LBA),
 * sector count, and taking into account the device's LBA48
 * support.
 *
 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 * %WRITE_16 are currently supported.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	unsigned int tf_flags = 0;
	int dld = 0;
	u64 block;
	u32 n_block;
	int rc;
	u16 fp = 0;

	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		tf_flags |= ATA_TFLAG_WRITE;
		break;
	}

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10)) {
			fp = 9;
			goto invalid_fld;
		}
		scsi_10_lba_len(cdb, &block, &n_block);
		if (cdb[1] & (1 << 3))
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6)) {
			fp = 5;
			goto invalid_fld;
		}
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16)) {
			fp = 15;
			goto invalid_fld;
		}
		scsi_16_lba_len(cdb, &block, &n_block);
		dld = scsi_dld(cdb);
		if (cdb[1] & (1 << 3))
			tf_flags |= ATA_TFLAG_FUA;
		if (!ata_check_nblocks(scmd, n_block))
			goto invalid_fld;
		break;
	default:
		fp = 0;
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * scmd->device->sector_size;

	rc = ata_build_rw_tf(qc, block, n_block, tf_flags, dld, class);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
	return 1;

out_of_range:
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

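/*
 * ata_qc_done - free the qc and complete the originating SCSI command.
 * The done callback is saved first because ata_qc_free() invalidates
 * the qc.
 */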
static void ata_qc_done(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	void (*done)(struct scsi_cmnd *) = qc->scsidone;

	ata_qc_free(qc);
	done(cmd);
}

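/*
 * ata_scsi_qc_complete - translate a completed qc into SCSI status and
 * sense data, then complete the SCSI command.
 */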
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	bool have_sense = qc->flags & ATA_QCFLAG_SENSE_VALID;
	bool is_ata_passthru = cdb[0] == ATA_16 || cdb[0] == ATA_12;
	bool is_ck_cond_request = cdb[2] & 0x20;
	bool is_error = qc->err_mask != 0;

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to [CK_COND=1], a check
	 * condition is generated and the ATA register values are returned
	 * whether the command completed successfully or not. If there
	 * was no error, and CK_COND=1, we use the following sense data:
	 * sk = RECOVERED ERROR
	 * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE
	 */
	if (is_ata_passthru && (is_ck_cond_request || is_error || have_sense)) {
		if (!have_sense)
			ata_gen_passthru_sense(qc);
		ata_scsi_set_passthru_sense_fields(qc);
		if (is_ck_cond_request)
			set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
	} else if (is_error) {
		if (!have_sense)
			ata_gen_ata_sense(qc);
		ata_scsi_set_sense_information(qc);
	}

	ata_qc_done(qc);
}

/**
 * ata_scsi_translate - Translate then issue SCSI command to ATA device
 * @dev: ATA device to which the command is addressed
 * @cmd: SCSI command to execute
 * @xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 * Our ->queuecommand() function has decided that the SCSI
 * command issued can be directly translated into an ATA
 * command, rather than handled internally.
 *
 * This function sets up an ata_queued_cmd structure for the
 * SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 * The xlat_func argument (actor) returns 0 if ready to execute
 * ATA command, else 1 to finish translation. If 1 is returned
 * then cmd->result (and possibly cmd->sense_buffer) are assumed
 * to be set reflecting an error condition or clean (early)
 * termination.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 0 on success, %SCSI_MLQUEUE_DEVICE_BUSY or %SCSI_MLQUEUE_HOST_BUSY
 * if the command needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	qc = ata_scsi_qc_new(dev, cmd);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_warn(dev, "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	return 0;

early_finish:
	ata_qc_free(qc);
	scsi_done(cmd);
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	scsi_done(cmd);
err_mem:
	return 0;

defer:
	ata_qc_free(qc);
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @actor: Callback hook for desired SCSI command simulator
 *
 * Takes care of the hard work of simulating a SCSI command...
 * Mapping the response buffer, calling the command's handler,
 * and handling the handler's return value.  This return value
 * indicates whether the handler wishes the SCSI command to be
 * completed successfully (0), or not (in which case cmd->result
 * and sense buffer are assumed to be set).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
		unsigned int (*actor)(struct ata_device *dev,
				      struct scsi_cmnd *cmd, u8 *rbuf))
{
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);

	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
	len = actor(dev, cmd, ata_scsi_rbuf);
	if (len) {
		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
		cmd->result = SAM_STAT_GOOD;
		if (scsi_bufflen(cmd) > len)
			scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
	}

	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
}

/**
 * ata_scsiop_inq_std - Simulate standard INQUIRY command
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns standard device identification data associated
 * with non-VPD INQUIRY command output.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
				       struct scsi_cmnd *cmd, u8 *rbuf)
{
	static const u8 versions[] = {
		0x00,
		0x60,	/* SAM-3 (no version claimed) */

		0x03,
		0x20,	/* SBC-2 (no version claimed) */

		0x03,
		0x00	/* SPC-3 (no version claimed) */
	};
	static const u8 versions_zbc[] = {
		0x00,
		0xA0,	/* SAM-5 (no version claimed) */

		0x06,
		0x00,	/* SBC-4 (no version claimed) */

		0x05,
		0xC0,	/* SPC-5 (no version claimed) */

		0x60,
		0x24,	/* ZBC r05 */
	};

	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		95 - 4,
		0,
		0,
		2
	};

	/*
	 * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
	 * device bit (obsolete since ATA-8 ACS) is set.
	 */
	if (ata_id_removable(dev->id))
		hdr[1] |= (1 << 7);

	if (dev->class == ATA_DEV_ZAC) {
		hdr[0] = TYPE_ZBC;
		hdr[2] = 0x7; /* claim SPC-5 version compatibility */
	}

	if (dev->flags & ATA_DFLAG_CDL)
		hdr[2] = 0xd; /* claim SPC-6 version compatibility */

	memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);
	ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);

	/* From SAT, use last 2 words from fw rev unless they are spaces */
	ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
	if (strncmp(&rbuf[32], "    ", 4) == 0)
		ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);

	if (rbuf[32] == 0 || rbuf[32] == ' ')
		memcpy(&rbuf[32], "n/a ", 4);

	if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
		memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
	else
		memcpy(rbuf + 58, versions, sizeof(versions));

	/*
	 * Include all 8 possible version descriptors, even if not all of
	 * them are populated.
	 */
	return 96;
}

/**
 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns list of inquiry VPD pages available.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
				      struct scsi_cmnd *cmd, u8 *rbuf)
{
	int i, num_pages = 0;
	static const u8 pages[] = {
		0x00,	/* page 0x00, this page */
		0x80,	/* page 0x80, unit serial no page */
		0x83,	/* page 0x83, device ident page */
		0x89,	/* page 0x89, ata info page */
		0xb0,	/* page 0xb0, block limits page */
		0xb1,	/* page 0xb1, block device characteristics page */
		0xb2,	/* page 0xb2, thin provisioning page */
		0xb6,	/* page 0xb6, zoned block device characteristics */
		0xb9,	/* page 0xb9, concurrent positioning ranges */
	};

	for (i = 0; i < sizeof(pages); i++) {
		if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
			continue;
		rbuf[num_pages + 4] = pages[i];
		num_pages++;
	}
	rbuf[3] = num_pages;	/* number of supported VPD pages */

	return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns ATA device serial number.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
				      struct scsi_cmnd *cmd, u8 *rbuf)
{
	static const u8 hdr[] = {
		0,
		0x80,			/* this page code */
		0,
		ATA_ID_SERNO_LEN,	/* page len */
	};

	memcpy(rbuf, hdr, sizeof(hdr));
	ata_id_string(dev->id, (unsigned char *) &rbuf[4],
		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);

	return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields two logical unit device identification designators:
 *  - vendor specific ASCII containing the ATA serial number
 *  - SAT defined "t10 vendor id based" containing ASCII vendor
 *    name ("ATA     "), model and serial numbers.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        const int sat_model_serial_desc_len = 68;
        int num;

        rbuf[1] = 0x83;                 /* this page code */
        num = 4;

        /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
        rbuf[num + 0] = 2;
        rbuf[num + 3] = ATA_ID_SERNO_LEN;
        num += 4;
        ata_id_string(dev->id, (unsigned char *) rbuf + num,
                      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
        num += ATA_ID_SERNO_LEN;

        /* SAT defined lu model and serial numbers descriptor */
        /* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
        rbuf[num + 0] = 2;
        rbuf[num + 1] = 1;
        rbuf[num + 3] = sat_model_serial_desc_len;
        num += 4;
        memcpy(rbuf + num, "ATA     ", 8);
        num += 8;
        ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
                      ATA_ID_PROD_LEN);
        num += ATA_ID_PROD_LEN;
        ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
                      ATA_ID_SERNO_LEN);
        num += ATA_ID_SERNO_LEN;

        if (ata_id_has_wwn(dev->id)) {
                /* SAT defined lu world wide name */
                /* piv=0, assoc=lu, code_set=binary, designator=NAA */
                rbuf[num + 0] = 1;
                rbuf[num + 1] = 3;
                rbuf[num + 3] = ATA_ID_WWN_LEN;
                num += 4;
                ata_id_string(dev->id, (unsigned char *) rbuf + num,
                              ATA_ID_WWN, ATA_ID_WWN_LEN);
                num += ATA_ID_WWN_LEN;
        }
        rbuf[3] = num - 4;    /* page len (assume less than 256 bytes) */

        return get_unaligned_be16(&rbuf[2]) + 4;
}
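
/*
 * Note on the designator sizes used above: the 68-byte t10 vendor id based
 * designator is the 8-byte vendor id ("ATA     ") followed by the 40-byte
 * model string (ATA_ID_PROD_LEN) and the 20-byte serial number string
 * (ATA_ID_SERNO_LEN).
 */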

/**
 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields SAT-specified ATA VPD page.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        rbuf[1] = 0x89;                 /* our page code */
        rbuf[2] = (0x238 >> 8);         /* page size fixed at 238h */
        rbuf[3] = (0x238 & 0xff);

        memcpy(&rbuf[8], "linux   ", 8);
        memcpy(&rbuf[16], "libata          ", 16);
        memcpy(&rbuf[32], DRV_VERSION, 4);

        rbuf[36] = 0x34;                /* force D2H Reg FIS (34h) */
        rbuf[37] = (1 << 7);            /* bit 7 indicates Command FIS */
                                        /* TODO: PMP? */

        /* we don't store the ATA device signature, so we fake it */
        rbuf[38] = ATA_DRDY;            /* really, this is Status reg */
        rbuf[40] = 0x1;
        rbuf[48] = 0x1;

        rbuf[56] = ATA_CMD_ID_ATA;

        memcpy(&rbuf[60], &dev->id[0], 512);

        return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Return data for the VPD page B0h (Block Limits).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        u16 min_io_sectors;

        rbuf[1] = 0xb0;
        rbuf[3] = 0x3c;         /* required VPD size with unmap support */

        /*
         * Optimal transfer length granularity.
         *
         * This is always one physical block, but for disks with a smaller
         * logical than physical sector size we need to figure out what the
         * latter is.
         */
        min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
        put_unaligned_be16(min_io_sectors, &rbuf[6]);

        /*
         * Optimal unmap granularity.
         *
         * The ATA spec doesn't even know about a granularity or alignment
         * for the TRIM command. We can leave away most of the unmap related
         * VPD page entries, but we have to specify a granularity to signal
         * that we support some form of unmap - in this case via WRITE SAME
         * with the unmap bit set.
         */
        if (ata_id_has_trim(dev->id)) {
                u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;

                if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
                        max_blocks = 128 << (20 - SECTOR_SHIFT);

                put_unaligned_be64(max_blocks, &rbuf[36]);
                put_unaligned_be32(1, &rbuf[28]);
        }

        return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
 *                     Characteristics
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Return data for the VPD page B1h (Block Device Characteristics).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        int form_factor = ata_id_form_factor(dev->id);
        int media_rotation_rate = ata_id_rotation_rate(dev->id);
        u8 zoned = ata_id_zoned_cap(dev->id);

        rbuf[1] = 0xb1;
        rbuf[3] = 0x3c;
        rbuf[4] = media_rotation_rate >> 8;
        rbuf[5] = media_rotation_rate;
        rbuf[7] = form_factor;
        if (zoned)
                rbuf[8] = (zoned << 4);

        return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
 *                     Provisioning
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Return data for the VPD page B2h (Logical Block Provisioning).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
        rbuf[1] = 0xb2;
        rbuf[3] = 0x4;
        rbuf[5] = 1 << 6;       /* TPWS */

        return get_unaligned_be16(&rbuf[2]) + 4;
}
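
/*
 * Setting only TPWS (byte 5, bit 6) advertises unmapping via WRITE SAME(16)
 * with the UNMAP bit, which is what ata_scsi_write_same_xlat() below
 * translates into DSM TRIM; the TPU bit stays clear since the UNMAP command
 * itself is not translated.
 */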

/**
 * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
 *                     Characteristics
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Return data for the VPD page B6h (Zoned Block Device Characteristics).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        if (!ata_dev_is_zac(dev)) {
                ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                return 0;
        }

        /*
         * zbc-r05 SCSI Zoned Block device characteristics VPD page
         */
        rbuf[1] = 0xb6;
        rbuf[3] = 0x3C;

        /*
         * URSWRZ bit is only meaningful for host-managed ZAC drives
         */
        if (dev->zac_zoned_cap & 1)
                rbuf[4] |= 1;
        put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
        put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
        put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);

        return get_unaligned_be16(&rbuf[2]) + 4;
}

/**
 * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
 *                     Ranges
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Return data for the VPD page B9h (Concurrent Positioning Ranges).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
                                      struct scsi_cmnd *cmd, u8 *rbuf)
{
        struct ata_cpr_log *cpr_log = dev->cpr_log;
        u8 *desc = &rbuf[64];
        int i;

        if (!cpr_log) {
                ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                return 0;
        }

        /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
        rbuf[1] = 0xb9;
        put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);

        for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
                desc[0] = cpr_log->cpr[i].num;
                desc[1] = cpr_log->cpr[i].num_storage_elements;
                put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
                put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
        }

        return get_unaligned_be16(&rbuf[2]) + 4;
}
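
/*
 * Length math for the page above: a 64-byte header plus one 32-byte
 * descriptor per range; the PAGE LENGTH field excludes the first four bytes,
 * so the function returns 64 + nr_cpr * 32 bytes in total.
 */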

/**
 * ata_scsiop_inquiry - Simulate INQUIRY command
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Returns data associated with an INQUIRY command output.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
                                       struct scsi_cmnd *cmd, u8 *rbuf)
{
        const u8 *scsicmd = cmd->cmnd;

        /* is CmdDt set? */
        if (scsicmd[1] & 2) {
                ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                return 0;
        }

        /* Is EVPD clear? */
        if ((scsicmd[1] & 1) == 0)
                return ata_scsiop_inq_std(dev, cmd, rbuf);

        switch (scsicmd[2]) {
        case 0x00:
                return ata_scsiop_inq_00(dev, cmd, rbuf);
        case 0x80:
                return ata_scsiop_inq_80(dev, cmd, rbuf);
        case 0x83:
                return ata_scsiop_inq_83(dev, cmd, rbuf);
        case 0x89:
                return ata_scsiop_inq_89(dev, cmd, rbuf);
        case 0xb0:
                return ata_scsiop_inq_b0(dev, cmd, rbuf);
        case 0xb1:
                return ata_scsiop_inq_b1(dev, cmd, rbuf);
        case 0xb2:
                return ata_scsiop_inq_b2(dev, cmd, rbuf);
        case 0xb6:
                return ata_scsiop_inq_b6(dev, cmd, rbuf);
        case 0xb9:
                return ata_scsiop_inq_b9(dev, cmd, rbuf);
        default:
                ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
                return 0;
        }
}

/**
 * modecpy - Prepare response for MODE SENSE
 * @dest: output buffer
 * @src: data being copied
 * @n: length of mode page
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE page for either current or changeable
 * parameters.
 *
 * LOCKING:
 * None.
 */
static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
{
        if (changeable) {
                memcpy(dest, src, 2);
                memset(dest + 2, 0, n - 2);
        } else {
                memcpy(dest, src, n);
        }
}
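
/*
 * For a "changeable parameters" request (page control 01b) only the page
 * code and page length bytes survive and every parameter byte is zeroed;
 * callers such as ata_msense_caching() then set the few bits that a MODE
 * SELECT is actually allowed to change.
 */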

/**
 * ata_msense_caching - Simulate MODE SENSE caching info page
 * @id: device IDENTIFY data
 * @buf: output buffer
 * @changeable: whether changeable parameters are requested
 *
 * Generate a caching info page, which conditionally indicates
 * write caching to the SCSI layer, depending on device
 * capabilities.
 *
 * LOCKING:
 * None.
 */
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
        modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
        if (changeable) {
                buf[2] |= (1 << 2);     /* ata_mselect_caching() */
        } else {
                buf[2] |= (ata_id_wcache_enabled(id) << 2);     /* write cache enable */
                buf[12] |= (!ata_id_rahead_enabled(id) << 5);   /* disable read ahead */
        }
        return sizeof(def_cache_mpage);
}

/*
 * Simulate MODE SENSE control mode page, sub-page 0.
 */
static unsigned int ata_msense_control_spg0(struct ata_device *dev, u8 *buf,
                                            bool changeable)
{
        modecpy(buf, def_control_mpage,
                sizeof(def_control_mpage), changeable);
        if (changeable) {
                /* ata_mselect_control() */
                buf[2] |= (1 << 2);
        } else {
                bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);

                /* descriptor format sense data */
                buf[2] |= (d_sense << 2);
        }

        return sizeof(def_control_mpage);
}

/*
 * Translate an ATA duration limit in microseconds to a SCSI duration limit
 * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
 * only, take care of overflows.
 */
static inline u16 ata_xlat_cdl_limit(u8 *buf)
{
        u32 limit = get_unaligned_le32(buf);

        return min_t(u32, limit / 10000, 65535);
}
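
/*
 * Worked example: an ATA limit of 550000 us becomes 55 units of 10 ms;
 * anything at or above 655350000 us saturates the 2-byte SCSI field at
 * 65535.
 */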

/*
 * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
 * (command duration limits T2A and T2B mode pages).
 */
static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
                                             u8 spg)
{
        u8 *b, *cdl, *desc;
        u32 policy;
        int i;

        if (!(dev->flags & ATA_DFLAG_CDL) || !dev->cdl)
                return 0;

        cdl = dev->cdl->desc_log_buf;

        /*
         * Fill the subpage. The first four bytes of the T2A/T2B mode pages
         * are a header. The PAGE LENGTH field is the size of the page
         * excluding the header.
         */
        buf[0] = CONTROL_MPAGE;
        buf[1] = spg;
        put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]);
        if (spg == CDL_T2A_SUB_MPAGE) {
                /*
                 * Read descriptors map to the T2A page:
                 * set perf_vs_duration_guideline.
                 */
                buf[7] = (cdl[0] & 0x03) << 4;
                desc = cdl + 64;
        } else {
                /* Write descriptors map to the T2B page */
                desc = cdl + 288;
        }

        /* Fill the T2 page descriptors */
        b = &buf[8];
        policy = get_unaligned_le32(&cdl[0]);
        for (i = 0; i < 7; i++, b += 32, desc += 32) {
                /* t2cdlunits: fixed to 10ms */
                b[0] = 0x0a;

                /* Max inactive time and its policy */
                put_unaligned_be16(ata_xlat_cdl_limit(&desc[8]), &b[2]);
                b[6] = ((policy >> 8) & 0x0f) << 4;

                /* Max active time and its policy */
                put_unaligned_be16(ata_xlat_cdl_limit(&desc[4]), &b[4]);
                b[6] |= (policy >> 4) & 0x0f;

                /* Command duration guideline and its policy */
                put_unaligned_be16(ata_xlat_cdl_limit(&desc[16]), &b[10]);
                b[14] = policy & 0x0f;
        }

        return CDL_T2_SUB_MPAGE_LEN;
}
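
/*
 * Size check for the subpage above: an 8-byte header plus 7 descriptors of
 * 32 bytes each gives the 232 bytes of CDL_T2_SUB_MPAGE_LEN.
 */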

/*
 * Simulate MODE SENSE control mode page, sub-page f2h
 * (ATA feature control mode page).
 */
static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
                                                   u8 *buf)
{
        /* PS=0, SPF=1 */
        buf[0] = CONTROL_MPAGE | (1 << 6);
        buf[1] = ATA_FEATURE_SUB_MPAGE;

        /*
         * The first four bytes of ATA Feature Control mode page are a header.
         * The PAGE LENGTH field is the size of the page excluding the header.
         */
        put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);

        if (dev->flags & ATA_DFLAG_CDL_ENABLED)
                buf[4] = 0x02; /* T2A and T2B pages enabled */
        else
                buf[4] = 0;

        return ATA_FEATURE_SUB_MPAGE_LEN;
}

/**
 * ata_msense_control - Simulate MODE SENSE control mode page
 * @dev: ATA device of interest
 * @buf: output buffer
 * @spg: sub-page code
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE control mode page.
 *
 * LOCKING:
 * None.
 */
static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
                                       u8 spg, bool changeable)
{
        unsigned int n;

        switch (spg) {
        case 0:
                return ata_msense_control_spg0(dev, buf, changeable);
        case CDL_T2A_SUB_MPAGE:
        case CDL_T2B_SUB_MPAGE:
                return ata_msense_control_spgt2(dev, buf, spg);
        case ATA_FEATURE_SUB_MPAGE:
                return ata_msense_control_ata_feature(dev, buf);
        case ALL_SUB_MPAGES:
                n = ata_msense_control_spg0(dev, buf, changeable);
                n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
                n += ata_msense_control_spgt2(dev, buf + n, CDL_T2B_SUB_MPAGE);
                n += ata_msense_control_ata_feature(dev, buf + n);
                return n;
        default:
                return 0;
        }
}

/**
 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
 * @buf: output buffer
 * @changeable: whether changeable parameters are requested
 *
 * Generate a generic MODE SENSE r/w error recovery page.
 *
 * LOCKING:
 * None.
 */
static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
{
        modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage),
                changeable);
        return sizeof(def_rw_recovery_mpage);
}

/**
 * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Simulate MODE SENSE commands. Assume this is invoked for direct
 * access devices (e.g. disks) only. There should be no block
 * descriptor for other device types.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
                                          struct scsi_cmnd *cmd, u8 *rbuf)
{
        u8 *scsicmd = cmd->cmnd, *p = rbuf;
        static const u8 sat_blk_desc[] = {
                0, 0, 0, 0,     /* number of blocks: sat unspecified */
                0,
                0, 0x2, 0x0     /* block length: 512 bytes */
        };
        u8 pg, spg;
        unsigned int ebd, page_control, six_byte;
        u8 dpofua = 0, bp = 0xff;
        u16 fp;

        six_byte = (scsicmd[0] == MODE_SENSE);
        ebd = !(scsicmd[1] & 0x8);      /* dbd bit inverted == ebd */
        /*
         * LLBA bit in msense(10) ignored (compliant)
         */

        page_control = scsicmd[2] >> 6;
        switch (page_control) {
        case 0: /* current */
        case 1: /* changeable */
        case 2: /* defaults */
                break;  /* supported */
        case 3: /* saved */
                goto saving_not_supp;
        default:
                fp = 2;
                bp = 6;
                goto invalid_fld;
        }

        if (six_byte)
                p += 4 + (ebd ? 8 : 0);
        else
                p += 8 + (ebd ? 8 : 0);

        pg = scsicmd[2] & 0x3f;
        spg = scsicmd[3];

        /*
         * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of
         * the control page.
         */
        if (spg) {
                switch (spg) {
                case ALL_SUB_MPAGES:
                        break;
                case CDL_T2A_SUB_MPAGE:
                case CDL_T2B_SUB_MPAGE:
                case ATA_FEATURE_SUB_MPAGE:
                        if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE)
                                break;
                        fallthrough;
                default:
                        fp = 3;
                        goto invalid_fld;
                }
        }

        switch(pg) {
        case RW_RECOVERY_MPAGE:
                p += ata_msense_rw_recovery(p, page_control == 1);
                break;

        case CACHE_MPAGE:
                p += ata_msense_caching(dev->id, p, page_control == 1);
                break;

        case CONTROL_MPAGE:
                p += ata_msense_control(dev, p, spg, page_control == 1);
                break;

        case ALL_MPAGES:
                p += ata_msense_rw_recovery(p, page_control == 1);
                p += ata_msense_caching(dev->id, p, page_control == 1);
                p += ata_msense_control(dev, p, spg, page_control == 1);
                break;

        default:                /* invalid page code */
                fp = 2;
                goto invalid_fld;
        }

        if (dev->flags & ATA_DFLAG_FUA)
                dpofua = 1 << 4;

        if (six_byte) {
                rbuf[0] = p - rbuf - 1;
                rbuf[2] |= dpofua;
                if (ebd) {
                        rbuf[3] = sizeof(sat_blk_desc);
                        memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
                }

                return rbuf[0] + 1;
        }

        put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
        rbuf[3] |= dpofua;
        if (ebd) {
                rbuf[7] = sizeof(sat_blk_desc);
                memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
        }

        return get_unaligned_be16(&rbuf[0]) + 2;

invalid_fld:
        ata_scsi_set_invalid_field(dev, cmd, fp, bp);
        return 0;

saving_not_supp:
        ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
        /* "Saving parameters not supported" */
        return 0;
}
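
/*
 * Reminder on the two header formats used above: the 6-byte command has a
 * 4-byte header whose MODE DATA LENGTH byte excludes itself (hence
 * p - rbuf - 1), while the 10-byte command has an 8-byte header with a
 * 2-byte MODE DATA LENGTH field excluding its own two bytes (hence
 * p - rbuf - 2).
 */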

/**
 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Simulate READ CAPACITY commands.
 *
 * LOCKING:
 * None.
 */
static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
                                        struct scsi_cmnd *cmd, u8 *rbuf)
{
        u8 *scsicmd = cmd->cmnd;
        u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
        u32 sector_size; /* physical sector size in bytes */
        u8 log2_per_phys;
        u16 lowest_aligned;

        sector_size = ata_id_logical_sector_size(dev->id);
        log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
        lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);

        if (scsicmd[0] == READ_CAPACITY) {
                if (last_lba >= 0xffffffffULL)
                        last_lba = 0xffffffff;

                /* sector count, 32-bit */
                rbuf[0] = last_lba >> (8 * 3);
                rbuf[1] = last_lba >> (8 * 2);
                rbuf[2] = last_lba >> (8 * 1);
                rbuf[3] = last_lba;

                /* sector size */
                rbuf[4] = sector_size >> (8 * 3);
                rbuf[5] = sector_size >> (8 * 2);
                rbuf[6] = sector_size >> (8 * 1);
                rbuf[7] = sector_size;

                return 8;
        }

        /*
         * READ CAPACITY 16 command is defined as a service action
         * (SERVICE_ACTION_IN_16 command).
         */
        if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
            (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
                ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                return 0;
        }

        /* sector count, 64-bit */
        rbuf[0] = last_lba >> (8 * 7);
        rbuf[1] = last_lba >> (8 * 6);
        rbuf[2] = last_lba >> (8 * 5);
        rbuf[3] = last_lba >> (8 * 4);
        rbuf[4] = last_lba >> (8 * 3);
        rbuf[5] = last_lba >> (8 * 2);
        rbuf[6] = last_lba >> (8 * 1);
        rbuf[7] = last_lba;

        /* sector size */
        rbuf[ 8] = sector_size >> (8 * 3);
        rbuf[ 9] = sector_size >> (8 * 2);
        rbuf[10] = sector_size >> (8 * 1);
        rbuf[11] = sector_size;

        if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
                rbuf[12] = (1 << 4); /* RC_BASIS */
        rbuf[13] = log2_per_phys;
        rbuf[14] = (lowest_aligned >> 8) & 0x3f;
        rbuf[15] = lowest_aligned;

        if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
                rbuf[14] |= 0x80; /* LBPME */

                if (ata_id_has_zero_after_trim(dev->id) &&
                    dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
                        ata_dev_info(dev, "Enabling discard_zeroes_data\n");
                        rbuf[14] |= 0x40; /* LBPRZ */
                }
        }

        return 16;
}

/**
 * ata_scsiop_report_luns - Simulate REPORT LUNS command
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Simulate REPORT LUNS command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
                                           struct scsi_cmnd *cmd, u8 *rbuf)
{
        rbuf[3] = 8;    /* just one lun, LUN 0, size 8 bytes */

        return 16;
}
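
/*
 * The response above is an 8-byte header (bytes 0-3 hold the LUN list
 * length, 8) followed by a single all-zero 8-byte entry for LUN 0, for 16
 * bytes in total.
 */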

/*
 * ATAPI devices typically report zero for their SCSI version, and sometimes
 * deviate from the spec WRT response data format. If SCSI version is
 * reported as zero like normal, then we make the following fixups:
 *   1) Fake MMC-5 version, to indicate to the Linux scsi midlayer this is a
 *      modern device.
 *   2) Ensure response data format / ATAPI information are always correct.
 */
static void atapi_fixup_inquiry(struct scsi_cmnd *cmd)
{
        u8 buf[4];

        sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
        if (buf[2] == 0) {
                buf[2] = 0x5;
                buf[3] = 0x32;
        }
        sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, 4);
}

static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *cmd = qc->scsicmd;
        unsigned int err_mask = qc->err_mask;

        /* handle completion from EH */
        if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {

                if (!(qc->flags & ATA_QCFLAG_SENSE_VALID))
                        ata_gen_passthru_sense(qc);

                /* SCSI EH automatically locks door if sdev->locked is
                 * set.  Sometimes door lock request continues to
                 * fail, for example, when no media is present.  This
                 * creates a loop - SCSI EH issues door lock which
                 * fails and gets invoked again to acquire sense data
                 * for the failed command.
                 *
                 * If door lock fails, always clear sdev->locked to
                 * avoid this infinite loop.
                 *
                 * This may happen before SCSI scan is complete.  Make
                 * sure qc->dev->sdev isn't NULL before dereferencing.
                 */
                if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
                        qc->dev->sdev->locked = 0;

                qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
                ata_qc_done(qc);
                return;
        }

        /* successful completion path */
        if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
                atapi_fixup_inquiry(cmd);
        cmd->result = SAM_STAT_GOOD;

        ata_qc_done(qc);
}

/**
 * atapi_xlat - Initialize PACKET taskfile
 * @qc: command structure to be initialized
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on failure.
 */
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
        int nodata = (scmd->sc_data_direction == DMA_NONE);
        int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
        unsigned int nbytes;

        memset(qc->cdb, 0, dev->cdb_len);
        memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);

        qc->complete_fn = atapi_qc_complete;

        qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        if (scmd->sc_data_direction == DMA_TO_DEVICE) {
                qc->tf.flags |= ATA_TFLAG_WRITE;
        }

        qc->tf.command = ATA_CMD_PACKET;
        ata_qc_set_pc_nbytes(qc);

        /* check whether ATAPI DMA is safe */
        if (!nodata && !using_pio && atapi_check_dma(qc))
                using_pio = 1;

        /* Some controller variants snoop this value for Packet
         * transfers to do state machine and FIFO management. Thus we
         * want to set it properly, even for DMA where it is
         * effectively meaningless.
         */
        nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);

        /* Most ATAPI devices which honor transfer chunk size don't
         * behave according to the spec when an odd chunk size which
         * matches the transfer length is specified, i.e. when the
         * number of bytes to transfer is 2n+1. According to the spec,
         * what should happen is to indicate that 2n+1 is going to be
         * transferred and transfer 2n+2 bytes where the last byte is
         * padding.
         *
         * In practice, this doesn't happen. ATAPI devices first
         * indicate and transfer 2n bytes and then indicate and
         * transfer 2 bytes where the last byte is padding.
         *
         * This inconsistency confuses several controllers which
         * perform PIO using DMA such as Intel AHCIs and sil3124/32.
         * These controllers use the actual number of transferred bytes
         * to update the DMA pointer, and a transfer of 4n+2 bytes makes
         * those controllers push the DMA pointer by 4n+4 bytes because
         * SATA data FISes are aligned to 4 bytes. This causes data
         * corruption and buffer overrun.
         *
         * Always setting nbytes to an even number solves this problem
         * because then ATAPI devices don't have to split data at 2n
         * boundaries.
         */
        if (nbytes & 0x1)
                nbytes++;

        qc->tf.lbam = (nbytes & 0xFF);
        qc->tf.lbah = (nbytes >> 8);

        if (nodata)
                qc->tf.protocol = ATAPI_PROT_NODATA;
        else if (using_pio)
                qc->tf.protocol = ATAPI_PROT_PIO;
        else {
                /* DMA data xfer */
                qc->tf.protocol = ATAPI_PROT_DMA;
                qc->tf.feature |= ATAPI_PKT_DMA;

                if ((dev->flags & ATA_DFLAG_DMADIR) &&
                    (scmd->sc_data_direction != DMA_TO_DEVICE))
                        /* some SATA bridges need us to indicate data xfer direction */
                        qc->tf.feature |= ATAPI_DMADIR;
        }

        /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
           as ATAPI tape drives don't get this right otherwise */
        return 0;
}
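
/*
 * Example of the rounding above: a 2049-byte request is reported to the
 * device as 2050 bytes (lbam = 0x02, lbah = 0x08), so the device never has
 * to split the transfer at an odd byte boundary.
 */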

static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
{
        /*
         * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
         * or 2 (IDE master + slave case). However, the former case includes
         * libsas hosted devices which are numbered per scsi host, leading
         * to devno potentially being larger than 0 but with each struct
         * ata_device having its own struct ata_port and struct ata_link.
         * To accommodate these, ignore devno and always use device number 0.
         */
        if (likely(!sata_pmp_attached(ap))) {
                int link_max_devices = ata_link_max_devices(&ap->link);

                if (link_max_devices == 1)
                        return &ap->link.device[0];

                if (devno < link_max_devices)
                        return &ap->link.device[devno];

                return NULL;
        }

        /*
         * For PMP-attached devices, the device number corresponds to C
         * (channel) of SCSI [H:C:I:L], indicating the port pmp link
         * for the device.
         */
        if (devno < ap->nr_pmp_links)
                return &ap->pmp_link[devno].device[0];

        return NULL;
}

static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
                                              const struct scsi_device *scsidev)
{
        int devno;

        /* skip commands not addressed to targets we simulate */
        if (!sata_pmp_attached(ap)) {
                if (unlikely(scsidev->channel || scsidev->lun))
                        return NULL;
                devno = scsidev->id;
        } else {
                if (unlikely(scsidev->id || scsidev->lun))
                        return NULL;
                devno = scsidev->channel;
        }

        return ata_find_dev(ap, devno);
}

/**
 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd
 * @ap: ATA port to which the device is attached
 * @scsidev: SCSI device from which we derive the ATA device
 *
 * Given various information provided in struct scsi_cmnd,
 * map that onto an ATA bus, and using that mapping
 * determine which ata_device is associated with the
 * SCSI command to be sent.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Associated ATA device, or %NULL if not found.
 */
struct ata_device *
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
{
        struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);

        if (unlikely(!dev || !ata_dev_enabled(dev)))
                return NULL;

        return dev;
}

/*
 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
 * @byte1: Byte 1 from pass-thru CDB.
 *
 * RETURNS:
 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
 */
static u8
ata_scsi_map_proto(u8 byte1)
{
        switch((byte1 & 0x1e) >> 1) {
        case 3:         /* Non-data */
                return ATA_PROT_NODATA;

        case 6:         /* DMA */
        case 10:        /* UDMA Data-in */
        case 11:        /* UDMA Data-Out */
                return ATA_PROT_DMA;

        case 4:         /* PIO Data-in */
        case 5:         /* PIO Data-out */
                return ATA_PROT_PIO;

        case 12:        /* FPDMA */
                return ATA_PROT_NCQ;

        case 0:         /* Hard Reset */
        case 1:         /* SRST */
        case 8:         /* Device Diagnostic */
        case 9:         /* Device Reset */
        case 7:         /* DMA Queued */
        case 15:        /* Return Response Info */
        default:        /* Reserved */
                break;
        }

        return ATA_PROT_UNKNOWN;
}

/**
 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
 * @qc: command structure to be initialized
 *
 * Handles either 12, 16, or 32-byte versions of the CDB.
 *
 * RETURNS:
 * Zero on success, non-zero on failure.
 */
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &(qc->tf);
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
        const u8 *cdb = scmd->cmnd;
        u16 fp;
        u16 cdb_offset = 0;

        /* 7Fh variable length cmd means an ATA pass-thru(32) */
        if (cdb[0] == VARIABLE_LENGTH_CMD)
                cdb_offset = 9;

        tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]);
        if (tf->protocol == ATA_PROT_UNKNOWN) {
                fp = 1;
                goto invalid_fld;
        }

        if ((cdb[2 + cdb_offset] & 0x3) == 0) {
                /*
                 * When T_LENGTH is zero (No data is transferred), dir should
                 * be DMA_NONE.
                 */
                if (scmd->sc_data_direction != DMA_NONE) {
                        fp = 2 + cdb_offset;
                        goto invalid_fld;
                }

                if (ata_is_ncq(tf->protocol))
                        tf->protocol = ATA_PROT_NCQ_NODATA;
        }

        /* enable LBA */
        tf->flags |= ATA_TFLAG_LBA;

        /*
         * 12 and 16 byte CDBs use different offsets to
         * provide the various register values.
         */
        switch (cdb[0]) {
        case ATA_16:
                /*
                 * 16-byte CDB - may contain extended commands.
                 *
                 * If that is the case, copy the upper byte register values.
                 */
                if (cdb[1] & 0x01) {
                        tf->hob_feature = cdb[3];
                        tf->hob_nsect = cdb[5];
                        tf->hob_lbal = cdb[7];
                        tf->hob_lbam = cdb[9];
                        tf->hob_lbah = cdb[11];
                        tf->flags |= ATA_TFLAG_LBA48;
                } else
                        tf->flags &= ~ATA_TFLAG_LBA48;

                /*
                 * Always copy low byte, device and command registers.
                 */
                tf->feature = cdb[4];
                tf->nsect = cdb[6];
                tf->lbal = cdb[8];
                tf->lbam = cdb[10];
                tf->lbah = cdb[12];
                tf->device = cdb[13];
                tf->command = cdb[14];
                break;
        case ATA_12:
                /*
                 * 12-byte CDB - incapable of extended commands.
                 */
                tf->flags &= ~ATA_TFLAG_LBA48;

                tf->feature = cdb[3];
                tf->nsect = cdb[4];
                tf->lbal = cdb[5];
                tf->lbam = cdb[6];
                tf->lbah = cdb[7];
                tf->device = cdb[8];
                tf->command = cdb[9];
                break;
        default:
                /*
                 * 32-byte CDB - may contain extended command fields.
                 *
                 * If that is the case, copy the upper byte register values.
                 */
                if (cdb[10] & 0x01) {
                        tf->hob_feature = cdb[20];
                        tf->hob_nsect = cdb[22];
                        tf->hob_lbal = cdb[16];
                        tf->hob_lbam = cdb[15];
                        tf->hob_lbah = cdb[14];
                        tf->flags |= ATA_TFLAG_LBA48;
                } else
                        tf->flags &= ~ATA_TFLAG_LBA48;

                tf->feature = cdb[21];
                tf->nsect = cdb[23];
                tf->lbal = cdb[19];
                tf->lbam = cdb[18];
                tf->lbah = cdb[17];
                tf->device = cdb[24];
                tf->command = cdb[25];
                tf->auxiliary = get_unaligned_be32(&cdb[28]);
                break;
        }
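
        /*
         * Illustrative ATA PASS-THROUGH (16) CDB for the parsing above,
         * using SAT-defined field values (not taken from this file): for
         * READ DMA EXT of one sector at LBA 0, cdb[0] = 0x85 (ATA_16),
         * cdb[1] = 0x0d (protocol 6 = DMA, extend bit set), cdb[6] = 1
         * (sector count), cdb[13] = 0x40 (LBA mode) and cdb[14] = 0x25
         * (the ATA command), which lands in tf->command.
         */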

        /* For NCQ commands copy the tag value */
        if (ata_is_ncq(tf->protocol))
                tf->nsect = qc->hw_tag << 3;

        /* enforce correct master/slave bit */
        tf->device = dev->devno ?
                tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

        switch (tf->command) {
        /* READ/WRITE LONG use a non-standard sect_size */
        case ATA_CMD_READ_LONG:
        case ATA_CMD_READ_LONG_ONCE:
        case ATA_CMD_WRITE_LONG:
        case ATA_CMD_WRITE_LONG_ONCE:
                if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
                        fp = 1;
                        goto invalid_fld;
                }
                qc->sect_size = scsi_bufflen(scmd);
                break;

        /* commands using reported Logical Block size (e.g. 512 or 4K) */
        case ATA_CMD_CFA_WRITE_NE:
        case ATA_CMD_CFA_TRANS_SECT:
        case ATA_CMD_CFA_WRITE_MULT_NE:
        /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_READ_QUEUED:
        /* XXX: case ATA_CMD_READ_QUEUED_EXT: */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_READ_MULTI:
        case ATA_CMD_READ_MULTI_EXT:
        case ATA_CMD_PIO_READ:
        case ATA_CMD_PIO_READ_EXT:
        case ATA_CMD_READ_STREAM_DMA_EXT:
        case ATA_CMD_READ_STREAM_EXT:
        case ATA_CMD_VERIFY:
        case ATA_CMD_VERIFY_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
        case ATA_CMD_WRITE_QUEUED:
        case ATA_CMD_WRITE_QUEUED_FUA_EXT:
        case ATA_CMD_FPDMA_WRITE:
        case ATA_CMD_WRITE_MULTI:
        case ATA_CMD_WRITE_MULTI_EXT:
        case ATA_CMD_WRITE_MULTI_FUA_EXT:
        case ATA_CMD_PIO_WRITE:
        case ATA_CMD_PIO_WRITE_EXT:
        case ATA_CMD_WRITE_STREAM_DMA_EXT:
        case ATA_CMD_WRITE_STREAM_EXT:
                qc->sect_size = scmd->device->sector_size;
                break;

        /* Everything else uses 512 byte "sectors" */
        default:
                qc->sect_size = ATA_SECT_SIZE;
        }

        /*
         * Set flags so that all registers will be written, pass on
         * write indication (used for PIO/DMA setup), result TF is
         * copied back and we don't whine too much about its failure.
         */
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        if (scmd->sc_data_direction == DMA_TO_DEVICE)
                tf->flags |= ATA_TFLAG_WRITE;

        qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;

        /*
         * Set transfer length.
         *
         * TODO: find out if we need to do more here to
         *       cover scatter/gather case.
         */
        ata_qc_set_pc_nbytes(qc);

        /* We may not issue DMA commands if no DMA mode is set */
        if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
                fp = 1;
                goto invalid_fld;
        }

        /* We may not issue NCQ commands to devices not supporting NCQ */
        if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
                fp = 1;
                goto invalid_fld;
        }

        /* sanity check for pio multi commands */
        if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
                fp = 1;
                goto invalid_fld;
        }

        if (is_multi_taskfile(tf)) {
                unsigned int multi_count = 1 << (cdb[1] >> 5);

                /* compare the passed through multi_count
                 * with the cached multi_count of libata
                 */
                if (multi_count != dev->multi_count)
                        ata_dev_warn(dev, "invalid multi_count %u ignored\n",
                                     multi_count);
        }

        /*
         * Filter SET_FEATURES - XFER MODE command -- otherwise,
         * SET_FEATURES - XFER MODE must be preceded/succeeded
         * by an update to hardware-specific registers for each
         * controller (i.e. the reason for ->set_piomode(),
         * ->set_dmamode(), and ->post_set_mode() hooks).
         */
        if (tf->command == ATA_CMD_SET_FEATURES &&
            tf->feature == SETFEATURES_XFER) {
                fp = (cdb[0] == ATA_16) ? 4 : 3;
                goto invalid_fld;
        }

        /*
         * Filter TPM commands by default. These provide an
         * essentially uncontrolled encrypted "back door" between
         * applications and the disk. Set libata.allow_tpm=1 if you
         * have a real reason for wanting to use them. This ensures
         * that installed software cannot easily mess stuff up without
         * user intent. DVR type users will probably ship with this enabled
         * for movie content management.
         *
         * Note that for ATA8 we can issue a DCS change and DCS freeze lock
         * for this and should do in future but that it is not sufficient as
         * DCS is an optional feature set. Thus we also do the software filter
         * so that we comply with the TC consortium stated goal that the user
         * can turn off TC features of their system.
         */
        if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
                fp = (cdb[0] == ATA_16) ? 14 : 9;
                goto invalid_fld;
        }

        return 0;

invalid_fld:
        ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
        return 1;
}

/**
 * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
 * @cmd: SCSI command being translated
 * @trmax: Maximum number of entries that will fit in sector_size bytes.
 * @sector: Starting sector
 * @count: Total Range of request in logical sectors
 *
 * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
 * descriptor.
 *
 * Up to 64 entries of the format:
 *   63:48 Range Length
 *   47:0  LBA
 *
 * Range Length of 0 is ignored.
 * LBAs should be in sorted order and not overlap.
 *
 * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
 *
 * Return: Number of bytes copied into sglist.
 */
static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
                                        u64 sector, u32 count)
{
        struct scsi_device *sdp = cmd->device;
        size_t len = sdp->sector_size;
        size_t r;
        __le64 *buf;
        u32 i = 0;
        unsigned long flags;

        WARN_ON(len > ATA_SCSI_RBUF_SIZE);

        if (len > ATA_SCSI_RBUF_SIZE)
                len = ATA_SCSI_RBUF_SIZE;

        spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
        buf = ((void *)ata_scsi_rbuf);
        memset(buf, 0, len);
        while (i < trmax) {
                u64 entry = sector |
                        ((u64)(count > 0xffff ? 0xffff : count) << 48);
                buf[i++] = __cpu_to_le64(entry);
                if (count <= 0xffff)
                        break;
                count -= 0xffff;
                sector += 0xffff;
        }
        r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
        spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);

        return r;
}
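
/*
 * Worked example for the loop above: count = 200000 sectors produces three
 * 0xffff-sector entries plus a final 3395-sector entry (4 * 8 bytes used),
 * and the remainder of the sector-sized payload stays zeroed, which the
 * descriptor format defines as "ignore".
 */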

/**
 * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
 * @qc: Command to be translated
 *
 * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
 * an SCT Write Same command.
 * Based on whether the WRITE SAME command has the UNMAP flag set:
 *
 * - When set translate to DSM TRIM
 * - When clear translate to SCT Write Same
 */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct scsi_device *sdp = scmd->device;
        size_t len = sdp->sector_size;
        struct ata_device *dev = qc->dev;
        const u8 *cdb = scmd->cmnd;
        u64 block;
        u32 n_block;
        const u32 trmax = len >> 3;
        u32 size;
        u16 fp;
        u8 bp = 0xff;
        u8 unmap = cdb[1] & 0x8;

        /* we may not issue DMA commands if no DMA mode is set */
        if (unlikely(!ata_dma_enabled(dev)))
                goto invalid_opcode;

        /*
         * We only allow sending this command through the block layer,
         * as it modifies the DATA OUT buffer, which would corrupt user
         * memory for SG_IO commands.
         */
        if (unlikely(blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))))
                goto invalid_opcode;

        if (unlikely(scmd->cmd_len < 16)) {
                fp = 15;
                goto invalid_fld;
        }
        scsi_16_lba_len(cdb, &block, &n_block);

        if (!unmap || (dev->quirks & ATA_QUIRK_NOTRIM) ||
            !ata_id_has_trim(dev->id)) {
                fp = 1;
                bp = 3;
                goto invalid_fld;
        }
        /* If the request is too large the cmd is invalid */
        if (n_block > 0xffff * trmax) {
                fp = 2;
                goto invalid_fld;
        }

        /*
         * WRITE SAME always has a sector sized buffer as payload, this
         * should never be a multiple entry S/G list.
         */
        if (!scsi_sg_count(scmd))
                goto invalid_param_len;

        /*
         * size must match sector size in bytes
         * For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count)
         * is defined as number of 512 byte blocks to be transferred.
         */

        size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
        if (size != len)
                goto invalid_param_len;

        if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
                /* Newer devices support queued TRIM commands */
                tf->protocol = ATA_PROT_NCQ;
                tf->command = ATA_CMD_FPDMA_SEND;
                tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
                tf->nsect = qc->hw_tag << 3;
                tf->hob_feature = (size / 512) >> 8;
                tf->feature = size / 512;

                tf->auxiliary = 1;
        } else {
                tf->protocol = ATA_PROT_DMA;
                tf->hob_feature = 0;
                tf->feature = ATA_DSM_TRIM;
                tf->hob_nsect = (size / 512) >> 8;
                tf->nsect = size / 512;
                tf->command = ATA_CMD_DSM;
        }

        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
                     ATA_TFLAG_WRITE;

        ata_qc_set_pc_nbytes(qc);

        return 0;

invalid_fld:
        ata_scsi_set_invalid_field(dev, scmd, fp, bp);
        return 1;
invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
        return 1;
invalid_opcode:
        /* "Invalid command operation code" */
        ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
        return 1;
}

/**
 * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
 * @dev: Target device.
 * @cmd: SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 * Yields a subset to satisfy scsi_report_opcode()
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
                                        struct scsi_cmnd *cmd, u8 *rbuf)
{
        u8 *cdb = cmd->cmnd;
        u8 supported = 0, cdlp = 0, rwcdlp = 0;

        if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
                ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                return 0;
        }

        if (cdb[2] != 1 && cdb[2] != 3) {
                ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
                ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
                return 0;
        }

        switch (cdb[3]) {
        case INQUIRY:
        case MODE_SENSE:
        case MODE_SENSE_10:
        case READ_CAPACITY:
        case SERVICE_ACTION_IN_16:
        case REPORT_LUNS:
        case REQUEST_SENSE:
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
        case REZERO_UNIT:
        case SEEK_6:
        case SEEK_10:
        case TEST_UNIT_READY:
        case SEND_DIAGNOSTIC:
        case MAINTENANCE_IN:
        case READ_6:
        case READ_10:
        case WRITE_6:
        case WRITE_10:
        case ATA_12:
        case ATA_16:
        case VERIFY:
        case VERIFY_16:
        case MODE_SELECT:
        case MODE_SELECT_10:
        case START_STOP:
                supported = 3;
                break;
        case READ_16:
                supported = 3;
                if (dev->flags & ATA_DFLAG_CDL) {
                        /*
                         * CDL read descriptors map to the T2A page, that is,
                         * rwcdlp = 0x01 and cdlp = 0x01
                         */
                        rwcdlp = 0x01;
                        cdlp = 0x01 << 3;
                }
                break;
        case WRITE_16:
                supported = 3;
                if (dev->flags & ATA_DFLAG_CDL) {
                        /*
                         * CDL write descriptors map to the T2B page, that is,
                         * rwcdlp = 0x01 and cdlp = 0x02
                         */
                        rwcdlp = 0x01;
                        cdlp = 0x02 << 3;
                }
                break;
        case ZBC_IN:
        case ZBC_OUT:
                if (ata_id_zoned_cap(dev->id) ||
                    dev->class == ATA_DEV_ZAC)
                        supported = 3;
                break;
        case SECURITY_PROTOCOL_IN:
        case SECURITY_PROTOCOL_OUT:
                if (dev->flags & ATA_DFLAG_TRUSTED)
                        supported = 3;
                break;
        default:
                break;
        }

        /* One command format */
        rbuf[0] = rwcdlp;
        rbuf[1] = cdlp | supported;

        return 4;
}
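
/*
 * In the SUPPORT field written above, 3 (011b) means "supported in
 * conformance with a SCSI standard"; rwcdlp/cdlp mirror how the CDL
 * descriptors map onto the T2A/T2B mode pages so that the SCSI layer can
 * discover command duration limits via REPORT SUPPORTED OPERATION CODES.
 */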

/**
 * ata_scsi_report_zones_complete - convert ATA output
 * @qc: command structure returning the data
 *
 * Convert T-13 little-endian field representation into
 * T-10 big-endian field representation.
 * What a mess.
 */
static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct sg_mapping_iter miter;
        unsigned long flags;
        unsigned int bytes = 0;

        sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);

        local_irq_save(flags);
        while (sg_miter_next(&miter)) {
                unsigned int offset = 0;

                if (bytes == 0) {
                        char *hdr;
                        u32 list_length;
                        u64 max_lba, opt_lba;
                        u16 same;

                        /* Swizzle header */
                        hdr = miter.addr;
                        list_length = get_unaligned_le32(&hdr[0]);
                        same = get_unaligned_le16(&hdr[4]);
                        max_lba = get_unaligned_le64(&hdr[8]);
                        opt_lba = get_unaligned_le64(&hdr[16]);
                        put_unaligned_be32(list_length, &hdr[0]);
                        hdr[4] = same & 0xf;
                        put_unaligned_be64(max_lba, &hdr[8]);
                        put_unaligned_be64(opt_lba, &hdr[16]);
                        offset += 64;
                        bytes += 64;
                }
                while (offset < miter.length) {
                        char *rec;
                        u8 cond, type, non_seq, reset;
                        u64 size, start, wp;

                        /* Swizzle zone descriptor */
                        rec = miter.addr + offset;
                        type = rec[0] & 0xf;
                        cond = (rec[1] >> 4) & 0xf;
                        non_seq = (rec[1] & 2);
                        reset = (rec[1] & 1);
                        size = get_unaligned_le64(&rec[8]);
                        start = get_unaligned_le64(&rec[16]);
                        wp = get_unaligned_le64(&rec[24]);
                        rec[0] = type;
                        rec[1] = (cond << 4) | non_seq | reset;
                        put_unaligned_be64(size, &rec[8]);
                        put_unaligned_be64(start, &rec[16]);
                        put_unaligned_be64(wp, &rec[24]);
                        WARN_ON(offset + 64 > miter.length);
                        offset += 64;
                        bytes += 64;
                }
        }
        sg_miter_stop(&miter);
        local_irq_restore(flags);

        ata_scsi_qc_complete(qc);
}

static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct scsi_cmnd *scmd = qc->scsicmd;
        const u8 *cdb = scmd->cmnd;
        u16 sect, fp = (u16)-1;
        u8 sa, options, bp = 0xff;
        u64 block;
        u32 n_block;

        if (unlikely(scmd->cmd_len < 16)) {
                ata_dev_warn(qc->dev, "invalid cdb length %d\n",
                             scmd->cmd_len);
                fp = 15;
                goto invalid_fld;
        }
        scsi_16_lba_len(cdb, &block, &n_block);
        if (n_block != scsi_bufflen(scmd)) {
                ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
                             n_block, scsi_bufflen(scmd));
                goto invalid_param_len;
        }
        sa = cdb[1] & 0x1f;
        if (sa != ZI_REPORT_ZONES) {
                ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
                fp = 1;
                goto invalid_fld;
        }
        /*
         * ZAC allows only for transfers in 512 byte blocks,
         * and uses a 16 bit value for the transfer count.
         */
        if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
                ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
                goto invalid_param_len;
        }
        sect = n_block / 512;
        options = cdb[14] & 0xbf;

        if (ata_ncq_enabled(qc->dev) &&
            ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
                tf->protocol = ATA_PROT_NCQ;
                tf->command = ATA_CMD_FPDMA_RECV;
                tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
                tf->nsect = qc->hw_tag << 3;
                tf->feature = sect & 0xff;
                tf->hob_feature = (sect >> 8) & 0xff;
                tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
        } else {
                tf->command = ATA_CMD_ZAC_MGMT_IN;
                tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
                tf->protocol = ATA_PROT_DMA;
                tf->hob_feature = options;
                tf->hob_nsect = (sect >> 8) & 0xff;
                tf->nsect = sect & 0xff;
        }
        tf->device = ATA_LBA;
        tf->lbah = (block >> 16) & 0xff;
        tf->lbam = (block >> 8) & 0xff;
        tf->lbal = block & 0xff;
        tf->hob_lbah = (block >> 40) & 0xff;
        tf->hob_lbam = (block >> 32) & 0xff;
        tf->hob_lbal = (block >> 24) & 0xff;

        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
        qc->flags |= ATA_QCFLAG_RESULT_TF;

        ata_qc_set_pc_nbytes(qc);

        qc->complete_fn = ata_scsi_report_zones_complete;

        return 0;

invalid_fld:
        ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
        return 1;

invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
        return 1;
}

static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
        const u8 *cdb = scmd->cmnd;
        u8 all, sa;
        u64 block;
        u32 n_block;
        u16 fp = (u16)-1;

        if (unlikely(scmd->cmd_len < 16)) {
                fp = 15;
                goto invalid_fld;
        }

        sa = cdb[1] & 0x1f;
        if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
            (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
                fp = 1;
                goto invalid_fld;
        }

        scsi_16_lba_len(cdb, &block, &n_block);
        if (n_block) {
                /*
                 * ZAC MANAGEMENT OUT doesn't define any length
                 */
                goto invalid_param_len;
        }

        all = cdb[14] & 0x1;
        if (all) {
                /*
                 * Ignore the block address (zone ID) as defined by ZBC.
                 */
                block = 0;
        } else if (block >= dev->n_sectors) {
                /*
                 * Block must be a valid zone ID (a zone start LBA).
                 */
                fp = 2;
                goto invalid_fld;
        }

        if (ata_ncq_enabled(qc->dev) &&
            ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
                tf->protocol = ATA_PROT_NCQ_NODATA;
                tf->command = ATA_CMD_NCQ_NON_DATA;
                tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
                tf->nsect = qc->hw_tag << 3;
                tf->auxiliary = sa | ((u16)all << 8);
        } else {
                tf->protocol = ATA_PROT_NODATA;
                tf->command = ATA_CMD_ZAC_MGMT_OUT;
                tf->feature = sa;
                tf->hob_feature = all;
        }
        tf->lbah = (block >> 16) & 0xff;
        tf->lbam = (block >> 8) & 0xff;
        tf->lbal = block & 0xff;
        tf->hob_lbah = (block >> 40) & 0xff;
        tf->hob_lbam = (block >> 32) & 0xff;
        tf->hob_lbal = (block >> 24) & 0xff;
        tf->device = ATA_LBA;
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;

        return 0;

invalid_fld:
        ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
        return 1;
invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
        return 1;
}
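
/*
 * Note on the NCQ NON-DATA encoding above: the zone management action and
 * the ALL bit travel in the auxiliary field (sa in bits 7:0, ALL in bit 8),
 * whereas the legacy ZAC MANAGEMENT OUT variant passes them in the feature
 * and hob_feature registers instead.
 */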

/**
 * ata_mselect_caching - Simulate MODE SELECT for caching info page
 * @qc: Storage for translated ATA taskfile
 * @buf: input buffer
 * @len: number of valid bytes in the input buffer
 * @fp: out parameter for the failed field on error
 *
 * Prepare a taskfile to modify caching information for the device.
 *
 * LOCKING:
 * None.
 */
static int ata_mselect_caching(struct ata_queued_cmd *qc,
                               const u8 *buf, int len, u16 *fp)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;
        u8 mpage[CACHE_MPAGE_LEN];
        u8 wce;
        int i;

        /*
         * The first two bytes of def_cache_mpage are a header, so offsets
         * in mpage are off by 2 compared to buf. Same for len.
         */

        if (len != CACHE_MPAGE_LEN - 2) {
                *fp = min(len, CACHE_MPAGE_LEN - 2);
                return -EINVAL;
        }

        wce = buf[0] & (1 << 2);

        /*
         * Check that read-only bits are not modified.
         */
        ata_msense_caching(dev->id, mpage, false);
        for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
                if (i == 0)
                        continue;
                if (mpage[i + 2] != buf[i]) {
                        *fp = i;
                        return -EINVAL;
                }
        }

        tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf->protocol = ATA_PROT_NODATA;
        tf->nsect = 0;
        tf->command = ATA_CMD_SET_FEATURES;
        tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF;
        return 0;
}

/*
 * Simulate MODE SELECT control mode page, sub-page 0.
 */
static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
                                    const u8 *buf, int len, u16 *fp)
{
        struct ata_device *dev = qc->dev;
        u8 mpage[CONTROL_MPAGE_LEN];
        u8 d_sense;
        int i;

        /*
         * The first two bytes of def_control_mpage are a header, so offsets
         * in mpage are off by 2 compared to buf. Same for len.
         */

        if (len != CONTROL_MPAGE_LEN - 2) {
                *fp = min(len, CONTROL_MPAGE_LEN - 2);
                return -EINVAL;
        }

        d_sense = buf[0] & (1 << 2);

        /*
         * Check that read-only bits are not modified.
         */
        ata_msense_control_spg0(dev, mpage, false);
        for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
                if (i == 0)
                        continue;
                if (mpage[2 + i] != buf[i]) {
                        *fp = i;
                        return -EINVAL;
                }
        }
        if (d_sense & (1 << 2))
                dev->flags |= ATA_DFLAG_D_SENSE;
        else
                dev->flags &= ~ATA_DFLAG_D_SENSE;
        return 0;
}

/*
 * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
 * page) into a SET FEATURES command.
 */
static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
					   const u8 *buf, int len, u16 *fp)
{
	struct ata_device *dev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;
	u8 cdl_action;

	/*
	 * The first four bytes of the ATA feature control mode page are a
	 * header, so offsets in mpage are off by 4 compared to buf.  Same
	 * for len.
	 */
	if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) {
		*fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4);
		return -EINVAL;
	}

	/* Check cdl_ctrl */
	switch (buf[0] & 0x03) {
	case 0:
		/* Disable CDL */
		ata_dev_dbg(dev, "Disabling CDL\n");
		cdl_action = 0;
		dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
		break;
	case 0x02:
		/*
		 * Enable CDL.  Since CDL is mutually exclusive with NCQ
		 * priority, allow this only if NCQ priority is disabled.
		 */
		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
			ata_dev_err(dev,
				"NCQ priority must be disabled to enable CDL\n");
			return -EINVAL;
		}
		ata_dev_dbg(dev, "Enabling CDL\n");
		cdl_action = 1;
		dev->flags |= ATA_DFLAG_CDL_ENABLED;
		break;
	default:
		*fp = 0;
		return -EINVAL;
	}

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	tf->command = ATA_CMD_SET_FEATURES;
	tf->feature = SETFEATURES_CDL;
	tf->nsect = cdl_action;

	return 1;
}
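
/*
 * Illustrative mapping (sketch, not driver code): cdl_ctrl is bits 1:0 of
 * the first byte past the 4-byte sub-page header.
 *
 *	cdl_ctrl == 0x0 -> SET FEATURES, feature SETFEATURES_CDL, count 0
 *			   (CDL disabled)
 *	cdl_ctrl == 0x2 -> SET FEATURES, feature SETFEATURES_CDL, count 1
 *			   (CDL enabled; rejected while NCQ priority is on)
 *	anything else   -> -EINVAL, failed field 0
 */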

/**
 * ata_mselect_control - Simulate MODE SELECT for control page
 * @qc: Storage for translated ATA taskfile
 * @spg: target sub-page of the control page
 * @buf: input buffer
 * @len: number of valid bytes in the input buffer
 * @fp: out parameter for the failed field on error
 *
 * Prepare a taskfile to modify the control page of the device.
 *
 * LOCKING:
 * None.
 */
static int ata_mselect_control(struct ata_queued_cmd *qc, u8 spg,
			       const u8 *buf, int len, u16 *fp)
{
	switch (spg) {
	case 0:
		return ata_mselect_control_spg0(qc, buf, len, fp);
	case ATA_FEATURE_SUB_MPAGE:
		return ata_mselect_control_ata_feature(qc, buf, len, fp);
	default:
		return -EINVAL;
	}
}

/**
 * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
 * @qc: Storage for translated ATA taskfile
 *
 * Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
 * Assume this is invoked for direct access devices (e.g. disks) only.
 * There should be no block descriptor for other device types.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	u8 pg, spg;
	unsigned six_byte, pg_len, hdr_len, bd_len;
	int len, ret;
	u16 fp = (u16)-1;
	u8 bp = 0xff;
	u8 buffer[64];
	const u8 *p = buffer;

	six_byte = (cdb[0] == MODE_SELECT);
	if (six_byte) {
		if (scmd->cmd_len < 5) {
			fp = 4;
			goto invalid_fld;
		}

		len = cdb[4];
		hdr_len = 4;
	} else {
		if (scmd->cmd_len < 9) {
			fp = 8;
			goto invalid_fld;
		}

		len = get_unaligned_be16(&cdb[7]);
		hdr_len = 8;
	}

	/* We only support PF=1, SP=0.  */
	if ((cdb[1] & 0x11) != 0x10) {
		fp = 1;
		bp = (cdb[1] & 0x01) ? 1 : 5;
		goto invalid_fld;
	}

	/* Test early for possible overrun.  */
	if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
		goto invalid_param_len;

	/* Move past header and block descriptors.  */
	if (len < hdr_len)
		goto invalid_param_len;

	if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
			       buffer, sizeof(buffer)))
		goto invalid_param_len;

	if (six_byte)
		bd_len = p[3];
	else
		bd_len = get_unaligned_be16(&p[6]);

	len -= hdr_len;
	p += hdr_len;
	if (len < bd_len)
		goto invalid_param_len;
	if (bd_len != 0 && bd_len != 8) {
		fp = (six_byte) ? 3 : 6;
		fp += bd_len + hdr_len;
		goto invalid_param;
	}

	len -= bd_len;
	p += bd_len;
	if (len == 0)
		goto skip;

	/* Parse both possible formats for the mode page headers.  */
	pg = p[0] & 0x3f;
	if (p[0] & 0x40) {
		if (len < 4)
			goto invalid_param_len;

		spg = p[1];
		pg_len = get_unaligned_be16(&p[2]);
		p += 4;
		len -= 4;
	} else {
		if (len < 2)
			goto invalid_param_len;

		spg = 0;
		pg_len = p[1];
		p += 2;
		len -= 2;
	}

	/*
	 * Supported subpages: all subpages and ATA feature sub-page f2h of
	 * the control page.
	 */
	if (spg) {
		switch (spg) {
		case ALL_SUB_MPAGES:
			/* All subpages is not supported for the control page */
			if (pg == CONTROL_MPAGE) {
				fp = (p[0] & 0x40) ? 1 : 0;
				fp += hdr_len + bd_len;
				goto invalid_param;
			}
			break;
		case ATA_FEATURE_SUB_MPAGE:
			if (qc->dev->flags & ATA_DFLAG_CDL &&
			    pg == CONTROL_MPAGE)
				break;
			fallthrough;
		default:
			fp = (p[0] & 0x40) ? 1 : 0;
			fp += hdr_len + bd_len;
			goto invalid_param;
		}
	}
	if (pg_len > len)
		goto invalid_param_len;

	switch (pg) {
	case CACHE_MPAGE:
		if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
			fp += hdr_len + bd_len;
			goto invalid_param;
		}
		break;
	case CONTROL_MPAGE:
		ret = ata_mselect_control(qc, spg, p, pg_len, &fp);
		if (ret < 0) {
			fp += hdr_len + bd_len;
			goto invalid_param;
		}
		if (!ret)
			goto skip; /* No ATA command to send */
		break;
	default:
		/* Invalid page code */
		fp = bd_len + hdr_len;
		goto invalid_param;
	}

	/*
	 * Only one page has changeable data, so we only support setting one
	 * page at a time.
	 */
	if (len > pg_len)
		goto invalid_param;

	return 0;

invalid_fld:
	ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
	return 1;

invalid_param:
	ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
	return 1;

invalid_param_len:
	/* "Parameter list length error" */
	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
	return 1;

skip:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
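
/*
 * Worked example (illustrative, not driver code): a MODE SELECT(10) that
 * enables write caching.  CDB = { 55h, 10h, 0, 0, 0, 0, 0, 00h, 1ch, 0 }:
 * opcode 55h, PF=1/SP=0 in byte 1, and a parameter list of 8-byte mode
 * header + 20-byte caching page = 28 (1ch) bytes with bd_len = 0.  The
 * code above then dispatches the page to ata_mselect_caching(), which
 * emits SET FEATURES / SETFEATURES_WC_ON.
 */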

static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma)
{
	if (len == 0)
		return ATA_CMD_TRUSTED_NONDATA;
	else if (send)
		return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND;
	else
		return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV;
}
4153
4154
static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc)
4155
{
4156
struct scsi_cmnd *scmd = qc->scsicmd;
4157
const u8 *cdb = scmd->cmnd;
4158
struct ata_taskfile *tf = &qc->tf;
4159
u8 secp = cdb[1];
4160
bool send = (cdb[0] == SECURITY_PROTOCOL_OUT);
4161
u16 spsp = get_unaligned_be16(&cdb[2]);
4162
u32 len = get_unaligned_be32(&cdb[6]);
4163
bool dma = !(qc->dev->flags & ATA_DFLAG_PIO);
4164
4165
/*
4166
* We don't support the ATA "security" protocol.
4167
*/
4168
if (secp == 0xef) {
4169
ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0);
4170
return 1;
4171
}
4172
4173
if (cdb[4] & 7) { /* INC_512 */
4174
if (len > 0xffff) {
4175
ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
4176
return 1;
4177
}
4178
} else {
4179
if (len > 0x01fffe00) {
4180
ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
4181
return 1;
4182
}
4183
4184
/* convert to the sector-based ATA addressing */
4185
len = (len + 511) / 512;
4186
}
4187
4188
tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO;
4189
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA;
4190
if (send)
4191
tf->flags |= ATA_TFLAG_WRITE;
4192
tf->command = ata_scsi_trusted_op(len, send, dma);
4193
tf->feature = secp;
4194
tf->lbam = spsp & 0xff;
4195
tf->lbah = spsp >> 8;
4196
4197
if (len) {
4198
tf->nsect = len & 0xff;
4199
tf->lbal = len >> 8;
4200
} else {
4201
if (!send)
4202
tf->lbah = (1 << 7);
4203
}
4204
4205
ata_qc_set_pc_nbytes(qc);
4206
return 0;
4207
}
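
/*
 * Example translation (illustrative): a SECURITY PROTOCOL IN with
 * SECURITY PROTOCOL = 01h, SPSP = 0001h, the INC_512 bit set and an
 * allocation length of 1 becomes TRUSTED RECEIVE DMA (on a DMA-capable
 * device) with feature = 01h, lbam/lbah carrying SPSP, and nsect = 1,
 * i.e. one 512-byte block of protocol data.
 */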

/**
 * ata_scsi_var_len_cdb_xlat - SATL variable length CDB to Handler
 * @qc: Command to be translated
 *
 * Translate a SCSI variable length CDB to specified commands.
 * It checks the service action value in the CDB and calls the
 * corresponding handler.
 *
 * RETURNS:
 * Zero on success, non-zero on failure
 */
static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	const u16 sa = get_unaligned_be16(&cdb[8]);

	/*
	 * If the service action represents an ATA PASS-THROUGH(32) command,
	 * pass it to the ata_scsi_pass_thru handler.
	 */
	if (sa == ATA_32)
		return ata_scsi_pass_thru(qc);

	/* unsupported service action */
	return 1;
}

/**
 * ata_get_xlat_func - check if SCSI to ATA translation is possible
 * @dev: ATA device
 * @cmd: SCSI command opcode to consider
 *
 * Look up the SCSI command given, and determine whether the
 * SCSI command is to be translated or simulated.
 *
 * RETURNS:
 * Pointer to translation function if possible, %NULL if not.
 */

static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case WRITE_SAME_16:
		return ata_scsi_write_same_xlat;

	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case VARIABLE_LENGTH_CMD:
		return ata_scsi_var_len_cdb_xlat;

	case MODE_SELECT:
	case MODE_SELECT_10:
		return ata_scsi_mode_select_xlat;

	case ZBC_IN:
		return ata_scsi_zbc_in_xlat;

	case ZBC_OUT:
		return ata_scsi_zbc_out_xlat;

	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		if (!(dev->flags & ATA_DFLAG_TRUSTED))
			break;
		return ata_scsi_security_inout_xlat;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	return NULL;
}
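
/*
 * For instance (illustrative): ata_get_xlat_func(dev, READ_10) yields
 * ata_scsi_rw_xlat, while an opcode with no translation (e.g. INQUIRY)
 * yields NULL and is handled by ata_scsi_simulate() instead.
 */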
4303
4304
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
4305
{
4306
struct ata_port *ap = dev->link->ap;
4307
u8 scsi_op = scmd->cmnd[0];
4308
ata_xlat_func_t xlat_func;
4309
4310
/*
4311
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
4312
* However, this check is done without holding the ap->lock (a libata
4313
* specific lock), so we can have received an error irq since then,
4314
* therefore we must check if EH is pending or running, while holding
4315
* ap->lock.
4316
*/
4317
if (ata_port_eh_scheduled(ap))
4318
return SCSI_MLQUEUE_DEVICE_BUSY;
4319
4320
if (unlikely(!scmd->cmd_len))
4321
goto bad_cdb_len;
4322
4323
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
4324
if (unlikely(scmd->cmd_len > dev->cdb_len))
4325
goto bad_cdb_len;
4326
4327
xlat_func = ata_get_xlat_func(dev, scsi_op);
4328
} else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
4329
/* relay SCSI command to ATAPI device */
4330
int len = COMMAND_SIZE(scsi_op);
4331
4332
if (unlikely(len > scmd->cmd_len ||
4333
len > dev->cdb_len ||
4334
scmd->cmd_len > ATAPI_CDB_LEN))
4335
goto bad_cdb_len;
4336
4337
xlat_func = atapi_xlat;
4338
} else {
4339
/* ATA_16 passthru, treat as an ATA command */
4340
if (unlikely(scmd->cmd_len > 16))
4341
goto bad_cdb_len;
4342
4343
xlat_func = ata_get_xlat_func(dev, scsi_op);
4344
}
4345
4346
if (xlat_func)
4347
return ata_scsi_translate(dev, scmd, xlat_func);
4348
4349
ata_scsi_simulate(dev, scmd);
4350
4351
return 0;
4352
4353
bad_cdb_len:
4354
scmd->result = DID_ERROR << 16;
4355
scsi_done(scmd);
4356
return 0;
4357
}
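
/*
 * Dispatch example (illustrative): a READ(10) to an ATA disk takes the
 * first branch and is translated by ata_scsi_rw_xlat(); the same CDB sent
 * to an ATAPI device takes the second branch and is relayed verbatim via
 * atapi_xlat(); an ATA_16 CDB with atapi_passthru16 enabled takes the
 * third branch and is treated as ATA passthrough even on an ATAPI device.
 */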
4358
4359
/**
4360
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
4361
* @shost: SCSI host of command to be sent
4362
* @cmd: SCSI command to be sent
4363
*
4364
* In some cases, this function translates SCSI commands into
4365
* ATA taskfiles, and queues the taskfiles to be sent to
4366
* hardware. In other cases, this function simulates a
4367
* SCSI device by evaluating and responding to certain
4368
* SCSI commands. This creates the overall effect of
4369
* ATA and ATAPI devices appearing as SCSI devices.
4370
*
4371
* LOCKING:
4372
* ATA host lock
4373
*
4374
* RETURNS:
4375
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
4376
* 0 otherwise.
4377
*/
4378
int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
4379
{
4380
struct ata_port *ap;
4381
struct ata_device *dev;
4382
struct scsi_device *scsidev = cmd->device;
4383
int rc = 0;
4384
unsigned long irq_flags;
4385
4386
ap = ata_shost_to_port(shost);
4387
4388
spin_lock_irqsave(ap->lock, irq_flags);
4389
4390
dev = ata_scsi_find_dev(ap, scsidev);
4391
if (likely(dev))
4392
rc = __ata_scsi_queuecmd(cmd, dev);
4393
else {
4394
cmd->result = (DID_BAD_TARGET << 16);
4395
scsi_done(cmd);
4396
}
4397
4398
spin_unlock_irqrestore(ap->lock, irq_flags);
4399
4400
return rc;
4401
}
4402
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4403
4404
/**
4405
* ata_scsi_simulate - simulate SCSI command on ATA device
4406
* @dev: the target device
4407
* @cmd: SCSI command being sent to device.
4408
*
4409
* Interprets and directly executes a select list of SCSI commands
4410
* that can be handled internally.
4411
*
4412
* LOCKING:
4413
* spin_lock_irqsave(host lock)
4414
*/
4415
4416
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
4417
{
4418
const u8 *scsicmd = cmd->cmnd;
4419
u8 tmp8;
4420
4421
switch(scsicmd[0]) {
4422
case INQUIRY:
4423
ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
4424
break;
4425
4426
case MODE_SENSE:
4427
case MODE_SENSE_10:
4428
ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
4429
break;
4430
4431
case READ_CAPACITY:
4432
case SERVICE_ACTION_IN_16:
4433
ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
4434
break;
4435
4436
case REPORT_LUNS:
4437
ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
4438
break;
4439
4440
case REQUEST_SENSE:
4441
ata_scsi_set_sense(dev, cmd, 0, 0, 0);
4442
break;
4443
4444
/* if we reach this, then writeback caching is disabled,
4445
* turning this into a no-op.
4446
*/
4447
case SYNCHRONIZE_CACHE:
4448
case SYNCHRONIZE_CACHE_16:
4449
fallthrough;
4450
4451
/* no-op's, complete with success */
4452
case REZERO_UNIT:
4453
case SEEK_6:
4454
case SEEK_10:
4455
case TEST_UNIT_READY:
4456
break;
4457
4458
case SEND_DIAGNOSTIC:
4459
tmp8 = scsicmd[1] & ~(1 << 3);
4460
if (tmp8 != 0x4 || scsicmd[3] || scsicmd[4])
4461
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
4462
break;
4463
4464
case MAINTENANCE_IN:
4465
ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
4466
break;
4467
4468
/* all other commands */
4469
default:
4470
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
4471
/* "Invalid command operation code" */
4472
break;
4473
}
4474
4475
scsi_done(cmd);
4476
}
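
/*
 * Note on the SEND DIAGNOSTIC case above (illustrative): ignoring the
 * reserved bit 3 of byte 1, only 04h (the SELFTEST bit alone) with a zero
 * parameter list length (bytes 3-4) is accepted; anything else is failed
 * with "Invalid field in CDB".
 */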
4477
4478
int ata_scsi_add_hosts(struct ata_host *host, const struct scsi_host_template *sht)
4479
{
4480
int i, rc;
4481
4482
for (i = 0; i < host->n_ports; i++) {
4483
struct ata_port *ap = host->ports[i];
4484
struct Scsi_Host *shost;
4485
4486
rc = -ENOMEM;
4487
shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
4488
if (!shost)
4489
goto err_alloc;
4490
4491
shost->eh_noresume = 1;
4492
*(struct ata_port **)&shost->hostdata[0] = ap;
4493
ap->scsi_host = shost;
4494
4495
shost->transportt = ata_scsi_transport_template;
4496
shost->unique_id = ap->print_id;
4497
shost->max_id = 16;
4498
shost->max_lun = 1;
4499
shost->max_channel = 1;
4500
shost->max_cmd_len = 32;
4501
4502
/* Schedule policy is determined by ->qc_defer()
4503
* callback and it needs to see every deferred qc.
4504
* Set host_blocked to 1 to prevent SCSI midlayer from
4505
* automatically deferring requests.
4506
*/
4507
shost->max_host_blocked = 1;
4508
4509
rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
4510
if (rc)
4511
goto err_alloc;
4512
}
4513
4514
return 0;
4515
4516
err_alloc:
4517
while (--i >= 0) {
4518
struct Scsi_Host *shost = host->ports[i]->scsi_host;
4519
4520
/* scsi_host_put() is in ata_devres_release() */
4521
scsi_remove_host(shost);
4522
}
4523
return rc;
4524
}

#ifdef CONFIG_OF
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
	struct scsi_device *sdev = dev->sdev;
	struct device *d = ap->host->dev;
	struct device_node *np = d->of_node;
	struct device_node *child;

	for_each_available_child_of_node(np, child) {
		int ret;
		u32 val;

		ret = of_property_read_u32(child, "reg", &val);
		if (ret)
			continue;
		if (val == dev->devno) {
			dev_dbg(d, "found matching device node\n");
			sdev->sdev_gendev.of_node = child;
			return;
		}
	}
}
#else
static void ata_scsi_assign_ofnode(struct ata_device *dev, struct ata_port *ap)
{
}
#endif
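
/*
 * Illustrative device tree fragment (hypothetical, not from this file):
 * a child node of the SATA controller whose "reg" property matches
 * dev->devno gets attached to the SCSI device as its of_node.
 *
 *	sata {
 *		...
 *		drive@0 {
 *			reg = <0>;	// matched against dev->devno
 *		};
 *	};
 */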

void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
	int tries = 5;
	struct ata_device *last_failed_dev = NULL;
	struct ata_link *link;
	struct ata_device *dev;

repeat:
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			struct scsi_device *sdev;
			int channel = 0, id = 0;

			if (dev->sdev)
				continue;

			if (ata_is_host_link(link))
				id = dev->devno;
			else
				channel = link->pmp;

			sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
						 NULL);
			if (!IS_ERR(sdev)) {
				dev->sdev = sdev;
				ata_scsi_assign_ofnode(dev, ap);
				scsi_device_put(sdev);
			} else {
				dev->sdev = NULL;
			}
		}
	}

	/* If we scanned while EH was in progress or an allocation
	 * failure occurred, the scan would have failed silently.  Check
	 * whether all devices are attached.
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!dev->sdev)
				goto exit_loop;
		}
	}
exit_loop:
	if (!link)
		return;

	/* we're missing some SCSI devices */
	if (sync) {
		/* If the caller requested a synchronous scan and we've
		 * made any progress, sleep briefly and repeat.
		 */
		if (dev != last_failed_dev) {
			msleep(100);
			last_failed_dev = dev;
			goto repeat;
		}

		/* We might be failing to detect the boot device, give
		 * it a few more chances.
		 */
		if (--tries) {
			msleep(100);
			goto repeat;
		}

		ata_port_err(ap,
			     "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
	}

	queue_delayed_work(system_long_wq, &ap->hotplug_task,
			   round_jiffies_relative(HZ));
}

/**
 * ata_scsi_offline_dev - offline attached SCSI device
 * @dev: ATA device to offline attached SCSI device for
 *
 * This function is called from ata_eh_detach_dev() and is responsible for
 * taking the SCSI device attached to @dev offline.  This function is
 * called with the host lock held, which protects dev->sdev against
 * clearing.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * true if attached SCSI device exists, false otherwise.
 */
bool ata_scsi_offline_dev(struct ata_device *dev)
{
	if (dev->sdev) {
		scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
		return true;
	}
	return false;
}
4650
4651
/**
4652
* ata_scsi_remove_dev - remove attached SCSI device
4653
* @dev: ATA device to remove attached SCSI device for
4654
*
4655
* This function is called from ata_eh_scsi_hotplug() and
4656
* responsible for removing the SCSI device attached to @dev.
4657
*
4658
* LOCKING:
4659
* Kernel thread context (may sleep).
4660
*/
4661
static void ata_scsi_remove_dev(struct ata_device *dev)
4662
{
4663
struct ata_port *ap = dev->link->ap;
4664
struct scsi_device *sdev;
4665
unsigned long flags;
4666
4667
/* Alas, we need to grab scan_mutex to ensure SCSI device
4668
* state doesn't change underneath us and thus
4669
* scsi_device_get() always succeeds. The mutex locking can
4670
* be removed if there is __scsi_device_get() interface which
4671
* increments reference counts regardless of device state.
4672
*/
4673
mutex_lock(&ap->scsi_host->scan_mutex);
4674
spin_lock_irqsave(ap->lock, flags);
4675
4676
/* clearing dev->sdev is protected by host lock */
4677
sdev = dev->sdev;
4678
dev->sdev = NULL;
4679
4680
if (sdev) {
4681
/* If user initiated unplug races with us, sdev can go
4682
* away underneath us after the host lock and
4683
* scan_mutex are released. Hold onto it.
4684
*/
4685
if (scsi_device_get(sdev) == 0) {
4686
/* The following ensures the attached sdev is
4687
* offline on return from ata_scsi_offline_dev()
4688
* regardless it wins or loses the race
4689
* against this function.
4690
*/
4691
scsi_device_set_state(sdev, SDEV_OFFLINE);
4692
} else {
4693
WARN_ON(1);
4694
sdev = NULL;
4695
}
4696
}
4697
4698
spin_unlock_irqrestore(ap->lock, flags);
4699
mutex_unlock(&ap->scsi_host->scan_mutex);
4700
4701
if (sdev) {
4702
ata_dev_info(dev, "detaching (SCSI %s)\n",
4703
dev_name(&sdev->sdev_gendev));
4704
4705
scsi_remove_device(sdev);
4706
scsi_device_put(sdev);
4707
}
4708
}

static void ata_scsi_handle_link_detach(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;

	ata_for_each_dev(dev, link, ALL) {
		unsigned long flags;

		spin_lock_irqsave(ap->lock, flags);
		if (!(dev->flags & ATA_DFLAG_DETACHED)) {
			spin_unlock_irqrestore(ap->lock, flags);
			continue;
		}

		dev->flags &= ~ATA_DFLAG_DETACHED;
		spin_unlock_irqrestore(ap->lock, flags);

		ata_scsi_remove_dev(dev);
	}
}
4730
4731
/**
4732
* ata_scsi_media_change_notify - send media change event
4733
* @dev: Pointer to the disk device with media change event
4734
*
4735
* Tell the block layer to send a media change notification
4736
* event.
4737
*
4738
* LOCKING:
4739
* spin_lock_irqsave(host lock)
4740
*/
4741
void ata_scsi_media_change_notify(struct ata_device *dev)
4742
{
4743
if (dev->sdev)
4744
sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
4745
GFP_ATOMIC);
4746
}
4747
4748
/**
4749
* ata_scsi_hotplug - SCSI part of hotplug
4750
* @work: Pointer to ATA port to perform SCSI hotplug on
4751
*
4752
* Perform SCSI part of hotplug. It's executed from a separate
4753
* workqueue after EH completes. This is necessary because SCSI
4754
* hot plugging requires working EH and hot unplugging is
4755
* synchronized with hot plugging with a mutex.
4756
*
4757
* LOCKING:
4758
* Kernel thread context (may sleep).
4759
*/
4760
void ata_scsi_hotplug(struct work_struct *work)
4761
{
4762
struct ata_port *ap =
4763
container_of(work, struct ata_port, hotplug_task.work);
4764
int i;
4765
4766
if (ap->pflags & ATA_PFLAG_UNLOADING)
4767
return;
4768
4769
mutex_lock(&ap->scsi_scan_mutex);
4770
4771
/* Unplug detached devices. We cannot use link iterator here
4772
* because PMP links have to be scanned even if PMP is
4773
* currently not attached. Iterate manually.
4774
*/
4775
ata_scsi_handle_link_detach(&ap->link);
4776
if (ap->pmp_link)
4777
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
4778
ata_scsi_handle_link_detach(&ap->pmp_link[i]);
4779
4780
/* scan for new ones */
4781
ata_scsi_scan_host(ap, 0);
4782
4783
mutex_unlock(&ap->scsi_scan_mutex);
4784
}

/**
 * ata_scsi_user_scan - indication for user-initiated bus scan
 * @shost: SCSI host to scan
 * @channel: Channel to scan
 * @id: ID to scan
 * @lun: LUN to scan
 *
 * This function is called when the user explicitly requests a bus
 * scan.  Set the probe pending flag and invoke EH.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Zero.
 */
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
		       unsigned int id, u64 lun)
{
	struct ata_port *ap = ata_shost_to_port(shost);
	unsigned long flags;
	int devno, rc = 0;

	if (lun != SCAN_WILD_CARD && lun)
		return -EINVAL;

	if (!sata_pmp_attached(ap)) {
		if (channel != SCAN_WILD_CARD && channel)
			return -EINVAL;
		devno = id;
	} else {
		if (id != SCAN_WILD_CARD && id)
			return -EINVAL;
		devno = channel;
	}

	spin_lock_irqsave(ap->lock, flags);

	if (devno == SCAN_WILD_CARD) {
		struct ata_link *link;

		ata_for_each_link(link, ap, EDGE) {
			struct ata_eh_info *ehi = &link->eh_info;
			ehi->probe_mask |= ATA_ALL_DEVICES;
			ehi->action |= ATA_EH_RESET;
		}
	} else {
		struct ata_device *dev = ata_find_dev(ap, devno);

		if (dev) {
			struct ata_eh_info *ehi = &dev->link->eh_info;
			ehi->probe_mask |= 1 << dev->devno;
			ehi->action |= ATA_EH_RESET;
		} else
			rc = -EINVAL;
	}

	if (rc == 0) {
		ata_port_schedule_eh(ap);
		spin_unlock_irqrestore(ap->lock, flags);
		ata_port_wait_eh(ap);
	} else
		spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
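
/*
 * Usage example (illustrative): a user-initiated rescan such as
 *
 *	echo "- - -" > /sys/class/scsi_host/hostX/scan
 *
 * reaches this callback with wildcard channel/id/lun, which sets
 * ATA_ALL_DEVICES in probe_mask on every link, schedules EH and waits
 * for it to finish.
 */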
4852
4853
/**
4854
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
4855
* @work: Pointer to ATA port to perform scsi_rescan_device()
4856
*
4857
* After ATA pass thru (SAT) commands are executed successfully,
4858
* libata need to propagate the changes to SCSI layer.
4859
*
4860
* LOCKING:
4861
* Kernel thread context (may sleep).
4862
*/
4863
void ata_scsi_dev_rescan(struct work_struct *work)
4864
{
4865
struct ata_port *ap =
4866
container_of(work, struct ata_port, scsi_rescan_task.work);
4867
struct ata_link *link;
4868
struct ata_device *dev;
4869
unsigned long flags;
4870
bool do_resume;
4871
int ret = 0;
4872
4873
mutex_lock(&ap->scsi_scan_mutex);
4874
spin_lock_irqsave(ap->lock, flags);
4875
4876
ata_for_each_link(link, ap, EDGE) {
4877
ata_for_each_dev(dev, link, ENABLED) {
4878
struct scsi_device *sdev = dev->sdev;
4879
4880
/*
4881
* If the port was suspended before this was scheduled,
4882
* bail out.
4883
*/
4884
if (ap->pflags & ATA_PFLAG_SUSPENDED)
4885
goto unlock_ap;
4886
4887
if (!sdev)
4888
continue;
4889
if (scsi_device_get(sdev))
4890
continue;
4891
4892
do_resume = dev->flags & ATA_DFLAG_RESUMING;
4893
4894
spin_unlock_irqrestore(ap->lock, flags);
4895
if (do_resume) {
4896
ret = scsi_resume_device(sdev);
4897
if (ret == -EWOULDBLOCK)
4898
goto unlock_scan;
4899
dev->flags &= ~ATA_DFLAG_RESUMING;
4900
}
4901
ret = scsi_rescan_device(sdev);
4902
scsi_device_put(sdev);
4903
spin_lock_irqsave(ap->lock, flags);
4904
4905
if (ret)
4906
goto unlock_ap;
4907
}
4908
}
4909
4910
unlock_ap:
4911
spin_unlock_irqrestore(ap->lock, flags);
4912
unlock_scan:
4913
mutex_unlock(&ap->scsi_scan_mutex);
4914
4915
/* Reschedule with a delay if scsi_rescan_device() returned an error */
4916
if (ret)
4917
schedule_delayed_work(&ap->scsi_rescan_task,
4918
msecs_to_jiffies(5));
4919
}