Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/ata/libata-scsi.c
15109 views
1
/*
2
* libata-scsi.c - helper library for ATA
3
*
4
* Maintained by: Jeff Garzik <[email protected]>
5
* Please ALWAYS copy [email protected]
6
* on emails.
7
*
8
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9
* Copyright 2003-2004 Jeff Garzik
10
*
11
*
12
* This program is free software; you can redistribute it and/or modify
13
* it under the terms of the GNU General Public License as published by
14
* the Free Software Foundation; either version 2, or (at your option)
15
* any later version.
16
*
17
* This program is distributed in the hope that it will be useful,
18
* but WITHOUT ANY WARRANTY; without even the implied warranty of
19
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20
* GNU General Public License for more details.
21
*
22
* You should have received a copy of the GNU General Public License
23
* along with this program; see the file COPYING. If not, write to
24
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25
*
26
*
27
* libata documentation is available via 'make {ps|pdf}docs',
28
* as Documentation/DocBook/libata.*
29
*
30
* Hardware documentation available from
31
* - http://www.t10.org/
32
* - http://www.t13.org/
33
*
34
*/
35
36
#include <linux/slab.h>
37
#include <linux/kernel.h>
38
#include <linux/blkdev.h>
39
#include <linux/spinlock.h>
40
#include <scsi/scsi.h>
41
#include <scsi/scsi_host.h>
42
#include <scsi/scsi_cmnd.h>
43
#include <scsi/scsi_eh.h>
44
#include <scsi/scsi_device.h>
45
#include <scsi/scsi_tcq.h>
46
#include <scsi/scsi_transport.h>
47
#include <linux/libata.h>
48
#include <linux/hdreg.h>
49
#include <linux/uaccess.h>
50
#include <linux/suspend.h>
51
#include <asm/unaligned.h>
52
53
#include "libata.h"
54
#include "libata-transport.h"
55
56
#define ATA_SCSI_RBUF_SIZE 4096
57
58
static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
59
static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
60
61
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
62
63
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
64
const struct scsi_device *scsidev);
65
static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
66
const struct scsi_device *scsidev);
67
68
#define RW_RECOVERY_MPAGE 0x1
69
#define RW_RECOVERY_MPAGE_LEN 12
70
#define CACHE_MPAGE 0x8
71
#define CACHE_MPAGE_LEN 20
72
#define CONTROL_MPAGE 0xa
73
#define CONTROL_MPAGE_LEN 12
74
#define ALL_MPAGES 0x3f
75
#define ALL_SUB_MPAGES 0xff
76
77
78
static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
79
RW_RECOVERY_MPAGE,
80
RW_RECOVERY_MPAGE_LEN - 2,
81
(1 << 7), /* AWRE */
82
0, /* read retry count */
83
0, 0, 0, 0,
84
0, /* write retry count */
85
0, 0, 0
86
};
87
88
static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
89
CACHE_MPAGE,
90
CACHE_MPAGE_LEN - 2,
91
0, /* contains WCE, needs to be 0 for logic */
92
0, 0, 0, 0, 0, 0, 0, 0, 0,
93
0, /* contains DRA, needs to be 0 for logic */
94
0, 0, 0, 0, 0, 0, 0
95
};
96
97
static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
98
CONTROL_MPAGE,
99
CONTROL_MPAGE_LEN - 2,
100
2, /* DSENSE=0, GLTSD=1 */
101
0, /* [QAM+QERR may be 1, see 05-359r1] */
102
0, 0, 0, 0, 0xff, 0xff,
103
0, 30 /* extended self test time, see 05-359r1 */
104
};
105
106
static const char *ata_lpm_policy_names[] = {
107
[ATA_LPM_UNKNOWN] = "max_performance",
108
[ATA_LPM_MAX_POWER] = "max_performance",
109
[ATA_LPM_MED_POWER] = "medium_power",
110
[ATA_LPM_MIN_POWER] = "min_power",
111
};
112
113
/*
 * sysfs store: parse a link power management policy name and ask EH
 * to apply it to the port.  Returns @count on success, -EINVAL when
 * the string matches no known policy name.
 */
static ssize_t ata_scsi_lpm_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ata_port *port = ata_shost_to_port(class_to_shost(dev));
	enum ata_lpm_policy pol;
	unsigned long flags;

	/* ATA_LPM_UNKNOWN is internal only, so start matching at MAX_POWER */
	for (pol = ATA_LPM_MAX_POWER;
	     pol < ARRAY_SIZE(ata_lpm_policy_names); pol++) {
		const char *candidate = ata_lpm_policy_names[pol];

		if (!strncmp(candidate, buf, strlen(candidate)))
			break;
	}
	if (pol == ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;

	/* publish the new target policy and let EH act on it */
	spin_lock_irqsave(port->lock, flags);
	port->target_lpm_policy = pol;
	ata_port_schedule_eh(port);
	spin_unlock_irqrestore(port->lock, flags);

	return count;
}
140
141
static ssize_t ata_scsi_lpm_show(struct device *dev,
142
struct device_attribute *attr, char *buf)
143
{
144
struct Scsi_Host *shost = class_to_shost(dev);
145
struct ata_port *ap = ata_shost_to_port(shost);
146
147
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
148
return -EINVAL;
149
150
return snprintf(buf, PAGE_SIZE, "%s\n",
151
ata_lpm_policy_names[ap->target_lpm_policy]);
152
}
153
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
154
ata_scsi_lpm_show, ata_scsi_lpm_store);
155
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
156
157
static ssize_t ata_scsi_park_show(struct device *device,
158
struct device_attribute *attr, char *buf)
159
{
160
struct scsi_device *sdev = to_scsi_device(device);
161
struct ata_port *ap;
162
struct ata_link *link;
163
struct ata_device *dev;
164
unsigned long flags, now;
165
unsigned int uninitialized_var(msecs);
166
int rc = 0;
167
168
ap = ata_shost_to_port(sdev->host);
169
170
spin_lock_irqsave(ap->lock, flags);
171
dev = ata_scsi_find_dev(ap, sdev);
172
if (!dev) {
173
rc = -ENODEV;
174
goto unlock;
175
}
176
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
177
rc = -EOPNOTSUPP;
178
goto unlock;
179
}
180
181
link = dev->link;
182
now = jiffies;
183
if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
184
link->eh_context.unloaded_mask & (1 << dev->devno) &&
185
time_after(dev->unpark_deadline, now))
186
msecs = jiffies_to_msecs(dev->unpark_deadline - now);
187
else
188
msecs = 0;
189
190
unlock:
191
spin_unlock_irq(ap->lock);
192
193
return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
194
}
195
196
static ssize_t ata_scsi_park_store(struct device *device,
197
struct device_attribute *attr,
198
const char *buf, size_t len)
199
{
200
struct scsi_device *sdev = to_scsi_device(device);
201
struct ata_port *ap;
202
struct ata_device *dev;
203
long int input;
204
unsigned long flags;
205
int rc;
206
207
rc = strict_strtol(buf, 10, &input);
208
if (rc || input < -2)
209
return -EINVAL;
210
if (input > ATA_TMOUT_MAX_PARK) {
211
rc = -EOVERFLOW;
212
input = ATA_TMOUT_MAX_PARK;
213
}
214
215
ap = ata_shost_to_port(sdev->host);
216
217
spin_lock_irqsave(ap->lock, flags);
218
dev = ata_scsi_find_dev(ap, sdev);
219
if (unlikely(!dev)) {
220
rc = -ENODEV;
221
goto unlock;
222
}
223
if (dev->class != ATA_DEV_ATA) {
224
rc = -EOPNOTSUPP;
225
goto unlock;
226
}
227
228
if (input >= 0) {
229
if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
230
rc = -EOPNOTSUPP;
231
goto unlock;
232
}
233
234
dev->unpark_deadline = ata_deadline(jiffies, input);
235
dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
236
ata_port_schedule_eh(ap);
237
complete(&ap->park_req_pending);
238
} else {
239
switch (input) {
240
case -1:
241
dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
242
break;
243
case -2:
244
dev->flags |= ATA_DFLAG_NO_UNLOAD;
245
break;
246
}
247
}
248
unlock:
249
spin_unlock_irqrestore(ap->lock, flags);
250
251
return rc ? rc : len;
252
}
253
DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
254
ata_scsi_park_show, ata_scsi_park_store);
255
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
256
257
/*
 * Mark @cmd CHECK CONDITION with valid sense data and fill its sense
 * buffer with the given key/asc/ascq triple (fixed format).
 */
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
}
263
264
static ssize_t
265
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
266
const char *buf, size_t count)
267
{
268
struct Scsi_Host *shost = class_to_shost(dev);
269
struct ata_port *ap = ata_shost_to_port(shost);
270
if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
271
return ap->ops->em_store(ap, buf, count);
272
return -EINVAL;
273
}
274
275
static ssize_t
276
ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
277
char *buf)
278
{
279
struct Scsi_Host *shost = class_to_shost(dev);
280
struct ata_port *ap = ata_shost_to_port(shost);
281
282
if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
283
return ap->ops->em_show(ap, buf);
284
return -EINVAL;
285
}
286
DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
287
ata_scsi_em_message_show, ata_scsi_em_message_store);
288
EXPORT_SYMBOL_GPL(dev_attr_em_message);
289
290
static ssize_t
291
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
292
char *buf)
293
{
294
struct Scsi_Host *shost = class_to_shost(dev);
295
struct ata_port *ap = ata_shost_to_port(shost);
296
297
return snprintf(buf, 23, "%d\n", ap->em_message_type);
298
}
299
DEVICE_ATTR(em_message_type, S_IRUGO,
300
ata_scsi_em_message_type_show, NULL);
301
EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
302
303
static ssize_t
304
ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
305
char *buf)
306
{
307
struct scsi_device *sdev = to_scsi_device(dev);
308
struct ata_port *ap = ata_shost_to_port(sdev->host);
309
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
310
311
if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
312
return ap->ops->sw_activity_show(atadev, buf);
313
return -EINVAL;
314
}
315
316
static ssize_t
317
ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
318
const char *buf, size_t count)
319
{
320
struct scsi_device *sdev = to_scsi_device(dev);
321
struct ata_port *ap = ata_shost_to_port(sdev->host);
322
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
323
enum sw_activity val;
324
int rc;
325
326
if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
327
val = simple_strtoul(buf, NULL, 0);
328
switch (val) {
329
case OFF: case BLINK_ON: case BLINK_OFF:
330
rc = ap->ops->sw_activity_store(atadev, val);
331
if (!rc)
332
return count;
333
else
334
return rc;
335
}
336
}
337
return -EINVAL;
338
}
339
DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
340
ata_scsi_activity_store);
341
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
342
343
struct device_attribute *ata_common_sdev_attrs[] = {
344
&dev_attr_unload_heads,
345
NULL
346
};
347
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
348
349
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
350
{
351
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
352
/* "Invalid field in cbd" */
353
cmd->scsi_done(cmd);
354
}
355
356
/**
357
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
358
* @sdev: SCSI device for which BIOS geometry is to be determined
359
* @bdev: block device associated with @sdev
360
* @capacity: capacity of SCSI device
361
* @geom: location to which geometry will be output
362
*
363
* Generic bios head/sector/cylinder calculator
364
* used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS)
365
* mapping. Some situations may arise where the disk is not
366
* bootable if this is not used.
367
*
368
* LOCKING:
369
* Defined by the SCSI layer. We don't really care.
370
*
371
* RETURNS:
372
* Zero.
373
*/
374
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
375
sector_t capacity, int geom[])
376
{
377
geom[0] = 255;
378
geom[1] = 63;
379
sector_div(capacity, 255*63);
380
geom[2] = capacity;
381
382
return 0;
383
}
384
385
/**
386
* ata_scsi_unlock_native_capacity - unlock native capacity
387
* @sdev: SCSI device to adjust device capacity for
388
*
389
* This function is called if a partition on @sdev extends beyond
390
* the end of the device. It requests EH to unlock HPA.
391
*
392
* LOCKING:
393
* Defined by the SCSI layer. Might sleep.
394
*/
395
void ata_scsi_unlock_native_capacity(struct scsi_device *sdev)
396
{
397
struct ata_port *ap = ata_shost_to_port(sdev->host);
398
struct ata_device *dev;
399
unsigned long flags;
400
401
spin_lock_irqsave(ap->lock, flags);
402
403
dev = ata_scsi_find_dev(ap, sdev);
404
if (dev && dev->n_sectors < dev->n_native_sectors) {
405
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
406
dev->link->eh_info.action |= ATA_EH_RESET;
407
ata_port_schedule_eh(ap);
408
}
409
410
spin_unlock_irqrestore(ap->lock, flags);
411
ata_port_wait_eh(ap);
412
}
413
414
/**
415
* ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
416
* @ap: target port
417
* @sdev: SCSI device to get identify data for
418
* @arg: User buffer area for identify data
419
*
420
* LOCKING:
421
* Defined by the SCSI layer. We don't really care.
422
*
423
* RETURNS:
424
* Zero on success, negative errno on error.
425
*/
426
static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
427
void __user *arg)
428
{
429
struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
430
u16 __user *dst = arg;
431
char buf[40];
432
433
if (!dev)
434
return -ENOMSG;
435
436
if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
437
return -EFAULT;
438
439
ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
440
if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
441
return -EFAULT;
442
443
ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
444
if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
445
return -EFAULT;
446
447
ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
448
if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
449
return -EFAULT;
450
451
return 0;
452
}
453
454
/**
455
* ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
456
* @scsidev: Device to which we are issuing command
457
* @arg: User provided data for issuing command
458
*
459
* LOCKING:
460
* Defined by the SCSI layer. We don't really care.
461
*
462
* RETURNS:
463
* Zero on success, negative errno on error.
464
*/
465
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
466
{
467
int rc = 0;
468
u8 scsi_cmd[MAX_COMMAND_SIZE];
469
u8 args[4], *argbuf = NULL, *sensebuf = NULL;
470
int argsize = 0;
471
enum dma_data_direction data_dir;
472
int cmd_result;
473
474
if (arg == NULL)
475
return -EINVAL;
476
477
if (copy_from_user(args, arg, sizeof(args)))
478
return -EFAULT;
479
480
sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
481
if (!sensebuf)
482
return -ENOMEM;
483
484
memset(scsi_cmd, 0, sizeof(scsi_cmd));
485
486
if (args[3]) {
487
argsize = ATA_SECT_SIZE * args[3];
488
argbuf = kmalloc(argsize, GFP_KERNEL);
489
if (argbuf == NULL) {
490
rc = -ENOMEM;
491
goto error;
492
}
493
494
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
495
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
496
block count in sector count field */
497
data_dir = DMA_FROM_DEVICE;
498
} else {
499
scsi_cmd[1] = (3 << 1); /* Non-data */
500
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
501
data_dir = DMA_NONE;
502
}
503
504
scsi_cmd[0] = ATA_16;
505
506
scsi_cmd[4] = args[2];
507
if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
508
scsi_cmd[6] = args[3];
509
scsi_cmd[8] = args[1];
510
scsi_cmd[10] = 0x4f;
511
scsi_cmd[12] = 0xc2;
512
} else {
513
scsi_cmd[6] = args[1];
514
}
515
scsi_cmd[14] = args[0];
516
517
/* Good values for timeout and retries? Values below
518
from scsi_ioctl_send_command() for default case... */
519
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
520
sensebuf, (10*HZ), 5, 0, NULL);
521
522
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
523
u8 *desc = sensebuf + 8;
524
cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
525
526
/* If we set cc then ATA pass-through will cause a
527
* check condition even if no error. Filter that. */
528
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
529
struct scsi_sense_hdr sshdr;
530
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
531
&sshdr);
532
if (sshdr.sense_key == 0 &&
533
sshdr.asc == 0 && sshdr.ascq == 0)
534
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
535
}
536
537
/* Send userspace a few ATA registers (same as drivers/ide) */
538
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
539
desc[0] == 0x09) { /* code is "ATA Descriptor" */
540
args[0] = desc[13]; /* status */
541
args[1] = desc[3]; /* error */
542
args[2] = desc[5]; /* sector count (0:7) */
543
if (copy_to_user(arg, args, sizeof(args)))
544
rc = -EFAULT;
545
}
546
}
547
548
549
if (cmd_result) {
550
rc = -EIO;
551
goto error;
552
}
553
554
if ((argbuf)
555
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
556
rc = -EFAULT;
557
error:
558
kfree(sensebuf);
559
kfree(argbuf);
560
return rc;
561
}
562
563
/**
564
* ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
565
* @scsidev: Device to which we are issuing command
566
* @arg: User provided data for issuing command
567
*
568
* LOCKING:
569
* Defined by the SCSI layer. We don't really care.
570
*
571
* RETURNS:
572
* Zero on success, negative errno on error.
573
*/
574
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
575
{
576
int rc = 0;
577
u8 scsi_cmd[MAX_COMMAND_SIZE];
578
u8 args[7], *sensebuf = NULL;
579
int cmd_result;
580
581
if (arg == NULL)
582
return -EINVAL;
583
584
if (copy_from_user(args, arg, sizeof(args)))
585
return -EFAULT;
586
587
sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
588
if (!sensebuf)
589
return -ENOMEM;
590
591
memset(scsi_cmd, 0, sizeof(scsi_cmd));
592
scsi_cmd[0] = ATA_16;
593
scsi_cmd[1] = (3 << 1); /* Non-data */
594
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
595
scsi_cmd[4] = args[1];
596
scsi_cmd[6] = args[2];
597
scsi_cmd[8] = args[3];
598
scsi_cmd[10] = args[4];
599
scsi_cmd[12] = args[5];
600
scsi_cmd[13] = args[6] & 0x4f;
601
scsi_cmd[14] = args[0];
602
603
/* Good values for timeout and retries? Values below
604
from scsi_ioctl_send_command() for default case... */
605
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
606
sensebuf, (10*HZ), 5, 0, NULL);
607
608
if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
609
u8 *desc = sensebuf + 8;
610
cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
611
612
/* If we set cc then ATA pass-through will cause a
613
* check condition even if no error. Filter that. */
614
if (cmd_result & SAM_STAT_CHECK_CONDITION) {
615
struct scsi_sense_hdr sshdr;
616
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
617
&sshdr);
618
if (sshdr.sense_key == 0 &&
619
sshdr.asc == 0 && sshdr.ascq == 0)
620
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
621
}
622
623
/* Send userspace ATA registers */
624
if (sensebuf[0] == 0x72 && /* format is "descriptor" */
625
desc[0] == 0x09) {/* code is "ATA Descriptor" */
626
args[0] = desc[13]; /* status */
627
args[1] = desc[3]; /* error */
628
args[2] = desc[5]; /* sector count (0:7) */
629
args[3] = desc[7]; /* lbal */
630
args[4] = desc[9]; /* lbam */
631
args[5] = desc[11]; /* lbah */
632
args[6] = desc[12]; /* select */
633
if (copy_to_user(arg, args, sizeof(args)))
634
rc = -EFAULT;
635
}
636
}
637
638
if (cmd_result) {
639
rc = -EIO;
640
goto error;
641
}
642
643
error:
644
kfree(sensebuf);
645
return rc;
646
}
647
648
static int ata_ioc32(struct ata_port *ap)
649
{
650
if (ap->flags & ATA_FLAG_PIO_DMA)
651
return 1;
652
if (ap->pflags & ATA_PFLAG_PIO32)
653
return 1;
654
return 0;
655
}
656
657
/*
 * Dispatch libata-specific and HDIO ioctls for an ATA device hanging
 * off @ap.  Unknown commands return -ENOTTY so the midlayer can try
 * its own handlers.
 */
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
		       int cmd, void __user *arg)
{
	int val = -EINVAL, rc = -EINVAL;
	unsigned long flags;

	switch (cmd) {
	case ATA_IOC_GET_IO32:
		spin_lock_irqsave(ap->lock, flags);
		val = ata_ioc32(ap);
		spin_unlock_irqrestore(ap->lock, flags);
		/*
		 * NOTE(review): only the first byte of the int is copied
		 * out, which is endian-sensitive.  Kept as-is since it
		 * matches the historic ioctl ABI -- confirm before changing.
		 */
		if (copy_to_user(arg, &val, 1))
			return -EFAULT;
		return 0;

	case ATA_IOC_SET_IO32:
		val = (unsigned long) arg;
		rc = 0;
		spin_lock_irqsave(ap->lock, flags);
		if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
			if (val)
				ap->pflags |= ATA_PFLAG_PIO32;
			else
				ap->pflags &= ~ATA_PFLAG_PIO32;
		} else {
			/* mode is fixed; only "setting" the current one is ok */
			if (val != ata_ioc32(ap))
				rc = -EINVAL;
		}
		spin_unlock_irqrestore(ap->lock, flags);
		return rc;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(ap, scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		return -ENOTTY;
	}
}
EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
709
710
/* Ioctl entry point for native libata hosts: resolve the port and defer. */
int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
	return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
				  scsidev, cmd, arg);
}
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
716
717
/**
718
* ata_scsi_qc_new - acquire new ata_queued_cmd reference
719
* @dev: ATA device to which the new command is attached
720
* @cmd: SCSI command that originated this ATA command
721
*
722
* Obtain a reference to an unused ata_queued_cmd structure,
723
* which is the basic libata structure representing a single
724
* ATA command sent to the hardware.
725
*
726
* If a command was available, fill in the SCSI-specific
727
* portions of the structure with information on the
728
* current command.
729
*
730
* LOCKING:
731
* spin_lock_irqsave(host lock)
732
*
733
* RETURNS:
734
* Command allocated, or %NULL if none available.
735
*/
736
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
737
struct scsi_cmnd *cmd)
738
{
739
struct ata_queued_cmd *qc;
740
741
qc = ata_qc_new_init(dev);
742
if (qc) {
743
qc->scsicmd = cmd;
744
qc->scsidone = cmd->scsi_done;
745
746
qc->sg = scsi_sglist(cmd);
747
qc->n_elem = scsi_sg_count(cmd);
748
} else {
749
cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
750
cmd->scsi_done(cmd);
751
}
752
753
return qc;
754
}
755
756
static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
757
{
758
struct scsi_cmnd *scmd = qc->scsicmd;
759
760
qc->extrabytes = scmd->request->extra_len;
761
qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
762
}
763
764
/**
765
* ata_dump_status - user friendly display of error info
766
* @id: id of the port in question
767
* @tf: ptr to filled out taskfile
768
*
769
* Decode and dump the ATA error/status registers for the user so
770
* that they have some idea what really happened at the non
771
* make-believe layer.
772
*
773
* LOCKING:
774
* inherited from caller
775
*/
776
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
777
{
778
u8 stat = tf->command, err = tf->feature;
779
780
printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
781
if (stat & ATA_BUSY) {
782
printk("Busy }\n"); /* Data is not valid in this case */
783
} else {
784
if (stat & 0x40) printk("DriveReady ");
785
if (stat & 0x20) printk("DeviceFault ");
786
if (stat & 0x10) printk("SeekComplete ");
787
if (stat & 0x08) printk("DataRequest ");
788
if (stat & 0x04) printk("CorrectedError ");
789
if (stat & 0x02) printk("Index ");
790
if (stat & 0x01) printk("Error ");
791
printk("}\n");
792
793
if (err) {
794
printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
795
if (err & 0x04) printk("DriveStatusError ");
796
if (err & 0x80) {
797
if (err & 0x04) printk("BadCRC ");
798
else printk("Sector ");
799
}
800
if (err & 0x40) printk("UncorrectableError ");
801
if (err & 0x10) printk("SectorIdNotFound ");
802
if (err & 0x02) printk("TrackZeroNotFound ");
803
if (err & 0x01) printk("AddrMarkNotFound ");
804
printk("}\n");
805
}
806
}
807
}
808
809
/**
810
* ata_to_sense_error - convert ATA error to SCSI error
811
* @id: ATA device number
812
* @drv_stat: value contained in ATA status register
813
* @drv_err: value contained in ATA error register
814
* @sk: the sense key we'll fill out
815
* @asc: the additional sense code we'll fill out
816
* @ascq: the additional sense code qualifier we'll fill out
817
* @verbose: be verbose
818
*
819
* Converts an ATA error into a SCSI error. Fill out pointers to
820
* SK, ASC, and ASCQ bytes for later use in fixed or descriptor
821
* format sense blocks.
822
*
823
* LOCKING:
824
* spin_lock_irqsave(host lock)
825
*/
826
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
827
u8 *asc, u8 *ascq, int verbose)
828
{
829
int i;
830
831
/* Based on the 3ware driver translation table */
832
static const unsigned char sense_table[][4] = {
833
/* BBD|ECC|ID|MAR */
834
{0xd1, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
835
/* BBD|ECC|ID */
836
{0xd0, ABORTED_COMMAND, 0x00, 0x00}, // Device busy Aborted command
837
/* ECC|MC|MARK */
838
{0x61, HARDWARE_ERROR, 0x00, 0x00}, // Device fault Hardware error
839
/* ICRC|ABRT */ /* NB: ICRC & !ABRT is BBD */
840
{0x84, ABORTED_COMMAND, 0x47, 0x00}, // Data CRC error SCSI parity error
841
/* MC|ID|ABRT|TRK0|MARK */
842
{0x37, NOT_READY, 0x04, 0x00}, // Unit offline Not ready
843
/* MCR|MARK */
844
{0x09, NOT_READY, 0x04, 0x00}, // Unrecovered disk error Not ready
845
/* Bad address mark */
846
{0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field
847
/* TRK0 */
848
{0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error
849
/* Abort & !ICRC */
850
{0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command
851
/* Media change request */
852
{0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline
853
/* SRV */
854
{0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found
855
/* Media change */
856
{0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline
857
/* ECC */
858
{0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error
859
/* BBD - block marked bad */
860
{0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error
861
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
862
};
863
static const unsigned char stat_table[][4] = {
864
/* Must be first because BUSY means no other bits valid */
865
{0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now
866
{0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault
867
{0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now
868
{0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered
869
{0xFF, 0xFF, 0xFF, 0xFF}, // END mark
870
};
871
872
/*
873
* Is this an error we can process/parse
874
*/
875
if (drv_stat & ATA_BUSY) {
876
drv_err = 0; /* Ignore the err bits, they're invalid */
877
}
878
879
if (drv_err) {
880
/* Look for drv_err */
881
for (i = 0; sense_table[i][0] != 0xFF; i++) {
882
/* Look for best matches first */
883
if ((sense_table[i][0] & drv_err) ==
884
sense_table[i][0]) {
885
*sk = sense_table[i][1];
886
*asc = sense_table[i][2];
887
*ascq = sense_table[i][3];
888
goto translate_done;
889
}
890
}
891
/* No immediate match */
892
if (verbose)
893
printk(KERN_WARNING "ata%u: no sense translation for "
894
"error 0x%02x\n", id, drv_err);
895
}
896
897
/* Fall back to interpreting status bits */
898
for (i = 0; stat_table[i][0] != 0xFF; i++) {
899
if (stat_table[i][0] & drv_stat) {
900
*sk = stat_table[i][1];
901
*asc = stat_table[i][2];
902
*ascq = stat_table[i][3];
903
goto translate_done;
904
}
905
}
906
/* No error? Undecoded? */
907
if (verbose)
908
printk(KERN_WARNING "ata%u: no sense translation for "
909
"status: 0x%02x\n", id, drv_stat);
910
911
/* We need a sensible error return here, which is tricky, and one
912
that won't cause people to do things like return a disk wrongly */
913
*sk = ABORTED_COMMAND;
914
*asc = 0x00;
915
*ascq = 0x00;
916
917
translate_done:
918
if (verbose)
919
printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
920
"to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
921
id, drv_stat, drv_err, *sk, *asc, *ascq);
922
return;
923
}
924
925
/*
926
* ata_gen_passthru_sense - Generate check condition sense block.
927
* @qc: Command that completed.
928
*
929
* This function is specific to the ATA descriptor format sense
930
* block specified for the ATA pass through commands. Regardless
931
* of whether the command errored or not, return a sense
932
* block. Copy all controller registers into the sense
933
* block. Clear sense key, ASC & ASCQ if there is no error.
934
*
935
* LOCKING:
936
* None.
937
*/
938
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
939
{
940
struct scsi_cmnd *cmd = qc->scsicmd;
941
struct ata_taskfile *tf = &qc->result_tf;
942
unsigned char *sb = cmd->sense_buffer;
943
unsigned char *desc = sb + 8;
944
int verbose = qc->ap->ops->error_handler == NULL;
945
946
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
947
948
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
949
950
/*
951
* Use ata_to_sense_error() to map status register bits
952
* onto sense key, asc & ascq.
953
*/
954
if (qc->err_mask ||
955
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
956
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
957
&sb[1], &sb[2], &sb[3], verbose);
958
sb[1] &= 0x0f;
959
}
960
961
/*
962
* Sense data is current and format is descriptor.
963
*/
964
sb[0] = 0x72;
965
966
desc[0] = 0x09;
967
968
/* set length of additional sense data */
969
sb[7] = 14;
970
desc[1] = 12;
971
972
/*
973
* Copy registers into sense buffer.
974
*/
975
desc[2] = 0x00;
976
desc[3] = tf->feature; /* == error reg */
977
desc[5] = tf->nsect;
978
desc[7] = tf->lbal;
979
desc[9] = tf->lbam;
980
desc[11] = tf->lbah;
981
desc[12] = tf->device;
982
desc[13] = tf->command; /* == status reg */
983
984
/*
985
* Fill in Extend bit, and the high order bytes
986
* if applicable.
987
*/
988
if (tf->flags & ATA_TFLAG_LBA48) {
989
desc[2] |= 0x01;
990
desc[4] = tf->hob_nsect;
991
desc[6] = tf->hob_lbal;
992
desc[8] = tf->hob_lbam;
993
desc[10] = tf->hob_lbah;
994
}
995
}
996
997
/**
998
* ata_gen_ata_sense - generate a SCSI fixed sense block
999
* @qc: Command that we are erroring out
1000
*
1001
* Generate sense block for a failed ATA command @qc. Descriptor
1002
* format is used to accommodate LBA48 block address.
1003
*
1004
* LOCKING:
1005
* None.
1006
*/
1007
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
1008
{
1009
struct ata_device *dev = qc->dev;
1010
struct scsi_cmnd *cmd = qc->scsicmd;
1011
struct ata_taskfile *tf = &qc->result_tf;
1012
unsigned char *sb = cmd->sense_buffer;
1013
unsigned char *desc = sb + 8;
1014
int verbose = qc->ap->ops->error_handler == NULL;
1015
u64 block;
1016
1017
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
1018
1019
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1020
1021
/* sense data is current and format is descriptor */
1022
sb[0] = 0x72;
1023
1024
/* Use ata_to_sense_error() to map status register bits
1025
* onto sense key, asc & ascq.
1026
*/
1027
if (qc->err_mask ||
1028
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
1029
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
1030
&sb[1], &sb[2], &sb[3], verbose);
1031
sb[1] &= 0x0f;
1032
}
1033
1034
block = ata_tf_read_block(&qc->result_tf, dev);
1035
1036
/* information sense data descriptor */
1037
sb[7] = 12;
1038
desc[0] = 0x00;
1039
desc[1] = 10;
1040
1041
desc[2] |= 0x80; /* valid */
1042
desc[6] = block >> 40;
1043
desc[7] = block >> 32;
1044
desc[8] = block >> 24;
1045
desc[9] = block >> 16;
1046
desc[10] = block >> 8;
1047
desc[11] = block;
1048
}
1049
1050
static void ata_scsi_sdev_config(struct scsi_device *sdev)
1051
{
1052
sdev->use_10_for_rw = 1;
1053
sdev->use_10_for_ms = 1;
1054
1055
/* Schedule policy is determined by ->qc_defer() callback and
1056
* it needs to see every deferred qc. Set dev_blocked to 1 to
1057
* prevent SCSI midlayer from automatically deferring
1058
* requests.
1059
*/
1060
sdev->max_device_blocked = 1;
1061
}
1062
1063
/**
 *	atapi_drain_needed - Check whether data transfer may overflow
 *	@rq: request to be checked
 *
 *	ATAPI commands which transfer variable length data to host
 *	might overflow due to application error or hardware bug.  This
 *	function checks whether overflow should be drained and ignored
 *	for @rq.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if overflow should be drained and ignored; otherwise, 0.
 */
static int atapi_drain_needed(struct request *rq)
{
	/* Only SG_IO (BLOCK_PC) requests carry user-built CDBs whose
	 * transfer length may not match what the device sends. */
	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
		return 0;

	/* zero-length or write requests can't overflow the host */
	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
		return 0;

	/* drain only for commands classified as ATAPI_MISC */
	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
}
/**
 *	ata_scsi_dev_config - configure SCSI device for an ATA device
 *	@sdev: SCSI device to configure
 *	@dev: ATA device @sdev represents
 *
 *	Apply per-device settings derived from @dev's IDENTIFY data to
 *	@sdev and its request queue: max sectors, logical sector size,
 *	DMA padding and drain buffer for ATAPI, and NCQ queue depth.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM if the ATAPI drain buffer cannot be
 *	allocated.
 */
static int ata_scsi_dev_config(struct scsi_device *sdev,
			       struct ata_device *dev)
{
	struct request_queue *q = sdev->request_queue;

	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
	blk_queue_max_hw_sectors(q, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
		void *buf;

		sdev->sector_size = ATA_SECT_SIZE;

		/* set DMA padding */
		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

		/* configure draining; freed in ata_scsi_slave_destroy() */
		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
		if (!buf) {
			ata_dev_printk(dev, KERN_ERR,
				       "drain buffer allocation failed\n");
			return -ENOMEM;
		}

		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
	} else {
		sdev->sector_size = ata_id_logical_sector_size(dev->id);
		sdev->manage_start_stop = 1;
	}

	/*
	 * ata_pio_sectors() expects buffer for each sector to not cross
	 * page boundary.  Enforce it by requiring buffers to be sector
	 * aligned, which works iff sector_size is not larger than
	 * PAGE_SIZE.  ATAPI devices also need the alignment as
	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
	 */
	if (sdev->sector_size > PAGE_SIZE)
		ata_dev_printk(dev, KERN_WARNING,
			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
			sdev->sector_size);

	blk_queue_update_dma_alignment(q, sdev->sector_size - 1);

	/* enable media change notification if the device supports AN */
	if (dev->flags & ATA_DFLAG_AN)
		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	if (dev->flags & ATA_DFLAG_NCQ) {
		int depth;

		/* clamp to host, device and ATA_MAX_QUEUE - 1 limits */
		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
		depth = min(ATA_MAX_QUEUE - 1, depth);
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	}

	blk_queue_flush_queueable(q, false);

	dev->sdev = sdev;
	return 0;
}
/**
1154
* ata_scsi_slave_config - Set SCSI device attributes
1155
* @sdev: SCSI device to examine
1156
*
1157
* This is called before we actually start reading
1158
* and writing to the device, to configure certain
1159
* SCSI mid-layer behaviors.
1160
*
1161
* LOCKING:
1162
* Defined by SCSI layer. We don't really care.
1163
*/
1164
1165
int ata_scsi_slave_config(struct scsi_device *sdev)
1166
{
1167
struct ata_port *ap = ata_shost_to_port(sdev->host);
1168
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
1169
int rc = 0;
1170
1171
ata_scsi_sdev_config(sdev);
1172
1173
if (dev)
1174
rc = ata_scsi_dev_config(sdev, dev);
1175
1176
return rc;
1177
}
1178
1179
/**
 *	ata_scsi_slave_destroy - SCSI device is about to be destroyed
 *	@sdev: SCSI device to be destroyed
 *
 *	@sdev is about to be destroyed for hot/warm unplugging.  If
 *	this unplugging was initiated by libata as indicated by NULL
 *	dev->sdev, this function doesn't have to do anything.
 *	Otherwise, SCSI layer initiated warm-unplug is in progress.
 *	Clear dev->sdev, schedule the device for ATA detach and invoke
 *	EH.
 *
 *	LOCKING:
 *	Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	struct ata_device *dev;

	/* ports without new-style EH have no hotplug to undo */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* release the ATAPI drain buffer allocated in ata_scsi_dev_config() */
	kfree(q->dma_drain_buffer);
	q->dma_drain_buffer = NULL;
	q->dma_drain_size = 0;
}
/**
 *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
 *	@sdev: SCSI device to configure queue depth for
 *	@queue_depth: new queue depth
 *	@reason: calling context
 *
 *	This is libata standard hostt->change_queue_depth callback.
 *	SCSI will call into this callback when user tries to set queue
 *	depth via sysfs.
 *
 *	LOCKING:
 *	SCSI layer (we don't care)
 *
 *	RETURNS:
 *	Newly configured queue depth.
 */
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
				int reason)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	/* only plain sysfs-initiated depth changes are supported */
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	/* NCQ enabled? */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		/* depth 1, or no NCQ support, turns queueing off */
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);

	/* clamping may have brought us back to the current depth */
	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
	return queue_depth;
}
/**
 *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 *	@qc: Storage for translated ATA taskfile
 *
 *	Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 *	(to start).  Perhaps these commands should be preceded by
 *	CHECK POWER MODE to see what power mode the device is already in.
 *	[See SAT revision 5 at www.t10.org]
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;

	if (scmd->cmd_len < 5)
		goto invalid_fld;

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* ignore IMMED bit, violates sat-r05 */
	}
	if (cdb[4] & 0x2)
		goto invalid_fld;	/* LOEJ bit set not supported */
	if (((cdb[4] >> 4) & 0xf) != 0)
		goto invalid_fld;	/* power conditions not supported */

	if (cdb[4] & 0x1) {
		/* START: spin up by verifying one sector at LBA/CHS 0 */
		tf->nsect = 1;	/* 1 sector, lba=0 */

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS */
			tf->lbal = 0x1; /* sect */
			tf->lbam = 0x0; /* cyl low */
			tf->lbah = 0x0; /* cyl high */
		}

		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
	} else {
		/* Some odd clown BIOSen issue spindown on power off (ACPI S4
		 * or S5) causing some drives to spin up and down again.
		 */
		if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
		    system_state == SYSTEM_POWER_OFF)
			goto skip;

		if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
		    system_entering_hibernation())
			goto skip;

		/* Issue ATA STANDBY IMMEDIATE command */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	/*
	 * Standby and Idle condition timers could be implemented but that
	 * would require libata to implement the Power condition mode page
	 * and allow the user to change it. Changing mode pages requires
	 * MODE SELECT to be implemented.
	 */

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;
skip:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
/**
1360
* ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
1361
* @qc: Storage for translated ATA taskfile
1362
*
1363
* Sets up an ATA taskfile to issue FLUSH CACHE or
1364
* FLUSH CACHE EXT.
1365
*
1366
* LOCKING:
1367
* spin_lock_irqsave(host lock)
1368
*
1369
* RETURNS:
1370
* Zero on success, non-zero on error.
1371
*/
1372
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
1373
{
1374
struct ata_taskfile *tf = &qc->tf;
1375
1376
tf->flags |= ATA_TFLAG_DEVICE;
1377
tf->protocol = ATA_PROT_NODATA;
1378
1379
if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1380
tf->command = ATA_CMD_FLUSH_EXT;
1381
else
1382
tf->command = ATA_CMD_FLUSH;
1383
1384
/* flush is critical for IO integrity, consider it an IO command */
1385
qc->flags |= ATA_QCFLAG_IO;
1386
1387
return 0;
1388
}
1389
1390
/**
1391
* scsi_6_lba_len - Get LBA and transfer length
1392
* @cdb: SCSI command to translate
1393
*
1394
* Calculate LBA and transfer length for 6-byte commands.
1395
*
1396
* RETURNS:
1397
* @plba: the LBA
1398
* @plen: the transfer length
1399
*/
1400
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1401
{
1402
u64 lba = 0;
1403
u32 len;
1404
1405
VPRINTK("six-byte command\n");
1406
1407
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
1408
lba |= ((u64)cdb[2]) << 8;
1409
lba |= ((u64)cdb[3]);
1410
1411
len = cdb[4];
1412
1413
*plba = lba;
1414
*plen = len;
1415
}
1416
1417
/**
1418
* scsi_10_lba_len - Get LBA and transfer length
1419
* @cdb: SCSI command to translate
1420
*
1421
* Calculate LBA and transfer length for 10-byte commands.
1422
*
1423
* RETURNS:
1424
* @plba: the LBA
1425
* @plen: the transfer length
1426
*/
1427
static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1428
{
1429
u64 lba = 0;
1430
u32 len = 0;
1431
1432
VPRINTK("ten-byte command\n");
1433
1434
lba |= ((u64)cdb[2]) << 24;
1435
lba |= ((u64)cdb[3]) << 16;
1436
lba |= ((u64)cdb[4]) << 8;
1437
lba |= ((u64)cdb[5]);
1438
1439
len |= ((u32)cdb[7]) << 8;
1440
len |= ((u32)cdb[8]);
1441
1442
*plba = lba;
1443
*plen = len;
1444
}
1445
1446
/**
1447
* scsi_16_lba_len - Get LBA and transfer length
1448
* @cdb: SCSI command to translate
1449
*
1450
* Calculate LBA and transfer length for 16-byte commands.
1451
*
1452
* RETURNS:
1453
* @plba: the LBA
1454
* @plen: the transfer length
1455
*/
1456
static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1457
{
1458
u64 lba = 0;
1459
u32 len = 0;
1460
1461
VPRINTK("sixteen-byte command\n");
1462
1463
lba |= ((u64)cdb[2]) << 56;
1464
lba |= ((u64)cdb[3]) << 48;
1465
lba |= ((u64)cdb[4]) << 40;
1466
lba |= ((u64)cdb[5]) << 32;
1467
lba |= ((u64)cdb[6]) << 24;
1468
lba |= ((u64)cdb[7]) << 16;
1469
lba |= ((u64)cdb[8]) << 8;
1470
lba |= ((u64)cdb[9]);
1471
1472
len |= ((u32)cdb[10]) << 24;
1473
len |= ((u32)cdb[11]) << 16;
1474
len |= ((u32)cdb[12]) << 8;
1475
len |= ((u32)cdb[13]);
1476
1477
*plba = lba;
1478
*plen = len;
1479
}
1480
1481
/**
 *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 *	@qc: Storage for translated ATA taskfile
 *
 *	Converts SCSI VERIFY command to an ATA READ VERIFY command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	/* decode LBA and length from the 10- or 16-byte CDB */
	if (cdb[0] == VERIFY) {
		if (scmd->cmd_len < 10)
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
	} else if (cdb[0] == VERIFY_16) {
		if (scmd->cmd_len < 16)
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
	} else
		goto invalid_fld;

	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		/* low bytes are common to LBA28 and LBA48 */
		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
/**
 *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 *	@qc: Storage for translated ATA taskfile
 *
 *	Converts any of six SCSI read/write commands into the
 *	ATA counterpart, including starting sector (LBA),
 *	sector count, and taking into account the device's LBA48
 *	support.
 *
 *	Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 *	%WRITE_16 are currently supported.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
	u64 block;
	u32 n_block;
	int rc;

	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
		tf_flags |= ATA_TFLAG_WRITE;

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10))
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
		/* CDB byte 1 bit 3 is the FUA (force unit access) bit */
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6))
			goto invalid_fld;
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16))
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	default:
		DPRINTK("no-byte command\n");
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * scmd->device->sector_size;

	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->tag);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
/**
 *	ata_scsi_qc_complete - Completion callback for translated commands
 *	@qc: Completed command
 *
 *	Generate sense data if required (always for ATA passthru when
 *	the user asked for it, otherwise only on error), set the SCSI
 *	command result, hand the command back to the midlayer via
 *	->scsidone and free @qc.
 */
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to, a check condition
	 * is generated and the ATA register values are returned
	 * whether the command completed successfully or not. If there
	 * was no error, SK, ASC and ASCQ will all be zero.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense)) {
		ata_gen_passthru_sense(qc);
	} else {
		if (!need_sense) {
			cmd->result = SAM_STAT_GOOD;
		} else {
			/* TODO: decide which descriptor format to use
			 * for 48b LBA devices and call that here
			 * instead of the fixed desc, which is only
			 * good for smaller LBA (and maybe CHS?)
			 * devices.
			 */
			ata_gen_ata_sense(qc);
		}
	}

	/* without new-style EH, dump the status for debugging */
	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap->print_id, &qc->result_tf);

	qc->scsidone(cmd);

	ata_qc_free(qc);
}
/**
 *	ata_scsi_translate - Translate then issue SCSI command to ATA device
 *	@dev: ATA device to which the command is addressed
 *	@cmd: SCSI command to execute
 *	@xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 *	Our ->queuecommand() function has decided that the SCSI
 *	command issued can be directly translated into an ATA
 *	command, rather than handled internally.
 *
 *	This function sets up an ata_queued_cmd structure for the
 *	SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 *	The xlat_func argument (actor) returns 0 if ready to execute
 *	ATA command, else 1 to finish translation. If 1 is returned
 *	then cmd->result (and possibly cmd->sense_buffer) are assumed
 *	to be set reflecting an error condition or clean (early)
 *	termination.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	0 on success, SCSI_MLQUEUE_DEVICE_BUSY or SCSI_MLQUEUE_HOST_BUSY
 *	if the command needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	VPRINTK("ENTER\n");

	qc = ata_scsi_qc_new(dev, cmd);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	/* non-zero means translation finished early; cmd->result is set */
	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	VPRINTK("EXIT\n");
	return 0;

early_finish:
	ata_qc_free(qc);
	cmd->scsi_done(cmd);
	DPRINTK("EXIT - early finish (good or error)\n");
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	cmd->scsi_done(cmd);
err_mem:
	DPRINTK("EXIT - internal\n");
	return 0;

defer:
	ata_qc_free(qc);
	DPRINTK("EXIT - defer\n");
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}
/**
 *	ata_scsi_rbuf_get - Map response buffer.
 *	@cmd: SCSI command containing buffer to be mapped.
 *	@copy_in: copy the command's data into the buffer first
 *	@flags: unsigned long variable to store irq enable status
 *
 *	Prepare buffer for simulated SCSI commands.
 *
 *	LOCKING:
 *	spin_lock_irqsave(ata_scsi_rbuf_lock) on success — the lock is
 *	held on return and released by ata_scsi_rbuf_put().
 *
 *	RETURNS:
 *	Pointer to response buffer.
 */
static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
			       unsigned long *flags)
{
	spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);

	/* start from a clean buffer each time */
	memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
	if (copy_in)
		sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				  ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
	return ata_scsi_rbuf;
}
/**
 *	ata_scsi_rbuf_put - Unmap response buffer.
 *	@cmd: SCSI command containing buffer to be unmapped.
 *	@copy_out: copy out result
 *	@flags: @flags passed to ata_scsi_rbuf_get()
 *
 *	Returns rbuf buffer.  The result is copied to @cmd's buffer if
 *	@copy_out is true.
 *
 *	LOCKING:
 *	Unlocks ata_scsi_rbuf_lock.
 */
static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
				     unsigned long *flags)
{
	if (copy_out)
		sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
				    ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
	spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
}
/**
 *	ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@actor: Callback hook for desired SCSI command simulator
 *
 *	Takes care of the hard work of simulating a SCSI command...
 *	Mapping the response buffer, calling the command's handler,
 *	and handling the handler's return value.  This return value
 *	indicates whether the handler wishes the SCSI command to be
 *	completed successfully (0), or not (in which case cmd->result
 *	and sense buffer are assumed to be set).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
		unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
{
	u8 *rbuf;
	unsigned int rc;
	struct scsi_cmnd *cmd = args->cmd;
	unsigned long flags;

	rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
	rc = actor(args, rbuf);
	/* only copy the simulated response out on success */
	ata_scsi_rbuf_put(cmd, rc == 0, &flags);

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
	args->done(cmd);
}
/**
 *	ata_scsiop_inq_std - Simulate INQUIRY command
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Returns standard device identification data associated
 *	with non-VPD INQUIRY command output.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
	/* version descriptors claiming SAM-3, SBC-2 and SPC-3 */
	const u8 versions[] = {
		0x60,	/* SAM-3 (no version claimed) */

		0x03,
		0x20,	/* SBC-2 (no version claimed) */

		0x02,
		0x60	/* SPC-3 (no version claimed) */
	};
	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		95 - 4	/* additional length */
	};

	VPRINTK("ENTER\n");

	/* set scsi removeable (RMB) bit per ata bit */
	if (ata_id_removeable(args->id))
		hdr[1] |= (1 << 7);

	/* header, then vendor (fixed "ATA"), product and firmware rev */
	memcpy(rbuf, hdr, sizeof(hdr));
	memcpy(&rbuf[8], "ATA     ", 8);
	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);

	if (rbuf[32] == 0 || rbuf[32] == ' ')
		memcpy(&rbuf[32], "n/a ", 4);

	memcpy(rbuf + 59, versions, sizeof(versions));

	return 0;
}
/**
1965
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
1966
* @args: device IDENTIFY data / SCSI command of interest.
1967
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1968
*
1969
* Returns list of inquiry VPD pages available.
1970
*
1971
* LOCKING:
1972
* spin_lock_irqsave(host lock)
1973
*/
1974
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
1975
{
1976
const u8 pages[] = {
1977
0x00, /* page 0x00, this page */
1978
0x80, /* page 0x80, unit serial no page */
1979
0x83, /* page 0x83, device ident page */
1980
0x89, /* page 0x89, ata info page */
1981
0xb0, /* page 0xb0, block limits page */
1982
0xb1, /* page 0xb1, block device characteristics page */
1983
0xb2, /* page 0xb2, thin provisioning page */
1984
};
1985
1986
rbuf[3] = sizeof(pages); /* number of supported VPD pages */
1987
memcpy(rbuf + 4, pages, sizeof(pages));
1988
return 0;
1989
}
1990
1991
/**
1992
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
1993
* @args: device IDENTIFY data / SCSI command of interest.
1994
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
1995
*
1996
* Returns ATA device serial number.
1997
*
1998
* LOCKING:
1999
* spin_lock_irqsave(host lock)
2000
*/
2001
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
2002
{
2003
const u8 hdr[] = {
2004
0,
2005
0x80, /* this page code */
2006
0,
2007
ATA_ID_SERNO_LEN, /* page len */
2008
};
2009
2010
memcpy(rbuf, hdr, sizeof(hdr));
2011
ata_id_string(args->id, (unsigned char *) &rbuf[4],
2012
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
2013
return 0;
2014
}
2015
2016
/**
 *	ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Yields two logical unit device identification designators:
 *	 - vendor specific ASCII containing the ATA serial number
 *	 - SAT defined "t10 vendor id based" containing ASCII vendor
 *	   name ("ATA     "), model and serial numbers.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
{
	const int sat_model_serial_desc_len = 68;
	int num;	/* running offset into rbuf */

	rbuf[1] = 0x83;			/* this page code */
	num = 4;

	/* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
	rbuf[num + 0] = 2;
	rbuf[num + 3] = ATA_ID_SERNO_LEN;
	num += 4;
	ata_id_string(args->id, (unsigned char *) rbuf + num,
		      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	/* SAT defined lu model and serial numbers descriptor */
	/* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */
	rbuf[num + 0] = 2;
	rbuf[num + 1] = 1;
	rbuf[num + 3] = sat_model_serial_desc_len;
	num += 4;
	memcpy(rbuf + num, "ATA     ", 8);
	num += 8;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
		      ATA_ID_PROD_LEN);
	num += ATA_ID_PROD_LEN;
	ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
		      ATA_ID_SERNO_LEN);
	num += ATA_ID_SERNO_LEN;

	if (ata_id_has_wwn(args->id)) {
		/* SAT defined lu world wide name */
		/* piv=0, assoc=lu, code_set=binary, designator=NAA */
		rbuf[num + 0] = 1;
		rbuf[num + 1] = 3;
		rbuf[num + 3] = ATA_ID_WWN_LEN;
		num += 4;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_WWN, ATA_ID_WWN_LEN);
		num += ATA_ID_WWN_LEN;
	}
	rbuf[3] = num - 4;    /* page len (assume less than 256 bytes) */
	return 0;
}
/**
 *	ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Yields SAT-specified ATA VPD page.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));

	rbuf[1] = 0x89;			/* our page code */
	rbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
	rbuf[3] = (0x238 & 0xff);

	memcpy(&rbuf[8], "linux   ", 8);
	memcpy(&rbuf[16], "libata          ", 16);
	memcpy(&rbuf[32], DRV_VERSION, 4);
	/* NOTE(review): the next line immediately overwrites the
	 * DRV_VERSION bytes just copied to rbuf[32] with the drive's
	 * firmware revision — confirm which value SAT wants in bytes
	 * 32-35 before changing either line.
	 */
	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);

	/* we don't store the ATA device signature, so we fake it */

	tf.command = ATA_DRDY;		/* really, this is Status reg */
	tf.lbal = 0x1;
	tf.nsect = 0x1;

	ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);	/* TODO: PMP? */
	rbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */

	rbuf[56] = ATA_CMD_ID_ATA;

	/* raw IDENTIFY data follows at byte 60 */
	memcpy(&rbuf[60], &args->id[0], 512);
	return 0;
}
/* Simulate INQUIRY VPD page 0xb0, Block Limits. */
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
	u16 min_io_sectors;

	rbuf[1] = 0xb0;
	rbuf[3] = 0x3c;		/* required VPD size with unmap support */

	/*
	 * Optimal transfer length granularity.
	 *
	 * This is always one physical block, but for disks with a smaller
	 * logical than physical sector size we need to figure out what the
	 * latter is.
	 */
	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
	put_unaligned_be16(min_io_sectors, &rbuf[6]);

	/*
	 * Optimal unmap granularity.
	 *
	 * The ATA spec doesn't even know about a granularity or alignment
	 * for the TRIM command.  We can leave away most of the unmap related
	 * VPD page entries, but we have to specify a granularity to signal
	 * that we support some form of unmap - in this case via WRITE SAME
	 * with the unmap bit set.
	 */
	if (ata_id_has_trim(args->id)) {
		put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
		put_unaligned_be32(1, &rbuf[28]);
	}

	return 0;
}
/* Simulate INQUIRY VPD page 0xb1, Block Device Characteristics. */
static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[1] = 0xb1;		/* page code */
	rbuf[3] = 0x3c;		/* page length */

	/* medium rotation rate, big endian at bytes 4-5 */
	put_unaligned_be16(ata_id_rotation_rate(args->id), &rbuf[4]);

	/* nominal form factor */
	rbuf[7] = ata_id_form_factor(args->id);

	return 0;
}
/* Simulate INQUIRY VPD page 0xb2, Thin Provisioning (SBC-3 rev 22+). */
static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
{
	rbuf[1] = 0xb2;		/* page code */
	rbuf[3] = 0x4;		/* page length */
	rbuf[5] = 1 << 6;	/* TPWS: unmap via WRITE SAME */

	return 0;
}
/**
 *	ata_scsiop_noop - Command handler that simply returns success.
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	No operation.  Simply returns success to caller, to indicate
 *	that the caller should successfully complete this SCSI command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
{
	VPRINTK("ENTER\n");
	return 0;
}
/**
2191
* ata_msense_caching - Simulate MODE SENSE caching info page
2192
* @id: device IDENTIFY data
2193
* @buf: output buffer
2194
*
2195
* Generate a caching info page, which conditionally indicates
2196
* write caching to the SCSI layer, depending on device
2197
* capabilities.
2198
*
2199
* LOCKING:
2200
* None.
2201
*/
2202
static unsigned int ata_msense_caching(u16 *id, u8 *buf)
2203
{
2204
memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
2205
if (ata_id_wcache_enabled(id))
2206
buf[2] |= (1 << 2); /* write cache enable */
2207
if (!ata_id_rahead_enabled(id))
2208
buf[12] |= (1 << 5); /* disable read ahead */
2209
return sizeof(def_cache_mpage);
2210
}
2211
2212
/**
 *	ata_msense_ctl_mode - Simulate MODE SENSE control mode page
 *	@buf: output buffer
 *
 *	Generate a generic MODE SENSE control mode page by copying the
 *	static default page verbatim.
 *
 *	LOCKING:
 *	None.
 */
static unsigned int ata_msense_ctl_mode(u8 *buf)
{
	memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
	return sizeof(def_control_mpage);
}
2226
2227
/**
 *	ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
 *	@buf: output buffer
 *
 *	Generate a generic MODE SENSE r/w error recovery page by copying
 *	the static default page verbatim.
 *
 *	LOCKING:
 *	None.
 */
static unsigned int ata_msense_rw_recovery(u8 *buf)
{
	memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
	return sizeof(def_rw_recovery_mpage);
}
2241
2242
/*
2243
* We can turn this into a real blacklist if it's needed, for now just
2244
* blacklist any Maxtor BANC1G10 revision firmware
2245
*/
2246
static int ata_dev_supports_fua(u16 *id)
2247
{
2248
unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
2249
2250
if (!libata_fua)
2251
return 0;
2252
if (!ata_id_has_fua(id))
2253
return 0;
2254
2255
ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
2256
ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
2257
2258
if (strcmp(model, "Maxtor"))
2259
return 1;
2260
if (strcmp(fw, "BANC1G10"))
2261
return 1;
2262
2263
return 0; /* blacklisted */
2264
}
2265
2266
/**
 *	ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Simulate MODE SENSE commands. Assume this is invoked for direct
 *	access devices (e.g. disks) only. There should be no block
 *	descriptor for other device types.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
	struct ata_device *dev = args->dev;
	u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
	const u8 sat_blk_desc[] = {
		0, 0, 0, 0,	/* number of blocks: sat unspecified */
		0,
		0, 0x2, 0x0	/* block length: 512 bytes */
	};
	u8 pg, spg;
	unsigned int ebd, page_control, six_byte;
	u8 dpofua;

	VPRINTK("ENTER\n");

	six_byte = (scsicmd[0] == MODE_SENSE);
	ebd = !(scsicmd[1] & 0x8);	/* dbd bit inverted == edb */
	/*
	 * LLBA bit in msense(10) ignored (compliant)
	 */

	page_control = scsicmd[2] >> 6;
	switch (page_control) {
	case 0: /* current */
		break;     /* supported */
	case 3: /* saved */
		goto saving_not_supp;
	case 1: /* changeable */
	case 2: /* defaults */
	default:
		goto invalid_fld;
	}

	/* skip the mode parameter header (and block descriptor, if any);
	 * mode pages are appended after it and the header is filled in
	 * at the end once the total length is known.
	 */
	if (six_byte)
		p += 4 + (ebd ? 8 : 0);
	else
		p += 8 + (ebd ? 8 : 0);

	pg = scsicmd[2] & 0x3f;
	spg = scsicmd[3];
	/*
	 * No mode subpages supported (yet) but asking for _all_
	 * subpages may be valid
	 */
	if (spg && (spg != ALL_SUB_MPAGES))
		goto invalid_fld;

	switch(pg) {
	case RW_RECOVERY_MPAGE:
		p += ata_msense_rw_recovery(p);
		break;

	case CACHE_MPAGE:
		p += ata_msense_caching(args->id, p);
		break;

	case CONTROL_MPAGE:
		p += ata_msense_ctl_mode(p);
		break;

	case ALL_MPAGES:
		p += ata_msense_rw_recovery(p);
		p += ata_msense_caching(args->id, p);
		p += ata_msense_ctl_mode(p);
		break;

	default:		/* invalid page code */
		goto invalid_fld;
	}

	/* DPOFUA: only advertise FUA when the device supports it, speaks
	 * LBA48 and (if in PIO mode) can do multi-sector transfers.
	 */
	dpofua = 0;
	if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
	    (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
		dpofua = 1 << 4;

	if (six_byte) {
		rbuf[0] = p - rbuf - 1;		/* mode data length */
		rbuf[2] |= dpofua;
		if (ebd) {
			rbuf[3] = sizeof(sat_blk_desc);
			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
		}
	} else {
		unsigned int output_len = p - rbuf - 2;

		rbuf[0] = output_len >> 8;	/* mode data length, 16-bit */
		rbuf[1] = output_len;
		rbuf[3] |= dpofua;
		if (ebd) {
			rbuf[7] = sizeof(sat_blk_desc);
			memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
		}
	}
	return 0;

invalid_fld:
	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

saving_not_supp:
	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
	/* "Saving parameters not supported" */
	return 1;
}
2383
2384
/**
2385
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
2386
* @args: device IDENTIFY data / SCSI command of interest.
2387
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
2388
*
2389
* Simulate READ CAPACITY commands.
2390
*
2391
* LOCKING:
2392
* None.
2393
*/
2394
static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2395
{
2396
struct ata_device *dev = args->dev;
2397
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
2398
u32 sector_size; /* physical sector size in bytes */
2399
u8 log2_per_phys;
2400
u16 lowest_aligned;
2401
2402
sector_size = ata_id_logical_sector_size(dev->id);
2403
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
2404
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
2405
2406
VPRINTK("ENTER\n");
2407
2408
if (args->cmd->cmnd[0] == READ_CAPACITY) {
2409
if (last_lba >= 0xffffffffULL)
2410
last_lba = 0xffffffff;
2411
2412
/* sector count, 32-bit */
2413
rbuf[0] = last_lba >> (8 * 3);
2414
rbuf[1] = last_lba >> (8 * 2);
2415
rbuf[2] = last_lba >> (8 * 1);
2416
rbuf[3] = last_lba;
2417
2418
/* sector size */
2419
rbuf[4] = sector_size >> (8 * 3);
2420
rbuf[5] = sector_size >> (8 * 2);
2421
rbuf[6] = sector_size >> (8 * 1);
2422
rbuf[7] = sector_size;
2423
} else {
2424
/* sector count, 64-bit */
2425
rbuf[0] = last_lba >> (8 * 7);
2426
rbuf[1] = last_lba >> (8 * 6);
2427
rbuf[2] = last_lba >> (8 * 5);
2428
rbuf[3] = last_lba >> (8 * 4);
2429
rbuf[4] = last_lba >> (8 * 3);
2430
rbuf[5] = last_lba >> (8 * 2);
2431
rbuf[6] = last_lba >> (8 * 1);
2432
rbuf[7] = last_lba;
2433
2434
/* sector size */
2435
rbuf[ 8] = sector_size >> (8 * 3);
2436
rbuf[ 9] = sector_size >> (8 * 2);
2437
rbuf[10] = sector_size >> (8 * 1);
2438
rbuf[11] = sector_size;
2439
2440
rbuf[12] = 0;
2441
rbuf[13] = log2_per_phys;
2442
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2443
rbuf[15] = lowest_aligned;
2444
2445
if (ata_id_has_trim(args->id)) {
2446
rbuf[14] |= 0x80; /* TPE */
2447
2448
if (ata_id_has_zero_after_trim(args->id))
2449
rbuf[14] |= 0x40; /* TPRZ */
2450
}
2451
}
2452
2453
return 0;
2454
}
2455
2456
/**
 *	ata_scsiop_report_luns - Simulate REPORT LUNS command
 *	@args: device IDENTIFY data / SCSI command of interest.
 *	@rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 *
 *	Simulate REPORT LUNS command.  Each simulated target exposes
 *	exactly one LUN (LUN 0); the remainder of the response stays
 *	zeroed by the caller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
	VPRINTK("ENTER\n");
	rbuf[3] = 8;	/* just one lun, LUN 0, size 8 bytes */

	return 0;
}
2473
2474
/* Completion callback for the internally-issued ATAPI REQUEST SENSE:
 * generate sense data for non-device errors, then finish the original
 * SCSI command and release the qc.
 */
static void atapi_sense_complete(struct ata_queued_cmd *qc)
{
	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	}

	qc->scsidone(qc->scsicmd);
	ata_qc_free(qc);
}
2488
2489
/* is it pointless to prefer PIO for "safety reasons"? */
static inline int ata_pio_use_silly(struct ata_port *ap)
{
	/* nonzero iff the port advertises PIO-via-DMA capability */
	return (ap->flags & ATA_FLAG_PIO_DMA);
}
2494
2495
/* Reuse a failed qc to issue an ATAPI REQUEST SENSE into the SCSI
 * command's sense buffer.  Completion continues in
 * atapi_sense_complete().
 */
static void atapi_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

#ifdef CONFIG_ATA_SFF
	if (ap->ops->sff_tf_read)
		ap->ops->sff_tf_read(ap, &qc->tf);
#endif

	/* fill these in, for the case where they are -not- overwritten */
	cmd->sense_buffer[0] = 0x70;			/* fixed-format sense */
	cmd->sense_buffer[2] = qc->tf.feature >> 4;	/* sense key from ATA error reg */

	/* reset the qc so it can carry the REQUEST SENSE command */
	ata_qc_reinit(qc);

	/* setup sg table and init transfer direction */
	sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
	ata_sg_init(qc, &qc->sgent, 1);
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, qc->dev->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;

	if (ata_pio_use_silly(ap)) {
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;
	} else {
		qc->tf.protocol = ATAPI_PROT_PIO;
		/* byte count limit for PIO transfers (lbam/lbah pair) */
		qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
		qc->tf.lbah = 0;
	}
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	qc->complete_fn = atapi_sense_complete;

	ata_qc_issue(qc);

	DPRINTK("EXIT\n");
}
2544
2545
/* Completion callback for translated ATAPI commands: route errors to
 * the appropriate EH/sense path, fix up INQUIRY responses on success,
 * then finish the SCSI command and free the qc.
 */
static void atapi_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	unsigned int err_mask = qc->err_mask;

	VPRINTK("ENTER, err_mask 0x%X\n", err_mask);

	/* handle completion from new EH */
	if (unlikely(qc->ap->ops->error_handler &&
		     (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {

		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
			/* FIXME: not quite right; we don't want the
			 * translation of taskfile registers into a
			 * sense descriptors, since that's only
			 * correct for ATA, not ATAPI
			 */
			ata_gen_passthru_sense(qc);
		}

		/* SCSI EH automatically locks door if sdev->locked is
		 * set.  Sometimes door lock request continues to
		 * fail, for example, when no media is present.  This
		 * creates a loop - SCSI EH issues door lock which
		 * fails and gets invoked again to acquire sense data
		 * for the failed command.
		 *
		 * If door lock fails, always clear sdev->locked to
		 * avoid this infinite loop.
		 *
		 * This may happen before SCSI scan is complete.  Make
		 * sure qc->dev->sdev isn't NULL before dereferencing.
		 */
		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
			qc->dev->sdev->locked = 0;

		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
		qc->scsidone(cmd);
		ata_qc_free(qc);
		return;
	}

	/* successful completion or old EH failure path */
	if (unlikely(err_mask & AC_ERR_DEV)) {
		/* device error: fetch real sense data from the device;
		 * atapi_request_sense() takes over the qc and completes it.
		 */
		cmd->result = SAM_STAT_CHECK_CONDITION;
		atapi_request_sense(qc);
		return;
	} else if (unlikely(err_mask)) {
		/* FIXME: not quite right; we don't want the
		 * translation of taskfile registers into
		 * a sense descriptors, since that's only
		 * correct for ATA, not ATAPI
		 */
		ata_gen_passthru_sense(qc);
	} else {
		u8 *scsicmd = cmd->cmnd;

		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
			unsigned long flags;
			u8 *buf;

			buf = ata_scsi_rbuf_get(cmd, true, &flags);

			/* ATAPI devices typically report zero for their SCSI version,
			 * and sometimes deviate from the spec WRT response data
			 * format.  If SCSI version is reported as zero like normal,
			 * then we make the following fixups:  1) Fake MMC-5 version,
			 * to indicate to the Linux scsi midlayer this is a modern
			 * device.  2) Ensure response data format / ATAPI information
			 * are always correct.
			 */
			if (buf[2] == 0) {
				buf[2] = 0x5;
				buf[3] = 0x32;
			}

			ata_scsi_rbuf_put(cmd, true, &flags);
		}

		cmd->result = SAM_STAT_GOOD;
	}

	qc->scsidone(cmd);
	ata_qc_free(qc);
}
2630
/**
 *	atapi_xlat - Initialize PACKET taskfile
 *	@qc: command structure to be initialized
 *
 *	Copies the SCSI CDB into the qc, picks a transfer protocol
 *	(nodata / PIO / DMA) and fills in the PACKET taskfile fields.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	int nodata = (scmd->sc_data_direction == DMA_NONE);
	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
	unsigned int nbytes;

	/* zero-pad the CDB out to the device's packet length */
	memset(qc->cdb, 0, dev->cdb_len);
	memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);

	qc->complete_fn = atapi_qc_complete;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		qc->tf.flags |= ATA_TFLAG_WRITE;
		DPRINTK("direction: write\n");
	}

	qc->tf.command = ATA_CMD_PACKET;
	ata_qc_set_pc_nbytes(qc);

	/* check whether ATAPI DMA is safe */
	if (!nodata && !using_pio && atapi_check_dma(qc))
		using_pio = 1;

	/* Some controller variants snoop this value for Packet
	 * transfers to do state machine and FIFO management.  Thus we
	 * want to set it properly, and for DMA where it is
	 * effectively meaningless.
	 */
	nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);

	/* Most ATAPI devices which honor transfer chunk size don't
	 * behave according to the spec when odd chunk size which
	 * matches the transfer length is specified.  If the number of
	 * bytes to transfer is 2n+1.  According to the spec, what
	 * should happen is to indicate that 2n+1 is going to be
	 * transferred and transfer 2n+2 bytes where the last byte is
	 * padding.
	 *
	 * In practice, this doesn't happen.  ATAPI devices first
	 * indicate and transfer 2n bytes and then indicate and
	 * transfer 2 bytes where the last byte is padding.
	 *
	 * This inconsistency confuses several controllers which
	 * perform PIO using DMA such as Intel AHCIs and sil3124/32.
	 * These controllers use actual number of transferred bytes to
	 * update DMA pointer and transfer of 4n+2 bytes make those
	 * controller push DMA pointer by 4n+4 bytes because SATA data
	 * FISes are aligned to 4 bytes.  This causes data corruption
	 * and buffer overrun.
	 *
	 * Always setting nbytes to even number solves this problem
	 * because then ATAPI devices don't have to split data at 2n
	 * boundaries.
	 */
	if (nbytes & 0x1)
		nbytes++;

	/* byte count limit goes in the cylinder (lbam/lbah) registers */
	qc->tf.lbam = (nbytes & 0xFF);
	qc->tf.lbah = (nbytes >> 8);

	if (nodata)
		qc->tf.protocol = ATAPI_PROT_NODATA;
	else if (using_pio)
		qc->tf.protocol = ATAPI_PROT_PIO;
	else {
		/* DMA data xfer */
		qc->tf.protocol = ATAPI_PROT_DMA;
		qc->tf.feature |= ATAPI_PKT_DMA;

		if ((dev->flags & ATA_DFLAG_DMADIR) &&
		    (scmd->sc_data_direction != DMA_TO_DEVICE))
			/* some SATA bridges need us to indicate data xfer direction */
			qc->tf.feature |= ATAPI_DMADIR;
	}


	/* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE
	   as ATAPI tape drives don't get this right otherwise */
	return 0;
}
2723
2724
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2725
{
2726
if (!sata_pmp_attached(ap)) {
2727
if (likely(devno < ata_link_max_devices(&ap->link)))
2728
return &ap->link.device[devno];
2729
} else {
2730
if (likely(devno < ap->nr_pmp_links))
2731
return &ap->pmp_link[devno].device[0];
2732
}
2733
2734
return NULL;
2735
}
2736
2737
static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2738
const struct scsi_device *scsidev)
2739
{
2740
int devno;
2741
2742
/* skip commands not addressed to targets we simulate */
2743
if (!sata_pmp_attached(ap)) {
2744
if (unlikely(scsidev->channel || scsidev->lun))
2745
return NULL;
2746
devno = scsidev->id;
2747
} else {
2748
if (unlikely(scsidev->id || scsidev->lun))
2749
return NULL;
2750
devno = scsidev->channel;
2751
}
2752
2753
return ata_find_dev(ap, devno);
2754
}
2755
2756
/**
2757
* ata_scsi_find_dev - lookup ata_device from scsi_cmnd
2758
* @ap: ATA port to which the device is attached
2759
* @scsidev: SCSI device from which we derive the ATA device
2760
*
2761
* Given various information provided in struct scsi_cmnd,
2762
* map that onto an ATA bus, and using that mapping
2763
* determine which ata_device is associated with the
2764
* SCSI command to be sent.
2765
*
2766
* LOCKING:
2767
* spin_lock_irqsave(host lock)
2768
*
2769
* RETURNS:
2770
* Associated ATA device, or %NULL if not found.
2771
*/
2772
static struct ata_device *
2773
ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2774
{
2775
struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2776
2777
if (unlikely(!dev || !ata_dev_enabled(dev)))
2778
return NULL;
2779
2780
return dev;
2781
}
2782
2783
/*
2784
* ata_scsi_map_proto - Map pass-thru protocol value to taskfile value.
2785
* @byte1: Byte 1 from pass-thru CDB.
2786
*
2787
* RETURNS:
2788
* ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise.
2789
*/
2790
static u8
2791
ata_scsi_map_proto(u8 byte1)
2792
{
2793
switch((byte1 & 0x1e) >> 1) {
2794
case 3: /* Non-data */
2795
return ATA_PROT_NODATA;
2796
2797
case 6: /* DMA */
2798
case 10: /* UDMA Data-in */
2799
case 11: /* UDMA Data-Out */
2800
return ATA_PROT_DMA;
2801
2802
case 4: /* PIO Data-in */
2803
case 5: /* PIO Data-out */
2804
return ATA_PROT_PIO;
2805
2806
case 0: /* Hard Reset */
2807
case 1: /* SRST */
2808
case 8: /* Device Diagnostic */
2809
case 9: /* Device Reset */
2810
case 7: /* DMA Queued */
2811
case 12: /* FPDMA */
2812
case 15: /* Return Response Info */
2813
default: /* Reserved */
2814
break;
2815
}
2816
2817
return ATA_PROT_UNKNOWN;
2818
}
2819
2820
/**
 *	ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
 *	@qc: command structure to be initialized
 *
 *	Handles either 12 or 16-byte versions of the CDB.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &(qc->tf);
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;

	if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
		goto invalid_fld;

	/*
	 * 12 and 16 byte CDBs use different offsets to
	 * provide the various register values.
	 */
	if (cdb[0] == ATA_16) {
		/*
		 * 16-byte CDB - may contain extended commands.
		 *
		 * If that is the case, copy the upper byte register values.
		 */
		if (cdb[1] & 0x01) {
			tf->hob_feature = cdb[3];
			tf->hob_nsect = cdb[5];
			tf->hob_lbal = cdb[7];
			tf->hob_lbam = cdb[9];
			tf->hob_lbah = cdb[11];
			tf->flags |= ATA_TFLAG_LBA48;
		} else
			tf->flags &= ~ATA_TFLAG_LBA48;

		/*
		 * Always copy low byte, device and command registers.
		 */
		tf->feature = cdb[4];
		tf->nsect = cdb[6];
		tf->lbal = cdb[8];
		tf->lbam = cdb[10];
		tf->lbah = cdb[12];
		tf->device = cdb[13];
		tf->command = cdb[14];
	} else {
		/*
		 * 12-byte CDB - incapable of extended commands.
		 */
		tf->flags &= ~ATA_TFLAG_LBA48;

		tf->feature = cdb[3];
		tf->nsect = cdb[4];
		tf->lbal = cdb[5];
		tf->lbam = cdb[6];
		tf->lbah = cdb[7];
		tf->device = cdb[8];
		tf->command = cdb[9];
	}

	/* enforce correct master/slave bit */
	tf->device = dev->devno ?
		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

	/* pick the per-transfer sector size the command implies */
	switch (tf->command) {
	/* READ/WRITE LONG use a non-standard sect_size */
	case ATA_CMD_READ_LONG:
	case ATA_CMD_READ_LONG_ONCE:
	case ATA_CMD_WRITE_LONG:
	case ATA_CMD_WRITE_LONG_ONCE:
		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
			goto invalid_fld;
		qc->sect_size = scsi_bufflen(scmd);
		break;

	/* commands using reported Logical Block size (e.g. 512 or 4K) */
	case ATA_CMD_CFA_WRITE_NE:
	case ATA_CMD_CFA_TRANS_SECT:
	case ATA_CMD_CFA_WRITE_MULT_NE:
	/* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_QUEUED:
	/* XXX: case ATA_CMD_READ_QUEUED_EXT: */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_READ_MULTI:
	case ATA_CMD_READ_MULTI_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_READ_STREAM_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_QUEUED_FUA_EXT:
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_WRITE_MULTI:
	case ATA_CMD_WRITE_MULTI_EXT:
	case ATA_CMD_WRITE_MULTI_FUA_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_EXT:
		qc->sect_size = scmd->device->sector_size;
		break;

	/* Everything else uses 512 byte "sectors" */
	default:
		qc->sect_size = ATA_SECT_SIZE;
	}

	/*
	 * Set flags so that all registers will be written, pass on
	 * write indication (used for PIO/DMA setup), result TF is
	 * copied back and we don't whine too much about its failure.
	 */
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		tf->flags |= ATA_TFLAG_WRITE;

	qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;

	/*
	 * Set transfer length.
	 *
	 * TODO: find out if we need to do more here to
	 *       cover scatter/gather case.
	 */
	ata_qc_set_pc_nbytes(qc);

	/* We may not issue DMA commands if no DMA mode is set */
	if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
		goto invalid_fld;

	/* sanity check for pio multi commands */
	if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
		goto invalid_fld;

	if (is_multi_taskfile(tf)) {
		unsigned int multi_count = 1 << (cdb[1] >> 5);

		/* compare the passed through multi_count
		 * with the cached multi_count of libata
		 */
		if (multi_count != dev->multi_count)
			ata_dev_printk(dev, KERN_WARNING,
				       "invalid multi_count %u ignored\n",
				       multi_count);
	}

	/*
	 * Filter SET_FEATURES - XFER MODE command -- otherwise,
	 * SET_FEATURES - XFER MODE must be preceded/succeeded
	 * by an update to hardware-specific registers for each
	 * controller (i.e. the reason for ->set_piomode(),
	 * ->set_dmamode(), and ->post_set_mode() hooks).
	 */
	if (tf->command == ATA_CMD_SET_FEATURES &&
	    tf->feature == SETFEATURES_XFER)
		goto invalid_fld;

	/*
	 * Filter TPM commands by default. These provide an
	 * essentially uncontrolled encrypted "back door" between
	 * applications and the disk. Set libata.allow_tpm=1 if you
	 * have a real reason for wanting to use them. This ensures
	 * that installed software cannot easily mess stuff up without
	 * user intent. DVR type users will probably ship with this enabled
	 * for movie content management.
	 *
	 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
	 * for this and should do in future but that it is not sufficient as
	 * DCS is an optional feature set. Thus we also do the software filter
	 * so that we comply with the TC consortium stated goal that the user
	 * can turn off TC features of their system.
	 */
	if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
		goto invalid_fld;

	return 0;

 invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
	/* "Invalid field in cdb" */
	return 1;
}
3013
3014
/**
 *	ata_scsi_write_same_xlat - Translate WRITE SAME (16) to ATA DSM/TRIM
 *	@qc: command structure to be initialized
 *
 *	Only WRITE SAME with the unmap bit set is supported; the
 *	payload buffer is rewritten in place into a DSM TRIM range list.
 *
 *	RETURNS:
 *	Zero on success, non-zero on failure.
 */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;
	u32 size;
	void *buf;

	/* we may not issue DMA commands if no DMA mode is set */
	if (unlikely(!dev->dma_mode))
		goto invalid_fld;

	/* WRITE SAME (16) CDBs are 16 bytes long */
	if (unlikely(scmd->cmd_len < 16))
		goto invalid_fld;
	scsi_16_lba_len(cdb, &block, &n_block);

	/* for now we only support WRITE SAME with the unmap bit set */
	if (unlikely(!(cdb[1] & 0x8)))
		goto invalid_fld;

	/*
	 * WRITE SAME always has a sector sized buffer as payload, this
	 * should never be a multiple entry S/G list.
	 */
	if (!scsi_sg_count(scmd))
		goto invalid_fld;

	/* rewrite the payload buffer into DSM TRIM range entries */
	buf = page_address(sg_page(scsi_sglist(scmd)));
	size = ata_set_lba_range_entries(buf, 512, block, n_block);

	tf->protocol = ATA_PROT_DMA;
	tf->hob_feature = 0;
	tf->feature = ATA_DSM_TRIM;
	/* sector count = number of 512-byte range blocks (LBA48 split) */
	tf->hob_nsect = (size / 512) >> 8;
	tf->nsect = size / 512;
	tf->command = ATA_CMD_DSM;
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
		     ATA_TFLAG_WRITE;

	ata_qc_set_pc_nbytes(qc);

	return 0;

 invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
	/* "Invalid field in cdb" */
	return 1;
}
3065
3066
/**
 *	ata_get_xlat_func - check if SCSI to ATA translation is possible
 *	@dev: ATA device
 *	@cmd: SCSI command opcode to consider
 *
 *	Look up the SCSI command given, and determine whether the
 *	SCSI command is to be translated or simulated.
 *
 *	RETURNS:
 *	Pointer to translation function if possible, %NULL if not.
 */

static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
	switch (cmd) {
	case READ_6:
	case READ_10:
	case READ_16:

	case WRITE_6:
	case WRITE_10:
	case WRITE_16:
		return ata_scsi_rw_xlat;

	case WRITE_SAME_16:
		return ata_scsi_write_same_xlat;

	case SYNCHRONIZE_CACHE:
		/* only translated when the device supports flushing */
		if (ata_try_flush_cache(dev))
			return ata_scsi_flush_xlat;
		break;

	case VERIFY:
	case VERIFY_16:
		return ata_scsi_verify_xlat;

	case ATA_12:
	case ATA_16:
		return ata_scsi_pass_thru;

	case START_STOP:
		return ata_scsi_start_stop_xlat;
	}

	/* NULL means the command will be simulated instead */
	return NULL;
}
3112
3113
/**
 *	ata_scsi_dump_cdb - dump SCSI command contents to dmesg
 *	@ap: ATA port to which the command was being sent
 *	@cmd: SCSI command to dump
 *
 *	Prints the contents of a SCSI command via printk().
 *	Compiles to nothing unless ATA_DEBUG is defined.
 */

static inline void ata_scsi_dump_cdb(struct ata_port *ap,
				     struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
	struct scsi_device *scsidev = cmd->device;
	u8 *scsicmd = cmd->cmnd;

	DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		ap->print_id,
		scsidev->channel, scsidev->id, scsidev->lun,
		scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
		scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
		scsicmd[8]);
#endif
}
3136
3137
/* Dispatch one SCSI command to either a translation function or the
 * simulator, after validating the CDB length against the device class.
 * Returns the ata_scsi_translate() result, or 0 when handled here.
 */
static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
				      struct ata_device *dev)
{
	u8 scsi_op = scmd->cmnd[0];
	ata_xlat_func_t xlat_func;
	int rc = 0;

	if (dev->class == ATA_DEV_ATA) {
		if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
			goto bad_cdb_len;

		xlat_func = ata_get_xlat_func(dev, scsi_op);
	} else {
		if (unlikely(!scmd->cmd_len))
			goto bad_cdb_len;

		xlat_func = NULL;
		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
			/* relay SCSI command to ATAPI device */
			int len = COMMAND_SIZE(scsi_op);
			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
				goto bad_cdb_len;

			xlat_func = atapi_xlat;
		} else {
			/* ATA_16 passthru, treat as an ATA command */
			if (unlikely(scmd->cmd_len > 16))
				goto bad_cdb_len;

			xlat_func = ata_get_xlat_func(dev, scsi_op);
		}
	}

	if (xlat_func)
		rc = ata_scsi_translate(dev, scmd, xlat_func);
	else
		ata_scsi_simulate(dev, scmd);

	return rc;

 bad_cdb_len:
	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
		scmd->cmd_len, scsi_op, dev->cdb_len);
	scmd->result = DID_ERROR << 16;
	scmd->scsi_done(scmd);
	return 0;
}
3184
3185
/**
 *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
 *	@shost: SCSI host of command to be sent
 *	@cmd: SCSI command to be sent
 *
 *	In some cases, this function translates SCSI commands into
 *	ATA taskfiles, and queues the taskfiles to be sent to
 *	hardware.  In other cases, this function simulates a
 *	SCSI device by evaluating and responding to certain
 *	SCSI commands.  This creates the overall effect of
 *	ATA and ATAPI devices appearing as SCSI devices.
 *
 *	LOCKING:
 *	ATA host lock
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 *	0 otherwise.
 */
int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct ata_port *ap;
	struct ata_device *dev;
	struct scsi_device *scsidev = cmd->device;
	int rc = 0;
	unsigned long irq_flags;

	ap = ata_shost_to_port(shost);

	spin_lock_irqsave(ap->lock, irq_flags);

	ata_scsi_dump_cdb(ap, cmd);

	dev = ata_scsi_find_dev(ap, scsidev);
	if (likely(dev))
		rc = __ata_scsi_queuecmd(cmd, dev);
	else {
		/* no such device: complete immediately with an error */
		cmd->result = (DID_BAD_TARGET << 16);
		cmd->scsi_done(cmd);
	}

	spin_unlock_irqrestore(ap->lock, irq_flags);

	return rc;
}
3230
3231
/**
3232
* ata_scsi_simulate - simulate SCSI command on ATA device
3233
* @dev: the target device
3234
* @cmd: SCSI command being sent to device.
3235
*
3236
* Interprets and directly executes a select list of SCSI commands
3237
* that can be handled internally.
3238
*
3239
* LOCKING:
3240
* spin_lock_irqsave(host lock)
3241
*/
3242
3243
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
3244
{
3245
struct ata_scsi_args args;
3246
const u8 *scsicmd = cmd->cmnd;
3247
u8 tmp8;
3248
3249
args.dev = dev;
3250
args.id = dev->id;
3251
args.cmd = cmd;
3252
args.done = cmd->scsi_done;
3253
3254
switch(scsicmd[0]) {
3255
/* TODO: worth improving? */
3256
case FORMAT_UNIT:
3257
ata_scsi_invalid_field(cmd);
3258
break;
3259
3260
case INQUIRY:
3261
if (scsicmd[1] & 2) /* is CmdDt set? */
3262
ata_scsi_invalid_field(cmd);
3263
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
3264
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
3265
else switch (scsicmd[2]) {
3266
case 0x00:
3267
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
3268
break;
3269
case 0x80:
3270
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
3271
break;
3272
case 0x83:
3273
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
3274
break;
3275
case 0x89:
3276
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
3277
break;
3278
case 0xb0:
3279
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
3280
break;
3281
case 0xb1:
3282
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
3283
break;
3284
case 0xb2:
3285
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
3286
break;
3287
default:
3288
ata_scsi_invalid_field(cmd);
3289
break;
3290
}
3291
break;
3292
3293
case MODE_SENSE:
3294
case MODE_SENSE_10:
3295
ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
3296
break;
3297
3298
case MODE_SELECT: /* unconditionally return */
3299
case MODE_SELECT_10: /* bad-field-in-cdb */
3300
ata_scsi_invalid_field(cmd);
3301
break;
3302
3303
case READ_CAPACITY:
3304
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
3305
break;
3306
3307
case SERVICE_ACTION_IN:
3308
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
3309
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
3310
else
3311
ata_scsi_invalid_field(cmd);
3312
break;
3313
3314
case REPORT_LUNS:
3315
ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
3316
break;
3317
3318
case REQUEST_SENSE:
3319
ata_scsi_set_sense(cmd, 0, 0, 0);
3320
cmd->result = (DRIVER_SENSE << 24);
3321
cmd->scsi_done(cmd);
3322
break;
3323
3324
/* if we reach this, then writeback caching is disabled,
3325
* turning this into a no-op.
3326
*/
3327
case SYNCHRONIZE_CACHE:
3328
/* fall through */
3329
3330
/* no-op's, complete with success */
3331
case REZERO_UNIT:
3332
case SEEK_6:
3333
case SEEK_10:
3334
case TEST_UNIT_READY:
3335
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
3336
break;
3337
3338
case SEND_DIAGNOSTIC:
3339
tmp8 = scsicmd[1] & ~(1 << 3);
3340
if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
3341
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
3342
else
3343
ata_scsi_invalid_field(cmd);
3344
break;
3345
3346
/* all other commands */
3347
default:
3348
ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
3349
/* "Invalid command operation code" */
3350
cmd->scsi_done(cmd);
3351
break;
3352
}
3353
}
3354
3355
int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
3356
{
3357
int i, rc;
3358
3359
for (i = 0; i < host->n_ports; i++) {
3360
struct ata_port *ap = host->ports[i];
3361
struct Scsi_Host *shost;
3362
3363
rc = -ENOMEM;
3364
shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
3365
if (!shost)
3366
goto err_alloc;
3367
3368
*(struct ata_port **)&shost->hostdata[0] = ap;
3369
ap->scsi_host = shost;
3370
3371
shost->transportt = ata_scsi_transport_template;
3372
shost->unique_id = ap->print_id;
3373
shost->max_id = 16;
3374
shost->max_lun = 1;
3375
shost->max_channel = 1;
3376
shost->max_cmd_len = 16;
3377
3378
/* Schedule policy is determined by ->qc_defer()
3379
* callback and it needs to see every deferred qc.
3380
* Set host_blocked to 1 to prevent SCSI midlayer from
3381
* automatically deferring requests.
3382
*/
3383
shost->max_host_blocked = 1;
3384
3385
rc = scsi_add_host(ap->scsi_host, ap->host->dev);
3386
if (rc)
3387
goto err_add;
3388
}
3389
3390
return 0;
3391
3392
err_add:
3393
scsi_host_put(host->ports[i]->scsi_host);
3394
err_alloc:
3395
while (--i >= 0) {
3396
struct Scsi_Host *shost = host->ports[i]->scsi_host;
3397
3398
scsi_remove_host(shost);
3399
scsi_host_put(shost);
3400
}
3401
return rc;
3402
}
3403
3404
void ata_scsi_scan_host(struct ata_port *ap, int sync)
3405
{
3406
int tries = 5;
3407
struct ata_device *last_failed_dev = NULL;
3408
struct ata_link *link;
3409
struct ata_device *dev;
3410
3411
repeat:
3412
ata_for_each_link(link, ap, EDGE) {
3413
ata_for_each_dev(dev, link, ENABLED) {
3414
struct scsi_device *sdev;
3415
int channel = 0, id = 0;
3416
3417
if (dev->sdev)
3418
continue;
3419
3420
if (ata_is_host_link(link))
3421
id = dev->devno;
3422
else
3423
channel = link->pmp;
3424
3425
sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
3426
NULL);
3427
if (!IS_ERR(sdev)) {
3428
dev->sdev = sdev;
3429
scsi_device_put(sdev);
3430
} else {
3431
dev->sdev = NULL;
3432
}
3433
}
3434
}
3435
3436
/* If we scanned while EH was in progress or allocation
3437
* failure occurred, scan would have failed silently. Check
3438
* whether all devices are attached.
3439
*/
3440
ata_for_each_link(link, ap, EDGE) {
3441
ata_for_each_dev(dev, link, ENABLED) {
3442
if (!dev->sdev)
3443
goto exit_loop;
3444
}
3445
}
3446
exit_loop:
3447
if (!link)
3448
return;
3449
3450
/* we're missing some SCSI devices */
3451
if (sync) {
3452
/* If caller requested synchrnous scan && we've made
3453
* any progress, sleep briefly and repeat.
3454
*/
3455
if (dev != last_failed_dev) {
3456
msleep(100);
3457
last_failed_dev = dev;
3458
goto repeat;
3459
}
3460
3461
/* We might be failing to detect boot device, give it
3462
* a few more chances.
3463
*/
3464
if (--tries) {
3465
msleep(100);
3466
goto repeat;
3467
}
3468
3469
ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
3470
"failed without making any progress,\n"
3471
" switching to async\n");
3472
}
3473
3474
queue_delayed_work(system_long_wq, &ap->hotplug_task,
3475
round_jiffies_relative(HZ));
3476
}
3477
3478
/**
3479
* ata_scsi_offline_dev - offline attached SCSI device
3480
* @dev: ATA device to offline attached SCSI device for
3481
*
3482
* This function is called from ata_eh_hotplug() and responsible
3483
* for taking the SCSI device attached to @dev offline. This
3484
* function is called with host lock which protects dev->sdev
3485
* against clearing.
3486
*
3487
* LOCKING:
3488
* spin_lock_irqsave(host lock)
3489
*
3490
* RETURNS:
3491
* 1 if attached SCSI device exists, 0 otherwise.
3492
*/
3493
int ata_scsi_offline_dev(struct ata_device *dev)
3494
{
3495
if (dev->sdev) {
3496
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
3497
return 1;
3498
}
3499
return 0;
3500
}
3501
3502
/**
3503
* ata_scsi_remove_dev - remove attached SCSI device
3504
* @dev: ATA device to remove attached SCSI device for
3505
*
3506
* This function is called from ata_eh_scsi_hotplug() and
3507
* responsible for removing the SCSI device attached to @dev.
3508
*
3509
* LOCKING:
3510
* Kernel thread context (may sleep).
3511
*/
3512
static void ata_scsi_remove_dev(struct ata_device *dev)
3513
{
3514
struct ata_port *ap = dev->link->ap;
3515
struct scsi_device *sdev;
3516
unsigned long flags;
3517
3518
/* Alas, we need to grab scan_mutex to ensure SCSI device
3519
* state doesn't change underneath us and thus
3520
* scsi_device_get() always succeeds. The mutex locking can
3521
* be removed if there is __scsi_device_get() interface which
3522
* increments reference counts regardless of device state.
3523
*/
3524
mutex_lock(&ap->scsi_host->scan_mutex);
3525
spin_lock_irqsave(ap->lock, flags);
3526
3527
/* clearing dev->sdev is protected by host lock */
3528
sdev = dev->sdev;
3529
dev->sdev = NULL;
3530
3531
if (sdev) {
3532
/* If user initiated unplug races with us, sdev can go
3533
* away underneath us after the host lock and
3534
* scan_mutex are released. Hold onto it.
3535
*/
3536
if (scsi_device_get(sdev) == 0) {
3537
/* The following ensures the attached sdev is
3538
* offline on return from ata_scsi_offline_dev()
3539
* regardless it wins or loses the race
3540
* against this function.
3541
*/
3542
scsi_device_set_state(sdev, SDEV_OFFLINE);
3543
} else {
3544
WARN_ON(1);
3545
sdev = NULL;
3546
}
3547
}
3548
3549
spin_unlock_irqrestore(ap->lock, flags);
3550
mutex_unlock(&ap->scsi_host->scan_mutex);
3551
3552
if (sdev) {
3553
ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
3554
dev_name(&sdev->sdev_gendev));
3555
3556
scsi_remove_device(sdev);
3557
scsi_device_put(sdev);
3558
}
3559
}
3560
3561
static void ata_scsi_handle_link_detach(struct ata_link *link)
3562
{
3563
struct ata_port *ap = link->ap;
3564
struct ata_device *dev;
3565
3566
ata_for_each_dev(dev, link, ALL) {
3567
unsigned long flags;
3568
3569
if (!(dev->flags & ATA_DFLAG_DETACHED))
3570
continue;
3571
3572
spin_lock_irqsave(ap->lock, flags);
3573
dev->flags &= ~ATA_DFLAG_DETACHED;
3574
spin_unlock_irqrestore(ap->lock, flags);
3575
3576
ata_scsi_remove_dev(dev);
3577
}
3578
}
3579
3580
/**
3581
* ata_scsi_media_change_notify - send media change event
3582
* @dev: Pointer to the disk device with media change event
3583
*
3584
* Tell the block layer to send a media change notification
3585
* event.
3586
*
3587
* LOCKING:
3588
* spin_lock_irqsave(host lock)
3589
*/
3590
void ata_scsi_media_change_notify(struct ata_device *dev)
3591
{
3592
if (dev->sdev)
3593
sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
3594
GFP_ATOMIC);
3595
}
3596
3597
/**
3598
* ata_scsi_hotplug - SCSI part of hotplug
3599
* @work: Pointer to ATA port to perform SCSI hotplug on
3600
*
3601
* Perform SCSI part of hotplug. It's executed from a separate
3602
* workqueue after EH completes. This is necessary because SCSI
3603
* hot plugging requires working EH and hot unplugging is
3604
* synchronized with hot plugging with a mutex.
3605
*
3606
* LOCKING:
3607
* Kernel thread context (may sleep).
3608
*/
3609
void ata_scsi_hotplug(struct work_struct *work)
3610
{
3611
struct ata_port *ap =
3612
container_of(work, struct ata_port, hotplug_task.work);
3613
int i;
3614
3615
if (ap->pflags & ATA_PFLAG_UNLOADING) {
3616
DPRINTK("ENTER/EXIT - unloading\n");
3617
return;
3618
}
3619
3620
DPRINTK("ENTER\n");
3621
mutex_lock(&ap->scsi_scan_mutex);
3622
3623
/* Unplug detached devices. We cannot use link iterator here
3624
* because PMP links have to be scanned even if PMP is
3625
* currently not attached. Iterate manually.
3626
*/
3627
ata_scsi_handle_link_detach(&ap->link);
3628
if (ap->pmp_link)
3629
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
3630
ata_scsi_handle_link_detach(&ap->pmp_link[i]);
3631
3632
/* scan for new ones */
3633
ata_scsi_scan_host(ap, 0);
3634
3635
mutex_unlock(&ap->scsi_scan_mutex);
3636
DPRINTK("EXIT\n");
3637
}
3638
3639
/**
3640
* ata_scsi_user_scan - indication for user-initiated bus scan
3641
* @shost: SCSI host to scan
3642
* @channel: Channel to scan
3643
* @id: ID to scan
3644
* @lun: LUN to scan
3645
*
3646
* This function is called when user explicitly requests bus
3647
* scan. Set probe pending flag and invoke EH.
3648
*
3649
* LOCKING:
3650
* SCSI layer (we don't care)
3651
*
3652
* RETURNS:
3653
* Zero.
3654
*/
3655
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
3656
unsigned int id, unsigned int lun)
3657
{
3658
struct ata_port *ap = ata_shost_to_port(shost);
3659
unsigned long flags;
3660
int devno, rc = 0;
3661
3662
if (!ap->ops->error_handler)
3663
return -EOPNOTSUPP;
3664
3665
if (lun != SCAN_WILD_CARD && lun)
3666
return -EINVAL;
3667
3668
if (!sata_pmp_attached(ap)) {
3669
if (channel != SCAN_WILD_CARD && channel)
3670
return -EINVAL;
3671
devno = id;
3672
} else {
3673
if (id != SCAN_WILD_CARD && id)
3674
return -EINVAL;
3675
devno = channel;
3676
}
3677
3678
spin_lock_irqsave(ap->lock, flags);
3679
3680
if (devno == SCAN_WILD_CARD) {
3681
struct ata_link *link;
3682
3683
ata_for_each_link(link, ap, EDGE) {
3684
struct ata_eh_info *ehi = &link->eh_info;
3685
ehi->probe_mask |= ATA_ALL_DEVICES;
3686
ehi->action |= ATA_EH_RESET;
3687
}
3688
} else {
3689
struct ata_device *dev = ata_find_dev(ap, devno);
3690
3691
if (dev) {
3692
struct ata_eh_info *ehi = &dev->link->eh_info;
3693
ehi->probe_mask |= 1 << dev->devno;
3694
ehi->action |= ATA_EH_RESET;
3695
} else
3696
rc = -EINVAL;
3697
}
3698
3699
if (rc == 0) {
3700
ata_port_schedule_eh(ap);
3701
spin_unlock_irqrestore(ap->lock, flags);
3702
ata_port_wait_eh(ap);
3703
} else
3704
spin_unlock_irqrestore(ap->lock, flags);
3705
3706
return rc;
3707
}
3708
3709
/**
3710
* ata_scsi_dev_rescan - initiate scsi_rescan_device()
3711
* @work: Pointer to ATA port to perform scsi_rescan_device()
3712
*
3713
* After ATA pass thru (SAT) commands are executed successfully,
3714
* libata need to propagate the changes to SCSI layer.
3715
*
3716
* LOCKING:
3717
* Kernel thread context (may sleep).
3718
*/
3719
void ata_scsi_dev_rescan(struct work_struct *work)
3720
{
3721
struct ata_port *ap =
3722
container_of(work, struct ata_port, scsi_rescan_task);
3723
struct ata_link *link;
3724
struct ata_device *dev;
3725
unsigned long flags;
3726
3727
mutex_lock(&ap->scsi_scan_mutex);
3728
spin_lock_irqsave(ap->lock, flags);
3729
3730
ata_for_each_link(link, ap, EDGE) {
3731
ata_for_each_dev(dev, link, ENABLED) {
3732
struct scsi_device *sdev = dev->sdev;
3733
3734
if (!sdev)
3735
continue;
3736
if (scsi_device_get(sdev))
3737
continue;
3738
3739
spin_unlock_irqrestore(ap->lock, flags);
3740
scsi_rescan_device(&(sdev->sdev_gendev));
3741
scsi_device_put(sdev);
3742
spin_lock_irqsave(ap->lock, flags);
3743
}
3744
}
3745
3746
spin_unlock_irqrestore(ap->lock, flags);
3747
mutex_unlock(&ap->scsi_scan_mutex);
3748
}
3749
3750
/**
3751
* ata_sas_port_alloc - Allocate port for a SAS attached SATA device
3752
* @host: ATA host container for all SAS ports
3753
* @port_info: Information from low-level host driver
3754
* @shost: SCSI host that the scsi device is attached to
3755
*
3756
* LOCKING:
3757
* PCI/etc. bus probe sem.
3758
*
3759
* RETURNS:
3760
* ata_port pointer on success / NULL on failure.
3761
*/
3762
3763
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
3764
struct ata_port_info *port_info,
3765
struct Scsi_Host *shost)
3766
{
3767
struct ata_port *ap;
3768
3769
ap = ata_port_alloc(host);
3770
if (!ap)
3771
return NULL;
3772
3773
ap->port_no = 0;
3774
ap->lock = &host->lock;
3775
ap->pio_mask = port_info->pio_mask;
3776
ap->mwdma_mask = port_info->mwdma_mask;
3777
ap->udma_mask = port_info->udma_mask;
3778
ap->flags |= port_info->flags;
3779
ap->ops = port_info->port_ops;
3780
ap->cbl = ATA_CBL_SATA;
3781
3782
return ap;
3783
}
3784
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3785
3786
/**
3787
* ata_sas_port_start - Set port up for dma.
3788
* @ap: Port to initialize
3789
*
3790
* Called just after data structures for each port are
3791
* initialized.
3792
*
3793
* May be used as the port_start() entry in ata_port_operations.
3794
*
3795
* LOCKING:
3796
* Inherited from caller.
3797
*/
3798
int ata_sas_port_start(struct ata_port *ap)
3799
{
3800
/*
3801
* the port is marked as frozen at allocation time, but if we don't
3802
* have new eh, we won't thaw it
3803
*/
3804
if (!ap->ops->error_handler)
3805
ap->pflags &= ~ATA_PFLAG_FROZEN;
3806
return 0;
3807
}
3808
EXPORT_SYMBOL_GPL(ata_sas_port_start);
3809
3810
/**
 *	ata_sas_port_stop - Undo ata_sas_port_start()
 *	@ap: Port to shut down
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *	Intentionally empty: ata_sas_port_start() acquires nothing
 *	that needs releasing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
/**
3826
* ata_sas_port_init - Initialize a SATA device
3827
* @ap: SATA port to initialize
3828
*
3829
* LOCKING:
3830
* PCI/etc. bus probe sem.
3831
*
3832
* RETURNS:
3833
* Zero on success, non-zero on error.
3834
*/
3835
3836
int ata_sas_port_init(struct ata_port *ap)
3837
{
3838
int rc = ap->ops->port_start(ap);
3839
3840
if (!rc) {
3841
ap->print_id = ata_print_id++;
3842
rc = ata_port_probe(ap);
3843
}
3844
3845
return rc;
3846
}
3847
EXPORT_SYMBOL_GPL(ata_sas_port_init);
3848
3849
/**
3850
* ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
3851
* @ap: SATA port to destroy
3852
*
3853
*/
3854
3855
void ata_sas_port_destroy(struct ata_port *ap)
3856
{
3857
if (ap->ops->port_stop)
3858
ap->ops->port_stop(ap);
3859
kfree(ap);
3860
}
3861
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
3862
3863
/**
3864
* ata_sas_slave_configure - Default slave_config routine for libata devices
3865
* @sdev: SCSI device to configure
3866
* @ap: ATA port to which SCSI device is attached
3867
*
3868
* RETURNS:
3869
* Zero.
3870
*/
3871
3872
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
3873
{
3874
ata_scsi_sdev_config(sdev);
3875
ata_scsi_dev_config(sdev, ap->link.device);
3876
return 0;
3877
}
3878
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
3879
3880
/**
3881
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
3882
* @cmd: SCSI command to be sent
3883
* @ap: ATA port to which the command is being sent
3884
*
3885
* RETURNS:
3886
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
3887
* 0 otherwise.
3888
*/
3889
3890
int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
3891
{
3892
int rc = 0;
3893
3894
ata_scsi_dump_cdb(ap, cmd);
3895
3896
if (likely(ata_dev_enabled(ap->link.device)))
3897
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
3898
else {
3899
cmd->result = (DID_BAD_TARGET << 16);
3900
cmd->scsi_done(cmd);
3901
}
3902
return rc;
3903
}
3904
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
3905
3906