Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/ata/libata-sata.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* SATA specific part of ATA helper library
4
*
5
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6
* Copyright 2003-2004 Jeff Garzik
7
* Copyright 2006 Tejun Heo <[email protected]>
8
*/
9
10
#include <linux/kernel.h>
11
#include <linux/module.h>
12
#include <scsi/scsi_cmnd.h>
13
#include <scsi/scsi_device.h>
14
#include <scsi/scsi_eh.h>
15
#include <linux/libata.h>
16
#include <linux/unaligned.h>
17
18
#include "libata.h"
19
#include "libata-transport.h"
20
21
/*
 * SATA link debounce timing parameters, in msecs:
 * { polling interval, required stable duration, overall timeout }.
 * Consumed by sata_link_debounce()/sata_link_resume().
 */
const unsigned int sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
const unsigned int sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
const unsigned int sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
28
29
/**
30
* sata_scr_valid - test whether SCRs are accessible
31
* @link: ATA link to test SCR accessibility for
32
*
33
* Test whether SCRs are accessible for @link.
34
*
35
* LOCKING:
36
* None.
37
*
38
* RETURNS:
39
* 1 if SCRs are accessible, 0 otherwise.
40
*/
41
int sata_scr_valid(struct ata_link *link)
42
{
43
struct ata_port *ap = link->ap;
44
45
return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
46
}
47
EXPORT_SYMBOL_GPL(sata_scr_valid);
48
49
/**
50
* sata_scr_read - read SCR register of the specified port
51
* @link: ATA link to read SCR for
52
* @reg: SCR to read
53
* @val: Place to store read value
54
*
55
* Read SCR register @reg of @link into *@val. This function is
56
* guaranteed to succeed if @link is ap->link, the cable type of
57
* the port is SATA and the port implements ->scr_read.
58
*
59
* LOCKING:
60
* None if @link is ap->link. Kernel thread context otherwise.
61
*
62
* RETURNS:
63
* 0 on success, negative errno on failure.
64
*/
65
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
66
{
67
if (ata_is_host_link(link)) {
68
if (sata_scr_valid(link))
69
return link->ap->ops->scr_read(link, reg, val);
70
return -EOPNOTSUPP;
71
}
72
73
return sata_pmp_scr_read(link, reg, val);
74
}
75
EXPORT_SYMBOL_GPL(sata_scr_read);
76
77
/**
78
* sata_scr_write - write SCR register of the specified port
79
* @link: ATA link to write SCR for
80
* @reg: SCR to write
81
* @val: value to write
82
*
83
* Write @val to SCR register @reg of @link. This function is
84
* guaranteed to succeed if @link is ap->link, the cable type of
85
* the port is SATA and the port implements ->scr_read.
86
*
87
* LOCKING:
88
* None if @link is ap->link. Kernel thread context otherwise.
89
*
90
* RETURNS:
91
* 0 on success, negative errno on failure.
92
*/
93
int sata_scr_write(struct ata_link *link, int reg, u32 val)
94
{
95
if (ata_is_host_link(link)) {
96
if (sata_scr_valid(link))
97
return link->ap->ops->scr_write(link, reg, val);
98
return -EOPNOTSUPP;
99
}
100
101
return sata_pmp_scr_write(link, reg, val);
102
}
103
EXPORT_SYMBOL_GPL(sata_scr_write);
104
105
/**
106
* sata_scr_write_flush - write SCR register of the specified port and flush
107
* @link: ATA link to write SCR for
108
* @reg: SCR to write
109
* @val: value to write
110
*
111
* This function is identical to sata_scr_write() except that this
112
* function performs flush after writing to the register.
113
*
114
* LOCKING:
115
* None if @link is ap->link. Kernel thread context otherwise.
116
*
117
* RETURNS:
118
* 0 on success, negative errno on failure.
119
*/
120
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
121
{
122
if (ata_is_host_link(link)) {
123
int rc;
124
125
if (sata_scr_valid(link)) {
126
rc = link->ap->ops->scr_write(link, reg, val);
127
if (rc == 0)
128
rc = link->ap->ops->scr_read(link, reg, &val);
129
return rc;
130
}
131
return -EOPNOTSUPP;
132
}
133
134
return sata_pmp_scr_write(link, reg, val);
135
}
136
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
137
138
/**
139
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
140
* @tf: Taskfile to convert
141
* @pmp: Port multiplier port
142
* @is_cmd: This FIS is for command
143
* @fis: Buffer into which data will output
144
*
145
* Converts a standard ATA taskfile to a Serial ATA
146
* FIS structure (Register - Host to Device).
147
*
148
* LOCKING:
149
* Inherited from caller.
150
*/
151
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
152
{
153
fis[0] = 0x27; /* Register - Host to Device FIS */
154
fis[1] = pmp & 0xf; /* Port multiplier number*/
155
if (is_cmd)
156
fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
157
158
fis[2] = tf->command;
159
fis[3] = tf->feature;
160
161
fis[4] = tf->lbal;
162
fis[5] = tf->lbam;
163
fis[6] = tf->lbah;
164
fis[7] = tf->device;
165
166
fis[8] = tf->hob_lbal;
167
fis[9] = tf->hob_lbam;
168
fis[10] = tf->hob_lbah;
169
fis[11] = tf->hob_feature;
170
171
fis[12] = tf->nsect;
172
fis[13] = tf->hob_nsect;
173
fis[14] = 0;
174
fis[15] = tf->ctl;
175
176
fis[16] = tf->auxiliary & 0xff;
177
fis[17] = (tf->auxiliary >> 8) & 0xff;
178
fis[18] = (tf->auxiliary >> 16) & 0xff;
179
fis[19] = (tf->auxiliary >> 24) & 0xff;
180
}
181
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
182
183
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 * Only the fields present in the FIS are written; other @tf fields
 * are left untouched.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	/* status/error bytes */
	tf->status = fis[2];
	tf->error = fis[3];

	/* LBA and device */
	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	/* high-order ("hob") LBA bytes */
	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	/* sector counts */
	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
212
213
/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @link reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constraints the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this functions waits
 * until timeout then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned int *params,
		       unsigned long deadline)
{
	unsigned int interval = params[0];
	unsigned int duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of params[2] and @deadline */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* keep only the low nibble (DET) of SStatus */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be stuck; keep polling until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			/* held the same value for @duration -> done */
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
EXPORT_SYMBOL_GPL(sata_link_debounce);
283
284
/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy @link and debounce it.  If the resume fails the
 * function warns but still returns 0; a debounce failure is
 * propagated to the caller.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned int *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		/* clear DET while preserving the SPD field (0x0f0) */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SError clear is tolerated */
	return rc != -EINVAL ? rc : 0;
}
EXPORT_SYMBOL_GPL(sata_link_resume);
350
351
/**
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: ATA link to manipulate SControl for
 * @policy: LPM policy to configure
 * @spm_wakeup: initiate LPM transition to active state
 *
 * Manipulate the IPM field of the SControl register of @link
 * according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 * @spm_wakeup is %true, the SPM field is manipulated to wake up
 * the link.  This function also clears PHYRDY_CHG before
 * returning.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0) {
			/* assume no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);

			/*
			 * If the controller does not support partial, slumber,
			 * or devsleep, then disallow these transitions.
			 */
			if (link->ap->host->flags & ATA_HOST_NO_PART)
				scontrol |= (0x1 << 8);

			if (link->ap->host->flags & ATA_HOST_NO_SSC)
				scontrol |= (0x2 << 8);

			if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
				scontrol |= (0x4 << 8);
		} else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		/* unknown policy: leave SControl unchanged, warn once */
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
438
439
/*
 * __sata_set_spd_needed - update the SPD field of *@scontrol from the
 * link's speed limit and report whether it changed.
 *
 * Returns non-zero when the SControl SPD field does not already match
 * the target speed (i.e. a reconfiguration is needed).
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit = link->sata_spd_limit;
	u32 target, cur_spd;

	/*
	 * Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	/* UINT_MAX means "no limit": target speed 0 (no restriction) */
	target = (limit == UINT_MAX) ? 0 : fls(limit);

	cur_spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return cur_spd != target;
}
463
464
/**
465
* sata_set_spd_needed - is SATA spd configuration needed
466
* @link: Link in question
467
*
468
* Test whether the spd limit in SControl matches
469
* @link->sata_spd_limit. This function is used to determine
470
* whether hardreset is necessary to apply SATA spd
471
* configuration.
472
*
473
* LOCKING:
474
* Inherited from caller.
475
*
476
* RETURNS:
477
* 1 if SATA spd configuration is needed, 0 otherwise.
478
*/
479
static int sata_set_spd_needed(struct ata_link *link)
480
{
481
u32 scontrol;
482
483
if (sata_scr_read(link, SCR_CONTROL, &scontrol))
484
return 1;
485
486
return __sata_set_spd_needed(link, &scontrol);
487
}
488
489
/**
490
* sata_set_spd - set SATA spd according to spd limit
491
* @link: Link to set SATA spd for
492
*
493
* Set SATA spd of @link according to sata_spd_limit.
494
*
495
* LOCKING:
496
* Inherited from caller.
497
*
498
* RETURNS:
499
* 0 if spd doesn't need to be changed, 1 if spd has been
500
* changed. Negative errno if SCR registers are inaccessible.
501
*/
502
int sata_set_spd(struct ata_link *link)
503
{
504
u32 scontrol;
505
int rc;
506
507
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
508
return rc;
509
510
if (!__sata_set_spd_needed(link, &scontrol))
511
return 0;
512
513
if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
514
return rc;
515
516
return 1;
517
}
518
EXPORT_SYMBOL_GPL(sata_set_spd);
519
520
/**
 * sata_down_spd_limit - adjust SATA spd limit downward
 * @link: Link to adjust SATA spd limit for
 * @spd_limit: Additional limit
 *
 * Adjust SATA spd limit of @link downward.  Note that this
 * function only adjusts the limit.  The change must be applied
 * using sata_set_spd().
 *
 * If @spd_limit is non-zero, the speed is limited to equal to or
 * lower than @spd_limit if such speed is supported.  If
 * @spd_limit is slower than any supported speed, only the lowest
 * supported speed is allowed.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;	/* SPD field of SStatus */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;	/* already at the lowest allowed speed */

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just return in this
	 * case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else if (link->sata_spd)
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		/* honor @spd_limit if any allowed speed satisfies it,
		 * otherwise fall back to the lowest remaining speed
		 */
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
599
600
/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter indicating link onlineness
 * @check_ready: optional callback to check link readiness
 *
 * SATA phy-reset @link using DET bits of SControl register.
 * After hardreset, link readiness is waited upon using
 * ata_wait_ready() if @check_ready is specified.  LLDs are
 * allowed to not specify @check_ready and wait itself after this
 * function returns.  Device classification is LLD's
 * responsibility.
 *
 * *@online is set to one iff reset succeeded and @link is online
 * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* keep SPD field, set DET to 4 (phy offline) */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* keep SPD field, set DET to 1 (perform interface init) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
708
709
/**
710
* sata_std_hardreset - COMRESET w/o waiting or classification
711
* @link: link to reset
712
* @class: resulting class of attached device
713
* @deadline: deadline jiffies for the operation
714
*
715
* Standard SATA COMRESET w/o waiting or classification.
716
*
717
* LOCKING:
718
* Kernel thread context (may sleep)
719
*
720
* RETURNS:
721
* 0 if link offline, -EAGAIN if link online, -errno on errors.
722
*/
723
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
724
unsigned long deadline)
725
{
726
const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
727
bool online;
728
int rc;
729
730
rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
731
if (online)
732
return -EAGAIN;
733
return rc;
734
}
735
EXPORT_SYMBOL_GPL(sata_std_hardreset);
736
737
/**
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
 *
 * Complete in-flight commands.  This functions is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active is compared
 * and commands are completed accordingly.
 *
 * Always use this function when completing multiple NCQ commands
 * from IRQ handlers instead of calling ata_qc_complete()
 * multiple times to keep IRQ expect status properly in sync.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is set on ap->qc_active, then we care about
	 * bit0 on the passed in qc_active mask. Move that bit up to match
	 * the internal tag.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	/* bits that were active but are no longer -> completed commands */
	done_mask = ap_qc_active ^ qc_active;

	/* a bit may only be cleared, never newly set, by the LLD */
	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	/* let the LLD capture result taskfiles before completion */
	if (ap->ops->qc_ncq_fill_rtf)
		ap->ops->qc_ncq_fill_rtf(ap, done_mask);

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
798
799
/**
 * ata_slave_link_init - initialize slave link
 * @ap: port to initialize slave link for
 *
 * Create and initialize slave link for @ap.  This enables slave
 * link handling on the port.
 *
 * In libata, a port contains links and a link contains devices.
 * There is single host link but if a PMP is attached to it,
 * there can be multiple fan-out links.  On SATA, there's usually
 * a single device connected to a link but PATA and SATA
 * controllers emulating TF based interface can have two - master
 * and slave.
 *
 * However, there are a few controllers which don't fit into this
 * abstraction too well - SATA controllers which emulate TF
 * interface with both master and slave devices but also have
 * separate SCR register sets for each device.  These controllers
 * need separate links for physical link handling
 * (e.g. onlineness, link speed) but should be treated like a
 * traditional M/S controller for everything else (e.g. command
 * issue, softreset).
 *
 * slave_link is libata's way of handling this class of
 * controllers without impacting core layer too much.  For
 * anything other than physical link handling, the default host
 * link is used for both master and slave.  For physical link
 * handling, separate @ap->slave_link is used.  All dirty details
 * are implemented inside libata core layer.  From LLD's POV, the
 * only difference is that prereset, hardreset and postreset are
 * called once more for the slave link, so the reset sequence
 * looks like the following.
 *
 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 * softreset(M) -> postreset(M) -> postreset(S)
 *
 * Note that softreset is called only for the master.  Softreset
 * resets both M/S by definition, so SRST on master should handle
 * both (the standard method will work just fine).
 *
 * LOCKING:
 * Should be called before host is registered.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	/* slave links are incompatible with an already-set slave_link
	 * or with PMP-capable ports
	 */
	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_slave_link_init);
861
862
/**
863
* sata_lpm_ignore_phy_events - test if PHY event should be ignored
864
* @link: Link receiving the event
865
*
866
* Test whether the received PHY event has to be ignored or not.
867
*
868
* LOCKING:
869
* None:
870
*
871
* RETURNS:
872
* True if the event has to be ignored.
873
*/
874
bool sata_lpm_ignore_phy_events(struct ata_link *link)
875
{
876
unsigned long lpm_timeout = link->last_lpm_change +
877
msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
878
879
/* if LPM is enabled, PHYRDY doesn't mean anything */
880
if (link->lpm_policy > ATA_LPM_MAX_POWER)
881
return true;
882
883
/* ignore the first PHY event after the LPM policy changed
884
* as it is might be spurious
885
*/
886
if ((link->flags & ATA_LFLAG_CHANGED) &&
887
time_before(jiffies, lpm_timeout))
888
return true;
889
890
return false;
891
}
892
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
893
894
/* sysfs names for the LPM policies, indexed by enum ata_lpm_policy */
static const char *ata_lpm_policy_names[] = {
	[ATA_LPM_UNKNOWN]		= "keep_firmware_settings",
	[ATA_LPM_MAX_POWER]		= "max_performance",
	[ATA_LPM_MED_POWER]		= "medium_power",
	[ATA_LPM_MED_POWER_WITH_DIPM]	= "med_power_with_dipm",
	[ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial",
	[ATA_LPM_MIN_POWER]		= "min_power",
};
902
903
/*
 * Check if a port supports link power management.
 * Must be called with the port locked.
 *
 * Returns false if the port itself forbids LPM or any enabled
 * device carries the NOLPM quirk; true otherwise.
 */
static bool ata_scsi_lpm_supported(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	if (ap->flags & ATA_FLAG_NO_LPM)
		return false;

	ata_for_each_link(link, ap, EDGE) {
		/*
		 * NOTE(review): the inner loop iterates the devices of
		 * &ap->link (the host link) rather than the @link being
		 * iterated — confirm whether fan-out link devices should
		 * also be checked here.
		 */
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (dev->quirks & ATA_QUIRK_NOLPM)
				return false;
		}
	}

	return true;
}
924
925
static ssize_t ata_scsi_lpm_supported_show(struct device *dev,
926
struct device_attribute *attr, char *buf)
927
{
928
struct Scsi_Host *shost = class_to_shost(dev);
929
struct ata_port *ap = ata_shost_to_port(shost);
930
unsigned long flags;
931
bool supported;
932
933
spin_lock_irqsave(ap->lock, flags);
934
supported = ata_scsi_lpm_supported(ap);
935
spin_unlock_irqrestore(ap->lock, flags);
936
937
return sysfs_emit(buf, "%d\n", supported);
938
}
939
DEVICE_ATTR(link_power_management_supported, S_IRUGO,
940
ata_scsi_lpm_supported_show, NULL);
941
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_supported);
942
943
/* sysfs: set the target LPM policy by name and kick EH to apply it */
static ssize_t ata_scsi_lpm_store(struct device *device,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ata_port *ap = ata_shost_to_port(class_to_shost(device));
	enum ata_lpm_policy policy;
	unsigned long flags;

	/*
	 * Match @buf against the known policy names.  UNKNOWN is an
	 * internal state, so iteration starts at MAX_POWER.
	 */
	policy = ATA_LPM_MAX_POWER;
	while (policy < ARRAY_SIZE(ata_lpm_policy_names)) {
		const char *name = ata_lpm_policy_names[policy];

		if (strncmp(name, buf, strlen(name)) == 0)
			break;
		policy++;
	}
	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	if (!ata_scsi_lpm_supported(ap)) {
		count = -EOPNOTSUPP;
		goto out_unlock;
	}

	/* record the target policy; EH performs the actual transition */
	ap->target_lpm_policy = policy;
	ata_port_schedule_eh(ap);

out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
	return count;
}
976
977
static ssize_t ata_scsi_lpm_show(struct device *dev,
978
struct device_attribute *attr, char *buf)
979
{
980
struct Scsi_Host *shost = class_to_shost(dev);
981
struct ata_port *ap = ata_shost_to_port(shost);
982
983
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
984
return -EINVAL;
985
986
return sysfs_emit(buf, "%s\n",
987
ata_lpm_policy_names[ap->target_lpm_policy]);
988
}
989
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
990
ata_scsi_lpm_show, ata_scsi_lpm_store);
991
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
992
993
/**
994
* ata_ncq_prio_supported - Check if device supports NCQ Priority
995
* @ap: ATA port of the target device
996
* @sdev: SCSI device
997
* @supported: Address of a boolean to store the result
998
*
999
* Helper to check if device supports NCQ Priority feature.
1000
*
1001
* Context: Any context. Takes and releases @ap->lock.
1002
*
1003
* Return:
1004
* * %0 - OK. Status is stored into @supported
1005
* * %-ENODEV - Failed to find the ATA device
1006
*/
1007
int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
1008
bool *supported)
1009
{
1010
struct ata_device *dev;
1011
unsigned long flags;
1012
int rc = 0;
1013
1014
spin_lock_irqsave(ap->lock, flags);
1015
dev = ata_scsi_find_dev(ap, sdev);
1016
if (!dev)
1017
rc = -ENODEV;
1018
else
1019
*supported = dev->flags & ATA_DFLAG_NCQ_PRIO;
1020
spin_unlock_irqrestore(ap->lock, flags);
1021
1022
return rc;
1023
}
1024
EXPORT_SYMBOL_GPL(ata_ncq_prio_supported);
1025
1026
static ssize_t ata_ncq_prio_supported_show(struct device *device,
1027
struct device_attribute *attr,
1028
char *buf)
1029
{
1030
struct scsi_device *sdev = to_scsi_device(device);
1031
struct ata_port *ap = ata_shost_to_port(sdev->host);
1032
bool supported;
1033
int rc;
1034
1035
rc = ata_ncq_prio_supported(ap, sdev, &supported);
1036
if (rc)
1037
return rc;
1038
1039
return sysfs_emit(buf, "%d\n", supported);
1040
}
1041
1042
DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL);
1043
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported);
1044
1045
/**
1046
* ata_ncq_prio_enabled - Check if NCQ Priority is enabled
1047
* @ap: ATA port of the target device
1048
* @sdev: SCSI device
1049
* @enabled: Address of a boolean to store the result
1050
*
1051
* Helper to check if NCQ Priority feature is enabled.
1052
*
1053
* Context: Any context. Takes and releases @ap->lock.
1054
*
1055
* Return:
1056
* * %0 - OK. Status is stored into @enabled
1057
* * %-ENODEV - Failed to find the ATA device
1058
*/
1059
int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
1060
bool *enabled)
1061
{
1062
struct ata_device *dev;
1063
unsigned long flags;
1064
int rc = 0;
1065
1066
spin_lock_irqsave(ap->lock, flags);
1067
dev = ata_scsi_find_dev(ap, sdev);
1068
if (!dev)
1069
rc = -ENODEV;
1070
else
1071
*enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
1072
spin_unlock_irqrestore(ap->lock, flags);
1073
1074
return rc;
1075
}
1076
EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled);
1077
1078
static ssize_t ata_ncq_prio_enable_show(struct device *device,
1079
struct device_attribute *attr,
1080
char *buf)
1081
{
1082
struct scsi_device *sdev = to_scsi_device(device);
1083
struct ata_port *ap = ata_shost_to_port(sdev->host);
1084
bool enabled;
1085
int rc;
1086
1087
rc = ata_ncq_prio_enabled(ap, sdev, &enabled);
1088
if (rc)
1089
return rc;
1090
1091
return sysfs_emit(buf, "%d\n", enabled);
1092
}
1093
1094
/**
 * ata_ncq_prio_enable - Enable/disable NCQ Priority
 * @ap: ATA port of the target device
 * @sdev: SCSI device
 * @enable: true - enable NCQ Priority, false - disable NCQ Priority
 *
 * Helper to enable/disable NCQ Priority feature.
 *
 * Context: Any context. Takes and releases @ap->lock.
 *
 * Return:
 * * %0 - OK
 * * %-ENODEV - Failed to find the ATA device
 * * %-EINVAL - NCQ Priority is not supported or CDL is enabled
 */
int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
			bool enable)
{
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(ap->lock, flags);

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (enable) {
		/* NCQ priority and CDL are mutually exclusive features */
		if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
			ata_dev_err(dev,
				"CDL must be disabled to enable NCQ priority\n");
			rc = -EINVAL;
			goto unlock;
		}
		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
	} else {
		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
	}

unlock:
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_ncq_prio_enable);
/* sysfs store handler: parse a boolean and toggle NCQ priority accordingly */
static ssize_t ata_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct ata_port *port = ata_shost_to_port(sdev->host);
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (!ret)
		ret = ata_ncq_prio_enable(port, sdev, val);

	return ret ? ret : len;
}
/* Read-write sysfs attribute: query/toggle NCQ priority use for the device */
DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
/* Per-sdev sysfs attributes common to NCQ-capable libata hosts */
static struct attribute *ata_ncq_sdev_attrs[] = {
	&dev_attr_unload_heads.attr,
	&dev_attr_ncq_prio_enable.attr,
	&dev_attr_ncq_prio_supported.attr,
	NULL
};
/* Attribute group wrapping ata_ncq_sdev_attrs for sysfs registration */
static const struct attribute_group ata_ncq_sdev_attr_group = {
	.attrs = ata_ncq_sdev_attrs
};
/* NULL-terminated group list for hosts to plug into sdev_groups */
const struct attribute_group *ata_ncq_sdev_groups[] = {
	&ata_ncq_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups);
static ssize_t
1190
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
1191
const char *buf, size_t count)
1192
{
1193
struct Scsi_Host *shost = class_to_shost(dev);
1194
struct ata_port *ap = ata_shost_to_port(shost);
1195
if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
1196
return ap->ops->em_store(ap, buf, count);
1197
return -EINVAL;
1198
}
1199
1200
static ssize_t
1201
ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
1202
char *buf)
1203
{
1204
struct Scsi_Host *shost = class_to_shost(dev);
1205
struct ata_port *ap = ata_shost_to_port(shost);
1206
1207
if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
1208
return ap->ops->em_show(ap, buf);
1209
return -EINVAL;
1210
}
1211
/* Read-write sysfs attribute for the enclosure management message */
DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
	    ata_scsi_em_message_show, ata_scsi_em_message_store);
EXPORT_SYMBOL_GPL(dev_attr_em_message);
static ssize_t
1216
ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
1217
char *buf)
1218
{
1219
struct Scsi_Host *shost = class_to_shost(dev);
1220
struct ata_port *ap = ata_shost_to_port(shost);
1221
1222
return sysfs_emit(buf, "%d\n", ap->em_message_type);
1223
}
1224
/* Read-only sysfs attribute exposing the EM message type */
DEVICE_ATTR(em_message_type, S_IRUGO,
	    ata_scsi_em_message_type_show, NULL);
EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
static ssize_t
1229
ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
1230
char *buf)
1231
{
1232
struct scsi_device *sdev = to_scsi_device(dev);
1233
struct ata_port *ap = ata_shost_to_port(sdev->host);
1234
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
1235
1236
if (atadev && ap->ops->sw_activity_show &&
1237
(ap->flags & ATA_FLAG_SW_ACTIVITY))
1238
return ap->ops->sw_activity_show(atadev, buf);
1239
return -EINVAL;
1240
}
1241
1242
static ssize_t
1243
ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
1244
const char *buf, size_t count)
1245
{
1246
struct scsi_device *sdev = to_scsi_device(dev);
1247
struct ata_port *ap = ata_shost_to_port(sdev->host);
1248
struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
1249
enum sw_activity val;
1250
int rc;
1251
1252
if (atadev && ap->ops->sw_activity_store &&
1253
(ap->flags & ATA_FLAG_SW_ACTIVITY)) {
1254
val = simple_strtoul(buf, NULL, 0);
1255
switch (val) {
1256
case OFF: case BLINK_ON: case BLINK_OFF:
1257
rc = ap->ops->sw_activity_store(atadev, val);
1258
if (!rc)
1259
return count;
1260
else
1261
return rc;
1262
}
1263
}
1264
return -EINVAL;
1265
}
1266
/* Read-write sysfs attribute controlling the software activity LED */
DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
	    ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
 * ata_change_queue_depth - Set a device maximum queue depth
 * @ap: ATA port of the target device
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * Helper to set a device maximum queue depth, usable with both libsas
 * and libata.
 *
 * Return: The resulting queue depth. The current depth is returned
 * unchanged if @queue_depth is invalid or already in effect; %-EINVAL
 * is returned if @queue_depth exceeds the device or host limits.
 */
int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
			   int queue_depth)
{
	struct ata_device *dev;
	unsigned long flags;
	int max_queue_depth;

	spin_lock_irqsave(ap->lock, flags);

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || queue_depth < 1 || queue_depth == sdev->queue_depth) {
		spin_unlock_irqrestore(ap->lock, flags);
		return sdev->queue_depth;
	}

	/*
	 * Make sure that the queue depth requested does not exceed the device
	 * capabilities.
	 */
	max_queue_depth = min(ATA_MAX_QUEUE, sdev->host->can_queue);
	max_queue_depth = min(max_queue_depth, ata_id_queue_depth(dev->id));
	if (queue_depth > max_queue_depth) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	/*
	 * If NCQ is not supported by the device or if the target queue depth
	 * is 1 (to disable drive side command queueing), turn off NCQ.
	 */
	if (queue_depth == 1 || !ata_ncq_supported(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	} else {
		dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Depth may have been clamped to 1 above; skip the no-op update */
	if (queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	return scsi_change_queue_depth(sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_change_queue_depth);
/**
1327
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
1328
* @sdev: SCSI device to configure queue depth for
1329
* @queue_depth: new queue depth
1330
*
1331
* This is libata standard hostt->change_queue_depth callback.
1332
* SCSI will call into this callback when user tries to set queue
1333
* depth via sysfs.
1334
*
1335
* LOCKING:
1336
* SCSI layer (we don't care)
1337
*
1338
* RETURNS:
1339
* Newly configured queue depth.
1340
*/
1341
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1342
{
1343
struct ata_port *ap = ata_shost_to_port(sdev->host);
1344
1345
return ata_change_queue_depth(ap, sdev, queue_depth);
1346
}
1347
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1348
1349
/**
1350
* ata_sas_sdev_configure - Default sdev_configure routine for libata
1351
* devices
1352
* @sdev: SCSI device to configure
1353
* @lim: queue limits
1354
* @ap: ATA port to which SCSI device is attached
1355
*
1356
* RETURNS:
1357
* Zero.
1358
*/
1359
1360
int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
1361
struct ata_port *ap)
1362
{
1363
ata_scsi_sdev_config(sdev);
1364
1365
return ata_scsi_dev_config(sdev, lim, ap->link.device);
1366
}
1367
EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
1368
1369
/**
1370
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
1371
* @cmd: SCSI command to be sent
1372
* @ap: ATA port to which the command is being sent
1373
*
1374
* RETURNS:
1375
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
1376
* 0 otherwise.
1377
*/
1378
1379
int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
1380
{
1381
int rc = 0;
1382
1383
if (likely(ata_dev_enabled(ap->link.device)))
1384
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
1385
else {
1386
cmd->result = (DID_BAD_TARGET << 16);
1387
scsi_done(cmd);
1388
}
1389
return rc;
1390
}
1391
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);
1392
1393
/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received. This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* Read and clear SNotification; rc != 0 means SNTF is unavailable */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/*
			 * PMP is not attached. Check whether ATAPI
			 * AN is configured. If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/*
			 * PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used. The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/*
		 * If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
EXPORT_SYMBOL_GPL(sata_async_notification);
/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	/* The whole 512-byte page should sum to zero; warn but keep going */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* Bit 7 of byte 0 set means no queued error to report */
	if (buf[0] & 0x80)
		return -ENOENT;

	/* Low 5 bits of byte 0 hold the failed command's NCQ tag */
	*tag = buf[0] & 0x1f;

	/* Decode the taskfile image from its fixed byte offsets */
	tf->status = buf[2];
	tf->error = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	/* NCQ autosense: sense key/asc/ascq are packed into auxiliary */
	if (ata_id_has_ncq_autosense(dev->id) && (tf->status & ATA_SENSE))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}
/**
 * ata_eh_get_ncq_success_sense - Read and process the sense data for
 *				  successful NCQ commands log page
 * @link: ATA link to get sense data for
 *
 * Read the sense data for successful NCQ commands log page to obtain
 * sense data for all NCQ commands that completed successfully with
 * the sense data available bit set.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_eh_get_ncq_success_sense(struct ata_link *link)
{
	struct ata_device *dev = link->device;
	struct ata_port *ap = dev->link->ap;
	u8 *buf = dev->cdl->ncq_sense_log_buf;
	struct ata_queued_cmd *qc;
	unsigned int err_mask, tag;
	u8 *sense, sk = 0, asc = 0, ascq = 0;
	u16 extended_sense;
	bool aux_icc_valid;
	u32 sense_valid;
	u64 val;
	int ret = 0;

	err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
	if (err_mask) {
		ata_dev_err(dev,
			"Failed to read Sense Data for Successful NCQ Commands log\n");
		return -EIO;
	}

	/* Check the log header */
	val = get_unaligned_le64(&buf[0]);
	if ((val & 0xffff) != 1 || ((val >> 16) & 0xff) != 0x0f) {
		ata_dev_err(dev,
			"Invalid Sense Data for Successful NCQ Commands log\n");
		return -EIO;
	}

	/* Per-tag "sense data valid" bitmap plus AUX/ICC validity flag */
	sense_valid = get_unaligned_le32(&buf[8]);
	extended_sense = get_unaligned_le16(&buf[14]);
	aux_icc_valid = extended_sense & BIT(15);

	ata_qc_for_each_raw(ap, qc, tag) {
		/* Only successful EH commands on this link are of interest */
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
		    qc->err_mask ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/*
		 * If the command does not have any sense data, clear ATA_SENSE.
		 * Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
		 */
		if (!(sense_valid & BIT(tag))) {
			qc->result_tf.status &= ~ATA_SENSE;
			continue;
		}

		/* 24-byte descriptor per tag, starting at offset 32 */
		sense = &buf[32 + 24 * tag];
		sk = sense[0];
		asc = sense[1];
		ascq = sense[2];

		if (!ata_scsi_sense_is_valid(sk, asc, ascq)) {
			ret = -EIO;
			continue;
		}

		qc->result_tf.nsect = sense[6];
		qc->result_tf.hob_nsect = sense[7];
		qc->result_tf.lbal = sense[8];
		qc->result_tf.lbam = sense[9];
		qc->result_tf.lbah = sense[10];
		qc->result_tf.hob_lbal = sense[11];
		qc->result_tf.hob_lbam = sense[12];
		qc->result_tf.hob_lbah = sense[13];
		if (aux_icc_valid)
			qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);

		/* Set sense without also setting scsicmd->result */
		scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
					qc->scsicmd->sense_buffer, sk,
					asc, ascq);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;

		/*
		 * No point in checking the return value, since the command has
		 * already completed successfully.
		 */
		ata_eh_decide_disposition(qc);
	}

	return ret;
}
/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF. For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask. This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ata_port_is_frozen(ap))
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* Sanity check: the reported tag must be an outstanding NCQ tag */
	if (!(link->sactive & BIT(tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;

	/*
	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
	 * stored the sense data in qc->result_tf.auxiliary.
	 */
	if (qc->result_tf.auxiliary) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
			ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
					   ascq);
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
		}
	}

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* Skip the single QC which caused the NCQ error. */
		if (qc->err_mask)
			continue;

		/*
		 * For SATA, the STATUS and ERROR fields are shared for all NCQ
		 * commands that were completed with the same SDB FIS.
		 * Therefore, we have to clear the ATA_ERR bit for all QCs
		 * except the one that caused the NCQ error.
		 */
		qc->result_tf.status &= ~ATA_ERR;
		qc->result_tf.error = 0;

		/*
		 * If we get a NCQ error, that means that a single command was
		 * aborted. All other failed commands for our link should be
		 * retried and has no business of going though further scrutiny
		 * by ata_eh_link_autopsy().
		 */
		qc->flags |= ATA_QCFLAG_RETRY;
	}

	ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
/* Default ops for SATA ports: standard NCQ qc_defer plus SATA hardreset */
const struct ata_port_operations sata_port_ops = {
	.inherits = &ata_base_port_ops,

	.qc_defer = ata_std_qc_defer,
	.reset.hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);