Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/ata/libahci.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* libahci.c - Common AHCI SATA low-level routines
4
*
5
* Maintained by: Tejun Heo <[email protected]>
6
* Please ALWAYS copy [email protected]
7
* on emails.
8
*
9
* Copyright 2004-2005 Red Hat, Inc.
10
*
11
* libata documentation is available via 'make {ps|pdf}docs',
12
* as Documentation/driver-api/libata.rst
13
*
14
* AHCI hardware documentation:
15
* http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
16
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
17
*/
18
19
#include <linux/bitops.h>
20
#include <linux/kernel.h>
21
#include <linux/gfp.h>
22
#include <linux/module.h>
23
#include <linux/nospec.h>
24
#include <linux/blkdev.h>
25
#include <linux/delay.h>
26
#include <linux/interrupt.h>
27
#include <linux/dma-mapping.h>
28
#include <linux/device.h>
29
#include <scsi/scsi_host.h>
30
#include <scsi/scsi_cmnd.h>
31
#include <linux/libata.h>
32
#include <linux/pci.h>
33
#include "ahci.h"
34
#include "libata.h"
35
36
/*
 * Module parameters: allow skipping the global host reset and ignoring
 * the staggered spin-up (SSS) flag at load time. Both are read-only
 * after load (permission 0444). ahci_ignore_sss is exported for use by
 * the platform/PCI AHCI front-ends.
 */
static int ahci_skip_host_reset;
int ahci_ignore_sss;
EXPORT_SYMBOL_GPL(ahci_ignore_sss);

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
/* Forward declarations for the port operations and sysfs helpers below. */
static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			unsigned hints);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size);

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
static void ahci_enable_fbs(struct ata_port *ap);
static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
				    unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_dev_config(struct ata_device *dev);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
				   enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static ssize_t ahci_show_host_caps(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_cap2(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_version(struct device *dev,
				      struct device_attribute *attr, char *buf);
static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf);
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev,
				      struct device_attribute *attr, char *buf);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);

/* Host-wide sysfs attributes; all read-only except the EM message buffer. */
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
		   ahci_read_em_buffer, ahci_store_em_buffer);
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
/* sysfs attributes exposed on the SCSI host object. */
static struct attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_supported.attr,
	&dev_attr_link_power_management_policy.attr,
	&dev_attr_em_message_type.attr,
	&dev_attr_em_message.attr,
	&dev_attr_ahci_host_caps.attr,
	&dev_attr_ahci_host_cap2.attr,
	&dev_attr_ahci_host_version.attr,
	&dev_attr_ahci_port_cmd.attr,
	&dev_attr_em_buffer.attr,
	&dev_attr_em_message_supported.attr,
	NULL
};

static const struct attribute_group ahci_shost_attr_group = {
	.attrs = ahci_shost_attrs
};

const struct attribute_group *ahci_shost_groups[] = {
	&ahci_shost_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_groups);

/* sysfs attributes exposed on each SCSI device (per-disk) object. */
static struct attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity.attr,
	&dev_attr_unload_heads.attr,
	&dev_attr_ncq_prio_supported.attr,
	&dev_attr_ncq_prio_enable.attr,
	NULL
};

static const struct attribute_group ahci_sdev_attr_group = {
	.attrs = ahci_sdev_attrs
};

const struct attribute_group *ahci_sdev_groups[] = {
	&ahci_sdev_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_groups);
/*
 * Default AHCI port operations. Inherits SATA port-multiplier support
 * and wires up the low-level command, reset, power-management and
 * enclosure-management hooks implemented in this file.
 */
struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= ahci_pmp_qc_defer,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,
	.qc_ncq_fill_rtf	= ahci_qc_ncq_fill_rtf,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.reset.softreset	= ahci_softreset,
	.reset.hardreset	= ahci_hardreset,
	.reset.postreset	= ahci_postreset,
	.pmp_reset.softreset	= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	.set_lpm		= ahci_set_lpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
	.transmit_led_message	= ahci_transmit_led_message,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);

/* Variant used for port multipliers that need softreset retried. */
struct ata_port_operations ahci_pmp_retry_srst_ops = {
	.inherits		= &ahci_ops,
	.reset.softreset	= ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
/* Enclosure Management message support; read-only after module load. */
static bool ahci_em_messages __read_mostly = true;
module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");

/* device sleep idle timeout in ms */
static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
/*
 * ahci_enable_ahci - force the HBA into AHCI mode
 * @mmio: base of the AHCI MMIO region
 *
 * Sets HOST_AHCI_EN in the global HOST_CTL register. Some controllers
 * need the bit written several times before it latches, so retry up to
 * five times with a 10 ms delay between attempts; WARN if it never
 * sticks.
 */
static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}
/**
 * ahci_rpm_get_port - Make sure the port is powered on
 * @ap: Port to power on
 *
 * Whenever there is need to access the AHCI host registers outside of
 * normal execution paths, call this function to make sure the host is
 * actually powered on.
 *
 * Return: result of pm_runtime_get_sync() — negative errno on failure;
 * the runtime-PM usage count is incremented either way, so callers must
 * balance with ahci_rpm_put_port().
 */
static int ahci_rpm_get_port(struct ata_port *ap)
{
	return pm_runtime_get_sync(ap->dev);
}
/**
 * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
 * @ap: Port to power down
 *
 * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
 * if it has no more active users.
 */
static void ahci_rpm_put_port(struct ata_port *ap)
{
	pm_runtime_put(ap->dev);
}
static ssize_t ahci_show_host_caps(struct device *dev,
262
struct device_attribute *attr, char *buf)
263
{
264
struct Scsi_Host *shost = class_to_shost(dev);
265
struct ata_port *ap = ata_shost_to_port(shost);
266
struct ahci_host_priv *hpriv = ap->host->private_data;
267
268
return sprintf(buf, "%x\n", hpriv->cap);
269
}
270
271
static ssize_t ahci_show_host_cap2(struct device *dev,
272
struct device_attribute *attr, char *buf)
273
{
274
struct Scsi_Host *shost = class_to_shost(dev);
275
struct ata_port *ap = ata_shost_to_port(shost);
276
struct ahci_host_priv *hpriv = ap->host->private_data;
277
278
return sprintf(buf, "%x\n", hpriv->cap2);
279
}
280
281
static ssize_t ahci_show_host_version(struct device *dev,
282
struct device_attribute *attr, char *buf)
283
{
284
struct Scsi_Host *shost = class_to_shost(dev);
285
struct ata_port *ap = ata_shost_to_port(shost);
286
struct ahci_host_priv *hpriv = ap->host->private_data;
287
288
return sprintf(buf, "%x\n", hpriv->version);
289
}
290
291
/*
 * ahci_show_port_cmd - sysfs: dump the raw PxCMD register in hex
 *
 * The port must be runtime-resumed around the MMIO read, hence the
 * ahci_rpm_get_port()/ahci_rpm_put_port() bracket.
 */
static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	void __iomem *port_mmio = ahci_port_base(ap);
	ssize_t ret;

	ahci_rpm_get_port(ap);
	ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
	ahci_rpm_put_port(ap);

	return ret;
}
/*
 * ahci_read_em_buffer - sysfs read handler for the SGPIO EM buffer
 * @dev:  SCSI host device
 * @attr: sysfs attribute (unused)
 * @buf:  destination buffer (one page, as provided by sysfs)
 *
 * Copies the enclosure-management message buffer out of MMIO space one
 * 32-bit word at a time, under ap->lock and with the port
 * runtime-resumed.
 *
 * Return: number of bytes copied, -EINVAL if the controller has no
 * SGPIO EM support or a transmission is in flight, -EAGAIN if no
 * message is ready (EM_CTL_MR clear).
 */
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	ahci_rpm_get_port(ap);
	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EINVAL;
	}

	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EAGAIN;
	}

	/* if there is no separate message buffer, skip past the control area */
	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_warn(ap,
				      "EM read buffer size too large: "
				      "buffer size %u, page size %lu\n",
				      hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	/* unpack each little-endian word into four output bytes */
	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);
	ahci_rpm_put_port(ap);

	return i;
}
/*
 * ahci_store_em_buffer - sysfs write handler for the SGPIO EM buffer
 * @dev:  SCSI host device
 * @attr: sysfs attribute (unused)
 * @buf:  message bytes from userspace
 * @size: byte count; must be a multiple of 4 and fit in em_buf_sz
 *
 * Copies the user-supplied enclosure-management message into the EM
 * transmit buffer as 32-bit words and starts transmission by setting
 * EM_CTL_TM, all under ap->lock with the port runtime-resumed.
 *
 * Return: @size on success, -EINVAL on a bad size or unsupported
 * controller, -EBUSY if a transmission is already in flight.
 */
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	const unsigned char *msg_buf = buf;
	u32 em_ctl, msg;
	unsigned long flags;
	int i;

	/* check size validity */
	if (!(ap->flags & ATA_FLAG_EM) ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
	    size % 4 || size > hpriv->em_buf_sz)
		return -EINVAL;

	ahci_rpm_get_port(ap);
	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EBUSY;
	}

	for (i = 0; i < size; i += 4) {
		/*
		 * Assemble the top byte with u32 arithmetic: the byte is
		 * promoted to signed int, and left-shifting a value
		 * >= 0x80 by 24 would shift into the sign bit, which is
		 * undefined behavior in C.
		 */
		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
		      msg_buf[i + 2] << 16 | (u32)msg_buf[i + 3] << 24;
		writel(msg, em_mmio + i);
	}

	/* kick off transmission of the freshly written message */
	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);
	ahci_rpm_put_port(ap);

	return size;
}
/*
 * ahci_show_em_supported - sysfs: list supported EM message protocols
 *
 * Reads HOST_EM_CTL (with the port runtime-resumed) and prints which of
 * the LED / SAF-TE / SES-2 / SGPIO enclosure-management protocols the
 * HBA advertises.
 */
static ssize_t ahci_show_em_supported(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;

	ahci_rpm_get_port(ap);
	em_ctl = readl(mmio + HOST_EM_CTL);
	ahci_rpm_put_port(ap);

	return sprintf(buf, "%s%s%s%s\n",
		       em_ctl & EM_CTL_LED ? "led " : "",
		       em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
		       em_ctl & EM_CTL_SES ? "ses-2 " : "",
		       em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
}
/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @dev: target AHCI device
 * @hpriv: host private area to store config values
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset. This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * If it is not set already this function sets hpriv->start_engine to
 * ahci_start_engine.
 *
 * LOCKING:
 * None.
 */
void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = hpriv->mmio;
	void __iomem *port_mmio;
	unsigned long port_map;
	u32 cap, cap2, vers;
	int i;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/*
	 * Values prefixed with saved_ are written back to the HBA and ports
	 * registers after reset. Values without are used for driver operation.
	 */

	/*
	 * Override HW-init HBA capability fields with the platform-specific
	 * values. The rest of the HBA capabilities are defined as Read-only
	 * and can't be modified in CSR anyway.
	 */
	cap = readl(mmio + HOST_CAP);
	if (hpriv->saved_cap)
		cap = (cap & ~(HOST_CAP_SSS | HOST_CAP_MPS)) | hpriv->saved_cap;
	hpriv->saved_cap = cap;

	/* CAP2 register is only defined for AHCI 1.2 and later */
	vers = readl(mmio + HOST_VERSION);
	if ((vers >> 16) > 1 ||
	    ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
	else
		hpriv->saved_cap2 = cap2 = 0;

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
		dev_info(dev,
			 "controller can't do SNTF, turning off CAP_SNTF\n");
		cap &= ~HOST_CAP_SNTF;
	}

	if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
		dev_info(dev,
			 "controller can't do DEVSLP, turning off\n");
		cap2 &= ~HOST_CAP2_SDS;
		cap2 &= ~HOST_CAP2_SADM;
	}

	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
		dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
		cap |= HOST_CAP_FBS;
	}

	if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
		dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
		cap &= ~HOST_CAP_FBS;
	}

	if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
		dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
		cap |= HOST_CAP_ALPM;
	}

	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
		dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
		cap &= ~HOST_CAP_SXS;
	}

	/* Override the HBA ports mapping if the platform needs it */
	port_map = readl(mmio + HOST_PORTS_IMPL);
	if (hpriv->saved_port_map && port_map != hpriv->saved_port_map) {
		dev_info(dev, "forcing port_map 0x%lx -> 0x%x\n",
			 port_map, hpriv->saved_port_map);
		port_map = hpriv->saved_port_map;
	} else {
		hpriv->saved_port_map = port_map;
	}

	/* mask_port_map not set means that all ports are available */
	if (hpriv->mask_port_map) {
		dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
			 port_map,
			 port_map & hpriv->mask_port_map);
		port_map &= hpriv->mask_port_map;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_warn(dev,
				 "implemented port map (0x%lx) contains more ports than nr_ports (%u), using nr_ports\n",
				 port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
	if (!port_map && vers < 0x10300) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_warn(dev, "forcing PORTS_IMPL to 0x%lx\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/*
	 * Preserve the ports capabilities defined by the platform. Note there
	 * is no need in storing the rest of the P#.CMD fields since they are
	 * volatile.
	 */
	for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
		if (hpriv->saved_port_cap[i])
			continue;

		port_mmio = __ahci_port_base(hpriv, i);
		hpriv->saved_port_cap[i] =
			readl(port_mmio + PORT_CMD) & PORT_CMD_CAP;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->cap2 = cap2;
	hpriv->version = vers;
	hpriv->port_map = port_map;

	if (!hpriv->start_engine)
		hpriv->start_engine = ahci_start_engine;

	if (!hpriv->stop_engine)
		hpriv->stop_engine = ahci_stop_engine;

	if (!hpriv->irq_handler)
		hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	unsigned long port_map = hpriv->port_map;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *port_mmio;
	int i;

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	if (hpriv->saved_cap2)
		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */

	/* restore the saved per-port capability bits in each PxCMD */
	for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
		port_mmio = __ahci_port_base(hpriv, i);
		writel(hpriv->saved_port_cap[i], port_mmio + PORT_CMD);
	}
}
static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
642
{
643
static const int offset[] = {
644
[SCR_STATUS] = PORT_SCR_STAT,
645
[SCR_CONTROL] = PORT_SCR_CTL,
646
[SCR_ERROR] = PORT_SCR_ERR,
647
[SCR_ACTIVE] = PORT_SCR_ACT,
648
[SCR_NOTIFICATION] = PORT_SCR_NTF,
649
};
650
struct ahci_host_priv *hpriv = ap->host->private_data;
651
652
if (sc_reg < ARRAY_SIZE(offset) &&
653
(sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
654
return offset[sc_reg];
655
return 0;
656
}
657
658
/* Read a SATA SCR register; -EINVAL if the HBA doesn't implement it. */
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (!offset)
		return -EINVAL;

	*val = readl(port_mmio + offset);
	return 0;
}
/* Write a SATA SCR register; -EINVAL if the HBA doesn't implement it. */
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (!offset)
		return -EINVAL;

	writel(val, port_mmio + offset);
	return 0;
}
/*
 * ahci_start_engine - start the port's command-list DMA engine
 * @ap: port to start
 *
 * Sets PxCMD.ST and flushes the write with a read-back.
 */
void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD);	/* flush */
}
EXPORT_SYMBOL_GPL(ahci_start_engine);
/*
 * ahci_stop_engine - stop the port's command-list DMA engine
 * @ap: port to stop
 *
 * Clears PxCMD.ST and waits up to 500 ms for PxCMD.CR to clear.
 *
 * Return: 0 on success (or if the engine was already idle), -EIO if
 * the engine refuses to stop or a pre-stop LPM wakeup fails, -ENODEV
 * if the controller has vanished (all-ones register read).
 */
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;

	/*
	 * On some controllers, stopping a port's DMA engine while the port
	 * is in ALPM state (partial or slumber) results in failures on
	 * subsequent DMA engine starts. For those controllers, put the
	 * port back in active state before stopping its DMA engine.
	 */
	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
		return -EIO;
	}

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/*
	 * Don't try to issue commands but return with ENODEV if the
	 * AHCI controller not available anymore (e.g. due to PCIe hot
	 * unplugging). Otherwise a 500ms delay for each port is added.
	 */
	if (tmp == 0xffffffff) {
		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
		return -ENODEV;
	}

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
/*
 * ahci_start_fis_rx - program FIS/command-list addresses and enable FIS RX
 * @ap: port to set up
 *
 * Writes the DMA addresses of the command list and received-FIS area
 * (high halves only on 64-bit capable HBAs), then sets PxCMD.FRE and
 * flushes with a read-back.
 */
void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
/*
 * ahci_stop_fis_rx - disable FIS reception on a port
 * @ap: port to stop
 *
 * Clears PxCMD.FRE and waits for PxCMD.FR to clear.
 *
 * Return: 0 on success, -EBUSY if the FIS engine is still running
 * after the timeout.
 */
static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
/*
 * ahci_power_up - spin up the device and wake the link
 * @ap: port to power up
 *
 * On HBAs with staggered spin-up (CAP.SSS), first sets PxCMD.SUD, then
 * transitions the interface to the active state via PxCMD.ICC.
 */
static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
/*
 * ahci_set_lpm - configure link power management for a link
 * @link:   link to configure
 * @policy: target LPM policy
 * @hints:  ATA_LPM_* hints (HIPM / WAKE_ONLY)
 *
 * Applies the requested policy in careful order: PHYRDY interrupts are
 * masked before lowering power (to avoid spurious wakeups) and
 * re-enabled only after returning to max power; the HBA's aggressive
 * LPM (ALPE/ASP) and aggressive device sleep are programmed according
 * to the policy. With ATA_LPM_WAKE_ONLY the function only brings the
 * interface back to the active ICC state and returns early.
 *
 * Return: always 0.
 */
static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			unsigned int hints)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);

	if (policy != ATA_LPM_MAX_POWER) {
		/* wakeup flag only applies to the max power policy */
		hints &= ~ATA_LPM_WAKE_ONLY;

		/*
		 * Disable interrupts on Phy Ready. This keeps us from
		 * getting woken up due to spurious phy ready
		 * interrupts.
		 */
		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

		sata_link_scr_lpm(link, policy, false);
	}

	if (hpriv->cap & HOST_CAP_ALPM) {
		u32 cmd = readl(port_mmio + PORT_CMD);

		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
			if (!(hints & ATA_LPM_WAKE_ONLY))
				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
			cmd |= PORT_CMD_ICC_ACTIVE;

			writel(cmd, port_mmio + PORT_CMD);
			readl(port_mmio + PORT_CMD);

			/* wait 10ms to be sure we've come out of LPM state */
			ata_msleep(ap, 10);

			if (hints & ATA_LPM_WAKE_ONLY)
				return 0;
		} else {
			cmd |= PORT_CMD_ALPE;
			if (policy == ATA_LPM_MIN_POWER)
				cmd |= PORT_CMD_ASP;
			else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
				cmd &= ~PORT_CMD_ASP;

			/* write out new cmd value */
			writel(cmd, port_mmio + PORT_CMD);
		}
	}

	/* set aggressive device sleep */
	if ((hpriv->cap2 & HOST_CAP2_SDS) &&
	    (hpriv->cap2 & HOST_CAP2_SADM) &&
	    (link->device->flags & ATA_DFLAG_DEVSLP)) {
		if (policy == ATA_LPM_MIN_POWER ||
		    policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
			ahci_set_aggressive_devslp(ap, true);
		else
			ahci_set_aggressive_devslp(ap, false);
	}

	if (policy == ATA_LPM_MAX_POWER) {
		sata_link_scr_lpm(link, policy, false);

		/* turn PHYRDY IRQ back on */
		pp->intr_mask |= PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
	}

	return 0;
}
#ifdef CONFIG_PM
/*
 * ahci_power_down - put the port's device into listen mode
 * @ap: port to power down
 *
 * Only meaningful on HBAs with staggered spin-up (CAP.SSS): clears
 * PxSCTL.DET first, then clears PxCMD.SUD so the device spins down
 * while the port keeps listening for connect events.
 */
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif
/*
 * ahci_start_port - bring a port into operational state
 * @ap: port to start
 *
 * Enables FIS reception, starts the DMA engine (unless the platform
 * requests delayed engine start), restores the enclosure-management
 * LED state for every link, and initializes software-activity LED
 * blinking where enabled.
 */
static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
		hpriv->start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit maybe busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ap->ops->transmit_led_message(ap,
							       emp->led_state,
							       4);
				/*
				 * If busy, give a breather but do not
				 * release EH ownership by using msleep()
				 * instead of ata_msleep(). EM Transmit
				 * bit is busy for the whole host and
				 * releasing ownership will cause other
				 * ports to fail the same way.
				 */
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);

}
/*
 * ahci_deinit_port - shut down a port's engines
 * @ap:   port to deinitialize
 * @emsg: out: human-readable reason on failure
 *
 * Stops the DMA engine and then FIS reception, in that order.
 *
 * Return: 0 on success, negative errno (with *emsg set) on failure.
 */
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;
	struct ahci_host_priv *hpriv = ap->host->private_data;

	/* disable DMA */
	rc = hpriv->stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}
/*
 * ahci_reset_controller - perform a global HBA reset
 * @host: ATA host to reset
 *
 * Ensures AHCI mode, sets HOST_RESET and polls until the HBA clears
 * it (up to one second), re-enables AHCI mode, and restores the saved
 * initial configuration. Honors the skip_host_reset module parameter.
 *
 * Return: 0 on success, -EIO if the reset never completes.
 */
int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/*
	 * We must be in AHCI mode, before using anything AHCI-specific, such
	 * as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* Global controller reset */
	if (ahci_skip_host_reset) {
		dev_info(host->dev, "Skipping global host reset\n");
		return 0;
	}

	tmp = readl(mmio + HOST_CTL);
	if (!(tmp & HOST_RESET)) {
		writel(tmp | HOST_RESET, mmio + HOST_CTL);
		readl(mmio + HOST_CTL);	/* flush */
	}

	/*
	 * To perform host reset, OS should set HOST_RESET and poll until this
	 * bit is read to be "0". Reset must complete within 1 second, or the
	 * hardware should be considered fried.
	 */
	tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
				HOST_RESET, 10, 1000);
	if (tmp & HOST_RESET) {
		dev_err(host->dev, "Controller reset failed (0x%x)\n",
			tmp);
		return -EIO;
	}

	/* Turn on AHCI mode */
	ahci_enable_ahci(mmio);

	/* Some registers might be cleared on reset. Restore initial values. */
	if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
		ahci_restore_initial_config(host);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);
/*
 * ahci_sw_activity - record I/O activity for software LED blinking
 * @link: link that saw activity
 *
 * Bumps the link's activity counter and arms the blink timer (10 ms)
 * if it is not already pending. No-op unless the link has
 * ATA_LFLAG_SW_ACTIVITY set.
 */
static void ahci_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
		return;

	emp->activity++;
	if (!timer_pending(&emp->timer))
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}
/*
 * ahci_sw_activity_blink - timer callback driving the SW activity LED
 * @t: blink timer embedded in the per-link ahci_em_priv
 *
 * If new activity was observed since the last tick, toggles the LED and
 * re-arms the timer; otherwise sets the LED to its idle state per the
 * link's blink policy.  The LED state word is built and read under the
 * port lock; the EM message is transmitted after dropping it.
 */
static void ahci_sw_activity_blink(struct timer_list *t)
{
	struct ahci_em_priv *emp = timer_container_of(emp, t, timer);
	struct ata_link *link = emp->link;
	struct ata_port *ap = link->ap;

	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	/* Low bits address the target: HBA port number plus PMP slot. */
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity. If so,
	 * toggle state of LED and reset timer. If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ap->ops->transmit_led_message(ap, led_message, 4);
}
1079
1080
static void ahci_init_sw_activity(struct ata_link *link)
1081
{
1082
struct ata_port *ap = link->ap;
1083
struct ahci_port_priv *pp = ap->private_data;
1084
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1085
1086
/* init activity stats, setup timer */
1087
emp->saved_activity = emp->activity = 0;
1088
emp->link = link;
1089
timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
1090
1091
/* check our blink policy and set flag for link if it's enabled */
1092
if (emp->blink_policy)
1093
link->flags |= ATA_LFLAG_SW_ACTIVITY;
1094
}
1095
1096
int ahci_reset_em(struct ata_host *host)
1097
{
1098
struct ahci_host_priv *hpriv = host->private_data;
1099
void __iomem *mmio = hpriv->mmio;
1100
u32 em_ctl;
1101
1102
em_ctl = readl(mmio + HOST_EM_CTL);
1103
if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1104
return -EINVAL;
1105
1106
writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1107
return 0;
1108
}
1109
EXPORT_SYMBOL_GPL(ahci_reset_em);
1110
1111
/*
 * ahci_transmit_led_message - send an LED control message via EM
 * @ap:    port the message targets
 * @state: encoded LED state (value bits, port and PMP slot fields)
 * @size:  byte count to report back to the caller on success
 *
 * Writes a two-dword LED message into the EM transmit buffer and kicks
 * EM_CTL.TM, then caches @state per slot so it can be re-read and
 * blinked later.  Returns @size on success, -EINVAL for a bad slot,
 * -EBUSY if a previous transmit is still in flight.
 */
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	/* keep the port awake while we poke the EM registers */
	ahci_rpm_get_port(ap);
	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc+4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	ahci_rpm_put_port(ap);

	return size;
}
1172
1173
static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1174
{
1175
struct ahci_port_priv *pp = ap->private_data;
1176
struct ata_link *link;
1177
struct ahci_em_priv *emp;
1178
int rc = 0;
1179
1180
ata_for_each_link(link, ap, EDGE) {
1181
emp = &pp->em_priv[link->pmp];
1182
rc += sprintf(buf, "%lx\n", emp->led_state);
1183
}
1184
return rc;
1185
}
1186
1187
/*
 * ahci_led_store - sysfs store handler for LED control messages
 * @ap:   port the message targets
 * @buf:  user-supplied numeric LED state string
 * @size: length of @buf, passed through to the transmit hook
 *
 * Parses the state word, validates the PMP slot field, and forwards the
 * message via the port's transmit_led_message op.  Returns the transmit
 * result, or -EINVAL on parse/slot errors.
 */
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
				size_t size)
{
	unsigned int state;
	int pmp;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp;

	if (kstrtouint(buf, 0, &state) < 0)
		return -EINVAL;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS) {
		/* clamp under speculation: pmp comes from userspace */
		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
		emp = &pp->em_priv[pmp];
	} else {
		return -EINVAL;
	}

	/* mask off the activity bits if we are in sw_activity
	 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
	if (emp->blink_policy)
		state &= ~EM_MSG_LED_VALUE_ACTIVITY;

	return ap->ops->transmit_led_message(ap, state, size);
}
1216
1217
/*
 * ahci_activity_store - set the software activity LED blink policy
 * @dev: device whose link's policy is changed
 * @val: OFF, BLINK_ON or BLINK_OFF
 *
 * Updates the link's SW-activity flag and immediately transmits an LED
 * message reflecting the new idle state where needed.  Always returns 0.
 */
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ap->ops->transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ap->ops->transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}
1247
1248
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1249
{
1250
struct ata_link *link = dev->link;
1251
struct ata_port *ap = link->ap;
1252
struct ahci_port_priv *pp = ap->private_data;
1253
struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1254
1255
/* display the saved value of activity behavior for this
1256
* disk.
1257
*/
1258
return sprintf(buf, "%d\n", emp->blink_policy);
1259
}
1260
1261
/*
 * ahci_port_clear_pending_irq - acknowledge all latched port events
 * @ap: port to clear
 *
 * Clears PxSERR, then PxIS, then the port's bit in the global
 * HOST_IRQ_STAT, in that order — port-level status must be cleared
 * before the host-level latch so the latter does not re-assert.
 */
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
1280
1281
/*
 * ahci_port_init - quiesce one port during controller init
 * @dev:       device for diagnostics
 * @ap:        port to initialize
 * @port_no:   port index (unused here, kept for the call signature)
 * @mmio:      HBA register base (unused here, kept for the call signature)
 * @port_mmio: port register base (unused here, kept for the call signature)
 *
 * Deactivates the port and clears any interrupts it had latched.
 */
static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *err_msg = NULL;
	int ret;

	/* Ensure the port engine and FIS reception are stopped. */
	ret = ahci_deinit_port(ap, &err_msg);
	if (ret)
		dev_warn(dev, "%s (%d)\n", err_msg, ret);

	ahci_port_clear_pending_irq(ap);
}
1295
1296
/*
 * ahci_init_controller - initialize all ports and enable HBA interrupts
 * @host: ATA host to initialize
 *
 * Quiesces every non-dummy port, then sets HOST_IRQ_EN.  Interrupt
 * enable comes last so no port raises an IRQ while still being set up.
 */
void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	int i;
	void __iomem *port_mmio;
	u32 tmp;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	/* read back to flush the write and log the effective value */
	tmp = readl(mmio + HOST_CTL);
	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
1321
1322
static void ahci_dev_config(struct ata_device *dev)
1323
{
1324
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1325
1326
if ((dev->class == ATA_DEV_ATAPI) &&
1327
(hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
1328
dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
1329
1330
if (hpriv->flags & AHCI_HFLAG_SECT255) {
1331
dev->max_sectors = 255;
1332
ata_dev_info(dev,
1333
"SB600 AHCI: limiting to 255 sectors per cmd\n");
1334
}
1335
}
1336
1337
unsigned int ahci_dev_classify(struct ata_port *ap)
1338
{
1339
void __iomem *port_mmio = ahci_port_base(ap);
1340
struct ata_taskfile tf;
1341
u32 tmp;
1342
1343
tmp = readl(port_mmio + PORT_SIG);
1344
tf.lbah = (tmp >> 24) & 0xff;
1345
tf.lbam = (tmp >> 16) & 0xff;
1346
tf.lbal = (tmp >> 8) & 0xff;
1347
tf.nsect = (tmp) & 0xff;
1348
1349
return ata_port_classify(ap, &tf);
1350
}
1351
EXPORT_SYMBOL_GPL(ahci_dev_classify);
1352
1353
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1354
u32 opts)
1355
{
1356
dma_addr_t cmd_tbl_dma;
1357
1358
cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1359
1360
pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1361
pp->cmd_slot[tag].status = 0;
1362
pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1363
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1364
}
1365
EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
1366
1367
/*
 * ahci_kick_engine - stop, optionally CLO, and restart the port engine
 * @ap: port to kick
 *
 * Stops the command engine and, when the device is busy or a port
 * multiplier is attached, issues a Command List Override to clear
 * BSY/DRQ (always done with a PMP per AHCI-1.3 9.2).  The engine is
 * restarted on every exit path.  Returns 0 on success, -EOPNOTSUPP if
 * CLO is needed but unsupported, or another negative errno.
 */
int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	/* snapshot the taskfile status before stopping the engine */
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = hpriv->stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	/* hardware clears CLO when the override completes */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	hpriv->start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
1411
1412
/*
 * ahci_exec_polled_cmd - issue a taskfile on slot 0 and poll for completion
 * @ap:           port to issue on
 * @pmp:          target port multiplier port
 * @tf:           taskfile to send
 * @is_cmd:       nonzero to set the C bit in the H2D FIS
 * @flags:        extra command header flags (e.g. AHCI_CMD_RESET)
 * @timeout_msec: poll timeout; 0 means fire-and-forget (write is flushed)
 *
 * Used for softreset FISes.  Returns 0 on success, -EBUSY (after kicking
 * the engine) if the command did not clear PxCI within the timeout.
 */
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned int timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* set port value for softreset of Port Multiplier */
	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
		tmp = readl(port_mmio + PORT_FBS);
		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		tmp |= pmp << PORT_FBS_DEV_OFFSET;
		writel(tmp, port_mmio + PORT_FBS);
		pp->fbs_last_dev = pmp;
	}

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
					0x1, 0x1, 1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
1450
1451
/*
 * ahci_do_softreset - perform an ATA software reset on a link
 * @link:        link to reset
 * @class:       out parameter for the resulting device class
 * @pmp:         PMP port number to address the SRST FISes to
 * @deadline:    jiffies deadline for the whole operation
 * @check_ready: callback polled to decide when the link is ready
 *
 * Implements the two-FIS SRST sequence (set then clear SRST) per
 * AHCI-1.1 10.4.1, temporarily disabling FBS for PMP-attached devices
 * as AHCI-1.2 9.3.9 requires.  Returns 0 on success, negative errno
 * otherwise.
 */
int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	const char *reason = NULL;
	unsigned long now;
	unsigned int msecs;
	struct ata_taskfile tf;
	bool fbs_disabled = false;
	int rc;

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);

	/*
	 * According to AHCI-1.2 9.3.9: if FBS is enable, software shall
	 * clear PxFBS.EN to '0' prior to issuing software reset to devices
	 * that is attached to port multiplier.
	 */
	if (!ata_is_host_link(link) && pp->fbs_enabled) {
		ahci_disable_fbs(ap);
		fbs_disabled = true;
	}

	ata_tf_init(link->device, &tf);

	/* issue the first H2D Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	ata_msleep(ap, 1);

	/* issue the second H2D Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted. Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_info(link, "device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	/* re-enable FBS if disabled before */
	if (fbs_disabled)
		ahci_enable_fbs(ap);

	return 0;

 fail:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return rc;
}
1530
1531
int ahci_check_ready(struct ata_link *link)
1532
{
1533
void __iomem *port_mmio = ahci_port_base(link->ap);
1534
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1535
1536
return ata_check_ready(status);
1537
}
1538
EXPORT_SYMBOL_GPL(ahci_check_ready);
1539
1540
static int ahci_softreset(struct ata_link *link, unsigned int *class,
1541
unsigned long deadline)
1542
{
1543
int pmp = sata_srst_pmp(link);
1544
1545
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1546
}
1547
EXPORT_SYMBOL_GPL(ahci_do_softreset);
1548
1549
static int ahci_bad_pmp_check_ready(struct ata_link *link)
1550
{
1551
void __iomem *port_mmio = ahci_port_base(link->ap);
1552
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1553
u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1554
1555
/*
1556
* There is no need to check TFDATA if BAD PMP is found due to HW bug,
1557
* which can save timeout delay.
1558
*/
1559
if (irq_status & PORT_IRQ_BAD_PMP)
1560
return -EIO;
1561
1562
return ata_check_ready(status);
1563
}
1564
1565
/*
 * ahci_pmp_retry_softreset - softreset with a BAD-PMP retry workaround
 * @link:     link to reset
 * @class:    out parameter for the device class
 * @deadline: jiffies deadline
 *
 * Some controllers raise IPMS when PMP support is enabled but a plain
 * SATA device is attached; in that case the softreset is retried
 * addressed to PMP port 0.  Returns 0 on success, negative errno
 * otherwise.
 */
static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	int pmp = sata_srst_pmp(link);
	int rc;
	u32 irq_sts;

	rc = ahci_do_softreset(link, class, pmp, deadline,
			       ahci_bad_pmp_check_ready);

	/*
	 * Soft reset fails with IPMS set when PMP is enabled but
	 * SATA HDD/ODD is connected to SATA port, do soft reset
	 * again to port 0.
	 */
	if (rc == -EIO) {
		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
		if (irq_sts & PORT_IRQ_BAD_PMP) {
			ata_link_warn(link,
					"applying PMP SRST workaround "
					"and retrying\n");
			rc = ahci_do_softreset(link, class, 0, deadline,
					       ahci_check_ready);
		}
	}

	return rc;
}
1595
1596
/*
 * ahci_do_hardreset - perform a COMRESET on a link
 * @link:     link to hard-reset
 * @class:    out parameter for the device class
 * @deadline: jiffies deadline
 * @online:   out parameter, set true if the link came up
 *
 * Stops the engine, seeds the D2H FIS reception area with a busy
 * status (so readiness can be detected when the real D2H FIS lands),
 * clears stale interrupts, and drives the PHY reset via
 * sata_link_hardreset().  The engine is restarted unconditionally.
 */
int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
		      unsigned long deadline, bool *online)
{
	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	hpriv->stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.status = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	ahci_port_clear_pending_irq(ap);

	rc = sata_link_hardreset(link, timing, deadline, online,
				 ahci_check_ready);

	hpriv->start_engine(ap);

	if (*online)
		*class = ahci_dev_classify(ap);

	return rc;
}
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
1627
1628
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1629
unsigned long deadline)
1630
{
1631
bool online;
1632
1633
return ahci_do_hardreset(link, class, deadline, &online);
1634
}
1635
1636
/*
 * ahci_postreset - post-reset fixups for an AHCI port
 * @link:  link that was reset
 * @class: classified device type from the reset
 *
 * Runs the generic postreset, then updates PxCMD's ATAPI bit to match
 * the attached device class, writing only when the value changed and
 * flushing with a read-back.
 */
static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}
1655
1656
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1657
{
1658
struct scatterlist *sg;
1659
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1660
unsigned int si;
1661
1662
/*
1663
* Next, the S/G list.
1664
*/
1665
for_each_sg(qc->sg, sg, qc->n_elem, si) {
1666
dma_addr_t addr = sg_dma_address(sg);
1667
u32 sg_len = sg_dma_len(sg);
1668
1669
ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1670
ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1671
ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1672
}
1673
1674
return si;
1675
}
1676
1677
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
1678
{
1679
struct ata_port *ap = qc->ap;
1680
struct ahci_port_priv *pp = ap->private_data;
1681
1682
if (!sata_pmp_attached(ap) || pp->fbs_enabled)
1683
return ata_std_qc_defer(qc);
1684
else
1685
return sata_pmp_qc_defer_cmd_switch(qc);
1686
}
1687
1688
/*
 * ahci_qc_prep - build the command table and slot for a queued command
 * @qc: command to prepare
 *
 * Writes the H2D command FIS (and ATAPI CDB if applicable) into the
 * tag's command table, fills the PRD entries for DMA-mapped commands,
 * and completes the command list slot.  Always returns AC_ERR_OK.
 */
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		/* zero-pad the CDB area before copying the actual CDB */
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);

	return AC_ERR_OK;
}
1727
1728
/*
 * ahci_fbs_dec_intr - clear a per-device FBS error condition
 * @ap: port with FBS enabled that reported a device error
 *
 * Sets PORT_FBS_DEC and polls briefly for the hardware to clear it.
 * Only valid while FBS is enabled (asserted via BUG_ON).
 */
static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_err(ap->host->dev, "failed to clear device error\n");
}
1750
1751
/*
 * ahci_error_intr - analyze and dispatch a port error interrupt
 * @ap:       port that raised the error
 * @irq_stat: snapshot of PxIS taken by the caller
 *
 * Determines the faulting link (via PxFBS when FBS is enabled,
 * otherwise the active link), records SError and the irq status in the
 * EH info, classifies each error bit into device/host-bus/ATA-bus
 * masks, and finally freezes or aborts the port/link as appropriate.
 * Called with the port lock held from interrupt context.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link. There's no active qc on NCQ errors. It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = pp->rx_fis + RX_FIS_UNK;

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
1861
1862
/*
 * ahci_qc_complete - complete commands that the hardware has finished
 * @ap:        port to scan
 * @port_mmio: port register base
 *
 * Reads the still-outstanding command mask from PxSACT and/or PxCI and
 * hands it to ata_qc_complete_multiple().  A negative result outside of
 * a reset indicates a lost/spurious completion and freezes the port.
 */
static void ahci_qc_complete(struct ata_port *ap, void __iomem *port_mmio)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	u32 qc_active = 0;
	int rc;

	/*
	 * pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}

	rc = ata_qc_complete_multiple(ap, qc_active);
	if (unlikely(rc < 0 && !(ap->pflags & ATA_PFLAG_RESETTING))) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
1894
1895
/*
 * ahci_handle_port_interrupt - process one port's interrupt status
 * @ap:        port being serviced
 * @port_mmio: port register base
 * @status:    PxIS snapshot (already acknowledged by the caller)
 *
 * Filters events that should be ignored in the current state, routes
 * error bits to ahci_error_intr() (completing any already-finished
 * commands first), emulates SNotification from the SDB FIS RX area
 * when the controller lacks HOST_CAP_SNTF, and completes finished
 * commands.  Called with the port lock held.
 */
static void ahci_handle_port_interrupt(struct ata_port *ap,
				       void __iomem *port_mmio, u32 status)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;

	/* ignore BAD_PMP while resetting */
	if (unlikely(ap->pflags & ATA_PFLAG_RESETTING))
		status &= ~PORT_IRQ_BAD_PMP;

	if (sata_lpm_ignore_phy_events(&ap->link)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		/*
		 * Before getting the error notification, we may have
		 * received SDB FISes notifying successful completions.
		 * Handle these first and then handle the error.
		 */
		ahci_qc_complete(ap, port_mmio);
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some constrollers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* Handle completed commands */
	ahci_qc_complete(ap, port_mmio);
}
1956
1957
static void ahci_port_intr(struct ata_port *ap)
1958
{
1959
void __iomem *port_mmio = ahci_port_base(ap);
1960
u32 status;
1961
1962
status = readl(port_mmio + PORT_IRQ_STAT);
1963
writel(status, port_mmio + PORT_IRQ_STAT);
1964
1965
ahci_handle_port_interrupt(ap, port_mmio, status);
1966
}
1967
1968
/*
 * ahci_multi_irqs_intr_hard - per-port hard IRQ handler (MSI-X/multi-MSI)
 * @irq:          interrupt number (unused)
 * @dev_instance: the ata_port this vector belongs to
 *
 * Reads and acknowledges PxIS outside the port lock (the vector is
 * dedicated to this port), then handles the events under the lock.
 */
static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
{
	struct ata_port *ap = dev_instance;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 status;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	spin_lock(ap->lock);
	ahci_handle_port_interrupt(ap, port_mmio, status);
	spin_unlock(ap->lock);

	return IRQ_HANDLED;
}
1983
1984
/*
 * ahci_handle_port_intr - dispatch a masked HOST_IRQ_STAT to its ports
 * @host:       ATA host being serviced
 * @irq_masked: HOST_IRQ_STAT already masked with the valid port map
 *
 * Services each port whose bit is set; a set bit for a missing port is
 * reported (rate-limited) but still counted.  Returns nonzero if any
 * port bit was set.
 */
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{
	unsigned int port, handled = 0;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << port)))
			continue;

		handled = 1;

		ap = host->ports[port];
		if (!ap) {
			if (ata_ratelimit())
				dev_warn(host->dev,
					 "interrupt on disabled port %u\n", port);
			continue;
		}

		ahci_port_intr(ap);
	}

	return handled;
}
EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
2009
2010
/*
 * ahci_single_level_irq_intr - single shared level-triggered IRQ handler
 * @irq:          interrupt number (unused)
 * @dev_instance: the ata_host
 *
 * Services all ports flagged in HOST_IRQ_STAT under the host lock, then
 * clears HOST_IRQ_STAT last (AHCI 1.1 section 10.6.2: the host-level
 * latch must be cleared after all port events to avoid spurious IRQs).
 */
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	rc = ahci_handle_port_intr(host, irq_masked);

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(rc);
}
2047
2048
/*
 * ahci_qc_issue - issue a prepared command to the hardware
 * @qc: command to issue
 *
 * For NCQ, the tag's PxSACT bit must be set before PxCI.  When FBS is
 * active, PxFBS.DEV is retargeted first if the command is for a
 * different PMP device than the last issue.  Always returns 0.
 */
unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (ata_is_ncq(qc->tf.protocol))
		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_qc_issue);
2078
2079
/*
 * ahci_qc_fill_rtf - fill the result taskfile for a completed command
 * @qc: completed command
 *
 * Picks the correct FIS RX area (per-PMP when FBS is enabled) and the
 * correct FIS type for the protocol: PIO Setup FIS for successful PIO
 * data-in, SDB FIS status/error bytes for NCQ, and the D2H Register
 * FIS otherwise.
 */
static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *rx_fis = pp->rx_fis;

	if (pp->fbs_enabled)
		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;

	/*
	 * After a successful execution of an ATA PIO data-in command,
	 * the device doesn't send D2H Reg FIS to update the TF and
	 * the host should take TF and E_Status from the preceding PIO
	 * Setup FIS.
	 */
	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
	    !(qc->flags & ATA_QCFLAG_EH)) {
		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
		/* byte 15 of the PIO Setup FIS is E_Status */
		qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
		return;
	}

	/*
	 * For NCQ commands, we never get a D2H FIS, so reading the D2H Register
	 * FIS area of the Received FIS Structure (which contains a copy of the
	 * last D2H FIS received) will contain an outdated status code.
	 * For NCQ commands, we instead get a SDB FIS, so read the SDB FIS area
	 * instead. However, the SDB FIS does not contain the LBA, so we can't
	 * use the ata_tf_from_fis() helper.
	 */
	if (ata_is_ncq(qc->tf.protocol)) {
		const u8 *fis = rx_fis + RX_FIS_SDB;

		/*
		 * Successful NCQ commands have been filled already.
		 * A failed NCQ command will read the status here.
		 * (Note that a failed NCQ command will get a more specific
		 * error when reading the NCQ Command Error log.)
		 */
		qc->result_tf.status = fis[2];
		qc->result_tf.error = fis[3];
		return;
	}

	ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
}
2124
2125
/*
 * ahci_qc_ncq_fill_rtf - Fill result taskfiles for completed NCQ commands.
 * @ap: port the commands completed on
 * @done_mask: bitmask of completed command tags
 *
 * NCQ completions are signalled by an SDB FIS. Copy its status and error
 * bytes into each completed NCQ QC and mark the QC's result taskfile as
 * filled so ahci_qc_fill_rtf() does not have to read it again.
 */
static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
{
	struct ahci_port_priv *pp = ap->private_data;
	const u8 *fis;

	/* No outstanding commands. */
	if (!ap->qc_active)
		return;

	/*
	 * FBS not enabled, so read status and error once, since they are shared
	 * for all QCs.
	 */
	if (!pp->fbs_enabled) {
		u8 status, error;

		/* No outstanding NCQ commands. */
		if (!pp->active_link->sactive)
			return;

		/* In the SDB FIS, byte 2 is status and byte 3 is error. */
		fis = pp->rx_fis + RX_FIS_SDB;
		status = fis[2];
		error = fis[3];

		while (done_mask) {
			struct ata_queued_cmd *qc;
			unsigned int tag = __ffs64(done_mask);

			qc = ata_qc_from_tag(ap, tag);
			if (qc && ata_is_ncq(qc->tf.protocol)) {
				qc->result_tf.status = status;
				qc->result_tf.error = error;
				qc->result_tf.flags = qc->tf.flags;
				qc->flags |= ATA_QCFLAG_RTF_FILLED;
			}
			/* Clear the tag we just handled. */
			done_mask &= ~(1ULL << tag);
		}

		return;
	}

	/*
	 * FBS enabled, so read the status and error for each QC, since the QCs
	 * can belong to different PMP links. (Each PMP link has its own FIS
	 * Receive Area.)
	 */
	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc && ata_is_ncq(qc->tf.protocol)) {
			/* Index into this QC's per-PMP FIS receive area. */
			fis = pp->rx_fis;
			fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
			fis += RX_FIS_SDB;
			qc->result_tf.status = fis[2];
			qc->result_tf.error = fis[3];
			qc->result_tf.flags = qc->tf.flags;
			qc->flags |= ATA_QCFLAG_RTF_FILLED;
		}
		done_mask &= ~(1ULL << tag);
	}
}
2188
2189
static void ahci_freeze(struct ata_port *ap)
2190
{
2191
void __iomem *port_mmio = ahci_port_base(ap);
2192
2193
/* turn IRQ off */
2194
writel(0, port_mmio + PORT_IRQ_MASK);
2195
}
2196
2197
static void ahci_thaw(struct ata_port *ap)
2198
{
2199
struct ahci_host_priv *hpriv = ap->host->private_data;
2200
void __iomem *mmio = hpriv->mmio;
2201
void __iomem *port_mmio = ahci_port_base(ap);
2202
u32 tmp;
2203
struct ahci_port_priv *pp = ap->private_data;
2204
2205
/* clear IRQ */
2206
tmp = readl(port_mmio + PORT_IRQ_STAT);
2207
writel(tmp, port_mmio + PORT_IRQ_STAT);
2208
writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2209
2210
/* turn IRQ back on */
2211
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2212
}
2213
2214
/*
 * ahci_error_handler - AHCI error-handling entry point.
 * @ap: port to recover
 *
 * Restarts the DMA engine when the port is not frozen, runs the PMP-aware
 * libata error handler, and leaves the engine stopped if no device remains
 * enabled on the port's own link.
 */
void ahci_error_handler(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (!ata_port_is_frozen(ap)) {
		/* restart engine */
		hpriv->stop_engine(ap);
		hpriv->start_engine(ap);
	}

	sata_pmp_error_handler(ap);

	/* No enabled device left on the link: keep the engine stopped. */
	if (!ata_dev_enabled(ap->link.device))
		hpriv->stop_engine(ap);
}
EXPORT_SYMBOL_GPL(ahci_error_handler);
2230
2231
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2232
{
2233
struct ata_port *ap = qc->ap;
2234
2235
/* make DMA engine forget about the failed command */
2236
if (qc->flags & ATA_QCFLAG_EH)
2237
ahci_kick_engine(ap);
2238
}
2239
2240
/*
 * ahci_set_aggressive_devslp - Enable/disable aggressive device sleep.
 * @ap: target port
 * @sleep: true to enable aggressive DevSlp, false to disable it
 *
 * Programs the PORT_DEVSLP register (DITO/MDAT/DETO/ADSE fields) and sets
 * or clears the SATA_DEVSLP device feature accordingly. Silently returns
 * if the port does not advertise DevSlp support (PORT_DEVSLP_DSP clear).
 */
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_device *dev = ap->link.device;
	u32 devslp, dm, dito, mdat, deto, dito_conf;
	int rc;
	unsigned int err_mask;

	devslp = readl(port_mmio + PORT_DEVSLP);
	if (!(devslp & PORT_DEVSLP_DSP)) {
		dev_info(ap->host->dev, "port does not support device sleep\n");
		return;
	}

	/* disable device sleep */
	if (!sleep) {
		if (devslp & PORT_DEVSLP_ADSE) {
			/* Clear ADSE first, then turn the feature off on
			 * the drive itself.
			 */
			writel(devslp & ~PORT_DEVSLP_ADSE,
			       port_mmio + PORT_DEVSLP);
			err_mask = ata_dev_set_feature(dev,
						       SETFEATURES_SATA_DISABLE,
						       SATA_DEVSLP);
			if (err_mask && err_mask != AC_ERR_DEV)
				ata_dev_warn(dev, "failed to disable DEVSLP\n");
		}
		return;
	}

	/* Scale the idle timeout by the DITO multiplier and clamp to the
	 * 10-bit DITO field.
	 */
	dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
	dito = devslp_idle_timeout / (dm + 1);
	if (dito > 0x3ff)
		dito = 0x3ff;

	dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;

	/* device sleep was already enabled and same dito */
	if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
		return;

	/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	/* Use the nominal value 10 ms if the read MDAT is zero,
	 * the nominal value of DETO is 20 ms.
	 */
	if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
	    ATA_LOG_DEVSLP_VALID_MASK) {
		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
		       ATA_LOG_DEVSLP_MDAT_MASK;
		if (!mdat)
			mdat = 10;
		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
		if (!deto)
			deto = 20;
	} else {
		mdat = 10;
		deto = 20;
	}

	/* Make dito, mdat, deto bits to 0s */
	devslp &= ~GENMASK_ULL(24, 2);
	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
		   (deto << PORT_DEVSLP_DETO_OFFSET) |
		   PORT_DEVSLP_ADSE);
	writel(devslp, port_mmio + PORT_DEVSLP);

	hpriv->start_engine(ap);

	/* enable device sleep feature for the drive */
	err_mask = ata_dev_set_feature(dev,
				       SETFEATURES_SATA_ENABLE,
				       SATA_DEVSLP);
	if (err_mask && err_mask != AC_ERR_DEV)
		ata_dev_warn(dev, "failed to enable DEVSLP\n");
}
2319
2320
/*
 * ahci_enable_fbs - Enable FIS-based switching on a port.
 * @ap: target port
 *
 * Sets PORT_FBS_EN (with the engine stopped) and reads the register back
 * to confirm the HBA accepted it. No-op if the port does not support FBS.
 */
static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		/* Already enabled; just sync the software state. */
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	/* The engine must be stopped before PORT_FBS is modified. */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	/* Read back to check whether the HBA actually latched the bit. */
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_info(ap->host->dev, "FBS is enabled\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_err(ap->host->dev, "Failed to enable FBS\n");

	hpriv->start_engine(ap);
}
2353
2354
/*
 * ahci_disable_fbs - Disable FIS-based switching on a port.
 * @ap: target port
 *
 * Clears PORT_FBS_EN (with the engine stopped) and reads the register back
 * to confirm. No-op if the port does not support FBS.
 */
static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		/* Already disabled; just sync the software state. */
		pp->fbs_enabled = false;
		return;
	}

	/* The engine must be stopped before PORT_FBS is modified. */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	/* Read back to check whether the HBA actually cleared the bit. */
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_err(ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_info(ap->host->dev, "FBS is disabled\n");
		pp->fbs_enabled = false;
	}

	hpriv->start_engine(ap);
}
2386
2387
static void ahci_pmp_attach(struct ata_port *ap)
2388
{
2389
void __iomem *port_mmio = ahci_port_base(ap);
2390
struct ahci_port_priv *pp = ap->private_data;
2391
u32 cmd;
2392
2393
cmd = readl(port_mmio + PORT_CMD);
2394
cmd |= PORT_CMD_PMP;
2395
writel(cmd, port_mmio + PORT_CMD);
2396
2397
ahci_enable_fbs(ap);
2398
2399
pp->intr_mask |= PORT_IRQ_BAD_PMP;
2400
2401
/*
2402
* We must not change the port interrupt mask register if the
2403
* port is marked frozen, the value in pp->intr_mask will be
2404
* restored later when the port is thawed.
2405
*
2406
* Note that during initialization, the port is marked as
2407
* frozen since the irq handler is not yet registered.
2408
*/
2409
if (!ata_port_is_frozen(ap))
2410
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2411
}
2412
2413
static void ahci_pmp_detach(struct ata_port *ap)
2414
{
2415
void __iomem *port_mmio = ahci_port_base(ap);
2416
struct ahci_port_priv *pp = ap->private_data;
2417
u32 cmd;
2418
2419
ahci_disable_fbs(ap);
2420
2421
cmd = readl(port_mmio + PORT_CMD);
2422
cmd &= ~PORT_CMD_PMP;
2423
writel(cmd, port_mmio + PORT_CMD);
2424
2425
pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2426
2427
/* see comment above in ahci_pmp_attach() */
2428
if (!ata_port_is_frozen(ap))
2429
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2430
}
2431
2432
/*
 * ahci_port_resume - Power up and (re)start a port.
 * @ap: port to resume
 *
 * Takes a runtime-PM reference, powers the port up, starts it, and
 * re-applies the PMP configuration that libata believes is attached.
 * Always returns 0.
 */
int ahci_port_resume(struct ata_port *ap)
{
	ahci_rpm_get_port(ap);

	ahci_power_up(ap);
	ahci_start_port(ap);

	/* Re-sync the hardware PMP state with libata's view. */
	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);
2447
2448
#ifdef CONFIG_PM
2449
static void ahci_handle_s2idle(struct ata_port *ap)
2450
{
2451
void __iomem *port_mmio = ahci_port_base(ap);
2452
u32 devslp;
2453
2454
if (pm_suspend_via_firmware())
2455
return;
2456
devslp = readl(port_mmio + PORT_DEVSLP);
2457
if ((devslp & PORT_DEVSLP_ADSE))
2458
ata_msleep(ap, devslp_idle_timeout);
2459
}
2460
2461
/*
 * ahci_port_suspend - Suspend a port.
 * @ap: port to suspend
 * @mesg: PM message (unused here)
 *
 * De-initializes and powers down the port; on failure the port is frozen
 * instead. Returns the result of the de-initialization.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc) {
		/* De-init failed: report it and freeze the port for EH. */
		ata_port_err(ap, "%s (%d)\n", emsg, rc);
		ata_port_freeze(ap);
	} else {
		ahci_power_down(ap);
	}

	if (acpi_storage_d3(ap->host->dev))
		ahci_handle_s2idle(ap);

	ahci_rpm_put_port(ap);
	return rc;
}
2480
#endif
2481
2482
/*
 * ahci_port_start - Allocate and initialize per-port resources.
 * @ap: port to start
 *
 * Allocates the port private data, probes FBS capability, carves the
 * coherent DMA buffer into command-slot / RX-FIS / command-table regions,
 * and finally resumes the port. Returns 0 on success, -ENOMEM on
 * allocation failure, or the result of ahci_port_resume().
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* Per-port IRQ description, only useful with multiple ports. */
	if (ap->host->n_ports > 1) {
		pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
		if (!pp->irq_desc) {
			devm_kfree(dev, pp);
			return -ENOMEM;
		}
		snprintf(pp->irq_desc, 8,
			 "%s%d", dev_driver_string(dev), ap->port_no);
	}

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			/* Quirk: trust the host flag over PORT_CMD_FBSCP. */
			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
				 ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_warn(dev, "port %d is not capable of FBS\n",
				 ap->port_no);
	}

	/* FBS needs one RX FIS area per PMP device (16 of them). */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	/*
	 * Switch to per-port locking in case each port has its own MSI vector.
	 */
	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
		spin_lock_init(&pp->lock);
		ap->lock = &pp->lock;
	}

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2577
2578
static void ahci_port_stop(struct ata_port *ap)
2579
{
2580
const char *emsg = NULL;
2581
struct ahci_host_priv *hpriv = ap->host->private_data;
2582
void __iomem *host_mmio = hpriv->mmio;
2583
int rc;
2584
2585
/* de-initialize port */
2586
rc = ahci_deinit_port(ap, &emsg);
2587
if (rc)
2588
ata_port_warn(ap, "%s (%d)\n", emsg, rc);
2589
2590
/*
2591
* Clear GHC.IS to prevent stuck INTx after disabling MSI and
2592
* re-enabling INTx.
2593
*/
2594
writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
2595
2596
ahci_rpm_put_port(ap);
2597
}
2598
2599
void ahci_print_info(struct ata_host *host, const char *scc_s)
2600
{
2601
struct ahci_host_priv *hpriv = host->private_data;
2602
u32 vers, cap, cap2, impl, speed;
2603
const char *speed_s;
2604
2605
vers = hpriv->version;
2606
cap = hpriv->cap;
2607
cap2 = hpriv->cap2;
2608
impl = hpriv->port_map;
2609
2610
speed = (cap >> 20) & 0xf;
2611
if (speed == 1)
2612
speed_s = "1.5";
2613
else if (speed == 2)
2614
speed_s = "3";
2615
else if (speed == 3)
2616
speed_s = "6";
2617
else
2618
speed_s = "?";
2619
2620
dev_info(host->dev,
2621
"AHCI vers %02x%02x.%02x%02x, "
2622
"%u command slots, %s Gbps, %s mode\n"
2623
,
2624
2625
(vers >> 24) & 0xff,
2626
(vers >> 16) & 0xff,
2627
(vers >> 8) & 0xff,
2628
vers & 0xff,
2629
2630
((cap >> 8) & 0x1f) + 1,
2631
speed_s,
2632
scc_s);
2633
2634
dev_info(host->dev,
2635
"%u/%u ports implemented (port mask 0x%x)\n"
2636
,
2637
2638
hweight32(impl),
2639
(cap & 0x1f) + 1,
2640
impl);
2641
2642
dev_info(host->dev,
2643
"flags: "
2644
"%s%s%s%s%s%s%s"
2645
"%s%s%s%s%s%s%s"
2646
"%s%s%s%s%s%s%s"
2647
"%s%s\n"
2648
,
2649
2650
cap & HOST_CAP_64 ? "64bit " : "",
2651
cap & HOST_CAP_NCQ ? "ncq " : "",
2652
cap & HOST_CAP_SNTF ? "sntf " : "",
2653
cap & HOST_CAP_MPS ? "ilck " : "",
2654
cap & HOST_CAP_SSS ? "stag " : "",
2655
cap & HOST_CAP_ALPM ? "pm " : "",
2656
cap & HOST_CAP_LED ? "led " : "",
2657
cap & HOST_CAP_CLO ? "clo " : "",
2658
cap & HOST_CAP_ONLY ? "only " : "",
2659
cap & HOST_CAP_PMP ? "pmp " : "",
2660
cap & HOST_CAP_FBS ? "fbs " : "",
2661
cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2662
cap & HOST_CAP_SSC ? "slum " : "",
2663
cap & HOST_CAP_PART ? "part " : "",
2664
cap & HOST_CAP_CCC ? "ccc " : "",
2665
cap & HOST_CAP_EMS ? "ems " : "",
2666
cap & HOST_CAP_SXS ? "sxs " : "",
2667
cap2 & HOST_CAP2_DESO ? "deso " : "",
2668
cap2 & HOST_CAP2_SADM ? "sadm " : "",
2669
cap2 & HOST_CAP2_SDS ? "sds " : "",
2670
cap2 & HOST_CAP2_APST ? "apst " : "",
2671
cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2672
cap2 & HOST_CAP2_BOH ? "boh " : ""
2673
);
2674
}
2675
EXPORT_SYMBOL_GPL(ahci_print_info);
2676
2677
void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2678
struct ata_port_info *pi)
2679
{
2680
u8 messages;
2681
void __iomem *mmio = hpriv->mmio;
2682
u32 em_loc = readl(mmio + HOST_EM_LOC);
2683
u32 em_ctl = readl(mmio + HOST_EM_CTL);
2684
2685
if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2686
return;
2687
2688
messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2689
2690
if (messages) {
2691
/* store em_loc */
2692
hpriv->em_loc = ((em_loc >> 16) * 4);
2693
hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2694
hpriv->em_msg_type = messages;
2695
pi->flags |= ATA_FLAG_EM;
2696
if (!(em_ctl & EM_CTL_ALHD))
2697
pi->flags |= ATA_FLAG_SW_ACTIVITY;
2698
}
2699
}
2700
EXPORT_SYMBOL_GPL(ahci_set_em_messages);
2701
2702
/*
 * ahci_host_activate_multi_irqs - Activate a host using per-port IRQs.
 * @host: target ATA host
 * @sht: scsi_host_template to use when registering the host
 *
 * Starts the host, requests one IRQ per port via the host's
 * get_irq_vector() callback, and registers the host. Returns 0 on
 * success, -errno otherwise.
 */
static int ahci_host_activate_multi_irqs(struct ata_host *host,
					 const struct scsi_host_template *sht)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;
	/*
	 * Requests IRQs according to AHCI-1.1 when multiple MSIs were
	 * allocated. That is one MSI per port, starting from @irq.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ahci_port_priv *pp = host->ports[i]->private_data;
		int irq = hpriv->get_irq_vector(host, i);

		/* Do not receive interrupts sent by dummy ports */
		if (!pp) {
			disable_irq(irq);
			continue;
		}

		/* devm-managed, so no explicit free on the error path. */
		rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard,
				      0, pp->irq_desc, host->ports[i]);

		if (rc)
			return rc;
		ata_port_desc_misc(host->ports[i], irq);
	}

	return ata_host_register(host, sht);
}
2735
2736
/**
2737
* ahci_host_activate - start AHCI host, request IRQs and register it
2738
* @host: target ATA host
2739
* @sht: scsi_host_template to use when registering the host
2740
*
2741
* LOCKING:
2742
* Inherited from calling layer (may sleep).
2743
*
2744
* RETURNS:
2745
* 0 on success, -errno otherwise.
2746
*/
2747
int ahci_host_activate(struct ata_host *host, const struct scsi_host_template *sht)
2748
{
2749
struct ahci_host_priv *hpriv = host->private_data;
2750
int irq = hpriv->irq;
2751
int rc;
2752
2753
if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
2754
if (hpriv->irq_handler &&
2755
hpriv->irq_handler != ahci_single_level_irq_intr)
2756
dev_warn(host->dev,
2757
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
2758
if (!hpriv->get_irq_vector) {
2759
dev_err(host->dev,
2760
"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
2761
return -EIO;
2762
}
2763
2764
rc = ahci_host_activate_multi_irqs(host, sht);
2765
} else {
2766
rc = ata_host_activate(host, irq, hpriv->irq_handler,
2767
IRQF_SHARED, sht);
2768
}
2769
2770
2771
return rc;
2772
}
2773
EXPORT_SYMBOL_GPL(ahci_host_activate);
2774
2775
MODULE_AUTHOR("Jeff Garzik");
2776
MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2777
MODULE_LICENSE("GPL");
2778
2779