1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* libata-sff.c - helper library for PCI IDE BMDMA
4
*
5
* Copyright 2003-2006 Red Hat, Inc. All rights reserved.
6
* Copyright 2003-2006 Jeff Garzik
7
*
8
* libata documentation is available via 'make {ps|pdf}docs',
9
* as Documentation/driver-api/libata.rst
10
*
11
* Hardware documentation available from http://www.t13.org/ and
12
* http://www.sata-io.org/
13
*/
14
15
#include <linux/kernel.h>
16
#include <linux/gfp.h>
17
#include <linux/pci.h>
18
#include <linux/module.h>
19
#include <linux/libata.h>
20
#include <linux/highmem.h>
21
#include <trace/events/libata.h>
22
#include "libata.h"
23
24
static struct workqueue_struct *ata_sff_wq;
25
26
const struct ata_port_operations ata_sff_port_ops = {
27
.inherits = &ata_base_port_ops,
28
29
.qc_issue = ata_sff_qc_issue,
30
.qc_fill_rtf = ata_sff_qc_fill_rtf,
31
32
.freeze = ata_sff_freeze,
33
.thaw = ata_sff_thaw,
34
.reset.prereset = ata_sff_prereset,
35
.reset.softreset = ata_sff_softreset,
36
.reset.hardreset = sata_sff_hardreset,
37
.reset.postreset = ata_sff_postreset,
38
.error_handler = ata_sff_error_handler,
39
40
.sff_dev_select = ata_sff_dev_select,
41
.sff_check_status = ata_sff_check_status,
42
.sff_tf_load = ata_sff_tf_load,
43
.sff_tf_read = ata_sff_tf_read,
44
.sff_exec_command = ata_sff_exec_command,
45
.sff_data_xfer = ata_sff_data_xfer,
46
.sff_drain_fifo = ata_sff_drain_fifo,
47
48
.lost_interrupt = ata_sff_lost_interrupt,
49
};
50
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
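/*
 * Illustrative sketch (not part of the upstream file): a low-level PATA
 * driver typically builds its own ata_port_operations on top of
 * ata_sff_port_ops via .inherits and overrides only what its hardware
 * needs.  "my_pata_set_piomode" is a hypothetical timing callback.
 */
#if 0
static struct ata_port_operations my_pata_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,	/* controller can do 32-bit PIO */
	.set_piomode	= my_pata_set_piomode,	/* hypothetical timing hook */
};
#endif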
51
52
/**
53
* ata_sff_check_status - Read device status reg & clear interrupt
54
* @ap: port where the device is
55
*
56
* Reads ATA taskfile status register for currently-selected device
57
* and returns its value. This also clears pending interrupts
58
* from this device
59
*
60
* LOCKING:
61
* Inherited from caller.
62
*/
63
u8 ata_sff_check_status(struct ata_port *ap)
64
{
65
return ioread8(ap->ioaddr.status_addr);
66
}
67
EXPORT_SYMBOL_GPL(ata_sff_check_status);
68
69
/**
70
* ata_sff_altstatus - Read device alternate status reg
71
* @ap: port where the device is
72
* @status: pointer to a status value
73
*
74
* Reads ATA alternate status register for currently-selected device
75
* and returns its value.
76
*
77
* RETURN:
78
* true if the register exists, false if not.
79
*
80
* LOCKING:
81
* Inherited from caller.
82
*/
83
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
84
{
85
u8 tmp;
86
87
if (ap->ops->sff_check_altstatus) {
88
tmp = ap->ops->sff_check_altstatus(ap);
89
goto read;
90
}
91
if (ap->ioaddr.altstatus_addr) {
92
tmp = ioread8(ap->ioaddr.altstatus_addr);
93
goto read;
94
}
95
return false;
96
97
read:
98
if (status)
99
*status = tmp;
100
return true;
101
}
102
103
/**
104
* ata_sff_irq_status - Check if the device is busy
105
* @ap: port where the device is
106
*
107
* Determine if the port is currently busy. Uses altstatus
108
* if available in order to avoid clearing shared IRQ status
109
* when finding an IRQ source. Non-ctl-capable devices don't
110
* share interrupt lines, fortunately for us.
111
*
112
* LOCKING:
113
* Inherited from caller.
114
*/
115
static u8 ata_sff_irq_status(struct ata_port *ap)
116
{
117
u8 status;
118
119
/* Not us: We are busy */
120
if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
121
return status;
122
/* Clear INTRQ latch */
123
status = ap->ops->sff_check_status(ap);
124
return status;
125
}
126
127
/**
128
* ata_sff_sync - Flush writes
129
* @ap: Port to wait for.
130
*
131
* CAUTION:
132
* If we have an mmio device with no ctl and no altstatus
133
* method this will fail. No such devices are known to exist.
134
*
135
* LOCKING:
136
* Inherited from caller.
137
*/
138
139
static void ata_sff_sync(struct ata_port *ap)
140
{
141
ata_sff_altstatus(ap, NULL);
142
}
143
144
/**
145
* ata_sff_pause - Flush writes and wait 400nS
146
* @ap: Port to pause for.
147
*
148
* CAUTION:
149
* If we have an mmio device with no ctl and no altstatus
150
* method this will fail. No such devices are known to exist.
151
*
152
* LOCKING:
153
* Inherited from caller.
154
*/
155
156
void ata_sff_pause(struct ata_port *ap)
157
{
158
ata_sff_sync(ap);
159
ndelay(400);
160
}
161
EXPORT_SYMBOL_GPL(ata_sff_pause);
162
163
/**
164
* ata_sff_dma_pause - Pause before commencing DMA
165
* @ap: Port to pause for.
166
*
167
* Perform I/O fencing and ensure sufficient cycle delays occur
168
* for the HDMA1:0 transition
169
*/
170
171
void ata_sff_dma_pause(struct ata_port *ap)
172
{
173
/*
174
* An altstatus read will cause the needed delay without
175
* messing up the IRQ status
176
*/
177
if (ata_sff_altstatus(ap, NULL))
178
return;
179
/* There are no DMA controllers without ctl. BUG here to ensure
180
we never violate the HDMA1:0 transition timing and risk
181
corruption. */
182
BUG();
183
}
184
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
185
186
static int ata_sff_check_ready(struct ata_link *link)
187
{
188
u8 status = link->ap->ops->sff_check_status(link->ap);
189
190
return ata_check_ready(status);
191
}
192
193
/**
194
* ata_sff_wait_ready - sleep until BSY clears, or timeout
195
* @link: SFF link to wait ready status for
196
* @deadline: deadline jiffies for the operation
197
*
198
* Sleep until ATA Status register bit BSY clears, or timeout
199
* occurs.
200
*
201
* LOCKING:
202
* Kernel thread context (may sleep).
203
*
204
* RETURNS:
205
* 0 on success, -errno otherwise.
206
*/
207
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
208
{
209
return ata_wait_ready(link, deadline, ata_sff_check_ready);
210
}
211
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
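/*
 * Hedged usage sketch: a reset path would typically derive @deadline from
 * the current time plus one of the ATA_TMOUT_* constants, e.g.:
 */
#if 0
	rc = ata_sff_wait_ready(link, ata_deadline(jiffies, ATA_TMOUT_BOOT));
	if (rc && rc != -ENODEV)
		return rc;
#endif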
212
213
/**
214
* ata_sff_set_devctl - Write device control reg
215
* @ap: port where the device is
216
* @ctl: value to write
217
*
218
* Writes ATA device control register.
219
*
220
* RETURN:
221
* true if the register exists, false if not.
222
*
223
* LOCKING:
224
* Inherited from caller.
225
*/
226
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
227
{
228
if (ap->ops->sff_set_devctl) {
229
ap->ops->sff_set_devctl(ap, ctl);
230
return true;
231
}
232
if (ap->ioaddr.ctl_addr) {
233
iowrite8(ctl, ap->ioaddr.ctl_addr);
234
return true;
235
}
236
237
return false;
238
}
239
240
/**
241
* ata_sff_dev_select - Select device 0/1 on ATA bus
242
* @ap: ATA channel to manipulate
243
* @device: ATA device (numbered from zero) to select
244
*
245
* Use the method defined in the ATA specification to
246
* make either device 0, or device 1, active on the
247
* ATA channel. Works with both PIO and MMIO.
248
*
249
* May be used as the dev_select() entry in ata_port_operations.
250
*
251
* LOCKING:
252
* caller.
253
*/
254
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
255
{
256
u8 tmp;
257
258
if (device == 0)
259
tmp = ATA_DEVICE_OBS;
260
else
261
tmp = ATA_DEVICE_OBS | ATA_DEV1;
262
263
iowrite8(tmp, ap->ioaddr.device_addr);
264
ata_sff_pause(ap); /* needed; also flushes, for mmio */
265
}
266
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
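/*
 * For reference (sketch, values from <linux/ata.h>): the Device register
 * byte written above is the legacy "obsolete bits" pattern plus the DEV
 * bit, so device 0 is selected with 0xA0 and device 1 with 0xB0.
 */
#if 0
	BUILD_BUG_ON(ATA_DEVICE_OBS != 0xa0);
	BUILD_BUG_ON((ATA_DEVICE_OBS | ATA_DEV1) != 0xb0);
#endif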
267
268
/**
269
* ata_dev_select - Select device 0/1 on ATA bus
270
* @ap: ATA channel to manipulate
271
* @device: ATA device (numbered from zero) to select
272
* @wait: non-zero to wait for Status register BSY bit to clear
273
* @can_sleep: non-zero if context allows sleeping
274
*
275
* Use the method defined in the ATA specification to
276
* make either device 0, or device 1, active on the
277
* ATA channel.
278
*
279
* This is a high-level version of ata_sff_dev_select(), which
280
* additionally provides the services of inserting the proper
281
* pauses and status polling, where needed.
282
*
283
* LOCKING:
284
* caller.
285
*/
286
static void ata_dev_select(struct ata_port *ap, unsigned int device,
287
unsigned int wait, unsigned int can_sleep)
288
{
289
if (wait)
290
ata_wait_idle(ap);
291
292
ap->ops->sff_dev_select(ap, device);
293
294
if (wait) {
295
if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
296
ata_msleep(ap, 150);
297
ata_wait_idle(ap);
298
}
299
}
300
301
/**
302
* ata_sff_irq_on - Enable interrupts on a port.
303
* @ap: Port on which interrupts are enabled.
304
*
305
* Enable interrupts on a legacy IDE device using MMIO or PIO,
306
* wait for idle, clear any pending interrupts.
307
*
308
* Note: may NOT be used as the sff_irq_on() entry in
309
* ata_port_operations.
310
*
311
* LOCKING:
312
* Inherited from caller.
313
*/
314
void ata_sff_irq_on(struct ata_port *ap)
315
{
316
if (ap->ops->sff_irq_on) {
317
ap->ops->sff_irq_on(ap);
318
return;
319
}
320
321
ap->ctl &= ~ATA_NIEN;
322
ap->last_ctl = ap->ctl;
323
324
ata_sff_set_devctl(ap, ap->ctl);
325
ata_wait_idle(ap);
326
327
if (ap->ops->sff_irq_clear)
328
ap->ops->sff_irq_clear(ap);
329
}
330
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
331
332
/**
333
* ata_sff_tf_load - send taskfile registers to host controller
334
* @ap: Port to which output is sent
335
* @tf: ATA taskfile register set
336
*
337
* Outputs ATA taskfile to standard ATA host controller.
338
*
339
* LOCKING:
340
* Inherited from caller.
341
*/
342
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
343
{
344
struct ata_ioports *ioaddr = &ap->ioaddr;
345
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
346
347
if (tf->ctl != ap->last_ctl) {
348
if (ioaddr->ctl_addr)
349
iowrite8(tf->ctl, ioaddr->ctl_addr);
350
ap->last_ctl = tf->ctl;
351
ata_wait_idle(ap);
352
}
353
354
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
355
WARN_ON_ONCE(!ioaddr->ctl_addr);
356
iowrite8(tf->hob_feature, ioaddr->feature_addr);
357
iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
358
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
359
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
360
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
361
}
362
363
if (is_addr) {
364
iowrite8(tf->feature, ioaddr->feature_addr);
365
iowrite8(tf->nsect, ioaddr->nsect_addr);
366
iowrite8(tf->lbal, ioaddr->lbal_addr);
367
iowrite8(tf->lbam, ioaddr->lbam_addr);
368
iowrite8(tf->lbah, ioaddr->lbah_addr);
369
}
370
371
if (tf->flags & ATA_TFLAG_DEVICE)
372
iowrite8(tf->device, ioaddr->device_addr);
373
374
ata_wait_idle(ap);
375
}
376
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
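/*
 * Hypothetical caller sketch: build an LBA48 READ SECTOR(S) EXT taskfile
 * and hand it to the port's tf_load hook.  The LBA and count values are
 * arbitrary; "dev" and "ap" are assumed to come from the surrounding
 * driver context.
 */
#if 0
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command   = ATA_CMD_READ_EXT;
	tf.flags    |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
	tf.nsect     = 2;	/* sector count 7:0 */
	tf.hob_nsect = 0;	/* sector count 15:8 */
	tf.lbal = 0x00;  tf.lbam = 0x10;  tf.lbah = 0x00;		/* LBA 23:0  */
	tf.hob_lbal = 0x01;  tf.hob_lbam = 0;  tf.hob_lbah = 0;	/* LBA 47:24 */

	ap->ops->sff_tf_load(ap, &tf);
#endif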
377
378
/**
379
* ata_sff_tf_read - input device's ATA taskfile shadow registers
380
* @ap: Port from which input is read
381
* @tf: ATA taskfile register set for storing input
382
*
383
* Reads ATA taskfile registers for currently-selected device
384
* into @tf. Assumes the device has a fully SFF compliant task file
385
* layout and behaviour. If your device does not (e.g. has a different
387
* status method) then you will need to provide a replacement tf_read.
387
*
388
* LOCKING:
389
* Inherited from caller.
390
*/
391
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
392
{
393
struct ata_ioports *ioaddr = &ap->ioaddr;
394
395
tf->status = ata_sff_check_status(ap);
396
tf->error = ioread8(ioaddr->error_addr);
397
tf->nsect = ioread8(ioaddr->nsect_addr);
398
tf->lbal = ioread8(ioaddr->lbal_addr);
399
tf->lbam = ioread8(ioaddr->lbam_addr);
400
tf->lbah = ioread8(ioaddr->lbah_addr);
401
tf->device = ioread8(ioaddr->device_addr);
402
403
if (tf->flags & ATA_TFLAG_LBA48) {
404
if (likely(ioaddr->ctl_addr)) {
405
iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
406
tf->hob_feature = ioread8(ioaddr->error_addr);
407
tf->hob_nsect = ioread8(ioaddr->nsect_addr);
408
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
409
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
410
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
411
iowrite8(tf->ctl, ioaddr->ctl_addr);
412
ap->last_ctl = tf->ctl;
413
} else
414
WARN_ON_ONCE(1);
415
}
416
}
417
EXPORT_SYMBOL_GPL(ata_sff_tf_read);
418
419
/**
420
* ata_sff_exec_command - issue ATA command to host controller
421
* @ap: port to which command is being issued
422
* @tf: ATA taskfile register set
423
*
424
* Issues ATA command, with proper synchronization with interrupt
425
* handler / other threads.
426
*
427
* LOCKING:
428
* spin_lock_irqsave(host lock)
429
*/
430
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
431
{
432
iowrite8(tf->command, ap->ioaddr.command_addr);
433
ata_sff_pause(ap);
434
}
435
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
436
437
/**
438
* ata_tf_to_host - issue ATA taskfile to host controller
439
* @ap: port to which command is being issued
440
* @tf: ATA taskfile register set
441
* @tag: tag of the associated command
442
*
443
* Issues ATA taskfile register set to ATA host controller,
444
* with proper synchronization with interrupt handler and
445
* other threads.
446
*
447
* LOCKING:
448
* spin_lock_irqsave(host lock)
449
*/
450
static inline void ata_tf_to_host(struct ata_port *ap,
451
const struct ata_taskfile *tf,
452
unsigned int tag)
453
{
454
trace_ata_tf_load(ap, tf);
455
ap->ops->sff_tf_load(ap, tf);
456
trace_ata_exec_command(ap, tf, tag);
457
ap->ops->sff_exec_command(ap, tf);
458
}
459
460
/**
461
* ata_sff_data_xfer - Transfer data by PIO
462
* @qc: queued command
463
* @buf: data buffer
464
* @buflen: buffer length
465
* @rw: read/write
466
*
467
* Transfer data from/to the device data register by PIO.
468
*
469
* LOCKING:
470
* Inherited from caller.
471
*
472
* RETURNS:
473
* Bytes consumed.
474
*/
475
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
476
unsigned int buflen, int rw)
477
{
478
struct ata_port *ap = qc->dev->link->ap;
479
void __iomem *data_addr = ap->ioaddr.data_addr;
480
unsigned int words = buflen >> 1;
481
482
/* Transfer multiple of 2 bytes */
483
if (rw == READ)
484
ioread16_rep(data_addr, buf, words);
485
else
486
iowrite16_rep(data_addr, buf, words);
487
488
/* Transfer trailing byte, if any. */
489
if (unlikely(buflen & 0x01)) {
490
unsigned char pad[2] = { };
491
492
/* Point buf to the tail of buffer */
493
buf += buflen - 1;
494
495
/*
496
* Use io*16_rep() accessors here as well to avoid pointlessly
497
* swapping bytes to and from on the big endian machines...
498
*/
499
if (rw == READ) {
500
ioread16_rep(data_addr, pad, 1);
501
*buf = pad[0];
502
} else {
503
pad[0] = *buf;
504
iowrite16_rep(data_addr, pad, 1);
505
}
506
words++;
507
}
508
509
return words << 1;
510
}
511
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
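/*
 * Worked example (sketch): for an odd-length ATAPI transfer of 13 bytes
 * the code above moves 6 full words, then one padded word for the
 * trailing byte, and reports 14 bytes consumed (words << 1).  "qc" and
 * "buf" are assumed to exist in the caller.
 */
#if 0
	unsigned int consumed = ata_sff_data_xfer(qc, buf, 13, READ);

	WARN_ON(consumed != 14);
#endif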
512
513
/**
514
* ata_sff_data_xfer32 - Transfer data by PIO
515
* @qc: queued command
516
* @buf: data buffer
517
* @buflen: buffer length
518
* @rw: read/write
519
*
520
* Transfer data from/to the device data register by PIO using 32bit
521
* I/O operations.
522
*
523
* LOCKING:
524
* Inherited from caller.
525
*
526
* RETURNS:
527
* Bytes consumed.
528
*/
529
530
unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
531
unsigned int buflen, int rw)
532
{
533
struct ata_device *dev = qc->dev;
534
struct ata_port *ap = dev->link->ap;
535
void __iomem *data_addr = ap->ioaddr.data_addr;
536
unsigned int words = buflen >> 2;
537
int slop = buflen & 3;
538
539
if (!(ap->pflags & ATA_PFLAG_PIO32))
540
return ata_sff_data_xfer(qc, buf, buflen, rw);
541
542
/* Transfer multiple of 4 bytes */
543
if (rw == READ)
544
ioread32_rep(data_addr, buf, words);
545
else
546
iowrite32_rep(data_addr, buf, words);
547
548
/* Transfer trailing bytes, if any */
549
if (unlikely(slop)) {
550
unsigned char pad[4] = { };
551
552
/* Point buf to the tail of buffer */
553
buf += buflen - slop;
554
555
/*
556
* Use io*_rep() accessors here as well to avoid pointlessly
557
* swapping bytes to and from on the big endian machines...
558
*/
559
if (rw == READ) {
560
if (slop < 3)
561
ioread16_rep(data_addr, pad, 1);
562
else
563
ioread32_rep(data_addr, pad, 1);
564
memcpy(buf, pad, slop);
565
} else {
566
memcpy(pad, buf, slop);
567
if (slop < 3)
568
iowrite16_rep(data_addr, pad, 1);
569
else
570
iowrite32_rep(data_addr, pad, 1);
571
}
572
}
573
return (buflen + 1) & ~1;
574
}
575
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
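/*
 * Note (sketch): a controller that can issue 32-bit PIO cycles opts in
 * per port, typically from its ->port_start() hook, before this helper
 * will use the wider accessors:
 */
#if 0
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
#endif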
576
577
static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
578
unsigned int offset, size_t xfer_size)
579
{
580
bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
581
unsigned char *buf;
582
583
buf = kmap_atomic(page);
584
qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
585
kunmap_atomic(buf);
586
587
if (!do_write && !PageSlab(page))
588
flush_dcache_page(page);
589
}
590
591
/**
592
* ata_pio_sector - Transfer a sector of data.
593
* @qc: Command on going
594
*
595
* Transfer qc->sect_size bytes of data from/to the ATA device.
596
*
597
* LOCKING:
598
* Inherited from caller.
599
*/
600
static void ata_pio_sector(struct ata_queued_cmd *qc)
601
{
602
struct ata_port *ap = qc->ap;
603
struct page *page;
604
unsigned int offset, count;
605
606
if (!qc->cursg) {
607
qc->curbytes = qc->nbytes;
608
return;
609
}
610
if (qc->curbytes == qc->nbytes - qc->sect_size)
611
ap->hsm_task_state = HSM_ST_LAST;
612
613
page = sg_page(qc->cursg);
614
offset = qc->cursg->offset + qc->cursg_ofs;
615
616
/* get the current page and offset */
617
page = nth_page(page, (offset >> PAGE_SHIFT));
618
offset %= PAGE_SIZE;
619
620
/* don't overrun current sg */
621
count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
622
623
trace_ata_sff_pio_transfer_data(qc, offset, count);
624
625
/*
626
* Split the transfer when it splits a page boundary. Note that the
627
* split still has to be dword aligned like all ATA data transfers.
628
*/
629
WARN_ON_ONCE(offset % 4);
630
if (offset + count > PAGE_SIZE) {
631
unsigned int split_len = PAGE_SIZE - offset;
632
633
ata_pio_xfer(qc, page, offset, split_len);
634
ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
635
} else {
636
ata_pio_xfer(qc, page, offset, count);
637
}
638
639
qc->curbytes += count;
640
qc->cursg_ofs += count;
641
642
if (qc->cursg_ofs == qc->cursg->length) {
643
qc->cursg = sg_next(qc->cursg);
644
if (!qc->cursg)
645
ap->hsm_task_state = HSM_ST_LAST;
646
qc->cursg_ofs = 0;
647
}
648
}
649
650
/**
651
* ata_pio_sectors - Transfer one or many sectors.
652
* @qc: Command on going
653
*
654
* Transfer one or many sectors of data from/to the
655
* ATA device for the DRQ request.
656
*
657
* LOCKING:
658
* Inherited from caller.
659
*/
660
static void ata_pio_sectors(struct ata_queued_cmd *qc)
661
{
662
if (is_multi_taskfile(&qc->tf)) {
663
/* READ/WRITE MULTIPLE */
664
unsigned int nsect;
665
666
WARN_ON_ONCE(qc->dev->multi_count == 0);
667
668
nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
669
qc->dev->multi_count);
670
while (nsect--)
671
ata_pio_sector(qc);
672
} else
673
ata_pio_sector(qc);
674
675
ata_sff_sync(qc->ap); /* flush */
676
}
677
678
/**
679
* atapi_send_cdb - Write CDB bytes to hardware
680
* @ap: Port to which ATAPI device is attached.
681
* @qc: Taskfile currently active
682
*
683
* When device has indicated its readiness to accept
684
* a CDB, this function is called. Send the CDB.
685
*
686
* LOCKING:
687
* caller.
688
*/
689
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
690
{
691
/* send SCSI cdb */
692
trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
693
WARN_ON_ONCE(qc->dev->cdb_len < 12);
694
695
ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
696
ata_sff_sync(ap);
697
/* FIXME: If the CDB is for DMA do we need to do the transition delay
698
or is bmdma_start guaranteed to do it? */
699
switch (qc->tf.protocol) {
700
case ATAPI_PROT_PIO:
701
ap->hsm_task_state = HSM_ST;
702
break;
703
case ATAPI_PROT_NODATA:
704
ap->hsm_task_state = HSM_ST_LAST;
705
break;
706
#ifdef CONFIG_ATA_BMDMA
707
case ATAPI_PROT_DMA:
708
ap->hsm_task_state = HSM_ST_LAST;
709
/* initiate bmdma */
710
trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
711
ap->ops->bmdma_start(qc);
712
break;
713
#endif /* CONFIG_ATA_BMDMA */
714
default:
715
BUG();
716
}
717
}
718
719
/**
720
* __atapi_pio_bytes - Transfer data from/to the ATAPI device.
721
* @qc: Command on going
722
* @bytes: number of bytes
723
*
724
* Transfer data from/to the ATAPI device.
725
*
726
* LOCKING:
727
* Inherited from caller.
728
*
729
*/
730
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
731
{
732
int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
733
struct ata_port *ap = qc->ap;
734
struct ata_device *dev = qc->dev;
735
struct ata_eh_info *ehi = &dev->link->eh_info;
736
struct scatterlist *sg;
737
struct page *page;
738
unsigned char *buf;
739
unsigned int offset, count, consumed;
740
741
next_sg:
742
sg = qc->cursg;
743
if (unlikely(!sg)) {
744
ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
745
"buf=%u cur=%u bytes=%u",
746
qc->nbytes, qc->curbytes, bytes);
747
return -1;
748
}
749
750
page = sg_page(sg);
751
offset = sg->offset + qc->cursg_ofs;
752
753
/* get the current page and offset */
754
page = nth_page(page, (offset >> PAGE_SHIFT));
755
offset %= PAGE_SIZE;
756
757
/* don't overrun current sg */
758
count = min(sg->length - qc->cursg_ofs, bytes);
759
760
/* don't cross page boundaries */
761
count = min(count, (unsigned int)PAGE_SIZE - offset);
762
763
trace_atapi_pio_transfer_data(qc, offset, count);
764
765
/* do the actual data transfer */
766
buf = kmap_atomic(page);
767
consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
768
kunmap_atomic(buf);
769
770
bytes -= min(bytes, consumed);
771
qc->curbytes += count;
772
qc->cursg_ofs += count;
773
774
if (qc->cursg_ofs == sg->length) {
775
qc->cursg = sg_next(qc->cursg);
776
qc->cursg_ofs = 0;
777
}
778
779
/*
780
* There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
781
* Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
782
* check correctly as it doesn't know if it is the last request being
783
* made. Somebody should implement a proper sanity check.
784
*/
785
if (bytes)
786
goto next_sg;
787
return 0;
788
}
789
790
/**
791
* atapi_pio_bytes - Transfer data from/to the ATAPI device.
792
* @qc: Command on going
793
*
794
* Transfer data from/to the ATAPI device.
795
*
796
* LOCKING:
797
* Inherited from caller.
798
*/
799
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
800
{
801
struct ata_port *ap = qc->ap;
802
struct ata_device *dev = qc->dev;
803
struct ata_eh_info *ehi = &dev->link->eh_info;
804
unsigned int ireason, bc_lo, bc_hi, bytes;
805
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
806
807
/* Abuse qc->result_tf for temp storage of intermediate TF
808
* here to save some kernel stack usage.
809
* For normal completion, qc->result_tf is not relevant. For
810
* error, qc->result_tf is later overwritten by ata_qc_complete().
811
* So, the correctness of qc->result_tf is not affected.
812
*/
813
ap->ops->sff_tf_read(ap, &qc->result_tf);
814
ireason = qc->result_tf.nsect;
815
bc_lo = qc->result_tf.lbam;
816
bc_hi = qc->result_tf.lbah;
817
bytes = (bc_hi << 8) | bc_lo;
818
819
/* shall be cleared to zero, indicating xfer of data */
820
if (unlikely(ireason & ATAPI_COD))
821
goto atapi_check;
822
823
/* make sure transfer direction matches expected */
824
i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
825
if (unlikely(do_write != i_write))
826
goto atapi_check;
827
828
if (unlikely(!bytes))
829
goto atapi_check;
830
831
if (unlikely(__atapi_pio_bytes(qc, bytes)))
832
goto err_out;
833
ata_sff_sync(ap); /* flush */
834
835
return;
836
837
atapi_check:
838
ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
839
ireason, bytes);
840
err_out:
841
qc->err_mask |= AC_ERR_HSM;
842
ap->hsm_task_state = HSM_ST_ERR;
843
}
844
845
/**
846
* ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
847
* @ap: the target ata_port
848
* @qc: qc on going
849
*
850
* RETURNS:
851
* 1 if ok in workqueue, 0 otherwise.
852
*/
853
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
854
struct ata_queued_cmd *qc)
855
{
856
if (qc->tf.flags & ATA_TFLAG_POLLING)
857
return 1;
858
859
if (ap->hsm_task_state == HSM_ST_FIRST) {
860
if (qc->tf.protocol == ATA_PROT_PIO &&
861
(qc->tf.flags & ATA_TFLAG_WRITE))
862
return 1;
863
864
if (ata_is_atapi(qc->tf.protocol) &&
865
!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
866
return 1;
867
}
868
869
return 0;
870
}
871
872
/**
873
* ata_hsm_qc_complete - finish a qc running on standard HSM
874
* @qc: Command to complete
875
* @in_wq: 1 if called from workqueue, 0 otherwise
876
*
877
* Finish @qc which is running on standard HSM.
878
*
879
* LOCKING:
880
* If @in_wq is zero, spin_lock_irqsave(host lock).
881
* Otherwise, none on entry and grabs host lock.
882
*/
883
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
884
{
885
struct ata_port *ap = qc->ap;
886
887
if (in_wq) {
888
/* EH might have kicked in while host lock is released. */
889
qc = ata_qc_from_tag(ap, qc->tag);
890
if (qc) {
891
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
892
ata_sff_irq_on(ap);
893
ata_qc_complete(qc);
894
} else
895
ata_port_freeze(ap);
896
}
897
} else {
898
if (likely(!(qc->err_mask & AC_ERR_HSM)))
899
ata_qc_complete(qc);
900
else
901
ata_port_freeze(ap);
902
}
903
}
904
905
/**
906
* ata_sff_hsm_move - move the HSM to the next state.
907
* @ap: the target ata_port
908
* @qc: qc on going
909
* @status: current device status
910
* @in_wq: 1 if called from workqueue, 0 otherwise
911
*
912
* RETURNS:
913
* 1 when poll next status needed, 0 otherwise.
914
*/
915
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
916
u8 status, int in_wq)
917
{
918
struct ata_link *link = qc->dev->link;
919
struct ata_eh_info *ehi = &link->eh_info;
920
int poll_next;
921
922
lockdep_assert_held(ap->lock);
923
924
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
925
926
/* Make sure ata_sff_qc_issue() does not throw things
927
* like DMA polling into the workqueue. Notice that
928
* in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
929
*/
930
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
931
932
fsm_start:
933
trace_ata_sff_hsm_state(qc, status);
934
935
switch (ap->hsm_task_state) {
936
case HSM_ST_FIRST:
937
/* Send first data block or PACKET CDB */
938
939
/* If polling, we will stay in the work queue after
940
* sending the data. Otherwise, interrupt handler
941
* takes over after sending the data.
942
*/
943
poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
944
945
/* check device status */
946
if (unlikely((status & ATA_DRQ) == 0)) {
947
/* handle BSY=0, DRQ=0 as error */
948
if (likely(status & (ATA_ERR | ATA_DF)))
949
/* device stops HSM for abort/error */
950
qc->err_mask |= AC_ERR_DEV;
951
else {
952
/* HSM violation. Let EH handle this */
953
ata_ehi_push_desc(ehi,
954
"ST_FIRST: !(DRQ|ERR|DF)");
955
qc->err_mask |= AC_ERR_HSM;
956
}
957
958
ap->hsm_task_state = HSM_ST_ERR;
959
goto fsm_start;
960
}
961
962
/* Device should not ask for data transfer (DRQ=1)
963
* when it finds something wrong.
964
* We ignore DRQ here and stop the HSM by
965
* changing hsm_task_state to HSM_ST_ERR and
966
* let the EH abort the command or reset the device.
967
*/
968
if (unlikely(status & (ATA_ERR | ATA_DF))) {
969
/* Some ATAPI tape drives forget to clear the ERR bit
970
* when doing the next command (mostly request sense).
971
* We ignore ERR here to workaround and proceed sending
972
* the CDB.
973
*/
974
if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
975
ata_ehi_push_desc(ehi, "ST_FIRST: "
976
"DRQ=1 with device error, "
977
"dev_stat 0x%X", status);
978
qc->err_mask |= AC_ERR_HSM;
979
ap->hsm_task_state = HSM_ST_ERR;
980
goto fsm_start;
981
}
982
}
983
984
if (qc->tf.protocol == ATA_PROT_PIO) {
985
/* PIO data out protocol.
986
* send first data block.
987
*/
988
989
/* ata_pio_sectors() might change the state
990
* to HSM_ST_LAST. so, the state is changed here
991
* before ata_pio_sectors().
992
*/
993
ap->hsm_task_state = HSM_ST;
994
ata_pio_sectors(qc);
995
} else
996
/* send CDB */
997
atapi_send_cdb(ap, qc);
998
999
/* if polling, ata_sff_pio_task() handles the rest.
1000
* otherwise, interrupt handler takes over from here.
1001
*/
1002
break;
1003
1004
case HSM_ST:
1005
/* complete command or read/write the data register */
1006
if (qc->tf.protocol == ATAPI_PROT_PIO) {
1007
/* ATAPI PIO protocol */
1008
if ((status & ATA_DRQ) == 0) {
1009
/* No more data to transfer or device error.
1010
* Device error will be tagged in HSM_ST_LAST.
1011
*/
1012
ap->hsm_task_state = HSM_ST_LAST;
1013
goto fsm_start;
1014
}
1015
1016
/* Device should not ask for data transfer (DRQ=1)
1017
* when it finds something wrong.
1018
* We ignore DRQ here and stop the HSM by
1019
* changing hsm_task_state to HSM_ST_ERR and
1020
* let the EH abort the command or reset the device.
1021
*/
1022
if (unlikely(status & (ATA_ERR | ATA_DF))) {
1023
ata_ehi_push_desc(ehi, "ST-ATAPI: "
1024
"DRQ=1 with device error, "
1025
"dev_stat 0x%X", status);
1026
qc->err_mask |= AC_ERR_HSM;
1027
ap->hsm_task_state = HSM_ST_ERR;
1028
goto fsm_start;
1029
}
1030
1031
atapi_pio_bytes(qc);
1032
1033
if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1034
/* bad ireason reported by device */
1035
goto fsm_start;
1036
1037
} else {
1038
/* ATA PIO protocol */
1039
if (unlikely((status & ATA_DRQ) == 0)) {
1040
/* handle BSY=0, DRQ=0 as error */
1041
if (likely(status & (ATA_ERR | ATA_DF))) {
1042
/* device stops HSM for abort/error */
1043
qc->err_mask |= AC_ERR_DEV;
1044
1045
/* If diagnostic failed and this is
1046
* IDENTIFY, it's likely a phantom
1047
* device. Mark hint.
1048
*/
1049
if (qc->dev->quirks &
1050
ATA_QUIRK_DIAGNOSTIC)
1051
qc->err_mask |=
1052
AC_ERR_NODEV_HINT;
1053
} else {
1054
/* HSM violation. Let EH handle this.
1055
* Phantom devices also trigger this
1056
* condition. Mark hint.
1057
*/
1058
ata_ehi_push_desc(ehi, "ST-ATA: "
1059
"DRQ=0 without device error, "
1060
"dev_stat 0x%X", status);
1061
qc->err_mask |= AC_ERR_HSM |
1062
AC_ERR_NODEV_HINT;
1063
}
1064
1065
ap->hsm_task_state = HSM_ST_ERR;
1066
goto fsm_start;
1067
}
1068
1069
/* For PIO reads, some devices may ask for
1070
* data transfer (DRQ=1) along with ERR=1.
1071
* We respect DRQ here and transfer one
1072
* block of junk data before changing the
1073
* hsm_task_state to HSM_ST_ERR.
1074
*
1075
* For PIO writes, ERR=1 DRQ=1 doesn't make
1076
* sense since the data block has been
1077
* transferred to the device.
1078
*/
1079
if (unlikely(status & (ATA_ERR | ATA_DF))) {
1080
/* data might be corrupted */
1081
qc->err_mask |= AC_ERR_DEV;
1082
1083
if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1084
ata_pio_sectors(qc);
1085
status = ata_wait_idle(ap);
1086
}
1087
1088
if (status & (ATA_BUSY | ATA_DRQ)) {
1089
ata_ehi_push_desc(ehi, "ST-ATA: "
1090
"BUSY|DRQ persists on ERR|DF, "
1091
"dev_stat 0x%X", status);
1092
qc->err_mask |= AC_ERR_HSM;
1093
}
1094
1095
/* There are oddball controllers with
1096
* status register stuck at 0x7f and
1097
* lbal/m/h at zero which makes it
1098
* pass all other presence detection
1099
* mechanisms we have. Set NODEV_HINT
1100
* for it. Kernel bz#7241.
1101
*/
1102
if (status == 0x7f)
1103
qc->err_mask |= AC_ERR_NODEV_HINT;
1104
1105
/* ata_pio_sectors() might change the
1106
* state to HSM_ST_LAST. so, the state
1107
* is changed after ata_pio_sectors().
1108
*/
1109
ap->hsm_task_state = HSM_ST_ERR;
1110
goto fsm_start;
1111
}
1112
1113
ata_pio_sectors(qc);
1114
1115
if (ap->hsm_task_state == HSM_ST_LAST &&
1116
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1117
/* all data read */
1118
status = ata_wait_idle(ap);
1119
goto fsm_start;
1120
}
1121
}
1122
1123
poll_next = 1;
1124
break;
1125
1126
case HSM_ST_LAST:
1127
if (unlikely(!ata_ok(status))) {
1128
qc->err_mask |= __ac_err_mask(status);
1129
ap->hsm_task_state = HSM_ST_ERR;
1130
goto fsm_start;
1131
}
1132
1133
/* no more data to transfer */
1134
trace_ata_sff_hsm_command_complete(qc, status);
1135
1136
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1137
1138
ap->hsm_task_state = HSM_ST_IDLE;
1139
1140
/* complete taskfile transaction */
1141
ata_hsm_qc_complete(qc, in_wq);
1142
1143
poll_next = 0;
1144
break;
1145
1146
case HSM_ST_ERR:
1147
ap->hsm_task_state = HSM_ST_IDLE;
1148
1149
/* complete taskfile transaction */
1150
ata_hsm_qc_complete(qc, in_wq);
1151
1152
poll_next = 0;
1153
break;
1154
default:
1155
poll_next = 0;
1156
WARN(true, "ata%d: SFF host state machine in invalid state %d",
1157
ap->print_id, ap->hsm_task_state);
1158
}
1159
1160
return poll_next;
1161
}
1162
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
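/*
 * Hedged sketch of how the HSM is driven in polling mode, modelled on
 * ata_sff_pio_task() below: read the status register and keep feeding it
 * to ata_sff_hsm_move() for as long as it requests another poll.
 * Locking and busy-wait back-off are omitted for brevity.
 */
#if 0
	int poll_next;

	do {
		u8 status = ap->ops->sff_check_status(ap);

		poll_next = ata_sff_hsm_move(ap, qc, status, 1);
	} while (poll_next);
#endif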
1163
1164
void ata_sff_queue_work(struct work_struct *work)
1165
{
1166
queue_work(ata_sff_wq, work);
1167
}
1168
EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1169
1170
void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1171
{
1172
queue_delayed_work(ata_sff_wq, dwork, delay);
1173
}
1174
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1175
1176
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1177
{
1178
struct ata_port *ap = link->ap;
1179
1180
WARN_ON((ap->sff_pio_task_link != NULL) &&
1181
(ap->sff_pio_task_link != link));
1182
ap->sff_pio_task_link = link;
1183
1184
/* may fail if ata_sff_flush_pio_task() in progress */
1185
ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1186
}
1187
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1188
1189
void ata_sff_flush_pio_task(struct ata_port *ap)
1190
{
1191
trace_ata_sff_flush_pio_task(ap);
1192
1193
cancel_delayed_work_sync(&ap->sff_pio_task);
1194
1195
/*
1196
* We wanna reset the HSM state to IDLE. If we do so without
1197
* grabbing the port lock, critical sections protected by it which
1198
* expect the HSM state to stay stable may get surprised. For
1199
* example, we may set IDLE in between the time
1200
* __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1201
* ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1202
*/
1203
spin_lock_irq(ap->lock);
1204
ap->hsm_task_state = HSM_ST_IDLE;
1205
spin_unlock_irq(ap->lock);
1206
1207
ap->sff_pio_task_link = NULL;
1208
}
1209
1210
static void ata_sff_pio_task(struct work_struct *work)
1211
{
1212
struct ata_port *ap =
1213
container_of(work, struct ata_port, sff_pio_task.work);
1214
struct ata_link *link = ap->sff_pio_task_link;
1215
struct ata_queued_cmd *qc;
1216
u8 status;
1217
int poll_next;
1218
1219
spin_lock_irq(ap->lock);
1220
1221
BUG_ON(ap->sff_pio_task_link == NULL);
1222
/* qc can be NULL if timeout occurred */
1223
qc = ata_qc_from_tag(ap, link->active_tag);
1224
if (!qc) {
1225
ap->sff_pio_task_link = NULL;
1226
goto out_unlock;
1227
}
1228
1229
fsm_start:
1230
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1231
1232
/*
1233
* This is purely heuristic. This is a fast path.
1234
* Sometimes when we enter, BSY will be cleared in
1235
* a chk-status or two. If not, the drive is probably seeking
1236
* or something. Snooze for a couple msecs, then
1237
* chk-status again. If still busy, queue delayed work.
1238
*/
1239
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1240
if (status & ATA_BUSY) {
1241
spin_unlock_irq(ap->lock);
1242
ata_msleep(ap, 2);
1243
spin_lock_irq(ap->lock);
1244
1245
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1246
if (status & ATA_BUSY) {
1247
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1248
goto out_unlock;
1249
}
1250
}
1251
1252
/*
1253
* hsm_move() may trigger another command to be processed.
1254
* clean the link beforehand.
1255
*/
1256
ap->sff_pio_task_link = NULL;
1257
/* move the HSM */
1258
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1259
1260
/* another command or interrupt handler
1261
* may be running at this point.
1262
*/
1263
if (poll_next)
1264
goto fsm_start;
1265
out_unlock:
1266
spin_unlock_irq(ap->lock);
1267
}
1268
1269
/**
1270
* ata_sff_qc_issue - issue taskfile to a SFF controller
1271
* @qc: command to issue to device
1272
*
1273
* This function issues a PIO or NODATA command to a SFF
1274
* controller.
1275
*
1276
* LOCKING:
1277
* spin_lock_irqsave(host lock)
1278
*
1279
* RETURNS:
1280
* Zero on success, AC_ERR_* mask on failure
1281
*/
1282
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1283
{
1284
struct ata_port *ap = qc->ap;
1285
struct ata_link *link = qc->dev->link;
1286
1287
/* Use polling pio if the LLD doesn't handle
1288
* interrupt driven pio and atapi CDB interrupt.
1289
*/
1290
if (ap->flags & ATA_FLAG_PIO_POLLING)
1291
qc->tf.flags |= ATA_TFLAG_POLLING;
1292
1293
/* select the device */
1294
ata_dev_select(ap, qc->dev->devno, 1, 0);
1295
1296
/* start the command */
1297
switch (qc->tf.protocol) {
1298
case ATA_PROT_NODATA:
1299
if (qc->tf.flags & ATA_TFLAG_POLLING)
1300
ata_qc_set_polling(qc);
1301
1302
ata_tf_to_host(ap, &qc->tf, qc->tag);
1303
ap->hsm_task_state = HSM_ST_LAST;
1304
1305
if (qc->tf.flags & ATA_TFLAG_POLLING)
1306
ata_sff_queue_pio_task(link, 0);
1307
1308
break;
1309
1310
case ATA_PROT_PIO:
1311
if (qc->tf.flags & ATA_TFLAG_POLLING)
1312
ata_qc_set_polling(qc);
1313
1314
ata_tf_to_host(ap, &qc->tf, qc->tag);
1315
1316
if (qc->tf.flags & ATA_TFLAG_WRITE) {
1317
/* PIO data out protocol */
1318
ap->hsm_task_state = HSM_ST_FIRST;
1319
ata_sff_queue_pio_task(link, 0);
1320
1321
/* always send first data block using the
1322
* ata_sff_pio_task() codepath.
1323
*/
1324
} else {
1325
/* PIO data in protocol */
1326
ap->hsm_task_state = HSM_ST;
1327
1328
if (qc->tf.flags & ATA_TFLAG_POLLING)
1329
ata_sff_queue_pio_task(link, 0);
1330
1331
/* if polling, ata_sff_pio_task() handles the
1332
* rest. otherwise, interrupt handler takes
1333
* over from here.
1334
*/
1335
}
1336
1337
break;
1338
1339
case ATAPI_PROT_PIO:
1340
case ATAPI_PROT_NODATA:
1341
if (qc->tf.flags & ATA_TFLAG_POLLING)
1342
ata_qc_set_polling(qc);
1343
1344
ata_tf_to_host(ap, &qc->tf, qc->tag);
1345
1346
ap->hsm_task_state = HSM_ST_FIRST;
1347
1348
/* send cdb by polling if no cdb interrupt */
1349
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1350
(qc->tf.flags & ATA_TFLAG_POLLING))
1351
ata_sff_queue_pio_task(link, 0);
1352
break;
1353
1354
default:
1355
return AC_ERR_SYSTEM;
1356
}
1357
1358
return 0;
1359
}
1360
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1361
1362
/**
1363
* ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1364
* @qc: qc to fill result TF for
1365
*
1366
* @qc is finished and result TF needs to be filled. Fill it
1367
* using ->sff_tf_read.
1368
*
1369
* LOCKING:
1370
* spin_lock_irqsave(host lock)
1371
*/
1372
void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1373
{
1374
qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1375
}
1376
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1377
1378
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1379
{
1380
ap->stats.idle_irq++;
1381
1382
#ifdef ATA_IRQ_TRAP
1383
if ((ap->stats.idle_irq % 1000) == 0) {
1384
ap->ops->sff_check_status(ap);
1385
if (ap->ops->sff_irq_clear)
1386
ap->ops->sff_irq_clear(ap);
1387
ata_port_warn(ap, "irq trap\n");
1388
return 1;
1389
}
1390
#endif
1391
return 0; /* irq not handled */
1392
}
1393
1394
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1395
struct ata_queued_cmd *qc,
1396
bool hsmv_on_idle)
1397
{
1398
u8 status;
1399
1400
trace_ata_sff_port_intr(qc, hsmv_on_idle);
1401
1402
/* Check whether we are expecting interrupt in this state */
1403
switch (ap->hsm_task_state) {
1404
case HSM_ST_FIRST:
1405
/* Some pre-ATAPI-4 devices assert INTRQ
1406
* at this state when ready to receive CDB.
1407
*/
1408
1409
/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1410
* The flag was turned on only for atapi devices. No
1411
* need to check ata_is_atapi(qc->tf.protocol) again.
1412
*/
1413
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1414
return ata_sff_idle_irq(ap);
1415
break;
1416
case HSM_ST_IDLE:
1417
return ata_sff_idle_irq(ap);
1418
default:
1419
break;
1420
}
1421
1422
/* check main status, clearing INTRQ if needed */
1423
status = ata_sff_irq_status(ap);
1424
if (status & ATA_BUSY) {
1425
if (hsmv_on_idle) {
1426
/* BMDMA engine is already stopped, we're screwed */
1427
qc->err_mask |= AC_ERR_HSM;
1428
ap->hsm_task_state = HSM_ST_ERR;
1429
} else
1430
return ata_sff_idle_irq(ap);
1431
}
1432
1433
/* clear irq events */
1434
if (ap->ops->sff_irq_clear)
1435
ap->ops->sff_irq_clear(ap);
1436
1437
ata_sff_hsm_move(ap, qc, status, 0);
1438
1439
return 1; /* irq handled */
1440
}
1441
1442
/**
1443
* ata_sff_port_intr - Handle SFF port interrupt
1444
* @ap: Port on which interrupt arrived (possibly...)
1445
* @qc: Taskfile currently active in engine
1446
*
1447
* Handle port interrupt for given queued command.
1448
*
1449
* LOCKING:
1450
* spin_lock_irqsave(host lock)
1451
*
1452
* RETURNS:
1453
* One if interrupt was handled, zero if not (shared irq).
1454
*/
1455
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1456
{
1457
return __ata_sff_port_intr(ap, qc, false);
1458
}
1459
EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1460
1461
static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1462
unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1463
{
1464
struct ata_host *host = dev_instance;
1465
bool retried = false;
1466
unsigned int i;
1467
unsigned int handled, idle, polling;
1468
unsigned long flags;
1469
1470
/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1471
spin_lock_irqsave(&host->lock, flags);
1472
1473
retry:
1474
handled = idle = polling = 0;
1475
for (i = 0; i < host->n_ports; i++) {
1476
struct ata_port *ap = host->ports[i];
1477
struct ata_queued_cmd *qc;
1478
1479
qc = ata_qc_from_tag(ap, ap->link.active_tag);
1480
if (qc) {
1481
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1482
handled |= port_intr(ap, qc);
1483
else
1484
polling |= 1 << i;
1485
} else
1486
idle |= 1 << i;
1487
}
1488
1489
/*
1490
* If no port was expecting IRQ but the controller is actually
1491
* asserting the IRQ line, a "nobody cared" warning will ensue. Check IRQ
1492
* pending status if available and clear spurious IRQ.
1493
*/
1494
if (!handled && !retried) {
1495
bool retry = false;
1496
1497
for (i = 0; i < host->n_ports; i++) {
1498
struct ata_port *ap = host->ports[i];
1499
1500
if (polling & (1 << i))
1501
continue;
1502
1503
if (!ap->ops->sff_irq_check ||
1504
!ap->ops->sff_irq_check(ap))
1505
continue;
1506
1507
if (idle & (1 << i)) {
1508
ap->ops->sff_check_status(ap);
1509
if (ap->ops->sff_irq_clear)
1510
ap->ops->sff_irq_clear(ap);
1511
} else {
1512
/* clear INTRQ and check if BUSY cleared */
1513
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1514
retry |= true;
1515
/*
1516
* With command in flight, we can't do
1517
* sff_irq_clear() w/o racing with completion.
1518
*/
1519
}
1520
}
1521
1522
if (retry) {
1523
retried = true;
1524
goto retry;
1525
}
1526
}
1527
1528
spin_unlock_irqrestore(&host->lock, flags);
1529
1530
return IRQ_RETVAL(handled);
1531
}
1532
1533
/**
1534
* ata_sff_interrupt - Default SFF ATA host interrupt handler
1535
* @irq: irq line (unused)
1536
* @dev_instance: pointer to our ata_host information structure
1537
*
1538
* Default interrupt handler for PCI IDE devices. Calls
1539
* ata_sff_port_intr() for each port that is not disabled.
1540
*
1541
* LOCKING:
1542
* Obtains host lock during operation.
1543
*
1544
* RETURNS:
1545
* IRQ_NONE or IRQ_HANDLED.
1546
*/
1547
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1548
{
1549
return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1550
}
1551
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
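/*
 * Typical registration sketch from a host driver's probe path: the host
 * is activated with ata_sff_interrupt as a shared IRQ handler.  "my_sht"
 * is a hypothetical scsi_host_template supplied by the driver.
 */
#if 0
	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
			       IRQF_SHARED, &my_sht);
#endif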
1552
1553
/**
1554
* ata_sff_lost_interrupt - Check for an apparent lost interrupt
1555
* @ap: port that appears to have timed out
1556
*
1557
* Called from the libata error handlers when the core code suspects
1558
* an interrupt has been lost. If it has, complete anything we can and
1559
* then return. Interface must support altstatus for this faster
1560
* recovery to occur.
1561
*
1562
* Locking:
1563
* Caller holds host lock
1564
*/
1565
1566
void ata_sff_lost_interrupt(struct ata_port *ap)
1567
{
1568
u8 status = 0;
1569
struct ata_queued_cmd *qc;
1570
1571
/* Only one outstanding command per SFF channel */
1572
qc = ata_qc_from_tag(ap, ap->link.active_tag);
1573
/* We cannot lose an interrupt on a non-existent or polled command */
1574
if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1575
return;
1576
/* See if the controller thinks it is still busy - if so the command
1577
isn't a lost IRQ but is still in progress */
1578
if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
1579
return;
1580
if (status & ATA_BUSY)
1581
return;
1582
1583
/* There was a command running, we are no longer busy and we have
1584
no interrupt. */
1585
ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
1586
/* Run the host interrupt logic as if the interrupt had not been
1587
lost */
1588
ata_sff_port_intr(ap, qc);
1589
}
1590
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1591
1592
/**
1593
* ata_sff_freeze - Freeze SFF controller port
1594
* @ap: port to freeze
1595
*
1596
* Freeze SFF controller port.
1597
*
1598
* LOCKING:
1599
* Inherited from caller.
1600
*/
1601
void ata_sff_freeze(struct ata_port *ap)
1602
{
1603
ap->ctl |= ATA_NIEN;
1604
ap->last_ctl = ap->ctl;
1605
1606
ata_sff_set_devctl(ap, ap->ctl);
1607
1608
/* Under certain circumstances, some controllers raise IRQ on
1609
* ATA_NIEN manipulation. Also, many controllers fail to mask
1610
* previously pending IRQ on ATA_NIEN assertion. Clear it.
1611
*/
1612
ap->ops->sff_check_status(ap);
1613
1614
if (ap->ops->sff_irq_clear)
1615
ap->ops->sff_irq_clear(ap);
1616
}
1617
EXPORT_SYMBOL_GPL(ata_sff_freeze);
1618
1619
/**
1620
* ata_sff_thaw - Thaw SFF controller port
1621
* @ap: port to thaw
1622
*
1623
* Thaw SFF controller port.
1624
*
1625
* LOCKING:
1626
* Inherited from caller.
1627
*/
1628
void ata_sff_thaw(struct ata_port *ap)
1629
{
1630
/* clear & re-enable interrupts */
1631
ap->ops->sff_check_status(ap);
1632
if (ap->ops->sff_irq_clear)
1633
ap->ops->sff_irq_clear(ap);
1634
ata_sff_irq_on(ap);
1635
}
1636
EXPORT_SYMBOL_GPL(ata_sff_thaw);
1637
1638
/**
1639
* ata_sff_prereset - prepare SFF link for reset
1640
* @link: SFF link to be reset
1641
* @deadline: deadline jiffies for the operation
1642
*
1643
* SFF link @link is about to be reset. Initialize it. It first
1644
* calls ata_std_prereset() and waits for !BSY if the port is
1645
* being softreset.
1646
*
1647
* LOCKING:
1648
* Kernel thread context (may sleep)
1649
*
1650
* RETURNS:
1651
* Always 0.
1652
*/
1653
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1654
{
1655
struct ata_eh_context *ehc = &link->eh_context;
1656
int rc;
1657
1658
/* The standard prereset is best-effort and always returns 0 */
1659
ata_std_prereset(link, deadline);
1660
1661
/* if we're about to do hardreset, nothing more to do */
1662
if (ehc->i.action & ATA_EH_HARDRESET)
1663
return 0;
1664
1665
/* wait for !BSY if we don't know that no device is attached */
1666
if (!ata_link_offline(link)) {
1667
rc = ata_sff_wait_ready(link, deadline);
1668
if (rc && rc != -ENODEV) {
1669
ata_link_warn(link,
1670
"device not ready (errno=%d), forcing hardreset\n",
1671
rc);
1672
ehc->i.action |= ATA_EH_HARDRESET;
1673
}
1674
}
1675
1676
return 0;
1677
}
1678
EXPORT_SYMBOL_GPL(ata_sff_prereset);
1679
1680
/**
1681
* ata_devchk - PATA device presence detection
1682
* @ap: ATA channel to examine
1683
* @device: Device to examine (starting at zero)
1684
*
1685
* This technique was originally described in
1686
* Hale Landis's ATADRVR (www.ata-atapi.com), and
1687
* later found its way into the ATA/ATAPI spec.
1688
*
1689
* Write a pattern to the ATA shadow registers,
1690
* and if a device is present, it will respond by
1691
* correctly storing and echoing back the
1692
* ATA shadow register contents.
1693
*
1694
* RETURN:
1695
* true if device is present, false if not.
1696
*
1697
* LOCKING:
1698
* caller.
1699
*/
1700
static bool ata_devchk(struct ata_port *ap, unsigned int device)
1701
{
1702
struct ata_ioports *ioaddr = &ap->ioaddr;
1703
u8 nsect, lbal;
1704
1705
ap->ops->sff_dev_select(ap, device);
1706
1707
iowrite8(0x55, ioaddr->nsect_addr);
1708
iowrite8(0xaa, ioaddr->lbal_addr);
1709
1710
iowrite8(0xaa, ioaddr->nsect_addr);
1711
iowrite8(0x55, ioaddr->lbal_addr);
1712
1713
iowrite8(0x55, ioaddr->nsect_addr);
1714
iowrite8(0xaa, ioaddr->lbal_addr);
1715
1716
nsect = ioread8(ioaddr->nsect_addr);
1717
lbal = ioread8(ioaddr->lbal_addr);
1718
1719
if ((nsect == 0x55) && (lbal == 0xaa))
1720
return true; /* we found a device */
1721
1722
return false; /* nothing found */
1723
}
1724
1725
/**
1726
* ata_sff_dev_classify - Parse returned ATA device signature
1727
* @dev: ATA device to classify (starting at zero)
1728
* @present: device seems present
1729
* @r_err: Value of error register on completion
1730
*
1731
* After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1732
* an ATA/ATAPI-defined set of values is placed in the ATA
1733
* shadow registers, indicating the results of device detection
1734
* and diagnostics.
1735
*
1736
* Select the ATA device, and read the values from the ATA shadow
1737
* registers. Then parse according to the Error register value,
1738
* and the spec-defined values examined by ata_dev_classify().
1739
*
1740
* LOCKING:
1741
* caller.
1742
*
1743
* RETURNS:
1744
* Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1745
*/
1746
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1747
u8 *r_err)
1748
{
1749
struct ata_port *ap = dev->link->ap;
1750
struct ata_taskfile tf;
1751
unsigned int class;
1752
u8 err;
1753
1754
ap->ops->sff_dev_select(ap, dev->devno);
1755
1756
memset(&tf, 0, sizeof(tf));
1757
1758
ap->ops->sff_tf_read(ap, &tf);
1759
err = tf.error;
1760
if (r_err)
1761
*r_err = err;
1762
1763
/* see if device passed diags: continue and warn later */
1764
if (err == 0)
1765
/* diagnostic fail : do nothing _YET_ */
1766
dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
1767
else if (err == 1)
1768
/* do nothing */ ;
1769
else if ((dev->devno == 0) && (err == 0x81))
1770
/* do nothing */ ;
1771
else
1772
return ATA_DEV_NONE;
1773
1774
/* determine if device is ATA or ATAPI */
1775
class = ata_port_classify(ap, &tf);
1776
switch (class) {
1777
case ATA_DEV_UNKNOWN:
1778
/*
1779
* If the device failed diagnostic, it's likely to
1780
* have reported incorrect device signature too.
1781
* Assume ATA device if the device seems present but
1782
* device signature is invalid with diagnostic
1783
* failure.
1784
*/
1785
if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
1786
class = ATA_DEV_ATA;
1787
else
1788
class = ATA_DEV_NONE;
1789
break;
1790
case ATA_DEV_ATA:
1791
if (ap->ops->sff_check_status(ap) == 0)
1792
class = ATA_DEV_NONE;
1793
break;
1794
}
1795
return class;
1796
}
1797
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
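/*
 * For reference (sketch): ata_port_classify() keys off the signature the
 * device leaves in the shadow registers after reset; the well-known
 * values are roughly:
 *
 *	lbam 0x00  lbah 0x00	ATA device
 *	lbam 0x14  lbah 0xEB	ATAPI device
 *	lbam 0x69  lbah 0x96	SEMB device
 *	lbam 0x3C  lbah 0xC3	port multiplier
 */
#if 0
	u8 err;
	unsigned int class = ata_sff_dev_classify(&link->device[0], 1, &err);
#endif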
1798
1799
/**
1800
* ata_sff_wait_after_reset - wait for devices to become ready after reset
1801
* @link: SFF link which is just reset
1802
* @devmask: mask of present devices
1803
* @deadline: deadline jiffies for the operation
1804
*
1805
* Wait for devices attached to SFF @link to become ready after
1806
* reset. It contains preceding 150ms wait to avoid accessing TF
1807
* status register too early.
1808
*
1809
* LOCKING:
1810
* Kernel thread context (may sleep).
1811
*
1812
* RETURNS:
1813
* 0 on success, -ENODEV if some or all of devices in @devmask
1814
* don't seem to exist. -errno on other errors.
1815
*/
1816
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1817
unsigned long deadline)
1818
{
1819
struct ata_port *ap = link->ap;
1820
struct ata_ioports *ioaddr = &ap->ioaddr;
1821
unsigned int dev0 = devmask & (1 << 0);
1822
unsigned int dev1 = devmask & (1 << 1);
1823
int rc, ret = 0;
1824
1825
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1826
1827
/* always check readiness of the master device */
1828
rc = ata_sff_wait_ready(link, deadline);
1829
/* -ENODEV means the odd clown forgot the D7 pulldown resistor
1830
* and TF status is 0xff, bail out on it too.
1831
*/
1832
if (rc)
1833
return rc;
1834
1835
/* if device 1 was found in ata_devchk, wait for register
1836
* access briefly, then wait for BSY to clear.
1837
*/
1838
if (dev1) {
1839
int i;
1840
1841
ap->ops->sff_dev_select(ap, 1);
1842
1843
/* Wait for register access. Some ATAPI devices fail
1844
* to set nsect/lbal after reset, so don't waste too
1845
* much time on it. We're gonna wait for !BSY anyway.
1846
*/
1847
for (i = 0; i < 2; i++) {
1848
u8 nsect, lbal;
1849
1850
nsect = ioread8(ioaddr->nsect_addr);
1851
lbal = ioread8(ioaddr->lbal_addr);
1852
if ((nsect == 1) && (lbal == 1))
1853
break;
1854
ata_msleep(ap, 50); /* give drive a breather */
1855
}
1856
1857
rc = ata_sff_wait_ready(link, deadline);
1858
if (rc) {
1859
if (rc != -ENODEV)
1860
return rc;
1861
ret = rc;
1862
}
1863
}
1864
1865
/* is all this really necessary? */
1866
ap->ops->sff_dev_select(ap, 0);
1867
if (dev1)
1868
ap->ops->sff_dev_select(ap, 1);
1869
if (dev0)
1870
ap->ops->sff_dev_select(ap, 0);
1871
1872
return ret;
1873
}
1874
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1875
1876
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1877
unsigned long deadline)
1878
{
1879
struct ata_ioports *ioaddr = &ap->ioaddr;
1880
1881
if (ap->ioaddr.ctl_addr) {
1882
/* software reset. causes dev0 to be selected */
1883
iowrite8(ap->ctl, ioaddr->ctl_addr);
1884
udelay(20); /* FIXME: flush */
1885
iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1886
udelay(20); /* FIXME: flush */
1887
iowrite8(ap->ctl, ioaddr->ctl_addr);
1888
ap->last_ctl = ap->ctl;
1889
}
1890
1891
/* wait for the port to become ready */
1892
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1893
}
1894
1895
/**
1896
* ata_sff_softreset - reset host port via ATA SRST
1897
* @link: ATA link to reset
1898
* @classes: resulting classes of attached devices
1899
* @deadline: deadline jiffies for the operation
1900
*
1901
* Reset host port using ATA SRST.
1902
*
1903
* LOCKING:
1904
* Kernel thread context (may sleep)
1905
*
1906
* RETURNS:
1907
* 0 on success, -errno otherwise.
1908
*/
1909
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1910
unsigned long deadline)
1911
{
1912
struct ata_port *ap = link->ap;
1913
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1914
unsigned int devmask = 0;
1915
int rc;
1916
u8 err;
1917
1918
/* determine if device 0/1 are present */
1919
if (ata_devchk(ap, 0))
1920
devmask |= (1 << 0);
1921
if (slave_possible && ata_devchk(ap, 1))
1922
devmask |= (1 << 1);
1923
1924
/* select device 0 again */
1925
ap->ops->sff_dev_select(ap, 0);
1926
1927
/* issue bus reset */
1928
rc = ata_bus_softreset(ap, devmask, deadline);
1929
/* if link is occupied, -ENODEV too is an error */
1930
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1931
ata_link_err(link, "SRST failed (errno=%d)\n", rc);
1932
return rc;
1933
}
1934
1935
/* determine by signature whether we have ATA or ATAPI devices */
1936
classes[0] = ata_sff_dev_classify(&link->device[0],
1937
devmask & (1 << 0), &err);
1938
if (slave_possible && err != 0x81)
1939
classes[1] = ata_sff_dev_classify(&link->device[1],
1940
devmask & (1 << 1), &err);
1941
1942
return 0;
1943
}
1944
EXPORT_SYMBOL_GPL(ata_sff_softreset);
1945
1946
/**
1947
* sata_sff_hardreset - reset host port via SATA phy reset
1948
* @link: link to reset
1949
* @class: resulting class of attached device
1950
* @deadline: deadline jiffies for the operation
1951
*
1952
* SATA phy-reset host port using DET bits of SControl register,
1953
* wait for !BSY and classify the attached device.
1954
*
1955
* LOCKING:
1956
* Kernel thread context (may sleep)
1957
*
1958
* RETURNS:
1959
* 0 on success, -errno otherwise.
1960
*/
1961
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
1962
unsigned long deadline)
1963
{
1964
struct ata_eh_context *ehc = &link->eh_context;
1965
const unsigned int *timing = sata_ehc_deb_timing(ehc);
1966
bool online;
1967
int rc;
1968
1969
rc = sata_link_hardreset(link, timing, deadline, &online,
1970
ata_sff_check_ready);
1971
if (online)
1972
*class = ata_sff_dev_classify(link->device, 1, NULL);
1973
1974
return rc;
1975
}
1976
EXPORT_SYMBOL_GPL(sata_sff_hardreset);

/**
 *	ata_sff_postreset - SFF postreset callback
 *	@link: the target SFF ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  It first
 *	calls ata_std_postreset() and performs SFF specific postreset
 *	processing.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;

	ata_std_postreset(link, classes);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
		return;

	/* set up device control */
	if (ata_sff_set_devctl(ap, ap->ctl))
		ap->last_ctl = ap->ctl;
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);

/**
 *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 *	@qc: command
 *
 *	Drain the FIFO and device of any stuck data following a command
 *	failing to complete. In some cases this is necessary before a
 *	reset will recover the device.
 *
 */

void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count < 65536; count += 2)
		ioread16(ap->ioaddr.data_addr);

	if (count)
		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);

}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);

/**
 *	ata_sff_error_handler - Stock error handler for SFF controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for SFF controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_sff_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_EH))
		qc = NULL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * We *MUST* do FIFO draining before we issue a reset as
	 * several devices helpfully clear their internal state and
	 * will lock solid if we touch the data port post reset. Pass
	 * qc in case anyone wants to do different PIO/DMA recovery or
	 * has per command fixups
	 */
	if (ap->ops->sff_drain_fifo)
		ap->ops->sff_drain_fifo(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);

/**
 *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
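
/*
 * Example: an illustrative sketch of how a hypothetical platform driver
 * might use ata_sff_std_ports().  Only cmd_addr and ctl_addr have to be
 * filled in by hand; the helper derives the remaining taskfile register
 * addresses.  The resources and mappings below are assumptions made for
 * the sketch, not part of this API.
 *
 *	void __iomem *base = devm_ioremap(dev, res_cmd->start, 16);
 *	void __iomem *ctl  = devm_ioremap(dev, res_ctl->start, 1);
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.altstatus_addr = ctl;
 *	ap->ioaddr.ctl_addr = ctl;
 *	ata_sff_std_ports(&ap->ioaddr);
 */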

#ifdef CONFIG_PCI

static bool ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port *= 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return false;
	}
	return true;
}

/**
 *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_warn(gdev,
				 "failed to request/iomap BARs for port %d (errno=%d)\n",
				 i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_sff_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_err(gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
 *	all PCI resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
			     const struct ata_port_info * const *ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_sff_init_host(host);
	if (rc)
		goto err_out;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);

/**
 *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_activate_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      const struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask = 0;

		/*
		 * ATA spec says we should use legacy mode when one
		 * port is in legacy mode, but disabled ports on some
		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700,
		 * on which the secondary port is not wired, so
		 * ignore ports that are marked as 'dummy' during
		 * this check.
		 */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		if (!ata_port_is_dummy(host->ports[0]))
			mask |= (1 << 0);
		if (!ata_port_is_dummy(host->ports[1]))
			mask |= (1 << 2);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		int i;

		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc_misc(host->ports[i], pdev->irq);
		}
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[0],
					   ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[1],
					   ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
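
/*
 * Example: an illustrative sketch (hypothetical driver) of the
 * prepare-then-activate split.  A driver that has to adjust the host
 * between resource setup and registration can call the two helpers
 * directly instead of going through ata_pci_sff_init_one().  The hpriv
 * and sht symbols are assumptions for the sketch.
 *
 *	struct ata_host *host;
 *	int rc;
 *
 *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *
 *	host->private_data = hpriv;	// driver-specific setup goes here
 *
 *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
 */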

static const struct ata_port_info *ata_sff_find_valid_pi(
					const struct ata_port_info * const *ppi)
{
	int i;

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];

	return NULL;
}

static int ata_pci_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		const struct scsi_host_template *sht, void *host_priv,
		int hflags, bool bmdma)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_err(&pdev->dev, "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma)
		/* prepare and activate BMDMA host */
		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	else
#endif
		/* prepare and activate SFF host */
		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;
	host->flags |= hflags;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma) {
		pci_set_master(pdev);
		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
	} else
#endif
		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}

/**
 *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *	@hflag: host flags
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers and is PIO only.
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno value on error.
 */
int ata_pci_sff_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		const struct scsi_host_template *sht, void *host_priv, int hflag)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
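
/*
 * Example: a minimal, illustrative probe routine for a PIO-only controller,
 * assuming a hypothetical driver "foo" with its own scsi_host_template and
 * port operations.  Everything except ata_pci_sff_init_one() and the listed
 * flag/mask macros is an assumption made for the sketch.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &foo_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */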

#endif /* CONFIG_PCI */

/*
 *	BMDMA support
 */

#ifdef CONFIG_ATA_BMDMA

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.qc_prep		= ata_bmdma_qc_prep,
	.qc_issue		= ata_bmdma_qc_issue,

	.sff_irq_clear		= ata_bmdma_irq_clear,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.port_start		= ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
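
/*
 * Example: an illustrative sketch of how a hypothetical BMDMA driver would
 * inherit these operations and override only what its hardware needs.  The
 * foo_* symbols are assumptions for the sketch; .inherits, .set_piomode and
 * .set_dmamode are real ata_port_operations members.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= foo_set_piomode,
 *		.set_dmamode	= foo_set_dmamode,
 *	};
 */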

/**
 *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[pi].addr = cpu_to_le32(addr);
			prd[pi].flags_len = cpu_to_le32(len & 0xffff);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
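
/*
 * Worked example of the 64K-boundary split above (illustrative only):
 * a segment with DMA address 0x0001f000 and length 0x3000 has
 * offset = 0xf000, so offset + sg_len = 0x12000 > 0x10000 and the
 * segment is emitted as two PRD entries:
 *
 *	PRD[n]:   addr 0x0001f000, len 0x1000	(up to the 64K boundary)
 *	PRD[n+1]: addr 0x00020000, len 0x2000	(the remainder)
 *
 * The final entry of the table is then flagged with ATA_PRD_EOT.
 */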

/**
 *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says */
				prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			prd[pi].flags_len = cpu_to_le32(blen);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_bmdma_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg_dumb(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);

/**
 *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO, NODATA or DMA command to a
 *	SFF/BMDMA controller.  PIO and NODATA are handled by
 *	ata_sff_qc_issue().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);		/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);		/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 *	ata_bmdma_port_intr - Handle BMDMA port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 host_stat = 0;
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		trace_ata_bmdma_status(ap, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;

		if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
			qc->err_mask |= AC_ERR_HOST_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
		}
	}

	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);

/**
 *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_bmdma_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Most BMDMA controllers should be
 *	able to use this EH as-is or with some added handling before
 *	and after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_EH))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);
		trace_ata_bmdma_status(ap, host_stat);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = true;
		}

		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so I think it is best not to add a readb()
	 * without first checking all of the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO
	 */
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);

/**
 *	ata_bmdma_port_start - Set port up for bmdma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start(struct ata_port *ap)
{
	if (ap->mwdma_mask || ap->udma_mask) {
		ap->bmdma_prd =
			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
					    &ap->bmdma_prd_dma, GFP_KERNEL);
		if (!ap->bmdma_prd)
			return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);

/**
 *	ata_bmdma_port_start32 - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Enables 32bit PIO and allocates space for PRD
 *	table.
 *
 *	May be used as the port_start() entry in ata_port_operations for
 *	devices that are capable of 32bit PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);

#ifdef CONFIG_PCI

/**
 *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
#ifdef CONFIG_HAS_IOPORT
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
#else
	return -ENOENT;
#endif /* CONFIG_HAS_IOPORT */
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
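
/*
 * Example: an illustrative sketch of how a host driver might use this
 * helper.  A hypothetical probe routine that finds the simplex bit set
 * could try to clear it before settling for a shared-DMA configuration:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_warn(&pdev->dev,
 *			 "simplex mode could not be cleared, sharing DMA\n");
 */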

static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
	int i;

	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);

	for (i = 0; i < 2; i++) {
		host->ports[i]->mwdma_mask = 0;
		host->ports[i]->udma_mask = 0;
	}
}

/**
 *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	/*
	 * Some controllers require BMDMA region to be initialized
	 * even if DMA is not in use to clear IRQ status via
	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
	 * regardless of dma masks.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const * ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	ata_pci_bmdma_init(*r_host);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);

/**
 *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *	@sht: scsi_host_template to use when registering the host
 *	@host_priv: host private_data
 *	@hflags: host flags
 *
 *	This function is similar to ata_pci_sff_init_one() but also
 *	takes care of BMDMA initialization.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno value on error.
 */
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
			   const struct ata_port_info * const * ppi,
			   const struct scsi_host_template *sht, void *host_priv,
			   int hflags)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
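
/*
 * Example: a minimal, illustrative probe routine for a BMDMA-capable
 * controller, assuming a hypothetical driver "foo".  Only
 * ata_pci_bmdma_init_one() and the listed flag/mask macros are real;
 * the foo_* symbols are assumptions for the sketch.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.mwdma_mask	= ATA_MWDMA2,
 *			.udma_mask	= ATA_UDMA5,
 *			.port_ops	= &foo_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */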

#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */

/**
 *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
 *	@ap: Port to initialize
 *
 *	Called on port allocation to initialize SFF/BMDMA specific
 *	fields.
 *
 *	LOCKING:
 *	None.
 */
void ata_sff_port_init(struct ata_port *ap)
{
	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
	ap->ctl = ATA_DEVCTL_OBS;
	ap->last_ctl = 0xFF;
}

int __init ata_sff_init(void)
{
	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
	if (!ata_sff_wq)
		return -ENOMEM;

	return 0;
}

void ata_sff_exit(void)
{
	destroy_workqueue(ata_sff_wq);
}