GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/ata/libata-sff.c
1
/*
2
* libata-sff.c - helper library for PCI IDE BMDMA
3
*
4
* Maintained by: Jeff Garzik <[email protected]>
5
* Please ALWAYS copy [email protected]
6
* on emails.
7
*
8
* Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9
* Copyright 2003-2006 Jeff Garzik
10
*
11
*
12
* This program is free software; you can redistribute it and/or modify
13
* it under the terms of the GNU General Public License as published by
14
* the Free Software Foundation; either version 2, or (at your option)
15
* any later version.
16
*
17
* This program is distributed in the hope that it will be useful,
18
* but WITHOUT ANY WARRANTY; without even the implied warranty of
19
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20
* GNU General Public License for more details.
21
*
22
* You should have received a copy of the GNU General Public License
23
* along with this program; see the file COPYING. If not, write to
24
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25
*
26
*
27
* libata documentation is available via 'make {ps|pdf}docs',
28
* as Documentation/DocBook/libata.*
29
*
30
* Hardware documentation available from http://www.t13.org/ and
31
* http://www.sata-io.org/
32
*
33
*/
34
35
#include <linux/kernel.h>
36
#include <linux/gfp.h>
37
#include <linux/pci.h>
38
#include <linux/libata.h>
39
#include <linux/highmem.h>
40
41
#include "libata.h"
42
43
static struct workqueue_struct *ata_sff_wq;
44
45
const struct ata_port_operations ata_sff_port_ops = {
46
.inherits = &ata_base_port_ops,
47
48
.qc_prep = ata_noop_qc_prep,
49
.qc_issue = ata_sff_qc_issue,
50
.qc_fill_rtf = ata_sff_qc_fill_rtf,
51
52
.freeze = ata_sff_freeze,
53
.thaw = ata_sff_thaw,
54
.prereset = ata_sff_prereset,
55
.softreset = ata_sff_softreset,
56
.hardreset = sata_sff_hardreset,
57
.postreset = ata_sff_postreset,
58
.error_handler = ata_sff_error_handler,
59
60
.sff_dev_select = ata_sff_dev_select,
61
.sff_check_status = ata_sff_check_status,
62
.sff_tf_load = ata_sff_tf_load,
63
.sff_tf_read = ata_sff_tf_read,
64
.sff_exec_command = ata_sff_exec_command,
65
.sff_data_xfer = ata_sff_data_xfer,
66
.sff_drain_fifo = ata_sff_drain_fifo,
67
68
.lost_interrupt = ata_sff_lost_interrupt,
69
};
70
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
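/*
 * Illustrative sketch, not part of this file: a low-level driver normally
 * reuses these defaults through .inherits and overrides only the hooks its
 * hardware needs.  The pata_foo_* names below are hypothetical.
 *
 *	static struct ata_port_operations pata_foo_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.set_piomode	= pata_foo_set_piomode,
 *	};
 */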
71
72
/**
73
* ata_sff_check_status - Read device status reg & clear interrupt
74
* @ap: port where the device is
75
*
76
* Reads ATA taskfile status register for currently-selected device
77
* and returns its value. This also clears pending interrupts
78
* from this device.
79
*
80
* LOCKING:
81
* Inherited from caller.
82
*/
83
u8 ata_sff_check_status(struct ata_port *ap)
84
{
85
return ioread8(ap->ioaddr.status_addr);
86
}
87
EXPORT_SYMBOL_GPL(ata_sff_check_status);
88
89
/**
90
* ata_sff_altstatus - Read device alternate status reg
91
* @ap: port where the device is
92
*
93
* Reads ATA taskfile alternate status register for
94
* currently-selected device and returns its value.
95
*
96
* Note: may NOT be used as the check_altstatus() entry in
97
* ata_port_operations.
98
*
99
* LOCKING:
100
* Inherited from caller.
101
*/
102
static u8 ata_sff_altstatus(struct ata_port *ap)
103
{
104
if (ap->ops->sff_check_altstatus)
105
return ap->ops->sff_check_altstatus(ap);
106
107
return ioread8(ap->ioaddr.altstatus_addr);
108
}
109
110
/**
111
* ata_sff_irq_status - Check if the device is busy
112
* @ap: port where the device is
113
*
114
* Determine if the port is currently busy. Uses altstatus
115
* if available in order to avoid clearing shared IRQ status
116
* when finding an IRQ source. Fortunately for us, devices
117
* without a ctl register don't share interrupt lines.
118
*
119
* LOCKING:
120
* Inherited from caller.
121
*/
122
static u8 ata_sff_irq_status(struct ata_port *ap)
123
{
124
u8 status;
125
126
if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
127
status = ata_sff_altstatus(ap);
128
/* Not us: We are busy */
129
if (status & ATA_BUSY)
130
return status;
131
}
132
/* Clear INTRQ latch */
133
status = ap->ops->sff_check_status(ap);
134
return status;
135
}
136
137
/**
138
* ata_sff_sync - Flush writes
139
* @ap: Port to wait for.
140
*
141
* CAUTION:
142
* If we have an mmio device with no ctl and no altstatus
143
* method this will fail. No such devices are known to exist.
144
*
145
* LOCKING:
146
* Inherited from caller.
147
*/
148
149
static void ata_sff_sync(struct ata_port *ap)
150
{
151
if (ap->ops->sff_check_altstatus)
152
ap->ops->sff_check_altstatus(ap);
153
else if (ap->ioaddr.altstatus_addr)
154
ioread8(ap->ioaddr.altstatus_addr);
155
}
156
157
/**
158
* ata_sff_pause - Flush writes and wait 400ns
159
* @ap: Port to pause for.
160
*
161
* CAUTION:
162
* If we have an mmio device with no ctl and no altstatus
163
* method this will fail. No such devices are known to exist.
164
*
165
* LOCKING:
166
* Inherited from caller.
167
*/
168
169
void ata_sff_pause(struct ata_port *ap)
170
{
171
ata_sff_sync(ap);
172
ndelay(400);
173
}
174
EXPORT_SYMBOL_GPL(ata_sff_pause);
175
176
/**
177
* ata_sff_dma_pause - Pause before commencing DMA
178
* @ap: Port to pause for.
179
*
180
* Perform I/O fencing and ensure sufficient cycle delays occur
181
* for the HDMA1:0 transition
182
*/
183
184
void ata_sff_dma_pause(struct ata_port *ap)
185
{
186
if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
187
/* An altstatus read will cause the needed delay without
188
messing up the IRQ status */
189
ata_sff_altstatus(ap);
190
return;
191
}
192
/* There are no DMA controllers without ctl. BUG here to ensure
193
we never violate the HDMA1:0 transition timing and risk
194
corruption. */
195
BUG();
196
}
197
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
198
199
/**
200
* ata_sff_busy_sleep - sleep until BSY clears, or timeout
201
* @ap: port containing status register to be polled
202
* @tmout_pat: impatience timeout in msecs
203
* @tmout: overall timeout in msecs
204
*
205
* Sleep until ATA Status register bit BSY clears,
206
* or a timeout occurs.
207
*
208
* LOCKING:
209
* Kernel thread context (may sleep).
210
*
211
* RETURNS:
212
* 0 on success, -errno otherwise.
213
*/
214
int ata_sff_busy_sleep(struct ata_port *ap,
215
unsigned long tmout_pat, unsigned long tmout)
216
{
217
unsigned long timer_start, timeout;
218
u8 status;
219
220
status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
221
timer_start = jiffies;
222
timeout = ata_deadline(timer_start, tmout_pat);
223
while (status != 0xff && (status & ATA_BUSY) &&
224
time_before(jiffies, timeout)) {
225
ata_msleep(ap, 50);
226
status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
227
}
228
229
if (status != 0xff && (status & ATA_BUSY))
230
ata_port_printk(ap, KERN_WARNING,
231
"port is slow to respond, please be patient "
232
"(Status 0x%x)\n", status);
233
234
timeout = ata_deadline(timer_start, tmout);
235
while (status != 0xff && (status & ATA_BUSY) &&
236
time_before(jiffies, timeout)) {
237
ata_msleep(ap, 50);
238
status = ap->ops->sff_check_status(ap);
239
}
240
241
if (status == 0xff)
242
return -ENODEV;
243
244
if (status & ATA_BUSY) {
245
ata_port_printk(ap, KERN_ERR, "port failed to respond "
246
"(%lu secs, Status 0x%x)\n",
247
DIV_ROUND_UP(tmout, 1000), status);
248
return -EBUSY;
249
}
250
251
return 0;
252
}
253
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
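/*
 * Usage sketch (hypothetical caller; both timeouts are in msecs as
 * documented above): warn after 5 seconds, give up after 30.
 *
 *	rc = ata_sff_busy_sleep(ap, 5000, 30000);
 *	if (rc)
 *		return rc;
 */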
254
255
static int ata_sff_check_ready(struct ata_link *link)
256
{
257
u8 status = link->ap->ops->sff_check_status(link->ap);
258
259
return ata_check_ready(status);
260
}
261
262
/**
263
* ata_sff_wait_ready - sleep until BSY clears, or timeout
264
* @link: SFF link to wait ready status for
265
* @deadline: deadline jiffies for the operation
266
*
267
* Sleep until ATA Status register bit BSY clears, or timeout
268
* occurs.
269
*
270
* LOCKING:
271
* Kernel thread context (may sleep).
272
*
273
* RETURNS:
274
* 0 on success, -errno otherwise.
275
*/
276
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
277
{
278
return ata_wait_ready(link, deadline, ata_sff_check_ready);
279
}
280
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
281
282
/**
283
* ata_sff_set_devctl - Write device control reg
284
* @ap: port where the device is
285
* @ctl: value to write
286
*
287
* Writes ATA taskfile device control register.
288
*
289
* Note: may NOT be used as the sff_set_devctl() entry in
290
* ata_port_operations.
291
*
292
* LOCKING:
293
* Inherited from caller.
294
*/
295
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
296
{
297
if (ap->ops->sff_set_devctl)
298
ap->ops->sff_set_devctl(ap, ctl);
299
else
300
iowrite8(ctl, ap->ioaddr.ctl_addr);
301
}
302
303
/**
304
* ata_sff_dev_select - Select device 0/1 on ATA bus
305
* @ap: ATA channel to manipulate
306
* @device: ATA device (numbered from zero) to select
307
*
308
* Use the method defined in the ATA specification to
309
* make either device 0, or device 1, active on the
310
* ATA channel. Works with both PIO and MMIO.
311
*
312
* May be used as the dev_select() entry in ata_port_operations.
313
*
314
* LOCKING:
315
* caller.
316
*/
317
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
318
{
319
u8 tmp;
320
321
if (device == 0)
322
tmp = ATA_DEVICE_OBS;
323
else
324
tmp = ATA_DEVICE_OBS | ATA_DEV1;
325
326
iowrite8(tmp, ap->ioaddr.device_addr);
327
ata_sff_pause(ap); /* needed; also flushes, for mmio */
328
}
329
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
330
331
/**
332
* ata_dev_select - Select device 0/1 on ATA bus
333
* @ap: ATA channel to manipulate
334
* @device: ATA device (numbered from zero) to select
335
* @wait: non-zero to wait for Status register BSY bit to clear
336
* @can_sleep: non-zero if context allows sleeping
337
*
338
* Use the method defined in the ATA specification to
339
* make either device 0, or device 1, active on the
340
* ATA channel.
341
*
342
* This is a high-level version of ata_sff_dev_select(), which
343
* additionally provides the services of inserting the proper
344
* pauses and status polling, where needed.
345
*
346
* LOCKING:
347
* caller.
348
*/
349
static void ata_dev_select(struct ata_port *ap, unsigned int device,
350
unsigned int wait, unsigned int can_sleep)
351
{
352
if (ata_msg_probe(ap))
353
ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
354
"device %u, wait %u\n", device, wait);
355
356
if (wait)
357
ata_wait_idle(ap);
358
359
ap->ops->sff_dev_select(ap, device);
360
361
if (wait) {
362
if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
363
ata_msleep(ap, 150);
364
ata_wait_idle(ap);
365
}
366
}
367
368
/**
369
* ata_sff_irq_on - Enable interrupts on a port.
370
* @ap: Port on which interrupts are enabled.
371
*
372
* Enable interrupts on a legacy IDE device using MMIO or PIO,
373
* wait for idle, clear any pending interrupts.
374
*
375
* Note: may NOT be used as the sff_irq_on() entry in
376
* ata_port_operations.
377
*
378
* LOCKING:
379
* Inherited from caller.
380
*/
381
void ata_sff_irq_on(struct ata_port *ap)
382
{
383
struct ata_ioports *ioaddr = &ap->ioaddr;
384
385
if (ap->ops->sff_irq_on) {
386
ap->ops->sff_irq_on(ap);
387
return;
388
}
389
390
ap->ctl &= ~ATA_NIEN;
391
ap->last_ctl = ap->ctl;
392
393
if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
394
ata_sff_set_devctl(ap, ap->ctl);
395
ata_wait_idle(ap);
396
397
if (ap->ops->sff_irq_clear)
398
ap->ops->sff_irq_clear(ap);
399
}
400
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
401
402
/**
403
* ata_sff_tf_load - send taskfile registers to host controller
404
* @ap: Port to which output is sent
405
* @tf: ATA taskfile register set
406
*
407
* Outputs ATA taskfile to standard ATA host controller.
408
*
409
* LOCKING:
410
* Inherited from caller.
411
*/
412
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
413
{
414
struct ata_ioports *ioaddr = &ap->ioaddr;
415
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
416
417
if (tf->ctl != ap->last_ctl) {
418
if (ioaddr->ctl_addr)
419
iowrite8(tf->ctl, ioaddr->ctl_addr);
420
ap->last_ctl = tf->ctl;
421
ata_wait_idle(ap);
422
}
423
424
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
425
WARN_ON_ONCE(!ioaddr->ctl_addr);
426
iowrite8(tf->hob_feature, ioaddr->feature_addr);
427
iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
428
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
429
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
430
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
431
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
432
tf->hob_feature,
433
tf->hob_nsect,
434
tf->hob_lbal,
435
tf->hob_lbam,
436
tf->hob_lbah);
437
}
438
439
if (is_addr) {
440
iowrite8(tf->feature, ioaddr->feature_addr);
441
iowrite8(tf->nsect, ioaddr->nsect_addr);
442
iowrite8(tf->lbal, ioaddr->lbal_addr);
443
iowrite8(tf->lbam, ioaddr->lbam_addr);
444
iowrite8(tf->lbah, ioaddr->lbah_addr);
445
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
446
tf->feature,
447
tf->nsect,
448
tf->lbal,
449
tf->lbam,
450
tf->lbah);
451
}
452
453
if (tf->flags & ATA_TFLAG_DEVICE) {
454
iowrite8(tf->device, ioaddr->device_addr);
455
VPRINTK("device 0x%X\n", tf->device);
456
}
457
458
ata_wait_idle(ap);
459
}
460
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
461
462
/**
463
* ata_sff_tf_read - input device's ATA taskfile shadow registers
464
* @ap: Port from which input is read
465
* @tf: ATA taskfile register set for storing input
466
*
467
* Reads ATA taskfile registers for currently-selected device
468
* into @tf. Assumes the device has a fully SFF compliant task file
469
* layout and behaviour. If your device does not (e.g. has a different
470
* status method) then you will need to provide a replacement tf_read.
471
*
472
* LOCKING:
473
* Inherited from caller.
474
*/
475
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
476
{
477
struct ata_ioports *ioaddr = &ap->ioaddr;
478
479
tf->command = ata_sff_check_status(ap);
480
tf->feature = ioread8(ioaddr->error_addr);
481
tf->nsect = ioread8(ioaddr->nsect_addr);
482
tf->lbal = ioread8(ioaddr->lbal_addr);
483
tf->lbam = ioread8(ioaddr->lbam_addr);
484
tf->lbah = ioread8(ioaddr->lbah_addr);
485
tf->device = ioread8(ioaddr->device_addr);
486
487
if (tf->flags & ATA_TFLAG_LBA48) {
488
if (likely(ioaddr->ctl_addr)) {
489
iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
490
tf->hob_feature = ioread8(ioaddr->error_addr);
491
tf->hob_nsect = ioread8(ioaddr->nsect_addr);
492
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
493
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
494
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
495
iowrite8(tf->ctl, ioaddr->ctl_addr);
496
ap->last_ctl = tf->ctl;
497
} else
498
WARN_ON_ONCE(1);
499
}
500
}
501
EXPORT_SYMBOL_GPL(ata_sff_tf_read);
502
503
/**
504
* ata_sff_exec_command - issue ATA command to host controller
505
* @ap: port to which command is being issued
506
* @tf: ATA taskfile register set
507
*
508
* Issues ATA command, with proper synchronization with interrupt
509
* handler / other threads.
510
*
511
* LOCKING:
512
* spin_lock_irqsave(host lock)
513
*/
514
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
515
{
516
DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
517
518
iowrite8(tf->command, ap->ioaddr.command_addr);
519
ata_sff_pause(ap);
520
}
521
EXPORT_SYMBOL_GPL(ata_sff_exec_command);
522
523
/**
524
* ata_tf_to_host - issue ATA taskfile to host controller
525
* @ap: port to which command is being issued
526
* @tf: ATA taskfile register set
527
*
528
* Issues ATA taskfile register set to ATA host controller,
529
* with proper synchronization with interrupt handler and
530
* other threads.
531
*
532
* LOCKING:
533
* spin_lock_irqsave(host lock)
534
*/
535
static inline void ata_tf_to_host(struct ata_port *ap,
536
const struct ata_taskfile *tf)
537
{
538
ap->ops->sff_tf_load(ap, tf);
539
ap->ops->sff_exec_command(ap, tf);
540
}
541
542
/**
543
* ata_sff_data_xfer - Transfer data by PIO
544
* @dev: device to target
545
* @buf: data buffer
546
* @buflen: buffer length
547
* @rw: read/write
548
*
549
* Transfer data from/to the device data register by PIO.
550
*
551
* LOCKING:
552
* Inherited from caller.
553
*
554
* RETURNS:
555
* Bytes consumed.
556
*/
557
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
558
unsigned int buflen, int rw)
559
{
560
struct ata_port *ap = dev->link->ap;
561
void __iomem *data_addr = ap->ioaddr.data_addr;
562
unsigned int words = buflen >> 1;
563
564
/* Transfer multiple of 2 bytes */
565
if (rw == READ)
566
ioread16_rep(data_addr, buf, words);
567
else
568
iowrite16_rep(data_addr, buf, words);
569
570
/* Transfer trailing byte, if any. */
571
if (unlikely(buflen & 0x01)) {
572
unsigned char pad[2];
573
574
/* Point buf to the tail of buffer */
575
buf += buflen - 1;
576
577
/*
578
* Use io*16_rep() accessors here as well to avoid pointlessly
579
* swapping bytes to and from on the big endian machines...
580
*/
581
if (rw == READ) {
582
ioread16_rep(data_addr, pad, 1);
583
*buf = pad[0];
584
} else {
585
pad[0] = *buf;
586
iowrite16_rep(data_addr, pad, 1);
587
}
588
words++;
589
}
590
591
return words << 1;
592
}
593
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
594
595
/**
596
* ata_sff_data_xfer32 - Transfer data by PIO
597
* @dev: device to target
598
* @buf: data buffer
599
* @buflen: buffer length
600
* @rw: read/write
601
*
602
* Transfer data from/to the device data register by PIO using 32bit
603
* I/O operations.
604
*
605
* LOCKING:
606
* Inherited from caller.
607
*
608
* RETURNS:
609
* Bytes consumed.
610
*/
611
612
unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
613
unsigned int buflen, int rw)
614
{
615
struct ata_port *ap = dev->link->ap;
616
void __iomem *data_addr = ap->ioaddr.data_addr;
617
unsigned int words = buflen >> 2;
618
int slop = buflen & 3;
619
620
if (!(ap->pflags & ATA_PFLAG_PIO32))
621
return ata_sff_data_xfer(dev, buf, buflen, rw);
622
623
/* Transfer multiple of 4 bytes */
624
if (rw == READ)
625
ioread32_rep(data_addr, buf, words);
626
else
627
iowrite32_rep(data_addr, buf, words);
628
629
/* Transfer trailing bytes, if any */
630
if (unlikely(slop)) {
631
unsigned char pad[4];
632
633
/* Point buf to the tail of buffer */
634
buf += buflen - slop;
635
636
/*
637
* Use io*_rep() accessors here as well to avoid pointlessly
638
* swapping bytes to and from on the big endian machines...
639
*/
640
if (rw == READ) {
641
if (slop < 3)
642
ioread16_rep(data_addr, pad, 1);
643
else
644
ioread32_rep(data_addr, pad, 1);
645
memcpy(buf, pad, slop);
646
} else {
647
memcpy(pad, buf, slop);
648
if (slop < 3)
649
iowrite16_rep(data_addr, pad, 1);
650
else
651
iowrite32_rep(data_addr, pad, 1);
652
}
653
}
654
return (buflen + 1) & ~1;
655
}
656
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
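/*
 * Illustrative sketch: a controller capable of 32-bit PIO points its port
 * operations at this helper; the routine itself falls back to
 * ata_sff_data_xfer() whenever ATA_PFLAG_PIO32 is not set on the port.
 * The pata_foo name is hypothetical.
 *
 *	static struct ata_port_operations pata_foo_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *	};
 */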
657
658
/**
659
* ata_sff_data_xfer_noirq - Transfer data by PIO
660
* @dev: device to target
661
* @buf: data buffer
662
* @buflen: buffer length
663
* @rw: read/write
664
*
665
* Transfer data from/to the device data register by PIO. Do the
666
* transfer with interrupts disabled.
667
*
668
* LOCKING:
669
* Inherited from caller.
670
*
671
* RETURNS:
672
* Bytes consumed.
673
*/
674
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
675
unsigned int buflen, int rw)
676
{
677
unsigned long flags;
678
unsigned int consumed;
679
680
local_irq_save(flags);
681
consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
682
local_irq_restore(flags);
683
684
return consumed;
685
}
686
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
687
688
/**
689
* ata_pio_sector - Transfer a sector of data.
690
* @qc: Command on going
691
*
692
* Transfer qc->sect_size bytes of data from/to the ATA device.
693
*
694
* LOCKING:
695
* Inherited from caller.
696
*/
697
static void ata_pio_sector(struct ata_queued_cmd *qc)
698
{
699
int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
700
struct ata_port *ap = qc->ap;
701
struct page *page;
702
unsigned int offset;
703
unsigned char *buf;
704
705
if (qc->curbytes == qc->nbytes - qc->sect_size)
706
ap->hsm_task_state = HSM_ST_LAST;
707
708
page = sg_page(qc->cursg);
709
offset = qc->cursg->offset + qc->cursg_ofs;
710
711
/* get the current page and offset */
712
page = nth_page(page, (offset >> PAGE_SHIFT));
713
offset %= PAGE_SIZE;
714
715
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
716
717
if (PageHighMem(page)) {
718
unsigned long flags;
719
720
/* FIXME: use a bounce buffer */
721
local_irq_save(flags);
722
buf = kmap_atomic(page, KM_IRQ0);
723
724
/* do the actual data transfer */
725
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
726
do_write);
727
728
kunmap_atomic(buf, KM_IRQ0);
729
local_irq_restore(flags);
730
} else {
731
buf = page_address(page);
732
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
733
do_write);
734
}
735
736
if (!do_write && !PageSlab(page))
737
flush_dcache_page(page);
738
739
qc->curbytes += qc->sect_size;
740
qc->cursg_ofs += qc->sect_size;
741
742
if (qc->cursg_ofs == qc->cursg->length) {
743
qc->cursg = sg_next(qc->cursg);
744
qc->cursg_ofs = 0;
745
}
746
}
747
748
/**
749
* ata_pio_sectors - Transfer one or many sectors.
750
* @qc: Command on going
751
*
752
* Transfer one or many sectors of data from/to the
753
* ATA device for the DRQ request.
754
*
755
* LOCKING:
756
* Inherited from caller.
757
*/
758
static void ata_pio_sectors(struct ata_queued_cmd *qc)
759
{
760
if (is_multi_taskfile(&qc->tf)) {
761
/* READ/WRITE MULTIPLE */
762
unsigned int nsect;
763
764
WARN_ON_ONCE(qc->dev->multi_count == 0);
765
766
nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
767
qc->dev->multi_count);
768
while (nsect--)
769
ata_pio_sector(qc);
770
} else
771
ata_pio_sector(qc);
772
773
ata_sff_sync(qc->ap); /* flush */
774
}
775
776
/**
777
* atapi_send_cdb - Write CDB bytes to hardware
778
* @ap: Port to which ATAPI device is attached.
779
* @qc: Taskfile currently active
780
*
781
* When the device has indicated its readiness to accept
782
* a CDB, this function is called. Send the CDB.
783
*
784
* LOCKING:
785
* caller.
786
*/
787
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
788
{
789
/* send SCSI cdb */
790
DPRINTK("send cdb\n");
791
WARN_ON_ONCE(qc->dev->cdb_len < 12);
792
793
ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
794
ata_sff_sync(ap);
795
/* FIXME: If the CDB is for DMA do we need to do the transition delay
796
or is bmdma_start guaranteed to do it ? */
797
switch (qc->tf.protocol) {
798
case ATAPI_PROT_PIO:
799
ap->hsm_task_state = HSM_ST;
800
break;
801
case ATAPI_PROT_NODATA:
802
ap->hsm_task_state = HSM_ST_LAST;
803
break;
804
#ifdef CONFIG_ATA_BMDMA
805
case ATAPI_PROT_DMA:
806
ap->hsm_task_state = HSM_ST_LAST;
807
/* initiate bmdma */
808
ap->ops->bmdma_start(qc);
809
break;
810
#endif /* CONFIG_ATA_BMDMA */
811
default:
812
BUG();
813
}
814
}
815
816
/**
817
* __atapi_pio_bytes - Transfer data from/to the ATAPI device.
818
* @qc: Command on going
819
* @bytes: number of bytes
820
*
821
* Transfer data from/to the ATAPI device.
822
*
823
* LOCKING:
824
* Inherited from caller.
825
*
826
*/
827
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
828
{
829
int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
830
struct ata_port *ap = qc->ap;
831
struct ata_device *dev = qc->dev;
832
struct ata_eh_info *ehi = &dev->link->eh_info;
833
struct scatterlist *sg;
834
struct page *page;
835
unsigned char *buf;
836
unsigned int offset, count, consumed;
837
838
next_sg:
839
sg = qc->cursg;
840
if (unlikely(!sg)) {
841
ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
842
"buf=%u cur=%u bytes=%u",
843
qc->nbytes, qc->curbytes, bytes);
844
return -1;
845
}
846
847
page = sg_page(sg);
848
offset = sg->offset + qc->cursg_ofs;
849
850
/* get the current page and offset */
851
page = nth_page(page, (offset >> PAGE_SHIFT));
852
offset %= PAGE_SIZE;
853
854
/* don't overrun current sg */
855
count = min(sg->length - qc->cursg_ofs, bytes);
856
857
/* don't cross page boundaries */
858
count = min(count, (unsigned int)PAGE_SIZE - offset);
859
860
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
861
862
if (PageHighMem(page)) {
863
unsigned long flags;
864
865
/* FIXME: use bounce buffer */
866
local_irq_save(flags);
867
buf = kmap_atomic(page, KM_IRQ0);
868
869
/* do the actual data transfer */
870
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
871
count, rw);
872
873
kunmap_atomic(buf, KM_IRQ0);
874
local_irq_restore(flags);
875
} else {
876
buf = page_address(page);
877
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
878
count, rw);
879
}
880
881
bytes -= min(bytes, consumed);
882
qc->curbytes += count;
883
qc->cursg_ofs += count;
884
885
if (qc->cursg_ofs == sg->length) {
886
qc->cursg = sg_next(qc->cursg);
887
qc->cursg_ofs = 0;
888
}
889
890
/*
891
* There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
892
* Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
893
* check correctly as it doesn't know if it is the last request being
894
* made. Somebody should implement a proper sanity check.
895
*/
896
if (bytes)
897
goto next_sg;
898
return 0;
899
}
900
901
/**
902
* atapi_pio_bytes - Transfer data from/to the ATAPI device.
903
* @qc: Command on going
904
*
905
* Transfer data from/to the ATAPI device.
906
*
907
* LOCKING:
908
* Inherited from caller.
909
*/
910
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
911
{
912
struct ata_port *ap = qc->ap;
913
struct ata_device *dev = qc->dev;
914
struct ata_eh_info *ehi = &dev->link->eh_info;
915
unsigned int ireason, bc_lo, bc_hi, bytes;
916
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
917
918
/* Abuse qc->result_tf for temp storage of intermediate TF
919
* here to save some kernel stack usage.
920
* For normal completion, qc->result_tf is not relevant. For
921
* error, qc->result_tf is later overwritten by ata_qc_complete().
922
* So, the correctness of qc->result_tf is not affected.
923
*/
924
ap->ops->sff_tf_read(ap, &qc->result_tf);
925
ireason = qc->result_tf.nsect;
926
bc_lo = qc->result_tf.lbam;
927
bc_hi = qc->result_tf.lbah;
928
bytes = (bc_hi << 8) | bc_lo;
929
930
/* shall be cleared to zero, indicating xfer of data */
931
if (unlikely(ireason & (1 << 0)))
932
goto atapi_check;
933
934
/* make sure transfer direction matches expected */
935
i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
936
if (unlikely(do_write != i_write))
937
goto atapi_check;
938
939
if (unlikely(!bytes))
940
goto atapi_check;
941
942
VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
943
944
if (unlikely(__atapi_pio_bytes(qc, bytes)))
945
goto err_out;
946
ata_sff_sync(ap); /* flush */
947
948
return;
949
950
atapi_check:
951
ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
952
ireason, bytes);
953
err_out:
954
qc->err_mask |= AC_ERR_HSM;
955
ap->hsm_task_state = HSM_ST_ERR;
956
}
957
958
/**
959
* ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
960
* @ap: the target ata_port
961
* @qc: qc on going
962
*
963
* RETURNS:
964
* 1 if ok in workqueue, 0 otherwise.
965
*/
966
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
967
struct ata_queued_cmd *qc)
968
{
969
if (qc->tf.flags & ATA_TFLAG_POLLING)
970
return 1;
971
972
if (ap->hsm_task_state == HSM_ST_FIRST) {
973
if (qc->tf.protocol == ATA_PROT_PIO &&
974
(qc->tf.flags & ATA_TFLAG_WRITE))
975
return 1;
976
977
if (ata_is_atapi(qc->tf.protocol) &&
978
!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
979
return 1;
980
}
981
982
return 0;
983
}
984
985
/**
986
* ata_hsm_qc_complete - finish a qc running on standard HSM
987
* @qc: Command to complete
988
* @in_wq: 1 if called from workqueue, 0 otherwise
989
*
990
* Finish @qc which is running on standard HSM.
991
*
992
* LOCKING:
993
* If @in_wq is zero, spin_lock_irqsave(host lock).
994
* Otherwise, none on entry and grabs host lock.
995
*/
996
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
997
{
998
struct ata_port *ap = qc->ap;
999
unsigned long flags;
1000
1001
if (ap->ops->error_handler) {
1002
if (in_wq) {
1003
spin_lock_irqsave(ap->lock, flags);
1004
1005
/* EH might have kicked in while host lock is
1006
* released.
1007
*/
1008
qc = ata_qc_from_tag(ap, qc->tag);
1009
if (qc) {
1010
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1011
ata_sff_irq_on(ap);
1012
ata_qc_complete(qc);
1013
} else
1014
ata_port_freeze(ap);
1015
}
1016
1017
spin_unlock_irqrestore(ap->lock, flags);
1018
} else {
1019
if (likely(!(qc->err_mask & AC_ERR_HSM)))
1020
ata_qc_complete(qc);
1021
else
1022
ata_port_freeze(ap);
1023
}
1024
} else {
1025
if (in_wq) {
1026
spin_lock_irqsave(ap->lock, flags);
1027
ata_sff_irq_on(ap);
1028
ata_qc_complete(qc);
1029
spin_unlock_irqrestore(ap->lock, flags);
1030
} else
1031
ata_qc_complete(qc);
1032
}
1033
}
1034
1035
/**
1036
* ata_sff_hsm_move - move the HSM to the next state.
1037
* @ap: the target ata_port
1038
* @qc: qc on going
1039
* @status: current device status
1040
* @in_wq: 1 if called from workqueue, 0 otherwise
1041
*
1042
* RETURNS:
1043
* 1 when poll next status needed, 0 otherwise.
1044
*/
1045
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1046
u8 status, int in_wq)
1047
{
1048
struct ata_link *link = qc->dev->link;
1049
struct ata_eh_info *ehi = &link->eh_info;
1050
unsigned long flags = 0;
1051
int poll_next;
1052
1053
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1054
1055
/* Make sure ata_sff_qc_issue() does not throw things
1056
* like DMA polling into the workqueue. Notice that
1057
* in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1058
*/
1059
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1060
1061
fsm_start:
1062
DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1063
ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1064
1065
switch (ap->hsm_task_state) {
1066
case HSM_ST_FIRST:
1067
/* Send first data block or PACKET CDB */
1068
1069
/* If polling, we will stay in the work queue after
1070
* sending the data. Otherwise, interrupt handler
1071
* takes over after sending the data.
1072
*/
1073
poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1074
1075
/* check device status */
1076
if (unlikely((status & ATA_DRQ) == 0)) {
1077
/* handle BSY=0, DRQ=0 as error */
1078
if (likely(status & (ATA_ERR | ATA_DF)))
1079
/* device stops HSM for abort/error */
1080
qc->err_mask |= AC_ERR_DEV;
1081
else {
1082
/* HSM violation. Let EH handle this */
1083
ata_ehi_push_desc(ehi,
1084
"ST_FIRST: !(DRQ|ERR|DF)");
1085
qc->err_mask |= AC_ERR_HSM;
1086
}
1087
1088
ap->hsm_task_state = HSM_ST_ERR;
1089
goto fsm_start;
1090
}
1091
1092
/* Device should not ask for data transfer (DRQ=1)
1093
* when it finds something wrong.
1094
* We ignore DRQ here and stop the HSM by
1095
* changing hsm_task_state to HSM_ST_ERR and
1096
* let the EH abort the command or reset the device.
1097
*/
1098
if (unlikely(status & (ATA_ERR | ATA_DF))) {
1099
/* Some ATAPI tape drives forget to clear the ERR bit
1100
* when doing the next command (mostly request sense).
1101
* We ignore ERR here as a workaround and proceed with sending
1102
* the CDB.
1103
*/
1104
if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1105
ata_ehi_push_desc(ehi, "ST_FIRST: "
1106
"DRQ=1 with device error, "
1107
"dev_stat 0x%X", status);
1108
qc->err_mask |= AC_ERR_HSM;
1109
ap->hsm_task_state = HSM_ST_ERR;
1110
goto fsm_start;
1111
}
1112
}
1113
1114
/* Send the CDB (atapi) or the first data block (ata pio out).
1115
* During the state transition, interrupt handler shouldn't
1116
* be invoked before the data transfer is complete and
1117
* hsm_task_state is changed. Hence, the following locking.
1118
*/
1119
if (in_wq)
1120
spin_lock_irqsave(ap->lock, flags);
1121
1122
if (qc->tf.protocol == ATA_PROT_PIO) {
1123
/* PIO data out protocol.
1124
* send first data block.
1125
*/
1126
1127
/* ata_pio_sectors() might change the state
1128
* to HSM_ST_LAST. so, the state is changed here
1129
* before ata_pio_sectors().
1130
*/
1131
ap->hsm_task_state = HSM_ST;
1132
ata_pio_sectors(qc);
1133
} else
1134
/* send CDB */
1135
atapi_send_cdb(ap, qc);
1136
1137
if (in_wq)
1138
spin_unlock_irqrestore(ap->lock, flags);
1139
1140
/* if polling, ata_sff_pio_task() handles the rest.
1141
* otherwise, interrupt handler takes over from here.
1142
*/
1143
break;
1144
1145
case HSM_ST:
1146
/* complete command or read/write the data register */
1147
if (qc->tf.protocol == ATAPI_PROT_PIO) {
1148
/* ATAPI PIO protocol */
1149
if ((status & ATA_DRQ) == 0) {
1150
/* No more data to transfer or device error.
1151
* Device error will be tagged in HSM_ST_LAST.
1152
*/
1153
ap->hsm_task_state = HSM_ST_LAST;
1154
goto fsm_start;
1155
}
1156
1157
/* Device should not ask for data transfer (DRQ=1)
1158
* when it finds something wrong.
1159
* We ignore DRQ here and stop the HSM by
1160
* changing hsm_task_state to HSM_ST_ERR and
1161
* let the EH abort the command or reset the device.
1162
*/
1163
if (unlikely(status & (ATA_ERR | ATA_DF))) {
1164
ata_ehi_push_desc(ehi, "ST-ATAPI: "
1165
"DRQ=1 with device error, "
1166
"dev_stat 0x%X", status);
1167
qc->err_mask |= AC_ERR_HSM;
1168
ap->hsm_task_state = HSM_ST_ERR;
1169
goto fsm_start;
1170
}
1171
1172
atapi_pio_bytes(qc);
1173
1174
if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1175
/* bad ireason reported by device */
1176
goto fsm_start;
1177
1178
} else {
1179
/* ATA PIO protocol */
1180
if (unlikely((status & ATA_DRQ) == 0)) {
1181
/* handle BSY=0, DRQ=0 as error */
1182
if (likely(status & (ATA_ERR | ATA_DF))) {
1183
/* device stops HSM for abort/error */
1184
qc->err_mask |= AC_ERR_DEV;
1185
1186
/* If diagnostic failed and this is
1187
* IDENTIFY, it's likely a phantom
1188
* device. Mark hint.
1189
*/
1190
if (qc->dev->horkage &
1191
ATA_HORKAGE_DIAGNOSTIC)
1192
qc->err_mask |=
1193
AC_ERR_NODEV_HINT;
1194
} else {
1195
/* HSM violation. Let EH handle this.
1196
* Phantom devices also trigger this
1197
* condition. Mark hint.
1198
*/
1199
ata_ehi_push_desc(ehi, "ST-ATA: "
1200
"DRQ=0 without device error, "
1201
"dev_stat 0x%X", status);
1202
qc->err_mask |= AC_ERR_HSM |
1203
AC_ERR_NODEV_HINT;
1204
}
1205
1206
ap->hsm_task_state = HSM_ST_ERR;
1207
goto fsm_start;
1208
}
1209
1210
/* For PIO reads, some devices may ask for
1211
* data transfer (DRQ=1) along with ERR=1.
1212
* We respect DRQ here and transfer one
1213
* block of junk data before changing the
1214
* hsm_task_state to HSM_ST_ERR.
1215
*
1216
* For PIO writes, ERR=1 DRQ=1 doesn't make
1217
* sense since the data block has been
1218
* transferred to the device.
1219
*/
1220
if (unlikely(status & (ATA_ERR | ATA_DF))) {
1221
/* data might be corrupted */
1222
qc->err_mask |= AC_ERR_DEV;
1223
1224
if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1225
ata_pio_sectors(qc);
1226
status = ata_wait_idle(ap);
1227
}
1228
1229
if (status & (ATA_BUSY | ATA_DRQ)) {
1230
ata_ehi_push_desc(ehi, "ST-ATA: "
1231
"BUSY|DRQ persists on ERR|DF, "
1232
"dev_stat 0x%X", status);
1233
qc->err_mask |= AC_ERR_HSM;
1234
}
1235
1236
/* There are oddball controllers with
1237
* status register stuck at 0x7f and
1238
* lbal/m/h at zero which makes it
1239
* pass all other presence detection
1240
* mechanisms we have. Set NODEV_HINT
1241
* for it. Kernel bz#7241.
1242
*/
1243
if (status == 0x7f)
1244
qc->err_mask |= AC_ERR_NODEV_HINT;
1245
1246
/* ata_pio_sectors() might change the
1247
* state to HSM_ST_LAST. so, the state
1248
* is changed after ata_pio_sectors().
1249
*/
1250
ap->hsm_task_state = HSM_ST_ERR;
1251
goto fsm_start;
1252
}
1253
1254
ata_pio_sectors(qc);
1255
1256
if (ap->hsm_task_state == HSM_ST_LAST &&
1257
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1258
/* all data read */
1259
status = ata_wait_idle(ap);
1260
goto fsm_start;
1261
}
1262
}
1263
1264
poll_next = 1;
1265
break;
1266
1267
case HSM_ST_LAST:
1268
if (unlikely(!ata_ok(status))) {
1269
qc->err_mask |= __ac_err_mask(status);
1270
ap->hsm_task_state = HSM_ST_ERR;
1271
goto fsm_start;
1272
}
1273
1274
/* no more data to transfer */
1275
DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1276
ap->print_id, qc->dev->devno, status);
1277
1278
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1279
1280
ap->hsm_task_state = HSM_ST_IDLE;
1281
1282
/* complete taskfile transaction */
1283
ata_hsm_qc_complete(qc, in_wq);
1284
1285
poll_next = 0;
1286
break;
1287
1288
case HSM_ST_ERR:
1289
ap->hsm_task_state = HSM_ST_IDLE;
1290
1291
/* complete taskfile transaction */
1292
ata_hsm_qc_complete(qc, in_wq);
1293
1294
poll_next = 0;
1295
break;
1296
default:
1297
poll_next = 0;
1298
BUG();
1299
}
1300
1301
return poll_next;
1302
}
1303
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
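/*
 * Summary of the state machine driven above (added for readability; the
 * switch in ata_sff_hsm_move() remains authoritative):
 *
 *	HSM_ST_FIRST -> first data block (PIO write) or CDB (ATAPI) is sent
 *	HSM_ST       -> data register is read/written until DRQ drops
 *	HSM_ST_LAST  -> final status is checked and the qc is completed
 *	HSM_ST_ERR   -> err_mask is set; the qc is completed or the port
 *	                is frozen for EH
 */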
1304
1305
void ata_sff_queue_work(struct work_struct *work)
1306
{
1307
queue_work(ata_sff_wq, work);
1308
}
1309
EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1310
1311
void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1312
{
1313
queue_delayed_work(ata_sff_wq, dwork, delay);
1314
}
1315
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1316
1317
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1318
{
1319
struct ata_port *ap = link->ap;
1320
1321
WARN_ON((ap->sff_pio_task_link != NULL) &&
1322
(ap->sff_pio_task_link != link));
1323
ap->sff_pio_task_link = link;
1324
1325
/* may fail if ata_sff_flush_pio_task() in progress */
1326
ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1327
}
1328
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1329
1330
void ata_sff_flush_pio_task(struct ata_port *ap)
1331
{
1332
DPRINTK("ENTER\n");
1333
1334
cancel_delayed_work_sync(&ap->sff_pio_task);
1335
ap->hsm_task_state = HSM_ST_IDLE;
1336
1337
if (ata_msg_ctl(ap))
1338
ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1339
}
1340
1341
static void ata_sff_pio_task(struct work_struct *work)
1342
{
1343
struct ata_port *ap =
1344
container_of(work, struct ata_port, sff_pio_task.work);
1345
struct ata_link *link = ap->sff_pio_task_link;
1346
struct ata_queued_cmd *qc;
1347
u8 status;
1348
int poll_next;
1349
1350
BUG_ON(ap->sff_pio_task_link == NULL);
1351
/* qc can be NULL if timeout occurred */
1352
qc = ata_qc_from_tag(ap, link->active_tag);
1353
if (!qc) {
1354
ap->sff_pio_task_link = NULL;
1355
return;
1356
}
1357
1358
fsm_start:
1359
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1360
1361
/*
1362
* This is purely heuristic. This is a fast path.
1363
* Sometimes when we enter, BSY will be cleared in
1364
* a chk-status or two. If not, the drive is probably seeking
1365
* or something. Snooze for a couple msecs, then
1366
* chk-status again. If still busy, queue delayed work.
1367
*/
1368
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1369
if (status & ATA_BUSY) {
1370
ata_msleep(ap, 2);
1371
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1372
if (status & ATA_BUSY) {
1373
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1374
return;
1375
}
1376
}
1377
1378
/*
1379
* hsm_move() may trigger another command to be processed.
1380
* clean the link beforehand.
1381
*/
1382
ap->sff_pio_task_link = NULL;
1383
/* move the HSM */
1384
poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1385
1386
/* another command or interrupt handler
1387
* may be running at this point.
1388
*/
1389
if (poll_next)
1390
goto fsm_start;
1391
}
1392
1393
/**
1394
* ata_sff_qc_issue - issue taskfile to a SFF controller
1395
* @qc: command to issue to device
1396
*
1397
* This function issues a PIO or NODATA command to a SFF
1398
* controller.
1399
*
1400
* LOCKING:
1401
* spin_lock_irqsave(host lock)
1402
*
1403
* RETURNS:
1404
* Zero on success, AC_ERR_* mask on failure
1405
*/
1406
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1407
{
1408
struct ata_port *ap = qc->ap;
1409
struct ata_link *link = qc->dev->link;
1410
1411
/* Use polling pio if the LLD doesn't handle
1412
* interrupt driven pio and atapi CDB interrupt.
1413
*/
1414
if (ap->flags & ATA_FLAG_PIO_POLLING)
1415
qc->tf.flags |= ATA_TFLAG_POLLING;
1416
1417
/* select the device */
1418
ata_dev_select(ap, qc->dev->devno, 1, 0);
1419
1420
/* start the command */
1421
switch (qc->tf.protocol) {
1422
case ATA_PROT_NODATA:
1423
if (qc->tf.flags & ATA_TFLAG_POLLING)
1424
ata_qc_set_polling(qc);
1425
1426
ata_tf_to_host(ap, &qc->tf);
1427
ap->hsm_task_state = HSM_ST_LAST;
1428
1429
if (qc->tf.flags & ATA_TFLAG_POLLING)
1430
ata_sff_queue_pio_task(link, 0);
1431
1432
break;
1433
1434
case ATA_PROT_PIO:
1435
if (qc->tf.flags & ATA_TFLAG_POLLING)
1436
ata_qc_set_polling(qc);
1437
1438
ata_tf_to_host(ap, &qc->tf);
1439
1440
if (qc->tf.flags & ATA_TFLAG_WRITE) {
1441
/* PIO data out protocol */
1442
ap->hsm_task_state = HSM_ST_FIRST;
1443
ata_sff_queue_pio_task(link, 0);
1444
1445
/* always send first data block using the
1446
* ata_sff_pio_task() codepath.
1447
*/
1448
} else {
1449
/* PIO data in protocol */
1450
ap->hsm_task_state = HSM_ST;
1451
1452
if (qc->tf.flags & ATA_TFLAG_POLLING)
1453
ata_sff_queue_pio_task(link, 0);
1454
1455
/* if polling, ata_sff_pio_task() handles the
1456
* rest. otherwise, interrupt handler takes
1457
* over from here.
1458
*/
1459
}
1460
1461
break;
1462
1463
case ATAPI_PROT_PIO:
1464
case ATAPI_PROT_NODATA:
1465
if (qc->tf.flags & ATA_TFLAG_POLLING)
1466
ata_qc_set_polling(qc);
1467
1468
ata_tf_to_host(ap, &qc->tf);
1469
1470
ap->hsm_task_state = HSM_ST_FIRST;
1471
1472
/* send cdb by polling if no cdb interrupt */
1473
if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1474
(qc->tf.flags & ATA_TFLAG_POLLING))
1475
ata_sff_queue_pio_task(link, 0);
1476
break;
1477
1478
default:
1479
WARN_ON_ONCE(1);
1480
return AC_ERR_SYSTEM;
1481
}
1482
1483
return 0;
1484
}
1485
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1486
1487
/**
1488
* ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1489
* @qc: qc to fill result TF for
1490
*
1491
* @qc is finished and result TF needs to be filled. Fill it
1492
* using ->sff_tf_read.
1493
*
1494
* LOCKING:
1495
* spin_lock_irqsave(host lock)
1496
*
1497
* RETURNS:
1498
* true indicating that result TF is successfully filled.
1499
*/
1500
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1501
{
1502
qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1503
return true;
1504
}
1505
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1506
1507
static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1508
{
1509
ap->stats.idle_irq++;
1510
1511
#ifdef ATA_IRQ_TRAP
1512
if ((ap->stats.idle_irq % 1000) == 0) {
1513
ap->ops->sff_check_status(ap);
1514
if (ap->ops->sff_irq_clear)
1515
ap->ops->sff_irq_clear(ap);
1516
ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1517
return 1;
1518
}
1519
#endif
1520
return 0; /* irq not handled */
1521
}
1522
1523
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1524
struct ata_queued_cmd *qc,
1525
bool hsmv_on_idle)
1526
{
1527
u8 status;
1528
1529
VPRINTK("ata%u: protocol %d task_state %d\n",
1530
ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1531
1532
/* Check whether we are expecting interrupt in this state */
1533
switch (ap->hsm_task_state) {
1534
case HSM_ST_FIRST:
1535
/* Some pre-ATAPI-4 devices assert INTRQ
1536
* at this state when ready to receive CDB.
1537
*/
1538
1539
/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1540
* The flag was turned on only for atapi devices. No
1541
* need to check ata_is_atapi(qc->tf.protocol) again.
1542
*/
1543
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1544
return ata_sff_idle_irq(ap);
1545
break;
1546
case HSM_ST_IDLE:
1547
return ata_sff_idle_irq(ap);
1548
default:
1549
break;
1550
}
1551
1552
/* check main status, clearing INTRQ if needed */
1553
status = ata_sff_irq_status(ap);
1554
if (status & ATA_BUSY) {
1555
if (hsmv_on_idle) {
1556
/* BMDMA engine is already stopped, we're screwed */
1557
qc->err_mask |= AC_ERR_HSM;
1558
ap->hsm_task_state = HSM_ST_ERR;
1559
} else
1560
return ata_sff_idle_irq(ap);
1561
}
1562
1563
/* clear irq events */
1564
if (ap->ops->sff_irq_clear)
1565
ap->ops->sff_irq_clear(ap);
1566
1567
ata_sff_hsm_move(ap, qc, status, 0);
1568
1569
return 1; /* irq handled */
1570
}
1571
1572
/**
1573
* ata_sff_port_intr - Handle SFF port interrupt
1574
* @ap: Port on which interrupt arrived (possibly...)
1575
* @qc: Taskfile currently active in engine
1576
*
1577
* Handle port interrupt for given queued command.
1578
*
1579
* LOCKING:
1580
* spin_lock_irqsave(host lock)
1581
*
1582
* RETURNS:
1583
* One if interrupt was handled, zero if not (shared irq).
1584
*/
1585
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1586
{
1587
return __ata_sff_port_intr(ap, qc, false);
1588
}
1589
EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1590
1591
static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1592
unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1593
{
1594
struct ata_host *host = dev_instance;
1595
bool retried = false;
1596
unsigned int i;
1597
unsigned int handled, idle, polling;
1598
unsigned long flags;
1599
1600
/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1601
spin_lock_irqsave(&host->lock, flags);
1602
1603
retry:
1604
handled = idle = polling = 0;
1605
for (i = 0; i < host->n_ports; i++) {
1606
struct ata_port *ap = host->ports[i];
1607
struct ata_queued_cmd *qc;
1608
1609
qc = ata_qc_from_tag(ap, ap->link.active_tag);
1610
if (qc) {
1611
if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1612
handled |= port_intr(ap, qc);
1613
else
1614
polling |= 1 << i;
1615
} else
1616
idle |= 1 << i;
1617
}
1618
1619
/*
1620
* If no port was expecting IRQ but the controller is actually
1621
* asserting the IRQ line, "nobody cared" will ensue. Check IRQ
1622
* pending status if available and clear spurious IRQ.
1623
*/
1624
if (!handled && !retried) {
1625
bool retry = false;
1626
1627
for (i = 0; i < host->n_ports; i++) {
1628
struct ata_port *ap = host->ports[i];
1629
1630
if (polling & (1 << i))
1631
continue;
1632
1633
if (!ap->ops->sff_irq_check ||
1634
!ap->ops->sff_irq_check(ap))
1635
continue;
1636
1637
if (idle & (1 << i)) {
1638
ap->ops->sff_check_status(ap);
1639
if (ap->ops->sff_irq_clear)
1640
ap->ops->sff_irq_clear(ap);
1641
} else {
1642
/* clear INTRQ and check if BUSY cleared */
1643
if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1644
retry |= true;
1645
/*
1646
* With command in flight, we can't do
1647
* sff_irq_clear() w/o racing with completion.
1648
*/
1649
}
1650
}
1651
1652
if (retry) {
1653
retried = true;
1654
goto retry;
1655
}
1656
}
1657
1658
spin_unlock_irqrestore(&host->lock, flags);
1659
1660
return IRQ_RETVAL(handled);
1661
}
1662
1663
/**
1664
* ata_sff_interrupt - Default SFF ATA host interrupt handler
1665
* @irq: irq line (unused)
1666
* @dev_instance: pointer to our ata_host information structure
1667
*
1668
* Default interrupt handler for PCI IDE devices. Calls
1669
* ata_sff_port_intr() for each port that is not disabled.
1670
*
1671
* LOCKING:
1672
* Obtains host lock during operation.
1673
*
1674
* RETURNS:
1675
* IRQ_NONE or IRQ_HANDLED.
1676
*/
1677
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1678
{
1679
return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1680
}
1681
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
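/*
 * Registration sketch (assumption, mirroring common SFF drivers): this
 * handler is usually passed to ata_host_activate(), e.g.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &pata_foo_sht);
 *
 * where pata_foo_sht is a hypothetical scsi_host_template.
 */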
1682
1683
/**
1684
* ata_sff_lost_interrupt - Check for an apparent lost interrupt
1685
* @ap: port that appears to have timed out
1686
*
1687
* Called from the libata error handlers when the core code suspects
1688
* an interrupt has been lost. If it has complete anything we can and
1689
* then return. Interface must support altstatus for this faster
1690
* recovery to occur.
1691
*
1692
* Locking:
1693
* Caller holds host lock
1694
*/
1695
1696
void ata_sff_lost_interrupt(struct ata_port *ap)
1697
{
1698
u8 status;
1699
struct ata_queued_cmd *qc;
1700
1701
/* Only one outstanding command per SFF channel */
1702
qc = ata_qc_from_tag(ap, ap->link.active_tag);
1703
/* We cannot lose an interrupt on a non-existent or polled command */
1704
if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1705
return;
1706
/* See if the controller thinks it is still busy - if so the command
1707
isn't a lost IRQ but is still in progress */
1708
status = ata_sff_altstatus(ap);
1709
if (status & ATA_BUSY)
1710
return;
1711
1712
/* There was a command running, we are no longer busy and we have
1713
no interrupt. */
1714
ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
1715
status);
1716
/* Run the host interrupt logic as if the interrupt had not been
1717
lost */
1718
ata_sff_port_intr(ap, qc);
1719
}
1720
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1721
1722
/**
1723
* ata_sff_freeze - Freeze SFF controller port
1724
* @ap: port to freeze
1725
*
1726
* Freeze SFF controller port.
1727
*
1728
* LOCKING:
1729
* Inherited from caller.
1730
*/
1731
void ata_sff_freeze(struct ata_port *ap)
1732
{
1733
ap->ctl |= ATA_NIEN;
1734
ap->last_ctl = ap->ctl;
1735
1736
if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1737
ata_sff_set_devctl(ap, ap->ctl);
1738
1739
/* Under certain circumstances, some controllers raise IRQ on
1740
* ATA_NIEN manipulation. Also, many controllers fail to mask
1741
* previously pending IRQ on ATA_NIEN assertion. Clear it.
1742
*/
1743
ap->ops->sff_check_status(ap);
1744
1745
if (ap->ops->sff_irq_clear)
1746
ap->ops->sff_irq_clear(ap);
1747
}
1748
EXPORT_SYMBOL_GPL(ata_sff_freeze);
1749
1750
/**
1751
* ata_sff_thaw - Thaw SFF controller port
1752
* @ap: port to thaw
1753
*
1754
* Thaw SFF controller port.
1755
*
1756
* LOCKING:
1757
* Inherited from caller.
1758
*/
1759
void ata_sff_thaw(struct ata_port *ap)
1760
{
1761
/* clear & re-enable interrupts */
1762
ap->ops->sff_check_status(ap);
1763
if (ap->ops->sff_irq_clear)
1764
ap->ops->sff_irq_clear(ap);
1765
ata_sff_irq_on(ap);
1766
}
1767
EXPORT_SYMBOL_GPL(ata_sff_thaw);
1768
1769
/**
1770
* ata_sff_prereset - prepare SFF link for reset
1771
* @link: SFF link to be reset
1772
* @deadline: deadline jiffies for the operation
1773
*
1774
* SFF link @link is about to be reset. Initialize it. It first
1775
* calls ata_std_prereset() and waits for !BSY if the port is
1776
* being softreset.
1777
*
1778
* LOCKING:
1779
* Kernel thread context (may sleep)
1780
*
1781
* RETURNS:
1782
* 0 on success, -errno otherwise.
1783
*/
1784
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1785
{
1786
struct ata_eh_context *ehc = &link->eh_context;
1787
int rc;
1788
1789
rc = ata_std_prereset(link, deadline);
1790
if (rc)
1791
return rc;
1792
1793
/* if we're about to do hardreset, nothing more to do */
1794
if (ehc->i.action & ATA_EH_HARDRESET)
1795
return 0;
1796
1797
/* wait for !BSY if we don't know that no device is attached */
1798
if (!ata_link_offline(link)) {
1799
rc = ata_sff_wait_ready(link, deadline);
1800
if (rc && rc != -ENODEV) {
1801
ata_link_printk(link, KERN_WARNING, "device not ready "
1802
"(errno=%d), forcing hardreset\n", rc);
1803
ehc->i.action |= ATA_EH_HARDRESET;
1804
}
1805
}
1806
1807
return 0;
1808
}
1809
EXPORT_SYMBOL_GPL(ata_sff_prereset);
1810
1811
/**
1812
* ata_devchk - PATA device presence detection
1813
* @ap: ATA channel to examine
1814
* @device: Device to examine (starting at zero)
1815
*
1816
* This technique was originally described in
1817
* Hale Landis's ATADRVR (www.ata-atapi.com), and
1818
* later found its way into the ATA/ATAPI spec.
1819
*
1820
* Write a pattern to the ATA shadow registers,
1821
* and if a device is present, it will respond by
1822
* correctly storing and echoing back the
1823
* ATA shadow register contents.
1824
*
1825
* LOCKING:
1826
* caller.
1827
*/
1828
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1829
{
1830
struct ata_ioports *ioaddr = &ap->ioaddr;
1831
u8 nsect, lbal;
1832
1833
ap->ops->sff_dev_select(ap, device);
1834
1835
iowrite8(0x55, ioaddr->nsect_addr);
1836
iowrite8(0xaa, ioaddr->lbal_addr);
1837
1838
iowrite8(0xaa, ioaddr->nsect_addr);
1839
iowrite8(0x55, ioaddr->lbal_addr);
1840
1841
iowrite8(0x55, ioaddr->nsect_addr);
1842
iowrite8(0xaa, ioaddr->lbal_addr);
1843
1844
nsect = ioread8(ioaddr->nsect_addr);
1845
lbal = ioread8(ioaddr->lbal_addr);
1846
1847
if ((nsect == 0x55) && (lbal == 0xaa))
1848
return 1; /* we found a device */
1849
1850
return 0; /* nothing found */
1851
}
1852
1853
/**
1854
* ata_sff_dev_classify - Parse returned ATA device signature
1855
* @dev: ATA device to classify (starting at zero)
1856
* @present: device seems present
1857
* @r_err: Value of error register on completion
1858
*
1859
* After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1860
* an ATA/ATAPI-defined set of values is placed in the ATA
1861
* shadow registers, indicating the results of device detection
1862
* and diagnostics.
1863
*
1864
* Select the ATA device, and read the values from the ATA shadow
1865
* registers. Then parse according to the Error register value,
1866
* and the spec-defined values examined by ata_dev_classify().
1867
*
1868
* LOCKING:
1869
* caller.
1870
*
1871
* RETURNS:
1872
* Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1873
*/
1874
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1875
u8 *r_err)
1876
{
1877
struct ata_port *ap = dev->link->ap;
1878
struct ata_taskfile tf;
1879
unsigned int class;
1880
u8 err;
1881
1882
ap->ops->sff_dev_select(ap, dev->devno);
1883
1884
memset(&tf, 0, sizeof(tf));
1885
1886
ap->ops->sff_tf_read(ap, &tf);
1887
err = tf.feature;
1888
if (r_err)
1889
*r_err = err;
1890
1891
/* see if device passed diags: continue and warn later */
1892
if (err == 0)
1893
/* diagnostic fail : do nothing _YET_ */
1894
dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1895
else if (err == 1)
1896
/* do nothing */ ;
1897
else if ((dev->devno == 0) && (err == 0x81))
1898
/* do nothing */ ;
1899
else
1900
return ATA_DEV_NONE;
1901
1902
/* determine if device is ATA or ATAPI */
1903
class = ata_dev_classify(&tf);
1904
1905
if (class == ATA_DEV_UNKNOWN) {
1906
/* If the device failed diagnostic, it's likely to
1907
* have reported incorrect device signature too.
1908
* Assume ATA device if the device seems present but
1909
* device signature is invalid with diagnostic
1910
* failure.
1911
*/
1912
if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1913
class = ATA_DEV_ATA;
1914
else
1915
class = ATA_DEV_NONE;
1916
} else if ((class == ATA_DEV_ATA) &&
1917
(ap->ops->sff_check_status(ap) == 0))
1918
class = ATA_DEV_NONE;
1919
1920
return class;
1921
}
1922
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1923
1924
/**
1925
* ata_sff_wait_after_reset - wait for devices to become ready after reset
1926
* @link: SFF link which is just reset
1927
* @devmask: mask of present devices
1928
* @deadline: deadline jiffies for the operation
1929
*
1930
* Wait for devices attached to SFF @link to become ready after
1931
* reset. It contains a preceding 150ms wait to avoid accessing the TF
1932
* status register too early.
1933
*
1934
* LOCKING:
1935
* Kernel thread context (may sleep).
1936
*
1937
* RETURNS:
1938
* 0 on success, -ENODEV if some or all of devices in @devmask
1939
* don't seem to exist. -errno on other errors.
1940
*/
1941
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1942
unsigned long deadline)
1943
{
1944
struct ata_port *ap = link->ap;
1945
struct ata_ioports *ioaddr = &ap->ioaddr;
1946
unsigned int dev0 = devmask & (1 << 0);
1947
unsigned int dev1 = devmask & (1 << 1);
1948
int rc, ret = 0;
1949
1950
ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1951
1952
/* always check readiness of the master device */
1953
rc = ata_sff_wait_ready(link, deadline);
1954
/* -ENODEV means the odd clown forgot the D7 pulldown resistor
1955
* and TF status is 0xff, bail out on it too.
1956
*/
1957
if (rc)
1958
return rc;
1959
1960
/* if device 1 was found in ata_devchk, wait for register
1961
* access briefly, then wait for BSY to clear.
1962
*/
1963
if (dev1) {
1964
int i;
1965
1966
ap->ops->sff_dev_select(ap, 1);
1967
1968
/* Wait for register access. Some ATAPI devices fail
1969
* to set nsect/lbal after reset, so don't waste too
1970
* much time on it. We're gonna wait for !BSY anyway.
1971
*/
1972
for (i = 0; i < 2; i++) {
1973
u8 nsect, lbal;
1974
1975
nsect = ioread8(ioaddr->nsect_addr);
1976
lbal = ioread8(ioaddr->lbal_addr);
1977
if ((nsect == 1) && (lbal == 1))
1978
break;
1979
ata_msleep(ap, 50); /* give drive a breather */
1980
}
1981
1982
rc = ata_sff_wait_ready(link, deadline);
1983
if (rc) {
1984
if (rc != -ENODEV)
1985
return rc;
1986
ret = rc;
1987
}
1988
}
1989
1990
/* is all this really necessary? */
1991
ap->ops->sff_dev_select(ap, 0);
1992
if (dev1)
1993
ap->ops->sff_dev_select(ap, 1);
1994
if (dev0)
1995
ap->ops->sff_dev_select(ap, 0);
1996
1997
return ret;
1998
}
1999
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2000
2001
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
2002
unsigned long deadline)
2003
{
2004
struct ata_ioports *ioaddr = &ap->ioaddr;
2005
2006
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2007
2008
/* software reset. causes dev0 to be selected */
2009
iowrite8(ap->ctl, ioaddr->ctl_addr);
2010
udelay(20); /* FIXME: flush */
2011
iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2012
udelay(20); /* FIXME: flush */
2013
iowrite8(ap->ctl, ioaddr->ctl_addr);
2014
ap->last_ctl = ap->ctl;
2015
2016
/* wait for the port to become ready */
2017
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2018
}
2019
2020
/**
2021
* ata_sff_softreset - reset host port via ATA SRST
2022
* @link: ATA link to reset
2023
* @classes: resulting classes of attached devices
2024
* @deadline: deadline jiffies for the operation
2025
*
2026
* Reset host port using ATA SRST.
2027
*
2028
* LOCKING:
2029
* Kernel thread context (may sleep)
2030
*
2031
* RETURNS:
2032
* 0 on success, -errno otherwise.
2033
*/
2034
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2035
unsigned long deadline)
2036
{
2037
struct ata_port *ap = link->ap;
2038
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2039
unsigned int devmask = 0;
2040
int rc;
2041
u8 err;
2042
2043
DPRINTK("ENTER\n");
2044
2045
/* determine if device 0/1 are present */
2046
if (ata_devchk(ap, 0))
2047
devmask |= (1 << 0);
2048
if (slave_possible && ata_devchk(ap, 1))
2049
devmask |= (1 << 1);
2050
2051
/* select device 0 again */
2052
ap->ops->sff_dev_select(ap, 0);
2053
2054
/* issue bus reset */
2055
DPRINTK("about to softreset, devmask=%x\n", devmask);
2056
rc = ata_bus_softreset(ap, devmask, deadline);
2057
/* if link is occupied, -ENODEV too is an error */
2058
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2059
ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
2060
return rc;
2061
}
2062
2063
/* determine by signature whether we have ATA or ATAPI devices */
2064
classes[0] = ata_sff_dev_classify(&link->device[0],
2065
devmask & (1 << 0), &err);
2066
if (slave_possible && err != 0x81)
2067
classes[1] = ata_sff_dev_classify(&link->device[1],
2068
devmask & (1 << 1), &err);
2069
2070
DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2071
return 0;
2072
}
2073
EXPORT_SYMBOL_GPL(ata_sff_softreset);
2074
2075
/**
2076
* sata_sff_hardreset - reset host port via SATA phy reset
2077
* @link: link to reset
2078
* @class: resulting class of attached device
2079
* @deadline: deadline jiffies for the operation
2080
*
2081
* SATA phy-reset host port using DET bits of SControl register,
2082
* wait for !BSY and classify the attached device.
2083
*
2084
* LOCKING:
2085
* Kernel thread context (may sleep)
2086
*
2087
* RETURNS:
2088
* 0 on success, -errno otherwise.
2089
*/
2090
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2091
unsigned long deadline)
2092
{
2093
struct ata_eh_context *ehc = &link->eh_context;
2094
const unsigned long *timing = sata_ehc_deb_timing(ehc);
2095
bool online;
2096
int rc;
2097
2098
rc = sata_link_hardreset(link, timing, deadline, &online,
2099
ata_sff_check_ready);
2100
if (online)
2101
*class = ata_sff_dev_classify(link->device, 1, NULL);
2102
2103
DPRINTK("EXIT, class=%u\n", *class);
2104
return rc;
2105
}
2106
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2107
2108
/**
2109
* ata_sff_postreset - SFF postreset callback
2110
* @link: the target SFF ata_link
2111
* @classes: classes of attached devices
2112
*
2113
* This function is invoked after a successful reset. It first
2114
* calls ata_std_postreset() and then performs SFF-specific postreset
2115
* processing.
2116
*
2117
* LOCKING:
2118
* Kernel thread context (may sleep)
2119
*/
2120
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2121
{
2122
struct ata_port *ap = link->ap;
2123
2124
ata_std_postreset(link, classes);
2125
2126
/* is double-select really necessary? */
2127
if (classes[0] != ATA_DEV_NONE)
2128
ap->ops->sff_dev_select(ap, 1);
2129
if (classes[1] != ATA_DEV_NONE)
2130
ap->ops->sff_dev_select(ap, 0);
2131
2132
/* bail out if no device is present */
2133
if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2134
DPRINTK("EXIT, no device\n");
2135
return;
2136
}
2137
2138
/* set up device control */
2139
if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2140
ata_sff_set_devctl(ap, ap->ctl);
2141
ap->last_ctl = ap->ctl;
2142
}
2143
}
2144
EXPORT_SYMBOL_GPL(ata_sff_postreset);
2145
2146
/**
2147
* ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2148
* @qc: command
2149
*
2150
* Drain the FIFO and device of any stuck data following a command
2151
* failing to complete. In some cases this is necessary before a
2152
* reset will recover the device.
2153
*
2154
*/
2155
2156
void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2157
{
2158
int count;
2159
struct ata_port *ap;
2160
2161
/* We only need to flush incoming data when a command was running */
2162
if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2163
return;
2164
2165
ap = qc->ap;
2166
/* Drain up to 64K of data before we give up this recovery method */
2167
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2168
&& count < 65536; count += 2)
2169
ioread16(ap->ioaddr.data_addr);
2170
2171
/* Can become DEBUG later */
2172
if (count)
2173
ata_port_printk(ap, KERN_DEBUG,
2174
"drained %d bytes to clear DRQ.\n", count);
2175
2176
}
2177
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2178
2179
/**
2180
* ata_sff_error_handler - Stock error handler for SFF controller
2181
* @ap: port to handle error for
2182
*
2183
* Stock error handler for SFF controller. It can handle both
2184
* PATA and SATA controllers. Many controllers should be able to
2185
* use this EH as-is or with some added handling before and
2186
* after.
2187
*
2188
* LOCKING:
2189
* Kernel thread context (may sleep)
2190
*/
2191
void ata_sff_error_handler(struct ata_port *ap)
2192
{
2193
ata_reset_fn_t softreset = ap->ops->softreset;
2194
ata_reset_fn_t hardreset = ap->ops->hardreset;
2195
struct ata_queued_cmd *qc;
2196
unsigned long flags;
2197
2198
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2199
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2200
qc = NULL;
2201
2202
spin_lock_irqsave(ap->lock, flags);
2203
2204
/*
2205
* We *MUST* do FIFO draining before we issue a reset as
2206
* several devices helpfully clear their internal state and
2207
* will lock solid if we touch the data port post reset. Pass
2208
* qc in case anyone wants to do different PIO/DMA recovery or
2209
* has per command fixups
2210
*/
2211
if (ap->ops->sff_drain_fifo)
2212
ap->ops->sff_drain_fifo(qc);
2213
2214
spin_unlock_irqrestore(ap->lock, flags);
2215
2216
/* ignore ata_sff_softreset if ctl isn't accessible */
2217
if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2218
softreset = NULL;
2219
2220
/* ignore built-in hardresets if SCR access is not available */
2221
if ((hardreset == sata_std_hardreset ||
2222
hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2223
hardreset = NULL;
2224
2225
ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2226
ap->ops->postreset);
2227
}
2228
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
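
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a driver that needs controller-specific handling can wrap this stock EH
 * in its own ->error_handler. my_error_handler() and my_chip_quiesce()
 * are hypothetical names.
 */
#if 0
static void my_error_handler(struct ata_port *ap)
{
        /* controller-specific quiesce before the generic SFF recovery */
        my_chip_quiesce(ap);

        ata_sff_error_handler(ap);
}
#endif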
2229
2230
/**
2231
* ata_sff_std_ports - initialize ioaddr with standard port offsets.
2232
* @ioaddr: IO address structure to be initialized
2233
*
2234
* Utility function which initializes data_addr, error_addr,
2235
* feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2236
* device_addr, status_addr, and command_addr to standard offsets
2237
* relative to cmd_addr.
2238
*
2239
* Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2240
*/
2241
void ata_sff_std_ports(struct ata_ioports *ioaddr)
2242
{
2243
ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2244
ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2245
ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2246
ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2247
ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2248
ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2249
ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2250
ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2251
ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2252
ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2253
}
2254
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
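
/*
 * Illustrative sketch (editor's addition): a legacy or platform driver
 * typically points cmd_addr and ctl_addr at its mapped registers and lets
 * ata_sff_std_ports() derive the remaining taskfile addresses.
 * my_setup_ioaddr() and its arguments are hypothetical.
 */
#if 0
static void my_setup_ioaddr(struct ata_port *ap, void __iomem *cmd_base,
                            void __iomem *ctl_base)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ioaddr->cmd_addr = cmd_base;
        ioaddr->altstatus_addr = ctl_base;
        ioaddr->ctl_addr = ctl_base;
        ata_sff_std_ports(ioaddr);      /* fills data_addr..command_addr */
}
#endif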
2255
2256
#ifdef CONFIG_PCI
2257
2258
static int ata_resources_present(struct pci_dev *pdev, int port)
2259
{
2260
int i;
2261
2262
/* Check the PCI resources for this channel are enabled */
2263
port = port * 2;
2264
for (i = 0; i < 2; i++) {
2265
if (pci_resource_start(pdev, port + i) == 0 ||
2266
pci_resource_len(pdev, port + i) == 0)
2267
return 0;
2268
}
2269
return 1;
2270
}
2271
2272
/**
2273
* ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2274
* @host: target ATA host
2275
*
2276
* Acquire native PCI ATA resources for @host and initialize the
2277
* first two ports of @host accordingly. Ports marked dummy are
2278
* skipped and allocation failure makes the port dummy.
2279
*
2280
* Note that native PCI resources are valid even for legacy hosts
2281
* as we fix up pdev resources array early in boot, so this
2282
* function can be used for both native and legacy SFF hosts.
2283
*
2284
* LOCKING:
2285
* Inherited from calling layer (may sleep).
2286
*
2287
* RETURNS:
2288
* 0 if at least one port is initialized, -ENODEV if no port is
2289
* available.
2290
*/
2291
int ata_pci_sff_init_host(struct ata_host *host)
2292
{
2293
struct device *gdev = host->dev;
2294
struct pci_dev *pdev = to_pci_dev(gdev);
2295
unsigned int mask = 0;
2296
int i, rc;
2297
2298
/* request, iomap BARs and init port addresses accordingly */
2299
for (i = 0; i < 2; i++) {
2300
struct ata_port *ap = host->ports[i];
2301
int base = i * 2;
2302
void __iomem * const *iomap;
2303
2304
if (ata_port_is_dummy(ap))
2305
continue;
2306
2307
/* Discard disabled ports. Some controllers show
2308
* their unused channels this way. Disabled ports are
2309
* made dummy.
2310
*/
2311
if (!ata_resources_present(pdev, i)) {
2312
ap->ops = &ata_dummy_port_ops;
2313
continue;
2314
}
2315
2316
rc = pcim_iomap_regions(pdev, 0x3 << base,
2317
dev_driver_string(gdev));
2318
if (rc) {
2319
dev_printk(KERN_WARNING, gdev,
2320
"failed to request/iomap BARs for port %d "
2321
"(errno=%d)\n", i, rc);
2322
if (rc == -EBUSY)
2323
pcim_pin_device(pdev);
2324
ap->ops = &ata_dummy_port_ops;
2325
continue;
2326
}
2327
host->iomap = iomap = pcim_iomap_table(pdev);
2328
2329
ap->ioaddr.cmd_addr = iomap[base];
2330
ap->ioaddr.altstatus_addr =
2331
ap->ioaddr.ctl_addr = (void __iomem *)
2332
((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2333
ata_sff_std_ports(&ap->ioaddr);
2334
2335
ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2336
(unsigned long long)pci_resource_start(pdev, base),
2337
(unsigned long long)pci_resource_start(pdev, base + 1));
2338
2339
mask |= 1 << i;
2340
}
2341
2342
if (!mask) {
2343
dev_printk(KERN_ERR, gdev, "no available native port\n");
2344
return -ENODEV;
2345
}
2346
2347
return 0;
2348
}
2349
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2350
2351
/**
2352
* ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2353
* @pdev: target PCI device
2354
* @ppi: array of port_info, must be enough for two ports
2355
* @r_host: out argument for the initialized ATA host
2356
*
2357
* Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2358
* all PCI resources and initialize it accordingly in one go.
2359
*
2360
* LOCKING:
2361
* Inherited from calling layer (may sleep).
2362
*
2363
* RETURNS:
2364
* 0 on success, -errno otherwise.
2365
*/
2366
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2367
const struct ata_port_info * const *ppi,
2368
struct ata_host **r_host)
2369
{
2370
struct ata_host *host;
2371
int rc;
2372
2373
if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2374
return -ENOMEM;
2375
2376
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2377
if (!host) {
2378
dev_printk(KERN_ERR, &pdev->dev,
2379
"failed to allocate ATA host\n");
2380
rc = -ENOMEM;
2381
goto err_out;
2382
}
2383
2384
rc = ata_pci_sff_init_host(host);
2385
if (rc)
2386
goto err_out;
2387
2388
devres_remove_group(&pdev->dev, NULL);
2389
*r_host = host;
2390
return 0;
2391
2392
err_out:
2393
devres_release_group(&pdev->dev, NULL);
2394
return rc;
2395
}
2396
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2397
2398
/**
2399
* ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2400
* @host: target SFF ATA host
2401
* @irq_handler: irq_handler used when requesting IRQ(s)
2402
* @sht: scsi_host_template to use when registering the host
2403
*
2404
* This is the counterpart of ata_host_activate() for SFF ATA
2405
* hosts. This separate helper is necessary because SFF hosts
2406
* use two separate interrupts in legacy mode.
2407
*
2408
* LOCKING:
2409
* Inherited from calling layer (may sleep).
2410
*
2411
* RETURNS:
2412
* 0 on success, -errno otherwise.
2413
*/
2414
int ata_pci_sff_activate_host(struct ata_host *host,
2415
irq_handler_t irq_handler,
2416
struct scsi_host_template *sht)
2417
{
2418
struct device *dev = host->dev;
2419
struct pci_dev *pdev = to_pci_dev(dev);
2420
const char *drv_name = dev_driver_string(host->dev);
2421
int legacy_mode = 0, rc;
2422
2423
rc = ata_host_start(host);
2424
if (rc)
2425
return rc;
2426
2427
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2428
u8 tmp8, mask;
2429
2430
/* TODO: What if one channel is in native mode ... */
2431
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2432
mask = (1 << 2) | (1 << 0);
2433
if ((tmp8 & mask) != mask)
2434
legacy_mode = 1;
2435
#if defined(CONFIG_NO_ATA_LEGACY)
2436
/* Some platforms with PCI limits cannot address compat
2437
port space. In that case we punt if their firmware has
2438
left a device in compatibility mode */
2439
if (legacy_mode) {
2440
printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2441
return -EOPNOTSUPP;
2442
}
2443
#endif
2444
}
2445
2446
if (!devres_open_group(dev, NULL, GFP_KERNEL))
2447
return -ENOMEM;
2448
2449
if (!legacy_mode && pdev->irq) {
2450
int i;
2451
2452
rc = devm_request_irq(dev, pdev->irq, irq_handler,
2453
IRQF_SHARED, drv_name, host);
2454
if (rc)
2455
goto out;
2456
2457
for (i = 0; i < 2; i++) {
2458
if (ata_port_is_dummy(host->ports[i]))
2459
continue;
2460
ata_port_desc(host->ports[i], "irq %d", pdev->irq);
2461
}
2462
} else if (legacy_mode) {
2463
if (!ata_port_is_dummy(host->ports[0])) {
2464
rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2465
irq_handler, IRQF_SHARED,
2466
drv_name, host);
2467
if (rc)
2468
goto out;
2469
2470
ata_port_desc(host->ports[0], "irq %d",
2471
ATA_PRIMARY_IRQ(pdev));
2472
}
2473
2474
if (!ata_port_is_dummy(host->ports[1])) {
2475
rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2476
irq_handler, IRQF_SHARED,
2477
drv_name, host);
2478
if (rc)
2479
goto out;
2480
2481
ata_port_desc(host->ports[1], "irq %d",
2482
ATA_SECONDARY_IRQ(pdev));
2483
}
2484
}
2485
2486
rc = ata_host_register(host, sht);
2487
out:
2488
if (rc == 0)
2489
devres_remove_group(dev, NULL);
2490
else
2491
devres_release_group(dev, NULL);
2492
2493
return rc;
2494
}
2495
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2496
2497
static const struct ata_port_info *ata_sff_find_valid_pi(
2498
const struct ata_port_info * const *ppi)
2499
{
2500
int i;
2501
2502
/* look up the first valid port_info */
2503
for (i = 0; i < 2 && ppi[i]; i++)
2504
if (ppi[i]->port_ops != &ata_dummy_port_ops)
2505
return ppi[i];
2506
2507
return NULL;
2508
}
2509
2510
/**
2511
* ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2512
* @pdev: Controller to be initialized
2513
* @ppi: array of port_info, must be enough for two ports
2514
* @sht: scsi_host_template to use when registering the host
2515
* @host_priv: host private_data
2516
* @hflag: host flags
2517
*
2518
* This is a helper function which can be called from a driver's
2519
* xxx_init_one() probe function if the hardware uses traditional
2520
* IDE taskfile registers and is PIO only.
2521
*
2522
* ASSUMPTION:
2523
* Nobody makes a single channel controller that appears solely as
2524
* the secondary legacy port on PCI.
2525
*
2526
* LOCKING:
2527
* Inherited from PCI layer (may sleep).
2528
*
2529
* RETURNS:
2530
* Zero on success, negative errno-based value on error.
2531
*/
2532
int ata_pci_sff_init_one(struct pci_dev *pdev,
2533
const struct ata_port_info * const *ppi,
2534
struct scsi_host_template *sht, void *host_priv, int hflag)
2535
{
2536
struct device *dev = &pdev->dev;
2537
const struct ata_port_info *pi;
2538
struct ata_host *host = NULL;
2539
int rc;
2540
2541
DPRINTK("ENTER\n");
2542
2543
pi = ata_sff_find_valid_pi(ppi);
2544
if (!pi) {
2545
dev_printk(KERN_ERR, &pdev->dev,
2546
"no valid port_info specified\n");
2547
return -EINVAL;
2548
}
2549
2550
if (!devres_open_group(dev, NULL, GFP_KERNEL))
2551
return -ENOMEM;
2552
2553
rc = pcim_enable_device(pdev);
2554
if (rc)
2555
goto out;
2556
2557
/* prepare and activate SFF host */
2558
rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2559
if (rc)
2560
goto out;
2561
host->private_data = host_priv;
2562
host->flags |= hflag;
2563
2564
rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2565
out:
2566
if (rc == 0)
2567
devres_remove_group(&pdev->dev, NULL);
2568
else
2569
devres_release_group(&pdev->dev, NULL);
2570
2571
return rc;
2572
}
2573
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
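
/*
 * Illustrative sketch (editor's addition): minimal probe wiring for a
 * PIO-only controller, roughly how a pata_* driver would use
 * ata_pci_sff_init_one(). my_init_one(), my_port_info and my_sht (the
 * driver's scsi_host_template, defined elsewhere) are hypothetical.
 */
#if 0
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        static const struct ata_port_info my_port_info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,
                .port_ops       = &ata_sff_port_ops,
        };
        const struct ata_port_info *ppi[] = { &my_port_info, NULL };

        return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
}
#endif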
2574
2575
#endif /* CONFIG_PCI */
2576
2577
/*
2578
* BMDMA support
2579
*/
2580
2581
#ifdef CONFIG_ATA_BMDMA
2582
2583
const struct ata_port_operations ata_bmdma_port_ops = {
2584
.inherits = &ata_sff_port_ops,
2585
2586
.error_handler = ata_bmdma_error_handler,
2587
.post_internal_cmd = ata_bmdma_post_internal_cmd,
2588
2589
.qc_prep = ata_bmdma_qc_prep,
2590
.qc_issue = ata_bmdma_qc_issue,
2591
2592
.sff_irq_clear = ata_bmdma_irq_clear,
2593
.bmdma_setup = ata_bmdma_setup,
2594
.bmdma_start = ata_bmdma_start,
2595
.bmdma_stop = ata_bmdma_stop,
2596
.bmdma_status = ata_bmdma_status,
2597
2598
.port_start = ata_bmdma_port_start,
2599
};
2600
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2601
2602
const struct ata_port_operations ata_bmdma32_port_ops = {
2603
.inherits = &ata_bmdma_port_ops,
2604
2605
.sff_data_xfer = ata_sff_data_xfer32,
2606
.port_start = ata_bmdma_port_start32,
2607
};
2608
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
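
/*
 * Illustrative sketch (editor's addition): drivers normally derive their
 * ata_port_operations from ata_bmdma_port_ops and override only the hooks
 * their hardware needs, e.g. the timing callbacks. my_set_piomode() and
 * my_set_dmamode() are hypothetical.
 */
#if 0
static struct ata_port_operations my_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
        .set_piomode    = my_set_piomode,
        .set_dmamode    = my_set_dmamode,
};
#endif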
2609
2610
/**
2611
* ata_bmdma_fill_sg - Fill PCI IDE PRD table
2612
* @qc: Metadata associated with taskfile to be transferred
2613
*
2614
* Fill PCI IDE PRD (scatter-gather) table with segments
2615
* associated with the current disk command.
2616
*
2617
* LOCKING:
2618
* spin_lock_irqsave(host lock)
2619
*
2620
*/
2621
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2622
{
2623
struct ata_port *ap = qc->ap;
2624
struct ata_bmdma_prd *prd = ap->bmdma_prd;
2625
struct scatterlist *sg;
2626
unsigned int si, pi;
2627
2628
pi = 0;
2629
for_each_sg(qc->sg, sg, qc->n_elem, si) {
2630
u32 addr, offset;
2631
u32 sg_len, len;
2632
2633
/* determine if physical DMA addr spans 64K boundary.
2634
* Note h/w doesn't support 64-bit, so we unconditionally
2635
* truncate dma_addr_t to u32.
2636
*/
2637
addr = (u32) sg_dma_address(sg);
2638
sg_len = sg_dma_len(sg);
2639
2640
while (sg_len) {
2641
offset = addr & 0xffff;
2642
len = sg_len;
2643
if ((offset + sg_len) > 0x10000)
2644
len = 0x10000 - offset;
2645
2646
prd[pi].addr = cpu_to_le32(addr);
2647
prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2648
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2649
2650
pi++;
2651
sg_len -= len;
2652
addr += len;
2653
}
2654
}
2655
2656
prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2657
}
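
/*
 * Worked example (editor's addition): a 10 KiB segment at bus address
 * 0x1234F000 crosses a 64K boundary after 4 KiB, so the loop above emits
 * two PRD entries, (0x1234F000, 0x1000) and (0x12350000, 0x1800), with
 * ATA_PRD_EOT set on the last entry of the table.
 */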
2658
2659
/**
2660
* ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2661
* @qc: Metadata associated with taskfile to be transferred
2662
*
2663
* Fill PCI IDE PRD (scatter-gather) table with segments
2664
* associated with the current disk command. Perform the fill
2665
* so that we avoid writing any 64K-length records for
2666
* controllers that don't follow the spec.
2667
*
2668
* LOCKING:
2669
* spin_lock_irqsave(host lock)
2670
*
2671
*/
2672
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2673
{
2674
struct ata_port *ap = qc->ap;
2675
struct ata_bmdma_prd *prd = ap->bmdma_prd;
2676
struct scatterlist *sg;
2677
unsigned int si, pi;
2678
2679
pi = 0;
2680
for_each_sg(qc->sg, sg, qc->n_elem, si) {
2681
u32 addr, offset;
2682
u32 sg_len, len, blen;
2683
2684
/* determine if physical DMA addr spans 64K boundary.
2685
* Note h/w doesn't support 64-bit, so we unconditionally
2686
* truncate dma_addr_t to u32.
2687
*/
2688
addr = (u32) sg_dma_address(sg);
2689
sg_len = sg_dma_len(sg);
2690
2691
while (sg_len) {
2692
offset = addr & 0xffff;
2693
len = sg_len;
2694
if ((offset + sg_len) > 0x10000)
2695
len = 0x10000 - offset;
2696
2697
blen = len & 0xffff;
2698
prd[pi].addr = cpu_to_le32(addr);
2699
if (blen == 0) {
2700
/* Some PATA chipsets like the CS5530 can't
2701
cope with 0x0000 meaning 64K as the spec
2702
says */
2703
prd[pi].flags_len = cpu_to_le32(0x8000);
2704
blen = 0x8000;
2705
prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2706
}
2707
prd[pi].flags_len = cpu_to_le32(blen);
2708
VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2709
2710
pi++;
2711
sg_len -= len;
2712
addr += len;
2713
}
2714
}
2715
2716
prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2717
}
2718
2719
/**
2720
* ata_bmdma_qc_prep - Prepare taskfile for submission
2721
* @qc: Metadata associated with taskfile to be prepared
2722
*
2723
* Prepare ATA taskfile for submission.
2724
*
2725
* LOCKING:
2726
* spin_lock_irqsave(host lock)
2727
*/
2728
void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2729
{
2730
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2731
return;
2732
2733
ata_bmdma_fill_sg(qc);
2734
}
2735
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2736
2737
/**
2738
* ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2739
* @qc: Metadata associated with taskfile to be prepared
2740
*
2741
* Prepare ATA taskfile for submission.
2742
*
2743
* LOCKING:
2744
* spin_lock_irqsave(host lock)
2745
*/
2746
void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2747
{
2748
if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2749
return;
2750
2751
ata_bmdma_fill_sg_dumb(qc);
2752
}
2753
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2754
2755
/**
2756
* ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2757
* @qc: command to issue to device
2758
*
2759
* This function issues a PIO, NODATA or DMA command to a
2760
* SFF/BMDMA controller. PIO and NODATA are handled by
2761
* ata_sff_qc_issue().
2762
*
2763
* LOCKING:
2764
* spin_lock_irqsave(host lock)
2765
*
2766
* RETURNS:
2767
* Zero on success, AC_ERR_* mask on failure
2768
*/
2769
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2770
{
2771
struct ata_port *ap = qc->ap;
2772
struct ata_link *link = qc->dev->link;
2773
2774
/* defer PIO handling to sff_qc_issue */
2775
if (!ata_is_dma(qc->tf.protocol))
2776
return ata_sff_qc_issue(qc);
2777
2778
/* select the device */
2779
ata_dev_select(ap, qc->dev->devno, 1, 0);
2780
2781
/* start the command */
2782
switch (qc->tf.protocol) {
2783
case ATA_PROT_DMA:
2784
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2785
2786
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2787
ap->ops->bmdma_setup(qc); /* set up bmdma */
2788
ap->ops->bmdma_start(qc); /* initiate bmdma */
2789
ap->hsm_task_state = HSM_ST_LAST;
2790
break;
2791
2792
case ATAPI_PROT_DMA:
2793
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2794
2795
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2796
ap->ops->bmdma_setup(qc); /* set up bmdma */
2797
ap->hsm_task_state = HSM_ST_FIRST;
2798
2799
/* send cdb by polling if no cdb interrupt */
2800
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2801
ata_sff_queue_pio_task(link, 0);
2802
break;
2803
2804
default:
2805
WARN_ON(1);
2806
return AC_ERR_SYSTEM;
2807
}
2808
2809
return 0;
2810
}
2811
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2812
2813
/**
2814
* ata_bmdma_port_intr - Handle BMDMA port interrupt
2815
* @ap: Port on which interrupt arrived (possibly...)
2816
* @qc: Taskfile currently active in engine
2817
*
2818
* Handle port interrupt for given queued command.
2819
*
2820
* LOCKING:
2821
* spin_lock_irqsave(host lock)
2822
*
2823
* RETURNS:
2824
* One if interrupt was handled, zero if not (shared irq).
2825
*/
2826
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2827
{
2828
struct ata_eh_info *ehi = &ap->link.eh_info;
2829
u8 host_stat = 0;
2830
bool bmdma_stopped = false;
2831
unsigned int handled;
2832
2833
if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2834
/* check status of DMA engine */
2835
host_stat = ap->ops->bmdma_status(ap);
2836
VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2837
2838
/* if it's not our irq... */
2839
if (!(host_stat & ATA_DMA_INTR))
2840
return ata_sff_idle_irq(ap);
2841
2842
/* before we do anything else, clear DMA-Start bit */
2843
ap->ops->bmdma_stop(qc);
2844
bmdma_stopped = true;
2845
2846
if (unlikely(host_stat & ATA_DMA_ERR)) {
2847
/* error when transferring data to/from memory */
2848
qc->err_mask |= AC_ERR_HOST_BUS;
2849
ap->hsm_task_state = HSM_ST_ERR;
2850
}
2851
}
2852
2853
handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2854
2855
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2856
ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2857
2858
return handled;
2859
}
2860
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2861
2862
/**
2863
* ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2864
* @irq: irq line (unused)
2865
* @dev_instance: pointer to our ata_host information structure
2866
*
2867
* Default interrupt handler for PCI IDE devices. Calls
2868
* ata_bmdma_port_intr() for each port that is not disabled.
2869
*
2870
* LOCKING:
2871
* Obtains host lock during operation.
2872
*
2873
* RETURNS:
2874
* IRQ_NONE or IRQ_HANDLED.
2875
*/
2876
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2877
{
2878
return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2879
}
2880
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2881
2882
/**
2883
* ata_bmdma_error_handler - Stock error handler for BMDMA controller
2884
* @ap: port to handle error for
2885
*
2886
* Stock error handler for BMDMA controller. It can handle both
2887
* PATA and SATA controllers. Most BMDMA controllers should be
2888
* able to use this EH as-is or with some added handling before
2889
* and after.
2890
*
2891
* LOCKING:
2892
* Kernel thread context (may sleep)
2893
*/
2894
void ata_bmdma_error_handler(struct ata_port *ap)
2895
{
2896
struct ata_queued_cmd *qc;
2897
unsigned long flags;
2898
bool thaw = false;
2899
2900
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2901
if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2902
qc = NULL;
2903
2904
/* reset PIO HSM and stop DMA engine */
2905
spin_lock_irqsave(ap->lock, flags);
2906
2907
if (qc && ata_is_dma(qc->tf.protocol)) {
2908
u8 host_stat;
2909
2910
host_stat = ap->ops->bmdma_status(ap);
2911
2912
/* BMDMA controllers indicate host bus error by
2913
* setting DMA_ERR bit and timing out. As it wasn't
2914
* really a timeout event, adjust error mask and
2915
* cancel frozen state.
2916
*/
2917
if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2918
qc->err_mask = AC_ERR_HOST_BUS;
2919
thaw = true;
2920
}
2921
2922
ap->ops->bmdma_stop(qc);
2923
2924
/* if we're gonna thaw, make sure IRQ is clear */
2925
if (thaw) {
2926
ap->ops->sff_check_status(ap);
2927
if (ap->ops->sff_irq_clear)
2928
ap->ops->sff_irq_clear(ap);
2929
}
2930
}
2931
2932
spin_unlock_irqrestore(ap->lock, flags);
2933
2934
if (thaw)
2935
ata_eh_thaw_port(ap);
2936
2937
ata_sff_error_handler(ap);
2938
}
2939
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2940
2941
/**
2942
* ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2943
* @qc: internal command to clean up
2944
*
2945
* LOCKING:
2946
* Kernel thread context (may sleep)
2947
*/
2948
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2949
{
2950
struct ata_port *ap = qc->ap;
2951
unsigned long flags;
2952
2953
if (ata_is_dma(qc->tf.protocol)) {
2954
spin_lock_irqsave(ap->lock, flags);
2955
ap->ops->bmdma_stop(qc);
2956
spin_unlock_irqrestore(ap->lock, flags);
2957
}
2958
}
2959
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2960
2961
/**
2962
* ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2963
* @ap: Port associated with this ATA transaction.
2964
*
2965
* Clear interrupt and error flags in DMA status register.
2966
*
2967
* May be used as the irq_clear() entry in ata_port_operations.
2968
*
2969
* LOCKING:
2970
* spin_lock_irqsave(host lock)
2971
*/
2972
void ata_bmdma_irq_clear(struct ata_port *ap)
2973
{
2974
void __iomem *mmio = ap->ioaddr.bmdma_addr;
2975
2976
if (!mmio)
2977
return;
2978
2979
iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2980
}
2981
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2982
2983
/**
2984
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2985
* @qc: Info associated with this ATA transaction.
2986
*
2987
* LOCKING:
2988
* spin_lock_irqsave(host lock)
2989
*/
2990
void ata_bmdma_setup(struct ata_queued_cmd *qc)
2991
{
2992
struct ata_port *ap = qc->ap;
2993
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2994
u8 dmactl;
2995
2996
/* load PRD table addr. */
2997
mb(); /* make sure PRD table writes are visible to controller */
2998
iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2999
3000
/* specify data direction, triple-check start bit is clear */
3001
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3002
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3003
if (!rw)
3004
dmactl |= ATA_DMA_WR;
3005
iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3006
3007
/* issue r/w command */
3008
ap->ops->sff_exec_command(ap, &qc->tf);
3009
}
3010
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3011
3012
/**
3013
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
3014
* @qc: Info associated with this ATA transaction.
3015
*
3016
* LOCKING:
3017
* spin_lock_irqsave(host lock)
3018
*/
3019
void ata_bmdma_start(struct ata_queued_cmd *qc)
3020
{
3021
struct ata_port *ap = qc->ap;
3022
u8 dmactl;
3023
3024
/* start host DMA transaction */
3025
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3026
iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3027
3028
/* Strictly, one may wish to issue an ioread8() here, to
3029
* flush the mmio write. However, control also passes
3030
* to the hardware at this point, and it will interrupt
3031
* us when we are to resume control. So, in effect,
3032
* we don't care when the mmio write flushes.
3033
* Further, a read of the DMA status register _immediately_
3034
* following the write may not be what certain flaky hardware
3035
* is expecting, so I think it is best to not add a readb()
3036
* without first testing all the MMIO ATA cards/mobos.
3037
* Or maybe I'm just being paranoid.
3038
*
3039
* FIXME: The posting of this write means I/O starts are
3040
* unnecessarily delayed for MMIO
3041
*/
3042
}
3043
EXPORT_SYMBOL_GPL(ata_bmdma_start);
3044
3045
/**
3046
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3047
* @qc: Command we are ending DMA for
3048
*
3049
* Clears the ATA_DMA_START flag in the dma control register
3050
*
3051
* May be used as the bmdma_stop() entry in ata_port_operations.
3052
*
3053
* LOCKING:
3054
* spin_lock_irqsave(host lock)
3055
*/
3056
void ata_bmdma_stop(struct ata_queued_cmd *qc)
3057
{
3058
struct ata_port *ap = qc->ap;
3059
void __iomem *mmio = ap->ioaddr.bmdma_addr;
3060
3061
/* clear start/stop bit */
3062
iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3063
mmio + ATA_DMA_CMD);
3064
3065
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3066
ata_sff_dma_pause(ap);
3067
}
3068
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3069
3070
/**
3071
* ata_bmdma_status - Read PCI IDE BMDMA status
3072
* @ap: Port associated with this ATA transaction.
3073
*
3074
* Read and return BMDMA status register.
3075
*
3076
* May be used as the bmdma_status() entry in ata_port_operations.
3077
*
3078
* LOCKING:
3079
* spin_lock_irqsave(host lock)
3080
*/
3081
u8 ata_bmdma_status(struct ata_port *ap)
3082
{
3083
return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3084
}
3085
EXPORT_SYMBOL_GPL(ata_bmdma_status);
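
/*
 * Illustrative sketch (editor's addition): a controller with quirky status
 * bits could wrap this helper in its own ->bmdma_status. my_bmdma_status()
 * and the masked bit are hypothetical.
 */
#if 0
static u8 my_bmdma_status(struct ata_port *ap)
{
        /* hide a vendor-specific status bit from the core */
        return ata_bmdma_status(ap) & ~0x40;
}
#endif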
3086
3087
3088
/**
3089
* ata_bmdma_port_start - Set port up for bmdma.
3090
* @ap: Port to initialize
3091
*
3092
* Called just after data structures for each port are
3093
* initialized. Allocates space for PRD table.
3094
*
3095
* May be used as the port_start() entry in ata_port_operations.
3096
*
3097
* LOCKING:
3098
* Inherited from caller.
3099
*/
3100
int ata_bmdma_port_start(struct ata_port *ap)
3101
{
3102
if (ap->mwdma_mask || ap->udma_mask) {
3103
ap->bmdma_prd =
3104
dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3105
&ap->bmdma_prd_dma, GFP_KERNEL);
3106
if (!ap->bmdma_prd)
3107
return -ENOMEM;
3108
}
3109
3110
return 0;
3111
}
3112
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3113
3114
/**
3115
* ata_bmdma_port_start32 - Set port up for dma.
3116
* @ap: Port to initialize
3117
*
3118
* Called just after data structures for each port are
3119
* initialized. Enables 32bit PIO and allocates space for PRD
3120
* table.
3121
*
3122
* May be used as the port_start() entry in ata_port_operations for
3123
* devices that are capable of 32bit PIO.
3124
*
3125
* LOCKING:
3126
* Inherited from caller.
3127
*/
3128
int ata_bmdma_port_start32(struct ata_port *ap)
3129
{
3130
ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3131
return ata_bmdma_port_start(ap);
3132
}
3133
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3134
3135
#ifdef CONFIG_PCI
3136
3137
/**
3138
* ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3139
* @pdev: PCI device
3140
*
3141
* Some PCI ATA devices report simplex mode but in fact can be told to
3142
* enter non-simplex mode. This implements the necessary logic to
3143
* perform the task on such devices. Calling it on other devices will
3144
* have -undefined- behaviour.
3145
*/
3146
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3147
{
3148
unsigned long bmdma = pci_resource_start(pdev, 4);
3149
u8 simplex;
3150
3151
if (bmdma == 0)
3152
return -ENOENT;
3153
3154
simplex = inb(bmdma + 0x02);
3155
outb(simplex & 0x60, bmdma + 0x02);
3156
simplex = inb(bmdma + 0x02);
3157
if (simplex & 0x80)
3158
return -EOPNOTSUPP;
3159
return 0;
3160
}
3161
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
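
/*
 * Illustrative sketch (editor's addition): a driver for a chip known to
 * mis-report simplex operation could try to clear it during probe and fall
 * back to honouring simplex if the write does not stick.
 * my_fixup_simplex() is hypothetical.
 */
#if 0
static void my_fixup_simplex(struct pci_dev *pdev)
{
        if (ata_pci_bmdma_clear_simplex(pdev))
                dev_printk(KERN_WARNING, &pdev->dev,
                           "simplex mode could not be cleared\n");
}
#endif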
3162
3163
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3164
{
3165
int i;
3166
3167
dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
3168
reason);
3169
3170
for (i = 0; i < 2; i++) {
3171
host->ports[i]->mwdma_mask = 0;
3172
host->ports[i]->udma_mask = 0;
3173
}
3174
}
3175
3176
/**
3177
* ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3178
* @host: target ATA host
3179
*
3180
* Acquire PCI BMDMA resources and initialize @host accordingly.
3181
*
3182
* LOCKING:
3183
* Inherited from calling layer (may sleep).
3184
*/
3185
void ata_pci_bmdma_init(struct ata_host *host)
3186
{
3187
struct device *gdev = host->dev;
3188
struct pci_dev *pdev = to_pci_dev(gdev);
3189
int i, rc;
3190
3191
/* No BAR4 allocation: No DMA */
3192
if (pci_resource_start(pdev, 4) == 0) {
3193
ata_bmdma_nodma(host, "BAR4 is zero");
3194
return;
3195
}
3196
3197
/*
3198
* Some controllers require BMDMA region to be initialized
3199
* even if DMA is not in use to clear IRQ status via
3200
* ->sff_irq_clear method. Try to initialize bmdma_addr
3201
* regardless of dma masks.
3202
*/
3203
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3204
if (rc)
3205
ata_bmdma_nodma(host, "failed to set dma mask");
3206
if (!rc) {
3207
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3208
if (rc)
3209
ata_bmdma_nodma(host,
3210
"failed to set consistent dma mask");
3211
}
3212
3213
/* request and iomap DMA region */
3214
rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3215
if (rc) {
3216
ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3217
return;
3218
}
3219
host->iomap = pcim_iomap_table(pdev);
3220
3221
for (i = 0; i < 2; i++) {
3222
struct ata_port *ap = host->ports[i];
3223
void __iomem *bmdma = host->iomap[4] + 8 * i;
3224
3225
if (ata_port_is_dummy(ap))
3226
continue;
3227
3228
ap->ioaddr.bmdma_addr = bmdma;
3229
if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3230
(ioread8(bmdma + 2) & 0x80))
3231
host->flags |= ATA_HOST_SIMPLEX;
3232
3233
ata_port_desc(ap, "bmdma 0x%llx",
3234
(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3235
}
3236
}
3237
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3238
3239
/**
3240
* ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3241
* @pdev: target PCI device
3242
* @ppi: array of port_info, must be enough for two ports
3243
* @r_host: out argument for the initialized ATA host
3244
*
3245
* Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3246
* resources and initialize it accordingly in one go.
3247
*
3248
* LOCKING:
3249
* Inherited from calling layer (may sleep).
3250
*
3251
* RETURNS:
3252
* 0 on success, -errno otherwise.
3253
*/
3254
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3255
const struct ata_port_info * const * ppi,
3256
struct ata_host **r_host)
3257
{
3258
int rc;
3259
3260
rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3261
if (rc)
3262
return rc;
3263
3264
ata_pci_bmdma_init(*r_host);
3265
return 0;
3266
}
3267
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3268
3269
/**
3270
* ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3271
* @pdev: Controller to be initialized
3272
* @ppi: array of port_info, must be enough for two ports
3273
* @sht: scsi_host_template to use when registering the host
3274
* @host_priv: host private_data
3275
* @hflags: host flags
3276
*
3277
* This function is similar to ata_pci_sff_init_one() but also
3278
* takes care of BMDMA initialization.
3279
*
3280
* LOCKING:
3281
* Inherited from PCI layer (may sleep).
3282
*
3283
* RETURNS:
3284
* Zero on success, negative errno-based value on error.
3285
*/
3286
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3287
const struct ata_port_info * const * ppi,
3288
struct scsi_host_template *sht, void *host_priv,
3289
int hflags)
3290
{
3291
struct device *dev = &pdev->dev;
3292
const struct ata_port_info *pi;
3293
struct ata_host *host = NULL;
3294
int rc;
3295
3296
DPRINTK("ENTER\n");
3297
3298
pi = ata_sff_find_valid_pi(ppi);
3299
if (!pi) {
3300
dev_printk(KERN_ERR, &pdev->dev,
3301
"no valid port_info specified\n");
3302
return -EINVAL;
3303
}
3304
3305
if (!devres_open_group(dev, NULL, GFP_KERNEL))
3306
return -ENOMEM;
3307
3308
rc = pcim_enable_device(pdev);
3309
if (rc)
3310
goto out;
3311
3312
/* prepare and activate BMDMA host */
3313
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
3314
if (rc)
3315
goto out;
3316
host->private_data = host_priv;
3317
host->flags |= hflags;
3318
3319
pci_set_master(pdev);
3320
rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
3321
out:
3322
if (rc == 0)
3323
devres_remove_group(&pdev->dev, NULL);
3324
else
3325
devres_release_group(&pdev->dev, NULL);
3326
3327
return rc;
3328
}
3329
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
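
/*
 * Illustrative sketch (editor's addition): the BMDMA counterpart of the
 * probe shown after ata_pci_sff_init_one() above; only the DMA masks, the
 * port_ops and the helper call change. Names are hypothetical and my_sht
 * is assumed to be defined by the driver.
 */
#if 0
static int my_bmdma_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *id)
{
        static const struct ata_port_info my_port_info = {
                .flags          = ATA_FLAG_SLAVE_POSS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &ata_bmdma_port_ops,
        };
        const struct ata_port_info *ppi[] = { &my_port_info, NULL };

        return ata_pci_bmdma_init_one(pdev, ppi, &my_sht, NULL, 0);
}
#endif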
3330
3331
#endif /* CONFIG_PCI */
3332
#endif /* CONFIG_ATA_BMDMA */
3333
3334
/**
3335
* ata_sff_port_init - Initialize SFF/BMDMA ATA port
3336
* @ap: Port to initialize
3337
*
3338
* Called on port allocation to initialize SFF/BMDMA specific
3339
* fields.
3340
*
3341
* LOCKING:
3342
* None.
3343
*/
3344
void ata_sff_port_init(struct ata_port *ap)
3345
{
3346
INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3347
ap->ctl = ATA_DEVCTL_OBS;
3348
ap->last_ctl = 0xFF;
3349
}
3350
3351
int __init ata_sff_init(void)
3352
{
3353
ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3354
if (!ata_sff_wq)
3355
return -ENOMEM;
3356
3357
return 0;
3358
}
3359
3360
void ata_sff_exit(void)
3361
{
3362
destroy_workqueue(ata_sff_wq);
3363
}
3364
3365