/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <[email protected]>
 *		    Please ALWAYS copy [email protected]
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

/* Dedicated workqueue on which the polled PIO state machine
 * (ata_sff_pio_task) runs; created elsewhere in this file's init path. */
static struct workqueue_struct *ata_sff_wq;

/* Base port operations for SFF (taskfile-register) controllers.
 * Drivers inherit from this and override the sff_* hooks as needed. */
const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Non ctl capable devices don't
 *	share interrupt lines fortunately for us.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail.
 *	No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	/* A dummy read of ctl/altstatus forces posted MMIO writes out. */
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_pause - Flush writes and wait 400nS
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 *	ata_sff_dma_pause - Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout in msecs
 *	@tmout: overall timeout in msecs
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* 0xff means the port is empty/gone; stop polling in that case. */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* Second, more patient poll up to the overall timeout. */
	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);

/* Readiness predicate used by ata_sff_wait_ready() via ata_wait_ready(). */
static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may
 *	sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 *	ata_sff_set_devctl - Write device control reg
 *	@ap: port where the device is
 *	@ctl: value to write
 *
 *	Writes ATA taskfile device control register.
 *
 *	Note: may NOT be used as the sff_set_devctl() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}

/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settling time after select. */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	Note: may NOT be used as the sff_irq_on() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	/* Controller-specific override takes precedence. */
	if (ap->ops->sff_irq_on)
	{
		ap->ops->sff_irq_on(ap);
		return;
	}

	/* Clear nIEN (interrupt disable) in the cached control value. */
	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* LBA48: write the high-order bytes (HOB) first. */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If you device does not (eg has a different
 *	status method) then you will need to provide a replacement tf_read
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			/* Set HOB to read the high-order byte shadows. */
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is
 *	being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2];

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	/* Bytes actually moved on the wire (rounded up to a word). */
	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	/* Fall back to 16-bit PIO unless 32-bit PIO was enabled. */
	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4];

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	/* Round up to even: trailing partial words still moved a full word. */
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);

/**
 *	ata_sff_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* Last sector of the command: next state is HSM_ST_LAST. */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* Advance to the next sg entry when this one is exhausted. */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ?
	*/
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 when the sg list is exhausted before @bytes.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant.
	 * For error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	unsigned long flags = 0;
	int poll_next;

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data.
		 * Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST.
so, the state1247* is changed after ata_pio_sectors().1248*/1249ap->hsm_task_state = HSM_ST_ERR;1250goto fsm_start;1251}12521253ata_pio_sectors(qc);12541255if (ap->hsm_task_state == HSM_ST_LAST &&1256(!(qc->tf.flags & ATA_TFLAG_WRITE))) {1257/* all data read */1258status = ata_wait_idle(ap);1259goto fsm_start;1260}1261}12621263poll_next = 1;1264break;12651266case HSM_ST_LAST:1267if (unlikely(!ata_ok(status))) {1268qc->err_mask |= __ac_err_mask(status);1269ap->hsm_task_state = HSM_ST_ERR;1270goto fsm_start;1271}12721273/* no more data to transfer */1274DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",1275ap->print_id, qc->dev->devno, status);12761277WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));12781279ap->hsm_task_state = HSM_ST_IDLE;12801281/* complete taskfile transaction */1282ata_hsm_qc_complete(qc, in_wq);12831284poll_next = 0;1285break;12861287case HSM_ST_ERR:1288ap->hsm_task_state = HSM_ST_IDLE;12891290/* complete taskfile transaction */1291ata_hsm_qc_complete(qc, in_wq);12921293poll_next = 0;1294break;1295default:1296poll_next = 0;1297BUG();1298}12991300return poll_next;1301}1302EXPORT_SYMBOL_GPL(ata_sff_hsm_move);13031304void ata_sff_queue_work(struct work_struct *work)1305{1306queue_work(ata_sff_wq, work);1307}1308EXPORT_SYMBOL_GPL(ata_sff_queue_work);13091310void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)1311{1312queue_delayed_work(ata_sff_wq, dwork, delay);1313}1314EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);13151316void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)1317{1318struct ata_port *ap = link->ap;13191320WARN_ON((ap->sff_pio_task_link != NULL) &&1321(ap->sff_pio_task_link != link));1322ap->sff_pio_task_link = link;13231324/* may fail if ata_sff_flush_pio_task() in progress */1325ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));1326}1327EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);13281329void ata_sff_flush_pio_task(struct ata_port 
*ap)1330{1331DPRINTK("ENTER\n");13321333cancel_delayed_work_sync(&ap->sff_pio_task);1334ap->hsm_task_state = HSM_ST_IDLE;13351336if (ata_msg_ctl(ap))1337ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);1338}13391340static void ata_sff_pio_task(struct work_struct *work)1341{1342struct ata_port *ap =1343container_of(work, struct ata_port, sff_pio_task.work);1344struct ata_link *link = ap->sff_pio_task_link;1345struct ata_queued_cmd *qc;1346u8 status;1347int poll_next;13481349BUG_ON(ap->sff_pio_task_link == NULL);1350/* qc can be NULL if timeout occurred */1351qc = ata_qc_from_tag(ap, link->active_tag);1352if (!qc) {1353ap->sff_pio_task_link = NULL;1354return;1355}13561357fsm_start:1358WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);13591360/*1361* This is purely heuristic. This is a fast path.1362* Sometimes when we enter, BSY will be cleared in1363* a chk-status or two. If not, the drive is probably seeking1364* or something. Snooze for a couple msecs, then1365* chk-status again. If still busy, queue delayed work.1366*/1367status = ata_sff_busy_wait(ap, ATA_BUSY, 5);1368if (status & ATA_BUSY) {1369ata_msleep(ap, 2);1370status = ata_sff_busy_wait(ap, ATA_BUSY, 10);1371if (status & ATA_BUSY) {1372ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);1373return;1374}1375}13761377/*1378* hsm_move() may trigger another command to be processed.1379* clean the link beforehand.1380*/1381ap->sff_pio_task_link = NULL;1382/* move the HSM */1383poll_next = ata_sff_hsm_move(ap, qc, status, 1);13841385/* another command or interrupt handler1386* may be running at this point.1387*/1388if (poll_next)1389goto fsm_start;1390}13911392/**1393* ata_sff_qc_issue - issue taskfile to a SFF controller1394* @qc: command to issue to device1395*1396* This function issues a PIO or NODATA command to a SFF1397* controller.1398*1399* LOCKING:1400* spin_lock_irqsave(host lock)1401*1402* RETURNS:1403* Zero on success, AC_ERR_* mask on failure1404*/1405unsigned int ata_sff_qc_issue(struct 
ata_queued_cmd *qc)1406{1407struct ata_port *ap = qc->ap;1408struct ata_link *link = qc->dev->link;14091410/* Use polling pio if the LLD doesn't handle1411* interrupt driven pio and atapi CDB interrupt.1412*/1413if (ap->flags & ATA_FLAG_PIO_POLLING)1414qc->tf.flags |= ATA_TFLAG_POLLING;14151416/* select the device */1417ata_dev_select(ap, qc->dev->devno, 1, 0);14181419/* start the command */1420switch (qc->tf.protocol) {1421case ATA_PROT_NODATA:1422if (qc->tf.flags & ATA_TFLAG_POLLING)1423ata_qc_set_polling(qc);14241425ata_tf_to_host(ap, &qc->tf);1426ap->hsm_task_state = HSM_ST_LAST;14271428if (qc->tf.flags & ATA_TFLAG_POLLING)1429ata_sff_queue_pio_task(link, 0);14301431break;14321433case ATA_PROT_PIO:1434if (qc->tf.flags & ATA_TFLAG_POLLING)1435ata_qc_set_polling(qc);14361437ata_tf_to_host(ap, &qc->tf);14381439if (qc->tf.flags & ATA_TFLAG_WRITE) {1440/* PIO data out protocol */1441ap->hsm_task_state = HSM_ST_FIRST;1442ata_sff_queue_pio_task(link, 0);14431444/* always send first data block using the1445* ata_sff_pio_task() codepath.1446*/1447} else {1448/* PIO data in protocol */1449ap->hsm_task_state = HSM_ST;14501451if (qc->tf.flags & ATA_TFLAG_POLLING)1452ata_sff_queue_pio_task(link, 0);14531454/* if polling, ata_sff_pio_task() handles the1455* rest. 
otherwise, interrupt handler takes1456* over from here.1457*/1458}14591460break;14611462case ATAPI_PROT_PIO:1463case ATAPI_PROT_NODATA:1464if (qc->tf.flags & ATA_TFLAG_POLLING)1465ata_qc_set_polling(qc);14661467ata_tf_to_host(ap, &qc->tf);14681469ap->hsm_task_state = HSM_ST_FIRST;14701471/* send cdb by polling if no cdb interrupt */1472if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||1473(qc->tf.flags & ATA_TFLAG_POLLING))1474ata_sff_queue_pio_task(link, 0);1475break;14761477default:1478WARN_ON_ONCE(1);1479return AC_ERR_SYSTEM;1480}14811482return 0;1483}1484EXPORT_SYMBOL_GPL(ata_sff_qc_issue);14851486/**1487* ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read1488* @qc: qc to fill result TF for1489*1490* @qc is finished and result TF needs to be filled. Fill it1491* using ->sff_tf_read.1492*1493* LOCKING:1494* spin_lock_irqsave(host lock)1495*1496* RETURNS:1497* true indicating that result TF is successfully filled.1498*/1499bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)1500{1501qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);1502return true;1503}1504EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);15051506static unsigned int ata_sff_idle_irq(struct ata_port *ap)1507{1508ap->stats.idle_irq++;15091510#ifdef ATA_IRQ_TRAP1511if ((ap->stats.idle_irq % 1000) == 0) {1512ap->ops->sff_check_status(ap);1513if (ap->ops->sff_irq_clear)1514ap->ops->sff_irq_clear(ap);1515ata_port_printk(ap, KERN_WARNING, "irq trap\n");1516return 1;1517}1518#endif1519return 0; /* irq not handled */1520}15211522static unsigned int __ata_sff_port_intr(struct ata_port *ap,1523struct ata_queued_cmd *qc,1524bool hsmv_on_idle)1525{1526u8 status;15271528VPRINTK("ata%u: protocol %d task_state %d\n",1529ap->print_id, qc->tf.protocol, ap->hsm_task_state);15301531/* Check whether we are expecting interrupt in this state */1532switch (ap->hsm_task_state) {1533case HSM_ST_FIRST:1534/* Some pre-ATAPI-4 devices assert INTRQ1535* at this state when ready to receive CDB.1536*/15371538/* Check the 
ATA_DFLAG_CDB_INTR flag is enough here.1539* The flag was turned on only for atapi devices. No1540* need to check ata_is_atapi(qc->tf.protocol) again.1541*/1542if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))1543return ata_sff_idle_irq(ap);1544break;1545case HSM_ST_IDLE:1546return ata_sff_idle_irq(ap);1547default:1548break;1549}15501551/* check main status, clearing INTRQ if needed */1552status = ata_sff_irq_status(ap);1553if (status & ATA_BUSY) {1554if (hsmv_on_idle) {1555/* BMDMA engine is already stopped, we're screwed */1556qc->err_mask |= AC_ERR_HSM;1557ap->hsm_task_state = HSM_ST_ERR;1558} else1559return ata_sff_idle_irq(ap);1560}15611562/* clear irq events */1563if (ap->ops->sff_irq_clear)1564ap->ops->sff_irq_clear(ap);15651566ata_sff_hsm_move(ap, qc, status, 0);15671568return 1; /* irq handled */1569}15701571/**1572* ata_sff_port_intr - Handle SFF port interrupt1573* @ap: Port on which interrupt arrived (possibly...)1574* @qc: Taskfile currently active in engine1575*1576* Handle port interrupt for given queued command.1577*1578* LOCKING:1579* spin_lock_irqsave(host lock)1580*1581* RETURNS:1582* One if interrupt was handled, zero if not (shared irq).1583*/1584unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)1585{1586return __ata_sff_port_intr(ap, qc, false);1587}1588EXPORT_SYMBOL_GPL(ata_sff_port_intr);15891590static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,1591unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))1592{1593struct ata_host *host = dev_instance;1594bool retried = false;1595unsigned int i;1596unsigned int handled, idle, polling;1597unsigned long flags;15981599/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */1600spin_lock_irqsave(&host->lock, flags);16011602retry:1603handled = idle = polling = 0;1604for (i = 0; i < host->n_ports; i++) {1605struct ata_port *ap = host->ports[i];1606struct ata_queued_cmd *qc;16071608qc = ata_qc_from_tag(ap, ap->link.active_tag);1609if 
(qc) {1610if (!(qc->tf.flags & ATA_TFLAG_POLLING))1611handled |= port_intr(ap, qc);1612else1613polling |= 1 << i;1614} else1615idle |= 1 << i;1616}16171618/*1619* If no port was expecting IRQ but the controller is actually1620* asserting IRQ line, nobody cared will ensue. Check IRQ1621* pending status if available and clear spurious IRQ.1622*/1623if (!handled && !retried) {1624bool retry = false;16251626for (i = 0; i < host->n_ports; i++) {1627struct ata_port *ap = host->ports[i];16281629if (polling & (1 << i))1630continue;16311632if (!ap->ops->sff_irq_check ||1633!ap->ops->sff_irq_check(ap))1634continue;16351636if (idle & (1 << i)) {1637ap->ops->sff_check_status(ap);1638if (ap->ops->sff_irq_clear)1639ap->ops->sff_irq_clear(ap);1640} else {1641/* clear INTRQ and check if BUSY cleared */1642if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))1643retry |= true;1644/*1645* With command in flight, we can't do1646* sff_irq_clear() w/o racing with completion.1647*/1648}1649}16501651if (retry) {1652retried = true;1653goto retry;1654}1655}16561657spin_unlock_irqrestore(&host->lock, flags);16581659return IRQ_RETVAL(handled);1660}16611662/**1663* ata_sff_interrupt - Default SFF ATA host interrupt handler1664* @irq: irq line (unused)1665* @dev_instance: pointer to our ata_host information structure1666*1667* Default interrupt handler for PCI IDE devices. Calls1668* ata_sff_port_intr() for each port that is not disabled.1669*1670* LOCKING:1671* Obtains host lock during operation.1672*1673* RETURNS:1674* IRQ_NONE or IRQ_HANDLED.1675*/1676irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)1677{1678return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);1679}1680EXPORT_SYMBOL_GPL(ata_sff_interrupt);16811682/**1683* ata_sff_lost_interrupt - Check for an apparent lost interrupt1684* @ap: port that appears to have timed out1685*1686* Called from the libata error handlers when the core code suspects1687* an interrupt has been lost. 
If it has complete anything we can and1688* then return. Interface must support altstatus for this faster1689* recovery to occur.1690*1691* Locking:1692* Caller holds host lock1693*/16941695void ata_sff_lost_interrupt(struct ata_port *ap)1696{1697u8 status;1698struct ata_queued_cmd *qc;16991700/* Only one outstanding command per SFF channel */1701qc = ata_qc_from_tag(ap, ap->link.active_tag);1702/* We cannot lose an interrupt on a non-existent or polled command */1703if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)1704return;1705/* See if the controller thinks it is still busy - if so the command1706isn't a lost IRQ but is still in progress */1707status = ata_sff_altstatus(ap);1708if (status & ATA_BUSY)1709return;17101711/* There was a command running, we are no longer busy and we have1712no interrupt. */1713ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",1714status);1715/* Run the host interrupt logic as if the interrupt had not been1716lost */1717ata_sff_port_intr(ap, qc);1718}1719EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);17201721/**1722* ata_sff_freeze - Freeze SFF controller port1723* @ap: port to freeze1724*1725* Freeze SFF controller port.1726*1727* LOCKING:1728* Inherited from caller.1729*/1730void ata_sff_freeze(struct ata_port *ap)1731{1732ap->ctl |= ATA_NIEN;1733ap->last_ctl = ap->ctl;17341735if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)1736ata_sff_set_devctl(ap, ap->ctl);17371738/* Under certain circumstances, some controllers raise IRQ on1739* ATA_NIEN manipulation. Also, many controllers fail to mask1740* previously pending IRQ on ATA_NIEN assertion. 
Clear it.1741*/1742ap->ops->sff_check_status(ap);17431744if (ap->ops->sff_irq_clear)1745ap->ops->sff_irq_clear(ap);1746}1747EXPORT_SYMBOL_GPL(ata_sff_freeze);17481749/**1750* ata_sff_thaw - Thaw SFF controller port1751* @ap: port to thaw1752*1753* Thaw SFF controller port.1754*1755* LOCKING:1756* Inherited from caller.1757*/1758void ata_sff_thaw(struct ata_port *ap)1759{1760/* clear & re-enable interrupts */1761ap->ops->sff_check_status(ap);1762if (ap->ops->sff_irq_clear)1763ap->ops->sff_irq_clear(ap);1764ata_sff_irq_on(ap);1765}1766EXPORT_SYMBOL_GPL(ata_sff_thaw);17671768/**1769* ata_sff_prereset - prepare SFF link for reset1770* @link: SFF link to be reset1771* @deadline: deadline jiffies for the operation1772*1773* SFF link @link is about to be reset. Initialize it. It first1774* calls ata_std_prereset() and wait for !BSY if the port is1775* being softreset.1776*1777* LOCKING:1778* Kernel thread context (may sleep)1779*1780* RETURNS:1781* 0 on success, -errno otherwise.1782*/1783int ata_sff_prereset(struct ata_link *link, unsigned long deadline)1784{1785struct ata_eh_context *ehc = &link->eh_context;1786int rc;17871788rc = ata_std_prereset(link, deadline);1789if (rc)1790return rc;17911792/* if we're about to do hardreset, nothing more to do */1793if (ehc->i.action & ATA_EH_HARDRESET)1794return 0;17951796/* wait for !BSY if we don't know that no device is attached */1797if (!ata_link_offline(link)) {1798rc = ata_sff_wait_ready(link, deadline);1799if (rc && rc != -ENODEV) {1800ata_link_printk(link, KERN_WARNING, "device not ready "1801"(errno=%d), forcing hardreset\n", rc);1802ehc->i.action |= ATA_EH_HARDRESET;1803}1804}18051806return 0;1807}1808EXPORT_SYMBOL_GPL(ata_sff_prereset);18091810/**1811* ata_devchk - PATA device presence detection1812* @ap: ATA channel to examine1813* @device: Device to examine (starting at zero)1814*1815* This technique was originally described in1816* Hale Landis's ATADRVR (www.ata-atapi.com), and1817* later found its way into the 
ATA/ATAPI spec.1818*1819* Write a pattern to the ATA shadow registers,1820* and if a device is present, it will respond by1821* correctly storing and echoing back the1822* ATA shadow register contents.1823*1824* LOCKING:1825* caller.1826*/1827static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)1828{1829struct ata_ioports *ioaddr = &ap->ioaddr;1830u8 nsect, lbal;18311832ap->ops->sff_dev_select(ap, device);18331834iowrite8(0x55, ioaddr->nsect_addr);1835iowrite8(0xaa, ioaddr->lbal_addr);18361837iowrite8(0xaa, ioaddr->nsect_addr);1838iowrite8(0x55, ioaddr->lbal_addr);18391840iowrite8(0x55, ioaddr->nsect_addr);1841iowrite8(0xaa, ioaddr->lbal_addr);18421843nsect = ioread8(ioaddr->nsect_addr);1844lbal = ioread8(ioaddr->lbal_addr);18451846if ((nsect == 0x55) && (lbal == 0xaa))1847return 1; /* we found a device */18481849return 0; /* nothing found */1850}18511852/**1853* ata_sff_dev_classify - Parse returned ATA device signature1854* @dev: ATA device to classify (starting at zero)1855* @present: device seems present1856* @r_err: Value of error register on completion1857*1858* After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,1859* an ATA/ATAPI-defined set of values is placed in the ATA1860* shadow registers, indicating the results of device detection1861* and diagnostics.1862*1863* Select the ATA device, and read the values from the ATA shadow1864* registers. 
Then parse according to the Error register value,1865* and the spec-defined values examined by ata_dev_classify().1866*1867* LOCKING:1868* caller.1869*1870* RETURNS:1871* Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.1872*/1873unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,1874u8 *r_err)1875{1876struct ata_port *ap = dev->link->ap;1877struct ata_taskfile tf;1878unsigned int class;1879u8 err;18801881ap->ops->sff_dev_select(ap, dev->devno);18821883memset(&tf, 0, sizeof(tf));18841885ap->ops->sff_tf_read(ap, &tf);1886err = tf.feature;1887if (r_err)1888*r_err = err;18891890/* see if device passed diags: continue and warn later */1891if (err == 0)1892/* diagnostic fail : do nothing _YET_ */1893dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;1894else if (err == 1)1895/* do nothing */ ;1896else if ((dev->devno == 0) && (err == 0x81))1897/* do nothing */ ;1898else1899return ATA_DEV_NONE;19001901/* determine if device is ATA or ATAPI */1902class = ata_dev_classify(&tf);19031904if (class == ATA_DEV_UNKNOWN) {1905/* If the device failed diagnostic, it's likely to1906* have reported incorrect device signature too.1907* Assume ATA device if the device seems present but1908* device signature is invalid with diagnostic1909* failure.1910*/1911if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))1912class = ATA_DEV_ATA;1913else1914class = ATA_DEV_NONE;1915} else if ((class == ATA_DEV_ATA) &&1916(ap->ops->sff_check_status(ap) == 0))1917class = ATA_DEV_NONE;19181919return class;1920}1921EXPORT_SYMBOL_GPL(ata_sff_dev_classify);19221923/**1924* ata_sff_wait_after_reset - wait for devices to become ready after reset1925* @link: SFF link which is just reset1926* @devmask: mask of present devices1927* @deadline: deadline jiffies for the operation1928*1929* Wait devices attached to SFF @link to become ready after1930* reset. 
It contains preceding 150ms wait to avoid accessing TF1931* status register too early.1932*1933* LOCKING:1934* Kernel thread context (may sleep).1935*1936* RETURNS:1937* 0 on success, -ENODEV if some or all of devices in @devmask1938* don't seem to exist. -errno on other errors.1939*/1940int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,1941unsigned long deadline)1942{1943struct ata_port *ap = link->ap;1944struct ata_ioports *ioaddr = &ap->ioaddr;1945unsigned int dev0 = devmask & (1 << 0);1946unsigned int dev1 = devmask & (1 << 1);1947int rc, ret = 0;19481949ata_msleep(ap, ATA_WAIT_AFTER_RESET);19501951/* always check readiness of the master device */1952rc = ata_sff_wait_ready(link, deadline);1953/* -ENODEV means the odd clown forgot the D7 pulldown resistor1954* and TF status is 0xff, bail out on it too.1955*/1956if (rc)1957return rc;19581959/* if device 1 was found in ata_devchk, wait for register1960* access briefly, then wait for BSY to clear.1961*/1962if (dev1) {1963int i;19641965ap->ops->sff_dev_select(ap, 1);19661967/* Wait for register access. Some ATAPI devices fail1968* to set nsect/lbal after reset, so don't waste too1969* much time on it. We're gonna wait for !BSY anyway.1970*/1971for (i = 0; i < 2; i++) {1972u8 nsect, lbal;19731974nsect = ioread8(ioaddr->nsect_addr);1975lbal = ioread8(ioaddr->lbal_addr);1976if ((nsect == 1) && (lbal == 1))1977break;1978ata_msleep(ap, 50); /* give drive a breather */1979}19801981rc = ata_sff_wait_ready(link, deadline);1982if (rc) {1983if (rc != -ENODEV)1984return rc;1985ret = rc;1986}1987}19881989/* is all this really necessary? 
*/1990ap->ops->sff_dev_select(ap, 0);1991if (dev1)1992ap->ops->sff_dev_select(ap, 1);1993if (dev0)1994ap->ops->sff_dev_select(ap, 0);19951996return ret;1997}1998EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);19992000static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,2001unsigned long deadline)2002{2003struct ata_ioports *ioaddr = &ap->ioaddr;20042005DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);20062007/* software reset. causes dev0 to be selected */2008iowrite8(ap->ctl, ioaddr->ctl_addr);2009udelay(20); /* FIXME: flush */2010iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);2011udelay(20); /* FIXME: flush */2012iowrite8(ap->ctl, ioaddr->ctl_addr);2013ap->last_ctl = ap->ctl;20142015/* wait the port to become ready */2016return ata_sff_wait_after_reset(&ap->link, devmask, deadline);2017}20182019/**2020* ata_sff_softreset - reset host port via ATA SRST2021* @link: ATA link to reset2022* @classes: resulting classes of attached devices2023* @deadline: deadline jiffies for the operation2024*2025* Reset host port using ATA SRST.2026*2027* LOCKING:2028* Kernel thread context (may sleep)2029*2030* RETURNS:2031* 0 on success, -errno otherwise.2032*/2033int ata_sff_softreset(struct ata_link *link, unsigned int *classes,2034unsigned long deadline)2035{2036struct ata_port *ap = link->ap;2037unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;2038unsigned int devmask = 0;2039int rc;2040u8 err;20412042DPRINTK("ENTER\n");20432044/* determine if device 0/1 are present */2045if (ata_devchk(ap, 0))2046devmask |= (1 << 0);2047if (slave_possible && ata_devchk(ap, 1))2048devmask |= (1 << 1);20492050/* select device 0 again */2051ap->ops->sff_dev_select(ap, 0);20522053/* issue bus reset */2054DPRINTK("about to softreset, devmask=%x\n", devmask);2055rc = ata_bus_softreset(ap, devmask, deadline);2056/* if link is occupied, -ENODEV too is an error */2057if (rc && (rc != -ENODEV || sata_scr_valid(link))) {2058ata_link_printk(link, KERN_ERR, "SRST failed 
(errno=%d)\n", rc);2059return rc;2060}20612062/* determine by signature whether we have ATA or ATAPI devices */2063classes[0] = ata_sff_dev_classify(&link->device[0],2064devmask & (1 << 0), &err);2065if (slave_possible && err != 0x81)2066classes[1] = ata_sff_dev_classify(&link->device[1],2067devmask & (1 << 1), &err);20682069DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);2070return 0;2071}2072EXPORT_SYMBOL_GPL(ata_sff_softreset);20732074/**2075* sata_sff_hardreset - reset host port via SATA phy reset2076* @link: link to reset2077* @class: resulting class of attached device2078* @deadline: deadline jiffies for the operation2079*2080* SATA phy-reset host port using DET bits of SControl register,2081* wait for !BSY and classify the attached device.2082*2083* LOCKING:2084* Kernel thread context (may sleep)2085*2086* RETURNS:2087* 0 on success, -errno otherwise.2088*/2089int sata_sff_hardreset(struct ata_link *link, unsigned int *class,2090unsigned long deadline)2091{2092struct ata_eh_context *ehc = &link->eh_context;2093const unsigned long *timing = sata_ehc_deb_timing(ehc);2094bool online;2095int rc;20962097rc = sata_link_hardreset(link, timing, deadline, &online,2098ata_sff_check_ready);2099if (online)2100*class = ata_sff_dev_classify(link->device, 1, NULL);21012102DPRINTK("EXIT, class=%u\n", *class);2103return rc;2104}2105EXPORT_SYMBOL_GPL(sata_sff_hardreset);21062107/**2108* ata_sff_postreset - SFF postreset callback2109* @link: the target SFF ata_link2110* @classes: classes of attached devices2111*2112* This function is invoked after a successful reset. It first2113* calls ata_std_postreset() and performs SFF specific postreset2114* processing.2115*2116* LOCKING:2117* Kernel thread context (may sleep)2118*/2119void ata_sff_postreset(struct ata_link *link, unsigned int *classes)2120{2121struct ata_port *ap = link->ap;21222123ata_std_postreset(link, classes);21242125/* is double-select really necessary? 
*/2126if (classes[0] != ATA_DEV_NONE)2127ap->ops->sff_dev_select(ap, 1);2128if (classes[1] != ATA_DEV_NONE)2129ap->ops->sff_dev_select(ap, 0);21302131/* bail out if no device is present */2132if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {2133DPRINTK("EXIT, no device\n");2134return;2135}21362137/* set up device control */2138if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {2139ata_sff_set_devctl(ap, ap->ctl);2140ap->last_ctl = ap->ctl;2141}2142}2143EXPORT_SYMBOL_GPL(ata_sff_postreset);21442145/**2146* ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers2147* @qc: command2148*2149* Drain the FIFO and device of any stuck data following a command2150* failing to complete. In some cases this is necessary before a2151* reset will recover the device.2152*2153*/21542155void ata_sff_drain_fifo(struct ata_queued_cmd *qc)2156{2157int count;2158struct ata_port *ap;21592160/* We only need to flush incoming data when a command was running */2161if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)2162return;21632164ap = qc->ap;2165/* Drain up to 64K of data before we give up this recovery method */2166for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)2167&& count < 65536; count += 2)2168ioread16(ap->ioaddr.data_addr);21692170/* Can become DEBUG later */2171if (count)2172ata_port_printk(ap, KERN_DEBUG,2173"drained %d bytes to clear DRQ.\n", count);21742175}2176EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);21772178/**2179* ata_sff_error_handler - Stock error handler for SFF controller2180* @ap: port to handle error for2181*2182* Stock error handler for SFF controller. It can handle both2183* PATA and SATA controllers. 
Many controllers should be able to2184* use this EH as-is or with some added handling before and2185* after.2186*2187* LOCKING:2188* Kernel thread context (may sleep)2189*/2190void ata_sff_error_handler(struct ata_port *ap)2191{2192ata_reset_fn_t softreset = ap->ops->softreset;2193ata_reset_fn_t hardreset = ap->ops->hardreset;2194struct ata_queued_cmd *qc;2195unsigned long flags;21962197qc = __ata_qc_from_tag(ap, ap->link.active_tag);2198if (qc && !(qc->flags & ATA_QCFLAG_FAILED))2199qc = NULL;22002201spin_lock_irqsave(ap->lock, flags);22022203/*2204* We *MUST* do FIFO draining before we issue a reset as2205* several devices helpfully clear their internal state and2206* will lock solid if we touch the data port post reset. Pass2207* qc in case anyone wants to do different PIO/DMA recovery or2208* has per command fixups2209*/2210if (ap->ops->sff_drain_fifo)2211ap->ops->sff_drain_fifo(qc);22122213spin_unlock_irqrestore(ap->lock, flags);22142215/* ignore ata_sff_softreset if ctl isn't accessible */2216if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)2217softreset = NULL;22182219/* ignore built-in hardresets if SCR access is not available */2220if ((hardreset == sata_std_hardreset ||2221hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))2222hardreset = NULL;22232224ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,2225ap->ops->postreset);2226}2227EXPORT_SYMBOL_GPL(ata_sff_error_handler);22282229/**2230* ata_sff_std_ports - initialize ioaddr with standard port offsets.2231* @ioaddr: IO address structure to be initialized2232*2233* Utility function which initializes data_addr, error_addr,2234* feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,2235* device_addr, status_addr, and command_addr to standard offsets2236* relative to cmd_addr.2237*2238* Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.2239*/2240void ata_sff_std_ports(struct ata_ioports *ioaddr)2241{2242ioaddr->data_addr = ioaddr->cmd_addr + 
ATA_REG_DATA;2243ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;2244ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;2245ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;2246ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;2247ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;2248ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;2249ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;2250ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;2251ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;2252}2253EXPORT_SYMBOL_GPL(ata_sff_std_ports);22542255#ifdef CONFIG_PCI22562257static int ata_resources_present(struct pci_dev *pdev, int port)2258{2259int i;22602261/* Check the PCI resources for this channel are enabled */2262port = port * 2;2263for (i = 0; i < 2; i++) {2264if (pci_resource_start(pdev, port + i) == 0 ||2265pci_resource_len(pdev, port + i) == 0)2266return 0;2267}2268return 1;2269}22702271/**2272* ata_pci_sff_init_host - acquire native PCI ATA resources and init host2273* @host: target ATA host2274*2275* Acquire native PCI ATA resources for @host and initialize the2276* first two ports of @host accordingly. 
Ports marked dummy are2277* skipped and allocation failure makes the port dummy.2278*2279* Note that native PCI resources are valid even for legacy hosts2280* as we fix up pdev resources array early in boot, so this2281* function can be used for both native and legacy SFF hosts.2282*2283* LOCKING:2284* Inherited from calling layer (may sleep).2285*2286* RETURNS:2287* 0 if at least one port is initialized, -ENODEV if no port is2288* available.2289*/2290int ata_pci_sff_init_host(struct ata_host *host)2291{2292struct device *gdev = host->dev;2293struct pci_dev *pdev = to_pci_dev(gdev);2294unsigned int mask = 0;2295int i, rc;22962297/* request, iomap BARs and init port addresses accordingly */2298for (i = 0; i < 2; i++) {2299struct ata_port *ap = host->ports[i];2300int base = i * 2;2301void __iomem * const *iomap;23022303if (ata_port_is_dummy(ap))2304continue;23052306/* Discard disabled ports. Some controllers show2307* their unused channels this way. Disabled ports are2308* made dummy.2309*/2310if (!ata_resources_present(pdev, i)) {2311ap->ops = &ata_dummy_port_ops;2312continue;2313}23142315rc = pcim_iomap_regions(pdev, 0x3 << base,2316dev_driver_string(gdev));2317if (rc) {2318dev_printk(KERN_WARNING, gdev,2319"failed to request/iomap BARs for port %d "2320"(errno=%d)\n", i, rc);2321if (rc == -EBUSY)2322pcim_pin_device(pdev);2323ap->ops = &ata_dummy_port_ops;2324continue;2325}2326host->iomap = iomap = pcim_iomap_table(pdev);23272328ap->ioaddr.cmd_addr = iomap[base];2329ap->ioaddr.altstatus_addr =2330ap->ioaddr.ctl_addr = (void __iomem *)2331((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);2332ata_sff_std_ports(&ap->ioaddr);23332334ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",2335(unsigned long long)pci_resource_start(pdev, base),2336(unsigned long long)pci_resource_start(pdev, base + 1));23372338mask |= 1 << i;2339}23402341if (!mask) {2342dev_printk(KERN_ERR, gdev, "no available native port\n");2343return -ENODEV;2344}23452346return 
0;2347}2348EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);23492350/**2351* ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host2352* @pdev: target PCI device2353* @ppi: array of port_info, must be enough for two ports2354* @r_host: out argument for the initialized ATA host2355*2356* Helper to allocate PIO-only SFF ATA host for @pdev, acquire2357* all PCI resources and initialize it accordingly in one go.2358*2359* LOCKING:2360* Inherited from calling layer (may sleep).2361*2362* RETURNS:2363* 0 on success, -errno otherwise.2364*/2365int ata_pci_sff_prepare_host(struct pci_dev *pdev,2366const struct ata_port_info * const *ppi,2367struct ata_host **r_host)2368{2369struct ata_host *host;2370int rc;23712372if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))2373return -ENOMEM;23742375host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);2376if (!host) {2377dev_printk(KERN_ERR, &pdev->dev,2378"failed to allocate ATA host\n");2379rc = -ENOMEM;2380goto err_out;2381}23822383rc = ata_pci_sff_init_host(host);2384if (rc)2385goto err_out;23862387devres_remove_group(&pdev->dev, NULL);2388*r_host = host;2389return 0;23902391err_out:2392devres_release_group(&pdev->dev, NULL);2393return rc;2394}2395EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);23962397/**2398* ata_pci_sff_activate_host - start SFF host, request IRQ and register it2399* @host: target SFF ATA host2400* @irq_handler: irq_handler used when requesting IRQ(s)2401* @sht: scsi_host_template to use when registering the host2402*2403* This is the counterpart of ata_host_activate() for SFF ATA2404* hosts. 
/**
 *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_activate_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask;

		/* Prog-if bits 0 and 2 indicate native PCI mode for the
		 * primary and secondary channel respectively; if either
		 * is clear we must use the legacy IRQ routing below.
		 * TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space. In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		int i;

		/* native mode: one shared IRQ serves both ports */
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc(host->ports[i], "irq %d", pdev->irq);
		}
	} else if (legacy_mode) {
		/* legacy mode: each channel has its own fixed IRQ,
		 * requested only for ports that actually exist */
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
out:
	/* keep the devm-requested IRQs on success, drop them on error */
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);

/* Return the first entry of @ppi that describes a real (non-dummy)
 * port, or NULL if neither of the two possible entries does. */
static const struct ata_port_info *ata_sff_find_valid_pi(
	const struct ata_port_info * const *ppi)
{
	int i;

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];

	return NULL;
}
pcim_enable_device(pdev);2553if (rc)2554goto out;25552556/* prepare and activate SFF host */2557rc = ata_pci_sff_prepare_host(pdev, ppi, &host);2558if (rc)2559goto out;2560host->private_data = host_priv;2561host->flags |= hflag;25622563rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);2564out:2565if (rc == 0)2566devres_remove_group(&pdev->dev, NULL);2567else2568devres_release_group(&pdev->dev, NULL);25692570return rc;2571}2572EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);25732574#endif /* CONFIG_PCI */25752576/*2577* BMDMA support2578*/25792580#ifdef CONFIG_ATA_BMDMA25812582const struct ata_port_operations ata_bmdma_port_ops = {2583.inherits = &ata_sff_port_ops,25842585.error_handler = ata_bmdma_error_handler,2586.post_internal_cmd = ata_bmdma_post_internal_cmd,25872588.qc_prep = ata_bmdma_qc_prep,2589.qc_issue = ata_bmdma_qc_issue,25902591.sff_irq_clear = ata_bmdma_irq_clear,2592.bmdma_setup = ata_bmdma_setup,2593.bmdma_start = ata_bmdma_start,2594.bmdma_stop = ata_bmdma_stop,2595.bmdma_status = ata_bmdma_status,25962597.port_start = ata_bmdma_port_start,2598};2599EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);26002601const struct ata_port_operations ata_bmdma32_port_ops = {2602.inherits = &ata_bmdma_port_ops,26032604.sff_data_xfer = ata_sff_data_xfer32,2605.port_start = ata_bmdma_port_start32,2606};2607EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);26082609/**2610* ata_bmdma_fill_sg - Fill PCI IDE PRD table2611* @qc: Metadata associated with taskfile to be transferred2612*2613* Fill PCI IDE PRD (scatter-gather) table with segments2614* associated with the current disk command.2615*2616* LOCKING:2617* spin_lock_irqsave(host lock)2618*2619*/2620static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)2621{2622struct ata_port *ap = qc->ap;2623struct ata_bmdma_prd *prd = ap->bmdma_prd;2624struct scatterlist *sg;2625unsigned int si, pi;26262627pi = 0;2628for_each_sg(qc->sg, sg, qc->n_elem, si) {2629u32 addr, offset;2630u32 sg_len, len;26312632/* determine if physical DMA addr 
/**
 *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg entry so that no PRD crosses a 64K
		 * boundary; a 64K chunk is encoded as length 0x0000 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[pi].addr = cpu_to_le32(addr);
			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final entry as end-of-table */
	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec
				   says, so split a 64K chunk into two
				   back-to-back 32K PRD entries instead */
				prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_bmdma_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
	/* nothing to do for commands without a DMA mapping */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_bmdma_fill_sg(qc);
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission, avoiding 64K PRD entries
 *	for controllers that cannot handle them.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_bmdma_fill_sg_dumb(qc);
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
/**
 *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO, NODATA or DMA command to a
 *	SFF/BMDMA controller.  PIO and NODATA are handled by
 *	ata_sff_qc_issue().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; the tf_load -> bmdma_setup -> bmdma_start
	 * ordering is mandated by the hardware programming model */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);           /* set up bmdma */
		ap->ops->bmdma_start(qc);           /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);           /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 *	ata_bmdma_port_intr - Handle BMDMA port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 host_stat = 0;
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;

		if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
			qc->err_mask |= AC_ERR_HOST_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
		}
	}

	/* let the SFF layer finish the HSM transition; it is told
	 * whether we already stopped the DMA engine above */
	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);

/**
 *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_bmdma_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Most BMDMA controllers should be
 *	able to use this EH as-is or with some added handling before
 *	and after.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	/* only look at the active command if it actually failed */
	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = true;
		}

		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* thawing must happen outside the port lock */
	if (thaw)
		ata_eh_thaw_port(ap);

	ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 *	@qc: internal command to clean up
 *
 *	Stops the DMA engine (under the port lock) if the internal
 *	command used a DMA protocol.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* PIO-only ports have no BMDMA registers mapped */
	if (!mmio)
		return;

	/* status bits are cleared by writing back the value just read
	 * (write-one-to-clear semantics per the BMDMA spec) */
	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Programs the PRD table address and transfer direction, then
 *	issues the taskfile command.  The DMA engine is not started
 *	here; that happens in ata_bmdma_start().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;	/* device read: controller writes to memory */
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO
	 */
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
Allocates space for PRD table.3093*3094* May be used as the port_start() entry in ata_port_operations.3095*3096* LOCKING:3097* Inherited from caller.3098*/3099int ata_bmdma_port_start(struct ata_port *ap)3100{3101if (ap->mwdma_mask || ap->udma_mask) {3102ap->bmdma_prd =3103dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,3104&ap->bmdma_prd_dma, GFP_KERNEL);3105if (!ap->bmdma_prd)3106return -ENOMEM;3107}31083109return 0;3110}3111EXPORT_SYMBOL_GPL(ata_bmdma_port_start);31123113/**3114* ata_bmdma_port_start32 - Set port up for dma.3115* @ap: Port to initialize3116*3117* Called just after data structures for each port are3118* initialized. Enables 32bit PIO and allocates space for PRD3119* table.3120*3121* May be used as the port_start() entry in ata_port_operations for3122* devices that are capable of 32bit PIO.3123*3124* LOCKING:3125* Inherited from caller.3126*/3127int ata_bmdma_port_start32(struct ata_port *ap)3128{3129ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;3130return ata_bmdma_port_start(ap);3131}3132EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);31333134#ifdef CONFIG_PCI31353136/**3137* ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex3138* @pdev: PCI device3139*3140* Some PCI ATA devices report simplex mode but in fact can be told to3141* enter non simplex mode. This implements the necessary logic to3142* perform the task on such devices. 
/**
 *	ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	/* no BAR4 means no BMDMA registers at all */
	if (bmdma == 0)
		return -ENOENT;

	/* write back the status byte with only bits 5-6 (drive DMA
	 * capable) preserved, attempting to clear the simplex bit */
	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	/* bit 7 still set: the controller really is simplex-only */
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);

/* Disable DMA on both ports of @host and log @reason; the host then
 * operates in PIO-only mode. */
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
	int i;

	dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
		   reason);

	for (i = 0; i < 2; i++) {
		host->ports[i]->mwdma_mask = 0;
		host->ports[i]->udma_mask = 0;
	}
}

/**
 *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	/*
	 * Some controllers require BMDMA region to be initialized
	 * even if DMA is not in use to clear IRQ status via
	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
	 * regardless of dma masks.
	 */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			ata_bmdma_nodma(host,
					"failed to set consistent dma mask");
	}

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		/* each port's BMDMA register block is 8 bytes within BAR4 */
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const * ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	/* BMDMA init downgrades to PIO internally on failure, so it
	 * cannot fail the prepare as a whole */
	ata_pci_bmdma_init(*r_host);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
array of port_info, must be enough for two ports3272* @sht: scsi_host_template to use when registering the host3273* @host_priv: host private_data3274* @hflags: host flags3275*3276* This function is similar to ata_pci_sff_init_one() but also3277* takes care of BMDMA initialization.3278*3279* LOCKING:3280* Inherited from PCI layer (may sleep).3281*3282* RETURNS:3283* Zero on success, negative on errno-based value on error.3284*/3285int ata_pci_bmdma_init_one(struct pci_dev *pdev,3286const struct ata_port_info * const * ppi,3287struct scsi_host_template *sht, void *host_priv,3288int hflags)3289{3290struct device *dev = &pdev->dev;3291const struct ata_port_info *pi;3292struct ata_host *host = NULL;3293int rc;32943295DPRINTK("ENTER\n");32963297pi = ata_sff_find_valid_pi(ppi);3298if (!pi) {3299dev_printk(KERN_ERR, &pdev->dev,3300"no valid port_info specified\n");3301return -EINVAL;3302}33033304if (!devres_open_group(dev, NULL, GFP_KERNEL))3305return -ENOMEM;33063307rc = pcim_enable_device(pdev);3308if (rc)3309goto out;33103311/* prepare and activate BMDMA host */3312rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);3313if (rc)3314goto out;3315host->private_data = host_priv;3316host->flags |= hflags;33173318pci_set_master(pdev);3319rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);3320out:3321if (rc == 0)3322devres_remove_group(&pdev->dev, NULL);3323else3324devres_release_group(&pdev->dev, NULL);33253326return rc;3327}3328EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);33293330#endif /* CONFIG_PCI */3331#endif /* CONFIG_ATA_BMDMA */33323333/**3334* ata_sff_port_init - Initialize SFF/BMDMA ATA port3335* @ap: Port to initialize3336*3337* Called on port allocation to initialize SFF/BMDMA specific3338* fields.3339*3340* LOCKING:3341* None.3342*/3343void ata_sff_port_init(struct ata_port *ap)3344{3345INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);3346ap->ctl = ATA_DEVCTL_OBS;3347ap->last_ctl = 0xFF;3348}33493350int __init ata_sff_init(void)3351{3352ata_sff_wq = 
alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);3353if (!ata_sff_wq)3354return -ENOMEM;33553356return 0;3357}33583359void ata_sff_exit(void)3360{3361destroy_workqueue(ata_sff_wq);3362}336333643365