// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-sff.c - helper library for PCI IDE BMDMA
 *
 * Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2006 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
#include <trace/events/libata.h>
#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.reset.prereset		= ata_sff_prereset,
	.reset.softreset	= ata_sff_softreset,
	.reset.hardreset	= sata_sff_hardreset,
	.reset.postreset	= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value. This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 * @status: pointer to a status value
 *
 * Reads ATA alternate status register for currently-selected device
 * and returns its value.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
	u8 tmp;

	if (ap->ops->sff_check_altstatus) {
		tmp = ap->ops->sff_check_altstatus(ap);
		goto read;
	}
	if (ap->ioaddr.altstatus_addr) {
		tmp = ioread8(ap->ioaddr.altstatus_addr);
		goto read;
	}
	return false;

read:
	if (status)
		*status = tmp;
	return true;
}

/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy. Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Non-ctl-capable devices don't
 * share interrupt lines, fortunately for us.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	/* Not us: We are busy */
	if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
		return status;
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	ata_sff_altstatus(ap, NULL);
}

/**
 * ata_sff_pause - Flush writes and wait 400nS
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);
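/*
 * Illustrative sketch (not part of this file's API): how a driver-side
 * poll loop can exploit the altstatus/status split described above.
 * Reads of the alternate status register do not clear INTRQ, so a
 * caller can poll BSY without racing the interrupt handler, then issue
 * exactly one real status read at the end to ack the interrupt. The
 * helper name below is hypothetical.
 */
#if 0	/* example only */
static u8 example_poll_bsy_then_ack(struct ata_port *ap)
{
	u8 status;

	/* Poll via altstatus so a pending INTRQ is not lost... */
	while (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
		cpu_relax();

	/* ...then one real status read clears the INTRQ latch. */
	return ap->ops->sff_check_status(ap);
}
#endif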
/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	/*
	 * An altstatus read will cause the needed delay without
	 * messing up the IRQ status
	 */
	if (ata_sff_altstatus(ap, NULL))
		return;
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA device control register.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl) {
		ap->ops->sff_set_devctl(ap, ctl);
		return true;
	}
	if (ap->ioaddr.ctl_addr) {
		iowrite8(ctl, ap->ioaddr.ctl_addr);
		return true;
	}

	return false;
}
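/*
 * Illustrative sketch (not part of this file): a controller whose
 * device control register is not mapped at ctl_addr can hook
 * ->sff_set_devctl instead, and ata_sff_set_devctl() above will prefer
 * the hook over ctl_addr. The register offset and private-data layout
 * here are invented for the example.
 */
#if 0	/* example only */
struct example_priv {
	void __iomem *regs;			/* hypothetical register block */
};

static void example_set_devctl(struct ata_port *ap, u8 ctl)
{
	struct example_priv *priv = ap->private_data;

	iowrite8(ctl, priv->regs + 0x1c);	/* hypothetical ctl offset */
}

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_set_devctl	= example_set_devctl,
};
#endif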
/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		iowrite8(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->status = ata_sff_check_status(ap);
	tf->error = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 * @tag: tag of the associated command
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf,
				  unsigned int tag)
{
	trace_ata_tf_load(ap, tf);
	ap->ops->sff_tf_load(ap, tf);
	trace_ata_exec_command(ap, tf, tag);
	ap->ops->sff_exec_command(ap, tf);
}
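/*
 * Illustrative sketch (not part of this file): what a command issue
 * looks like from the helpers' point of view. A 28-bit READ SECTOR(S)
 * taskfile is composed and handed to the same ->sff_tf_load /
 * ->sff_exec_command pair that ata_tf_to_host() uses. Values and the
 * function name are example-only; real commands go through
 * ata_qc_issue() and the HSM below.
 */
#if 0	/* example only */
static void example_issue_read_sector(struct ata_port *ap, u32 lba)
{
	struct ata_taskfile tf;

	ata_tf_init(&ap->link.device[0], &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA;
	tf.command = ATA_CMD_PIO_READ;		/* READ SECTOR(S) */
	tf.protocol = ATA_PROT_PIO;
	tf.nsect = 1;
	tf.lbal = lba & 0xff;
	tf.lbam = (lba >> 8) & 0xff;
	tf.lbah = (lba >> 16) & 0xff;
	tf.device = ATA_LBA | ((lba >> 24) & 0xf);	/* LBA27:24 */

	ap->ops->sff_tf_load(ap, &tf);		/* shadow registers */
	ap->ops->sff_exec_command(ap, &tf);	/* command reg starts it */
}
#endif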
/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(qc, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
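/*
 * Illustrative sketch (not part of this file): ata_sff_data_xfer32()
 * only takes the 32-bit path when ATA_PFLAG_PIO32 is set and otherwise
 * falls back to the 16-bit ata_sff_data_xfer(). A driver that knows its
 * data port is 32-bit capable would opt in roughly as below, modeled on
 * what PIO32-aware PATA drivers do; names are invented.
 */
#if 0	/* example only */
static struct ata_port_operations example32_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,
};

static void example_enable_pio32(struct ata_port *ap)
{
	/* allow 32-bit PIO and let the HDIO_SET_32BIT ioctl toggle it */
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
}
#endif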
static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
		unsigned int offset, size_t xfer_size)
{
	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	unsigned char *buf;

	buf = kmap_atomic(page);
	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
	kunmap_atomic(buf);

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
}

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command in progress
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset, count;

	if (!qc->cursg) {
		qc->curbytes = qc->nbytes;
		return;
	}
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);

	trace_ata_sff_pio_transfer_data(qc, offset, count);

	/*
	 * Split the transfer when it splits a page boundary. Note that the
	 * split still has to be dword aligned like all ATA data transfers.
	 */
	WARN_ON_ONCE(offset % 4);
	if (offset + count > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		ata_pio_xfer(qc, page, offset, split_len);
		ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
	} else {
		ata_pio_xfer(qc, page, offset, count);
	}

	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		if (!qc->cursg)
			ap->hsm_task_state = HSM_ST_LAST;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command in progress
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}
/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called. Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command in progress
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	trace_atapi_pio_transfer_data(qc, offset, count);

	/* do the actual data transfer */
	buf = kmap_atomic(page);
	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
	kunmap_atomic(buf);

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}
/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command in progress
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc in progress
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (in_wq) {
		/* EH might have kicked in while host lock is released. */
		qc = ata_qc_from_tag(ap, qc->tag);
		if (qc) {
			if (likely(!(qc->err_mask & AC_ERR_HSM))) {
				ata_sff_irq_on(ap);
				ata_qc_complete(qc);
			} else
				ata_port_freeze(ap);
		}
	} else {
		if (likely(!(qc->err_mask & AC_ERR_HSM)))
			ata_qc_complete(qc);
		else
			ata_port_freeze(ap);
	}
}
/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc in progress
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	trace_ata_sff_hsm_state(qc, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;
	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device. Mark hint.
					 */
					if (qc->dev->quirks &
					    ATA_QUIRK_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		trace_ata_sff_hsm_command_complete(qc, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
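/*
 * Quick reference for the state machine above (descriptive only):
 *
 *	HSM_ST_FIRST:	command issued; waiting to send the first data
 *			block (PIO write) or the CDB (ATAPI).
 *	HSM_ST:		data transfer in progress; one or more DRQ data
 *			blocks remain.
 *	HSM_ST_LAST:	all data transferred; waiting for the device to
 *			report final status.
 *	HSM_ST_ERR:	an error was detected; hand the qc off to EH.
 *	HSM_ST_IDLE:	no command in flight.
 */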
void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	trace_ata_sff_flush_pio_task(ap);

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We want to reset the HSM state to IDLE. If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised. For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}
/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF
 * controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest. otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and result TF needs to be filled. Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	trace_ata_sff_port_intr(qc, hsmv_on_idle);

	/* Check whether we are expecting an interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * in this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices. No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting an IRQ but the controller is actually
	 * asserting the IRQ line, a "nobody cared" IRQ splat will ensue.
	 * Check IRQ pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);

/**
 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
 * @ap: port that appears to have timed out
 *
 * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost. If it has, complete anything we can and
 * then return. Interface must support altstatus for this faster
 * recovery to occur.
 *
 * Locking:
 * Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status = 0;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
		return;
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 * ata_sff_freeze - Freeze SFF controller port
 * @ap: port to freeze
 *
 * Freeze SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation. Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion. Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 * ata_sff_thaw - Thaw SFF controller port
 * @ap: port to thaw
 *
 * Thaw SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);
/**
 * ata_sff_prereset - prepare SFF link for reset
 * @link: SFF link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * SFF link @link is about to be reset. Initialize it. It first
 * calls ata_std_prereset() and waits for !BSY if the port is
 * being softreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * Always 0.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	/* The standard prereset is best-effort and always returns 0 */
	ata_std_prereset(link, deadline);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_warn(link,
				      "device not ready (errno=%d), forcing hardreset\n",
				      rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * RETURN:
 * true if device is present, false if not.
 *
 * LOCKING:
 * caller.
 */
static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return true;	/* we found a device */

	return false;		/* nothing found */
}
/**
 * ata_sff_dev_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers. Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.error;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_port_classify(ap, &tf);
	switch (class) {
	case ATA_DEV_UNKNOWN:
		/*
		 * If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
		break;
	case ATA_DEV_ATA:
		if (ap->ops->sff_check_status(ap) == 0)
			class = ATA_DEV_NONE;
		break;
	}
	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

/**
 * ata_sff_wait_after_reset - wait for devices to become ready after reset
 * @link: SFF link which is just reset
 * @devmask: mask of present devices
 * @deadline: deadline jiffies for the operation
 *
 * Wait for devices attached to SFF @link to become ready after
 * reset. It includes a preceding 150ms wait to avoid accessing the
 * TF status register too early.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -ENODEV if some or all of devices in @devmask
 * don't seem to exist. -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			ata_msleep(ap, 50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ioaddr.ctl_addr) {
		/* software reset. causes dev0 to be selected */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
		ap->last_ctl = ap->ctl;
	}

	/* wait for the port to become ready */
	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}

/**
 * ata_sff_softreset - reset host port via ATA SRST
 * @link: ATA link to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation
 *
 * Reset host port using ATA SRST.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(ap, 0);

	/* issue bus reset */
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&link->device[1],
						  devmask & (1 << 1), &err);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);

/**
 * sata_sff_hardreset - reset host port via SATA phy reset
 * @link: link to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register,
 * wait for !BSY and classify the attached device.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned int *timing = sata_ehc_deb_timing(ehc);
	bool online;
	int rc;

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);
	if (online)
		*class = ata_sff_dev_classify(link->device, 1, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
/**
 * ata_sff_postreset - SFF postreset callback
 * @link: the target SFF ata_link
 * @classes: classes of attached devices
 *
 * This function is invoked after a successful reset. It first
 * calls ata_std_postreset() and performs SFF-specific postreset
 * processing.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;

	ata_std_postreset(link, classes);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->sff_dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
		return;

	/* set up device control */
	if (ata_sff_set_devctl(ap, ap->ctl))
		ap->last_ctl = ap->ctl;
}
EXPORT_SYMBOL_GPL(ata_sff_postreset);

/**
 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
 * @qc: command
 *
 * Drain the FIFO and device of any stuck data following a command
 * failing to complete. In some cases this is necessary before a
 * reset will recover the device.
 *
 */

void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	/* Drain up to 64K of data before we give up this recovery method */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
						&& count < 65536; count += 2)
		ioread16(ap->ioaddr.data_addr);

	if (count)
		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);

}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
/**
 * ata_sff_error_handler - Stock error handler for SFF controller
 * @ap: port to handle error for
 *
 * Stock error handler for SFF controller. It can handle both
 * PATA and SATA controllers. Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_sff_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_EH))
		qc = NULL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * We *MUST* do FIFO draining before we issue a reset as
	 * several devices helpfully clear their internal state and
	 * will lock solid if we touch the data port post reset. Pass
	 * qc in case anyone wants to do different PIO/DMA recovery or
	 * has per-command fixups.
	 */
	if (ap->ops->sff_drain_fifo)
		ap->ops->sff_drain_fifo(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);

/**
 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
EXPORT_SYMBOL_GPL(ata_sff_std_ports);
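/*
 * Illustrative sketch (not part of this file): typical use of
 * ata_sff_std_ports() in a driver's probe path. The function name and
 * base addresses are invented; a real driver obtains the mappings from
 * its bus (see pata_platform, or the PCI init helpers below).
 */
#if 0	/* example only */
static void example_init_ioaddr(struct ata_port *ap,
				void __iomem *cmd_base,
				void __iomem *ctl_base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = cmd_base;
	ioaddr->altstatus_addr = ctl_base;
	ioaddr->ctl_addr = ctl_base;

	/* fill data/error/.../command from cmd_addr + standard offsets */
	ata_sff_std_ports(ioaddr);
}
#endif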
#ifdef CONFIG_PCI

static bool ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port *= 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return false;
	}
	return true;
}

/**
 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
 * @host: target ATA host
 *
 * Acquire native PCI ATA resources for @host and initialize the
 * first two ports of @host accordingly. Ports marked dummy are
 * skipped and allocation failure makes the port dummy.
 *
 * Note that native PCI resources are valid even for legacy hosts
 * as we fix up pdev resources array early in boot, so this
 * function can be used for both native and legacy SFF hosts.
 *
 * LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 * RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports. Some controllers show
		 * their unused channels this way. Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_warn(gdev,
				 "failed to request/iomap BARs for port %d (errno=%d)\n",
				 i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_sff_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_err(gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);

/**
 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
 * all PCI resources and initialize it accordingly in one go.
 *
 * LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 * RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_prepare_host(struct pci_dev *pdev,
			     const struct ata_port_info * const *ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_sff_init_host(host);
	if (rc)
		goto err_out;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
/**
 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
 * @host: target SFF ATA host
 * @irq_handler: irq_handler used when requesting IRQ(s)
 * @sht: scsi_host_template to use when registering the host
 *
 * This is the counterpart of ata_host_activate() for SFF ATA
 * hosts. This separate helper is necessary because SFF hosts
 * use two separate interrupts in legacy mode.
 *
 * LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 * RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_sff_activate_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      const struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask = 0;

		/*
		 * ATA spec says we should use legacy mode when one
		 * port is in legacy mode, but disabled ports on some
		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700
		 * on which the secondary port is not wired, so
		 * ignore ports that are marked as 'dummy' during
		 * this check.
		 */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		if (!ata_port_is_dummy(host->ports[0]))
			mask |= (1 << 0);
		if (!ata_port_is_dummy(host->ports[1]))
			mask |= (1 << 2);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		int i;

		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc_misc(host->ports[i], pdev->irq);
		}
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[0],
					   ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[1],
					   ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
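/*
 * Illustrative sketch only: the split prepare/activate helpers leave
 * room for controller-specific setup between the two steps.
 * foo_init_one(), foo_port_info, foo_chip_setup() and foo_sht are
 * hypothetical driver names.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *		if (rc)
 *			return rc;
 *		foo_chip_setup(host);
 *		return ata_pci_sff_activate_host(host, ata_sff_interrupt,
 *						 &foo_sht);
 *	}
 */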
static const struct ata_port_info *ata_sff_find_valid_pi(
					const struct ata_port_info * const *ppi)
{
	int i;

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++)
		if (ppi[i]->port_ops != &ata_dummy_port_ops)
			return ppi[i];

	return NULL;
}

static int ata_pci_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		const struct scsi_host_template *sht, void *host_priv,
		int hflags, bool bmdma)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi;
	struct ata_host *host = NULL;
	int rc;

	pi = ata_sff_find_valid_pi(ppi);
	if (!pi) {
		dev_err(&pdev->dev, "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma)
		/* prepare and activate BMDMA host */
		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	else
#endif
		/* prepare and activate SFF host */
		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		goto out;
	host->private_data = host_priv;
	host->flags |= hflags;

#ifdef CONFIG_ATA_BMDMA
	if (bmdma) {
		pci_set_master(pdev);
		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
	} else
#endif
		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}

/**
 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflag: host flags
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers and is PIO only.
 *
 * ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 * LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 *	Zero on success, negative errno value on error.
 */
int ata_pci_sff_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const *ppi,
		const struct scsi_host_template *sht, void *host_priv, int hflag)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
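/*
 * Illustrative sketch only: for hardware with no special requirements
 * a driver's whole probe function collapses to a single call.
 * foo_init_one(), foo_port_ops and foo_sht are hypothetical names.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &foo_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */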
#endif /* CONFIG_PCI */

/*
 * BMDMA support
 */

#ifdef CONFIG_ATA_BMDMA

const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,

	.qc_prep		= ata_bmdma_qc_prep,
	.qc_issue		= ata_bmdma_qc_issue,

	.sff_irq_clear		= ata_bmdma_irq_clear,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,

	.port_start		= ata_bmdma_port_start,
};
EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);

const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.sff_data_xfer		= ata_sff_data_xfer32,
	.port_start		= ata_bmdma_port_start32,
};
EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);

/**
 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[pi].addr = cpu_to_le32(addr);
			prd[pi].flags_len = cpu_to_le32(len & 0xffff);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command. Perform the fill
 * so that we never emit a zero-length (i.e. 64K) record, for
 * controllers that don't follow the spec.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				 * cope with 0x0000 meaning 64K as the spec
				 * says.
				 */
				prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			prd[pi].flags_len = cpu_to_le32(blen);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 * ata_bmdma_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);

/**
 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	ata_bmdma_fill_sg_dumb(qc);

	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
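/*
 * Worked example for the two fill variants above: a single 12KiB
 * segment at bus address 0xf000 crosses a 64K boundary, so
 * ata_bmdma_fill_sg() emits two PRD entries:
 *
 *	addr 0x0000f000, len 0x1000	(up to the 64K boundary)
 *	addr 0x00010000, len 0x2000	(remainder)
 *
 * A full 64K segment would have to be encoded with length 0x0000;
 * since parts like the CS5530 mishandle that encoding,
 * ata_bmdma_fill_sg_dumb() emits two 32K entries instead.
 */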
/**
 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
 * @qc: command to issue to device
 *
 * This function issues a PIO, NODATA or DMA command to a
 * SFF/BMDMA controller. PIO and NODATA are handled by
 * ata_sff_qc_issue().
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 * RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);

/**
 * ata_bmdma_port_intr - Handle BMDMA port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 * RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 host_stat = 0;
	bool bmdma_stopped = false;
	unsigned int handled;

	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		trace_ata_bmdma_status(ap, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			return ata_sff_idle_irq(ap);

		/* before we do anything else, clear DMA-Start bit */
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);
		bmdma_stopped = true;

		if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
			qc->err_mask |= AC_ERR_HOST_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
		}
	}

	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return handled;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
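/*
 * Illustrative sketch only: a driver whose controller has its own
 * top-level IRQ status can decode it first and hand the per-port work
 * to ata_bmdma_port_intr() under the host lock, instead of using the
 * default handler below. foo_interrupt() is a hypothetical handler
 * for a single-port host.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_instance)
 *	{
 *		struct ata_host *host = dev_instance;
 *		struct ata_port *ap = host->ports[0];
 *		struct ata_queued_cmd *qc;
 *		unsigned int handled = 0;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&host->lock, flags);
 *		qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
 *			handled = ata_bmdma_port_intr(ap, qc);
 *		spin_unlock_irqrestore(&host->lock, flags);
 *
 *		return IRQ_RETVAL(handled);
 *	}
 */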
/**
 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_bmdma_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 *	Obtains host lock during operation.
 *
 * RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
}
EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller. It can handle both
 * PATA and SATA controllers. Most BMDMA controllers should be
 * able to use this EH as-is or with some added handling before
 * and after.
 *
 * LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	bool thaw = false;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_EH))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	if (qc && ata_is_dma(qc->tf.protocol)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);
		trace_ata_bmdma_status(ap, host_stat);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out. As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = true;
		}

		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);

		/* if we're gonna thaw, make sure IRQ is clear */
		if (thaw) {
			ap->ops->sff_check_status(ap);
			if (ap->ops->sff_irq_clear)
				ap->ops->sff_irq_clear(ap);
		}
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	ata_sff_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
 * @qc: internal command to clean up
 *
 * LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
	}
}
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
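/*
 * Illustrative sketch only: per the comments above, controllers
 * needing extra work around error handling can wrap the stock
 * handler rather than replace it. foo_error_handler() and
 * foo_chip_quiesce() are hypothetical; the wrapper would be wired
 * up as .error_handler in the driver's port ops.
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		foo_chip_quiesce(ap);
 *		ata_bmdma_error_handler(ap);
 *	}
 */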
/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
EXPORT_SYMBOL_GPL(ata_bmdma_setup);

/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write. However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control. So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so I think it is best not to add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO.
	 */
}
EXPORT_SYMBOL_GPL(ata_bmdma_start);

/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register.
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_stop);

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ata_bmdma_status);
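/*
 * For reference, the helpers above decode the standard SFF-8038i
 * BMDMA register block at ->bmdma_addr (the secondary channel's
 * block sits at the same offsets plus 8):
 *
 *	+0	ATA_DMA_CMD		r/w direction, ATA_DMA_START
 *	+2	ATA_DMA_STATUS		ATA_DMA_INTR, ATA_DMA_ERR,
 *					simplex bit (0x80)
 *	+4	ATA_DMA_TABLE_OFS	32-bit PRD table bus address
 */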
/**
 * ata_bmdma_port_start - Set port up for bmdma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized. Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start(struct ata_port *ap)
{
	if (ap->mwdma_mask || ap->udma_mask) {
		ap->bmdma_prd =
			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
					    &ap->bmdma_prd_dma, GFP_KERNEL);
		if (!ap->bmdma_prd)
			return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start);

/**
 * ata_bmdma_port_start32 - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized. Enables 32bit PIO and allocates space for PRD
 * table.
 *
 * May be used as the port_start() entry in ata_port_operations for
 * devices that are capable of 32bit PIO.
 *
 * LOCKING:
 *	Inherited from caller.
 */
int ata_bmdma_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return ata_bmdma_port_start(ap);
}
EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);

#ifdef CONFIG_PCI

/**
 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non simplex mode. This implements the necessary logic to
 * perform the task on such devices. Calling it on other devices will
 * have -undefined- behaviour.
 */
int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
{
#ifdef CONFIG_HAS_IOPORT
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
#else
	return -ENOENT;
#endif /* CONFIG_HAS_IOPORT */
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
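/*
 * Illustrative sketch only: a hypothetical probe routine for a chip
 * known to report simplex operation erroneously might do
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_warn(&pdev->dev, "simplex latch stuck, DMA will be shared\n");
 *
 * before handing the device over to ata_pci_bmdma_init_one().
 */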
static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
	int i;

	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);

	for (i = 0; i < 2; i++) {
		host->ports[i]->mwdma_mask = 0;
		host->ports[i]->udma_mask = 0;
	}
}

/**
 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
 * @host: target ATA host
 *
 * Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 * LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_pci_bmdma_init(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0) {
		ata_bmdma_nodma(host, "BAR4 is zero");
		return;
	}

	/*
	 * Some controllers require BMDMA region to be initialized
	 * even if DMA is not in use to clear IRQ status via
	 * ->sff_irq_clear method. Try to initialize bmdma_addr
	 * regardless of dma masks.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
		return;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);

/**
 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
 * resources and initialize it accordingly in one go.
 *
 * LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 * RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
			       const struct ata_port_info * const *ppi,
			       struct ata_host **r_host)
{
	int rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
	if (rc)
		return rc;

	ata_pci_bmdma_init(*r_host);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);

/**
 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 * @sht: scsi_host_template to use when registering the host
 * @host_priv: host private_data
 * @hflags: host flags
 *
 * This function is similar to ata_pci_sff_init_one() but also
 * takes care of BMDMA initialization.
 *
 * LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 *	Zero on success, negative errno value on error.
 */
int ata_pci_bmdma_init_one(struct pci_dev *pdev,
			   const struct ata_port_info * const *ppi,
			   const struct scsi_host_template *sht, void *host_priv,
			   int hflags)
{
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);

#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */

/**
 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
 * @ap: Port to initialize
 *
 * Called on port allocation to initialize SFF/BMDMA specific
 * fields.
 *
 * LOCKING:
 *	None.
 */
void ata_sff_port_init(struct ata_port *ap)
{
	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
	ap->ctl = ATA_DEVCTL_OBS;
	ap->last_ctl = 0xFF;
}

int __init ata_sff_init(void)
{
	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
	if (!ata_sff_wq)
		return -ENOMEM;

	return 0;
}

void ata_sff_exit(void)
{
	destroy_workqueue(ata_sff_wq);
}