Path: drivers/infiniband/hw/ipath/ipath_intr.c
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"


/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
 */
void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
{
	u32 piobcnt;
	unsigned long sbuf[4];
	/*
	 * it's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling
	 */
	piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	/* read these before writing errorclear */
	sbuf[0] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror);
	sbuf[1] = ipath_read_kreg64(
		dd, dd->ipath_kregs->kr_sendbuffererror + 1);
	if (piobcnt > 128)
		sbuf[2] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 2);
	if (piobcnt > 192)
		sbuf[3] = ipath_read_kreg64(
			dd, dd->ipath_kregs->kr_sendbuffererror + 3);
	else
		sbuf[3] = 0;

	if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
		int i;
		if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
			dd->ipath_lastcancel > jiffies) {
			__IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
					  "SendbufErrs %lx %lx", sbuf[0],
					  sbuf[1]);
			if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
				printk(" %lx %lx ", sbuf[2], sbuf[3]);
			printk("\n");
		}

		for (i = 0; i < piobcnt; i++)
			if (test_bit(i, sbuf))
				ipath_disarm_piobufs(dd, i, 1);
		/* ignore armlaunch errs for a bit */
		dd->ipath_lastcancel = jiffies + 3;
	}
}
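
/*
 * A note on the sbuf[] bitmap above: each kr_sendbuffererror register
 * is 64 bits wide, one bit per PIO buffer, so sbuf[0] and sbuf[1]
 * cover buffers 0-127, and sbuf[2]/sbuf[3] are only read when more
 * than 128 (or 192) buffers are configured.  For example, with
 * ipath_piobcnt2k == 128 and ipath_piobcnt4k == 32, piobcnt is 160 and
 * buffer 133 corresponds to bit 5 of sbuf[2] (assuming the usual
 * 64-bit kernel, where unsigned long matches the register width).
 */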

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
	 INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
	 INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
	 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
	 INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
	 INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_INVALIDADDR)

/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
 * and we don't ignore errors that are unrelated to freeze and
 * cancelling buffers.  Armlaunch can't be ignored because more of them
 * can arrive while we are still cleaning up, and those need to be
 * cancelled as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
	 INFINIPATH_E_SPKTLEN)

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of the sender having the
 * link change state mid-packet, so we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
	(INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
	 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
	 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
	 INFINIPATH_E_RUNEXPCHAR)

static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
	u64 ignore_this_time = 0;

	ipath_disarm_senderrbufs(dd);
	if ((errs & E_SUM_LINK_PKTERRS) &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	return ignore_this_time;
}
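
/*
 * The bits returned above are consumed by handle_errors(), which
 * clears them from errs (errs &= ~ignore_this_time) right after
 * writing kr_errorclear, so these transient link-change errors are
 * neither printed nor allowed to update ipath_lasterror.
 */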

/* generic hw error messages... */
#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
	{ \
		.mask = (INFINIPATH_HWE_TXEMEMPARITYERR_##a << \
			 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT), \
		.msg = "TXE " #a " Memory Parity" \
	}
#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
	{ \
		.mask = (INFINIPATH_HWE_RXEMEMPARITYERR_##a << \
			 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT), \
		.msg = "RXE " #a " Memory Parity" \
	}

static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
	INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
	INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),

	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
	INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),

	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
	INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
};

/**
 * ipath_format_hwmsg - format a single hwerror message
 * @msg: message buffer
 * @msgl: length of message buffer
 * @hwmsg: message to add to message buffer
 */
static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
	strlcat(msg, "[", msgl);
	strlcat(msg, hwmsg, msgl);
	strlcat(msg, "]", msgl);
}

/**
 * ipath_format_hwerrors - format hardware error messages for display
 * @hwerrs: hardware errors bit vector
 * @hwerrmsgs: hardware error descriptions
 * @nhwerrmsgs: number of hwerrmsgs
 * @msg: message buffer
 * @msgl: message buffer length
 */
void ipath_format_hwerrors(u64 hwerrs,
			   const struct ipath_hwerror_msgs *hwerrmsgs,
			   size_t nhwerrmsgs,
			   char *msg, size_t msgl)
{
	int i;
	const int glen = ARRAY_SIZE(ipath_generic_hwerror_msgs);

	for (i = 0; i < glen; i++) {
		if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
			ipath_format_hwmsg(msg, msgl,
					   ipath_generic_hwerror_msgs[i].msg);
		}
	}

	for (i = 0; i < nhwerrmsgs; i++) {
		if (hwerrs & hwerrmsgs[i].mask) {
			ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
		}
	}
}
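
/*
 * A typical caller (sketch only; the chip-specific handlers add their
 * own context) zeroes the buffer first, since ipath_format_hwmsg()
 * appends with strlcat().  Given a chip-specific table tbl[] of
 * tblsize entries:
 *
 *	char msg[512];
 *
 *	*msg = '\0';
 *	ipath_format_hwerrors(hwerrs, tbl, tblsize, msg, sizeof msg);
 *
 * yields something like "[TXE PIOBUF Memory Parity][IPATH2IB Parity]".
 */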

/* return the strings for the most common link states */
static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
{
	char *ret;
	u32 state;

	state = ipath_ib_state(dd, ibcs);
	if (state == dd->ib_init)
		ret = "Init";
	else if (state == dd->ib_arm)
		ret = "Arm";
	else if (state == dd->ib_active)
		ret = "Active";
	else
		ret = "Down";
	return ret;
}

void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
{
	struct ib_event event;

	event.device = &dd->verbs_dev->ibdev;
	event.element.port_num = 1;
	event.event = ev;
	ib_dispatch_event(&event);
}

static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
				     ipath_err_t errs)
{
	u32 ltstate, lstate, ibstate, lastlstate;
	u32 init = dd->ib_init;
	u32 arm = dd->ib_arm;
	u32 active = dd->ib_active;
	const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);

	lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
	ibstate = ipath_ib_state(dd, ibcs);
	/* linkstate at last interrupt */
	lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingstate */

	/*
	 * Since going into a recovery state causes the link state to go
	 * down, and since recovery is transitory, it is better if we "miss"
	 * ever seeing the link training state go into recovery (i.e.,
	 * ignore this transition for link state special handling purposes)
	 * without even updating ipath_lastibcstat.
	 */
	if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
	    (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
		goto done;

	/*
	 * if linkstate transitions into INIT from any of the various down
	 * states, or if it transitions from any of the up (INIT or better)
	 * states into any of the down states (except link recovery), then
	 * call the chip-specific code to take appropriate actions.
	 */
	if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
	    lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
		/* transitioned to UP */
		if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
			/* link came up, so we must no longer be disabled */
			dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
			ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
			goto skip_ibchange; /* chip-code handled */
		}
	} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
		   (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
		   ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
		   ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
		int handled;
		handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
		dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
		if (handled) {
			ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
			goto skip_ibchange; /* chip-code handled */
		}
	}

	/*
	 * Significant enough to always print and get into logs, if it was
	 * unexpected.  If it was a requested state change, we'll have
	 * already cleared the flags, so we won't print this warning
	 */
	if ((ibstate != arm && ibstate != active) &&
	    (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
		dev_info(&dd->pcidev->dev, "Link state changed from %s "
			 "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
			 "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
	}
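
	/*
	 * A note on the Polling debounce below: each ibstatuschange
	 * interrupt taken while training is still in Polling.Active or
	 * Polling.Quiet bumps ipath_ibpollcnt; after 40 consecutive
	 * polling samples (with autoneg idle) we conclude nothing is
	 * connected and latch NOCABLE.  The counter is reset as soon as
	 * we leave the Polling states, and NOCABLE is cleared again
	 * once the link reaches INIT or better.
	 */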
	if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
	    ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
		u32 lastlts;
		lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
		/*
		 * Ignore cycling back and forth from Polling.Active to
		 * Polling.Quiet while waiting for the other end of the link
		 * to come up, except to try and decide if we are connected
		 * to a live IB device or not.  We will cycle back and
		 * forth between them if no cable is plugged in, the other
		 * device is powered off or disabled, etc.
		 */
		if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
		    lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
			if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
			    (++dd->ipath_ibpollcnt == 40)) {
				dd->ipath_flags |= IPATH_NOCABLE;
				*dd->ipath_statusp |=
					IPATH_STATUS_IB_NOCABLE;
				ipath_cdbg(LINKVERB, "Set NOCABLE\n");
			}
			ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
				   ipath_ibcstatus_str[ltstate], ibstate);
			goto skip_ibchange;
		}
	}

	dd->ipath_ibpollcnt = 0; /* no longer in a Polling.* state */
	ipath_stats.sps_iblink++;

	if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
		u64 linkrecov;
		linkrecov = ipath_snap_cntr(dd,
			dd->ipath_cregs->cr_iblinkerrrecovcnt);
		if (linkrecov != dd->ipath_lastlinkrecov) {
			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
				  (unsigned long long) ibcs,
				  ib_linkstate(dd, ibcs),
				  ipath_ibcstatus_str[ltstate],
				  (unsigned long long) linkrecov);
			/* and no more until active again */
			dd->ipath_lastlinkrecov = 0;
			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
			goto skip_ibchange;
		}
	}

	if (ibstate == init || ibstate == arm || ibstate == active) {
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
		if (ibstate == init || ibstate == arm) {
			*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
			if (dd->ipath_flags & IPATH_LINKACTIVE)
				signal_ib_event(dd, IB_EVENT_PORT_ERR);
		}
		if (ibstate == arm) {
			dd->ipath_flags |= IPATH_LINKARMED;
			dd->ipath_flags &= ~(IPATH_LINKUNK |
				IPATH_LINKINIT | IPATH_LINKDOWN |
				IPATH_LINKACTIVE | IPATH_NOCABLE);
			ipath_hol_down(dd);
		} else if (ibstate == init) {
			/*
			 * set INIT and DOWN.  Down is checked by
			 * most of the other code, but INIT is
			 * useful to know in a few places.
			 */
			dd->ipath_flags |= IPATH_LINKINIT |
				IPATH_LINKDOWN;
			dd->ipath_flags &= ~(IPATH_LINKUNK |
				IPATH_LINKARMED | IPATH_LINKACTIVE |
				IPATH_NOCABLE);
			ipath_hol_down(dd);
		} else { /* active */
			dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
				dd->ipath_cregs->cr_iblinkerrrecovcnt);
			*dd->ipath_statusp |=
				IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
			dd->ipath_flags |= IPATH_LINKACTIVE;
			dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				| IPATH_LINKDOWN | IPATH_LINKARMED |
				IPATH_NOCABLE);
			if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
				ipath_restart_sdma(dd);
			signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
			/* LED active not handled in chip _f_updown */
			dd->ipath_f_setextled(dd, lstate, ltstate);
			ipath_hol_up(dd);
		}

		/*
		 * print after we've already done the work, so as not to
		 * delay the state changes and notifications, for debugging
		 */
		if (lstate == lastlstate)
			ipath_cdbg(LINKVERB, "Unchanged from last: %s "
				   "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
		else
			ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
				   dd->ipath_unit, ib_linkstate(dd, ibcs),
				   ipath_ibcstatus_str[ltstate], ibstate);
	} else { /* down */
		if (dd->ipath_flags & IPATH_LINKACTIVE)
			signal_ib_event(dd, IB_EVENT_PORT_ERR);
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKACTIVE |
				     IPATH_LINKARMED);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
		dd->ipath_lli_counter = 0;

		if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
			ipath_cdbg(VERBOSE, "Unit %u link state down "
				   "(state 0x%x), from %s\n",
				   dd->ipath_unit, lstate,
				   ib_linkstate(dd, dd->ipath_lastibcstat));
		else
			ipath_cdbg(LINKVERB, "Unit %u link state changed "
				   "to %s (0x%x) from down (%x)\n",
				   dd->ipath_unit,
				   ipath_ibcstatus_str[ltstate],
				   ibstate, lastlstate);
	}

skip_ibchange:
	dd->ipath_lastibcstat = ibcs;
done:
	return;
}

static void handle_supp_msgs(struct ipath_devdata *dd,
			     unsigned supp_msgs, char *msg, u32 msgsz)
{
	/*
	 * Print the message unless it's ibc status change only, which
	 * happens so often we never want to count it.
	 */
	if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
		int iserr;
		ipath_err_t mask;
		iserr = ipath_decode_err(dd, msg, msgsz,
					 dd->ipath_lasterror &
					 ~INFINIPATH_E_IBSTATUSCHANGED);

		mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
			INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;

		/* if we're in debug, then don't mask SDMADISABLED msgs */
		if (ipath_debug & __IPATH_DBG)
			mask &= ~INFINIPATH_E_SDMADISABLED;

		if (dd->ipath_lasterror & ~mask)
			ipath_dev_err(dd, "Suppressed %u messages for "
				      "fast-repeating errors (%s) (%llx)\n",
				      supp_msgs, msg,
				      (unsigned long long)
				      dd->ipath_lasterror);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			if (iserr)
				ipath_dbg("Suppressed %u messages for %s\n",
					  supp_msgs, msg);
			else
				ipath_cdbg(ERRPKT,
					   "Suppressed %u messages for %s\n",
					   supp_msgs, msg);
		}
	}
}
static unsigned handle_frequent_errors(struct ipath_devdata *dd,
				       ipath_err_t errs, char *msg,
				       u32 msgsz, int *noprint)
{
	unsigned long nc;
	static unsigned long nextmsg_time;
	static unsigned nmsgs, supp_msgs;

	/*
	 * Throttle back "fast" messages to no more than 10 per 5 seconds.
	 * This isn't perfect, but it's a reasonable heuristic.  If we get
	 * more than 10, give a 6x longer delay.
	 */
	nc = jiffies;
	if (nmsgs > 10) {
		if (time_before(nc, nextmsg_time)) {
			*noprint = 1;
			if (!supp_msgs++)
				nextmsg_time = nc + HZ * 3;
		} else if (supp_msgs) {
			handle_supp_msgs(dd, supp_msgs, msg, msgsz);
			supp_msgs = 0;
			nmsgs = 0;
		}
	} else if (!nmsgs++ || time_after(nc, nextmsg_time))
		nextmsg_time = nc + HZ / 2;

	return supp_msgs;
}
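
/*
 * A worked example of the throttle above, assuming HZ=1000: the first
 * message arms a half-second window (nextmsg_time = nc + HZ / 2).
 * Once more than 10 messages have been seen, a message arriving inside
 * the window is only counted (*noprint is set), and the first such
 * suppressed message stretches the window to three seconds (HZ * 3,
 * the "6x longer delay").  The first message to arrive after the
 * window expires triggers the one-line summary via handle_supp_msgs()
 * and resets both counters.
 */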

static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	unsigned long flags;
	int expected;

	if (ipath_debug & __IPATH_DBG) {
		char msg[128];
		ipath_decode_err(dd, msg, sizeof msg, errs &
			INFINIPATH_E_SDMAERRS);
		ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		unsigned long tl, hd, status, lengen;
		tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
		hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
		status = ipath_read_kreg64(dd,
			dd->ipath_kregs->kr_senddmastatus);
		lengen = ipath_read_kreg64(dd,
			dd->ipath_kregs->kr_senddmalengen);
		ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
			   "lengen 0x%lx\n", tl, hd, status, lengen);
	}

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
	expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	if (!expected)
		ipath_cancel_sends(dd, 1);
}

static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
{
	unsigned long flags;
	int expected;

	if ((istat & INFINIPATH_I_SDMAINT) &&
	    !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
		ipath_sdma_intr(dd);

	if (istat & INFINIPATH_I_SDMADISABLED) {
		expected = test_bit(IPATH_SDMA_ABORTING,
				    &dd->ipath_sdma_status);
		ipath_dbg("%s SDmaDisabled intr\n",
			  expected ? "expected" : "unexpected");
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (!expected)
			ipath_cancel_sends(dd, 1);
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
	}
}

static int handle_hdrq_full(struct ipath_devdata *dd)
{
	int chkerrpkts = 0;
	u32 hd, tl;
	u32 i;

	ipath_stats.sps_hdrqfull++;
	for (i = 0; i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		if (i == 0) {
			/*
			 * For kernel receive queues, we just want to know
			 * if there are packets in the queue that we can
			 * process.
			 */
			if (pd->port_head != ipath_get_hdrqtail(pd))
				chkerrpkts |= 1 << i;
			continue;
		}

		/* Skip if user context is not open */
		if (!pd || !pd->port_cnt)
			continue;

		/* Don't report the same point multiple times. */
		if (dd->ipath_flags & IPATH_NODMA_RTAIL)
			tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
		else
			tl = ipath_get_rcvhdrtail(pd);
		if (tl == pd->port_lastrcvhdrqtail)
			continue;

		hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
		if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
			pd->port_lastrcvhdrqtail = tl;
			pd->port_hdrqfull++;
			/* flush hdrqfull so that poll() sees it */
			wmb();
			wake_up_interruptible(&pd->port_wait);
		}
	}

	return chkerrpkts;
}
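
/*
 * The full-queue test above treats a port's receive header queue as
 * full when the head and tail indices are exactly one entry apart,
 * including the wrap case (head back at 0 with tail at
 * ipath_hdrqlast); only then is port_hdrqfull bumped and the waiter
 * woken.  An unchanged tail (tl == port_lastrcvhdrqtail) was already
 * reported, so it is skipped to avoid duplicate wakeups.
 */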

static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
	char msg[128];
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int chkerrpkts = 0, noprint = 0;
	unsigned supp_msgs;
	int log_idx;

	/*
	 * don't report errors that are masked, either at init
	 * (not set in ipath_errormask), or temporarily (set in
	 * ipath_maskederrs)
	 */
	errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;

	supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
					   &noprint);

	/* do these first, they are most important */
	if (errs & INFINIPATH_E_HARDWARE) {
		/* reuse same msg buf */
		dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
	} else {
		u64 mask;
		for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
			mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
			if (errs & mask)
				ipath_inc_eeprom_err(dd, log_idx, 1);
		}
	}

	if (errs & INFINIPATH_E_SDMAERRS)
		handle_sdma_errors(dd, errs);

	if (!noprint && (errs & ~dd->ipath_e_bitsextant))
		ipath_dev_err(dd, "error interrupt with unknown errors "
			      "%llx set\n", (unsigned long long)
			      (errs & ~dd->ipath_e_bitsextant));

	if (errs & E_SUM_ERRS)
		ignore_this_time = handle_e_sum_errs(dd, errs);
	else if ((errs & E_SUM_LINK_PKTERRS) &&
		 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ipath_dbg("Ignoring packet errors %llx, because link not "
			  "ACTIVE\n", (unsigned long long) errs);
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	if (supp_msgs == 250000) {
		int s_iserr;
		/*
		 * It's not entirely reasonable assuming that the errors set
		 * in the last clear period are all responsible for the
		 * problem, but the alternative is to assume it's the only
		 * ones on this particular interrupt, which also isn't great
		 */
		dd->ipath_maskederrs |= dd->ipath_lasterror | errs;

		dd->ipath_errormask &= ~dd->ipath_maskederrs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
				 dd->ipath_errormask);
		s_iserr = ipath_decode_err(dd, msg, sizeof msg,
					   dd->ipath_maskederrs);

		if (dd->ipath_maskederrs &
		    ~(INFINIPATH_E_RRCVEGRFULL |
		      INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
			ipath_dev_err(dd, "Temporarily disabling "
				"error(s) %llx reporting; too frequent (%s)\n",
				(unsigned long long) dd->ipath_maskederrs,
				msg);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal",
			 * for some types of processes (mostly benchmarks)
			 * that send huge numbers of messages, while not
			 * processing them.  So only complain about
			 * these at debug level.
			 */
			if (s_iserr)
				ipath_dbg("Temporarily disabling reporting "
					"too frequent queue full errors (%s)\n",
					msg);
			else
				ipath_cdbg(ERRPKT,
					"Temporarily disabling reporting too"
					" frequent packet errors (%s)\n",
					msg);
		}

		/*
		 * Re-enable the masked errors after around 3 minutes, in
		 * ipath_get_faststats().  If we have a series of fast-
		 * repeating but different errors, the interval will keep
		 * stretching out; that's OK, since by then the situation
		 * is pretty catastrophic anyway.
		 */
		dd->ipath_unmasktime = jiffies + HZ * 180;
	}

	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
	if (ignore_this_time)
		errs &= ~ignore_this_time;
	if (errs & ~dd->ipath_lasterror) {
		errs &= ~dd->ipath_lasterror;
		/* never suppress duplicate hwerrors or ibstatuschange */
		dd->ipath_lasterror |= errs &
			~(INFINIPATH_E_HARDWARE |
			  INFINIPATH_E_IBSTATUSCHANGED);
	}

	if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
		dd->ipath_spectriggerhit++;
		ipath_dbg("%lu special trigger hits\n",
			  dd->ipath_spectriggerhit);
	}

	/* likely due to cancel; so suppress message unless verbose */
	if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
	    dd->ipath_lastcancel > jiffies) {
		/* armlaunch takes precedence; it often causes both. */
		ipath_cdbg(VERBOSE,
			   "Suppressed %s error (%llx) after sendbuf cancel\n",
			   (errs & INFINIPATH_E_SPIOARMLAUNCH) ?
			   "armlaunch" : "sendpktlen",
			   (unsigned long long)errs);
		errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
	}

	if (!errs)
		return 0;

	if (!noprint) {
		ipath_err_t mask;
		/*
		 * The ones we mask off are handled specially below
		 * or above.  Also mask SDMADISABLED by default as it
		 * is too chatty.
		 */
		mask = INFINIPATH_E_IBSTATUSCHANGED |
			INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
			INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;

		/* if we're in debug, then don't mask SDMADISABLED msgs */
		if (ipath_debug & __IPATH_DBG)
			mask &= ~INFINIPATH_E_SDMADISABLED;

		ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
	} else
		/* so we don't need if (!noprint) at strlcat's below */
		*msg = 0;

	if (errs & E_SUM_PKTERRS) {
		ipath_stats.sps_pkterrs++;
		chkerrpkts = 1;
	}
	if (errs & E_SUM_ERRS)
		ipath_stats.sps_errs++;

	if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
		ipath_stats.sps_crcerrs++;
		chkerrpkts = 1;
	}
	iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);

	/*
	 * We don't want to print these two as they happen, or we can make
	 * the situation even worse, because it takes so long to print
	 * messages to serial consoles.  Kernel ports get printed from
	 * fast_stats, no more than every 5 seconds, user ports get printed
	 * on close
	 */
	if (errs & INFINIPATH_E_RRCVHDRFULL)
		chkerrpkts |= handle_hdrq_full(dd);
	if (errs & INFINIPATH_E_RRCVEGRFULL) {
		struct ipath_portdata *pd = dd->ipath_pd[0];

		/*
		 * since this is of less importance and not likely to
		 * happen without also getting hdrfull, only count
		 * occurrences; don't check each port (or even the kernel
		 * vs user)
		 */
		ipath_stats.sps_etidfull++;
		if (pd->port_head != ipath_get_hdrqtail(pd))
			chkerrpkts |= 1;
	}

	/*
	 * do this before IBSTATUSCHANGED, in case both bits set in a single
	 * interrupt; we want the STATUSCHANGE to "win", so we do our
	 * internal copy of state machine correctly
	 */
	if (errs & INFINIPATH_E_RIBLOSTLINK) {
		/*
		 * force through block below
		 */
		errs |= INFINIPATH_E_IBSTATUSCHANGED;
		ipath_stats.sps_iblink++;
		dd->ipath_flags |= IPATH_LINKDOWN;
		dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
				     | IPATH_LINKARMED | IPATH_LINKACTIVE);
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;

		ipath_dbg("Lost link, link now down (%s)\n",
			  ipath_ibcstatus_str[ipath_read_kreg64(dd,
			  dd->ipath_kregs->kr_ibcstatus) & 0xf]);
	}
	if (errs & INFINIPATH_E_IBSTATUSCHANGED)
		handle_e_ibstatuschanged(dd, errs);

	if (errs & INFINIPATH_E_RESET) {
		if (!noprint)
			ipath_dev_err(dd, "Got reset, requires re-init "
				      "(unload and reload driver)\n");
		dd->ipath_flags &= ~IPATH_INITTED; /* needs re-init */
		/* mark as having had error */
		*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
		*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
	}

	if (!noprint && *msg) {
		if (iserr)
			ipath_dev_err(dd, "%s error\n", msg);
	}
	if (dd->ipath_state_wanted & dd->ipath_flags) {
		ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
			   "waking\n", dd->ipath_state_wanted,
			   dd->ipath_flags);
		wake_up_interruptible(&ipath_state_wait);
	}

	return chkerrpkts;
}
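
/*
 * The nonzero return from handle_errors() feeds chk0rcv in
 * ipath_intr(), forcing an ipath_kreceive() pass over kernel port 0
 * even when no receive-available interrupt bit is set, since error
 * packets may be sitting in the queue.
 */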

/*
 * try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 */
void ipath_clear_freeze(struct ipath_devdata *dd)
{
	/* disable error interrupts, to avoid confusion */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	ipath_cancel_sends(dd, 1);

	/* clear the freeze, and be sure chip saw it */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

	/* force in-memory update now we are out of freeze */
	ipath_force_pio_avail_update(dd);

	/*
	 * force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
			 E_SPKT_ERRS_IGNORE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
}
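
/*
 * The 0ULL writes to kr_hwerrclear and kr_intclear above are
 * deliberate no-op clears: as the header comment notes, writing 0 to
 * a *clear register bit whose *status bit is set makes the chip raise
 * the interrupt again, so nothing that became pending during the
 * freeze is lost.
 */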

/* this is separate to allow for better optimization of ipath_intr() */

static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
{
	/*
	 * These sometimes happen during driver init and unload; we don't
	 * want to process any interrupts at that point
	 */

	/* this is just a bandaid, not a fix, if something goes badly
	 * wrong */
	if (++*unexpectp > 100) {
		if (++*unexpectp > 105) {
			/*
			 * ok, we must be taking somebody else's interrupts,
			 * due to a messed up mptable and/or PIRQ table, so
			 * unregister the interrupt.  We've seen this during
			 * linuxbios development work, and it may happen in
			 * the future again.
			 */
			if (dd->pcidev && dd->ipath_irq) {
				ipath_dev_err(dd, "Now %u unexpected "
					      "interrupts, unregistering "
					      "interrupt handler\n",
					      *unexpectp);
				ipath_dbg("free_irq of irq %d\n",
					  dd->ipath_irq);
				dd->ipath_f_free_irq(dd);
			}
		}
		if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
			ipath_dev_err(dd, "%u unexpected interrupts, "
				      "disabling interrupts completely\n",
				      *unexpectp);
			/*
			 * disable all interrupts, something is very wrong
			 */
			ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
					 0ULL);
		}
	} else if (*unexpectp > 1)
		ipath_dbg("Interrupt when not ready, should not happen, "
			  "ignoring\n");
}

static noinline void ipath_bad_regread(struct ipath_devdata *dd)
{
	static int allbits;

	/* separate routine, for better optimization of ipath_intr() */

	/*
	 * We print the message and disable interrupts, in hope of
	 * having a better chance of debugging the problem.
	 */
	ipath_dev_err(dd,
		      "Read of interrupt status failed (all bits set)\n");
	if (allbits++) {
		/* disable all interrupts, something is very wrong */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
		if (allbits == 2) {
			ipath_dev_err(dd, "Still bad interrupt status, "
				      "unregistering interrupt\n");
			dd->ipath_f_free_irq(dd);
		} else if (allbits > 2) {
			if ((allbits % 10000) == 0)
				printk(".");
		} else
			ipath_dev_err(dd, "Disabling interrupts, "
				      "multiple errors\n");
	}
}

static void handle_layer_pioavail(struct ipath_devdata *dd)
{
	unsigned long flags;
	int ret;

	ret = ipath_ib_piobufavail(dd->verbs_dev);
	if (ret <= 0)
		return;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

/*
 * Handle receive interrupts for user ports; this means a user
 * process was waiting for a packet to arrive, and didn't want
 * to poll
 */
static void handle_urcv(struct ipath_devdata *dd, u64 istat)
{
	u64 portr;
	int i;
	int rcvdint = 0;

	/*
	 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
	 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
	 * would both like timely updates of the bits so that
	 * we don't pass them by unnecessarily.  The rmb()
	 * here ensures that we see them promptly -- the
	 * corresponding wmb()'s are in ipath_poll_urgent()
	 * and ipath_poll_next()...
	 */
	rmb();
	portr = ((istat >> dd->ipath_i_rcvavail_shift) &
		 dd->ipath_i_rcvavail_mask) |
		((istat >> dd->ipath_i_rcvurg_shift) &
		 dd->ipath_i_rcvurg_mask);
	for (i = 1; i < dd->ipath_cfgports; i++) {
		struct ipath_portdata *pd = dd->ipath_pd[i];

		if (portr & (1 << i) && pd && pd->port_cnt) {
			if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
					       &pd->port_flag)) {
				clear_bit(i + dd->ipath_r_intravail_shift,
					  &dd->ipath_rcvctrl);
				wake_up_interruptible(&pd->port_wait);
				rcvdint = 1;
			} else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
						      &pd->port_flag)) {
				pd->port_urgent++;
				wake_up_interruptible(&pd->port_wait);
			}
		}
	}
	if (rcvdint) {
		/* only want to take one interrupt, so turn off the rcv
		 * interrupt for all the ports that we set the rcv_waiting
		 * (but never for kernel port)
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
	}
}
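
/*
 * ipath_intr() below orders its work deliberately: clear the
 * interrupt bits early so the chip re-interrupts if more work arrives,
 * drain kernel port 0 first (the queue that can actually overflow),
 * then wake user ports, then SDMA, and handle PIO-buffer-available
 * last, since PIO waiters were sleeping anyway.
 */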
irqreturn_t ipath_intr(int irq, void *data)
{
	struct ipath_devdata *dd = data;
	u64 istat, chk0rcv = 0;
	ipath_err_t estat = 0;
	irqreturn_t ret;
	static unsigned unexpected = 0;
	u64 kportrbits;

	ipath_stats.sps_ints++;

	if (dd->ipath_int_counter != (u32) -1)
		dd->ipath_int_counter++;

	if (!(dd->ipath_flags & IPATH_PRESENT)) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		return IRQ_HANDLED;
	}

	/*
	 * this needs to be flags&initted, not statusp, so we keep
	 * taking interrupts even after link goes down, etc.
	 * Also, we *must* clear the interrupt at some point, or we won't
	 * take it again, which can be real bad for errors, etc...
	 */

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		ipath_bad_intr(dd, &unexpected);
		ret = IRQ_NONE;
		goto bail;
	}

	istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);

	if (unlikely(!istat)) {
		ipath_stats.sps_nullintr++;
		ret = IRQ_NONE; /* not our interrupt, or already handled */
		goto bail;
	}
	if (unlikely(istat == -1)) {
		ipath_bad_regread(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	if (unexpected)
		unexpected = 0;

	if (unlikely(istat & ~dd->ipath_i_bitsextant))
		ipath_dev_err(dd,
			      "interrupt with unknown interrupts %Lx set\n",
			      (unsigned long long)
			      istat & ~dd->ipath_i_bitsextant);
	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
			   (unsigned long long) istat);

	if (istat & INFINIPATH_I_ERROR) {
		ipath_stats.sps_errints++;
		estat = ipath_read_kreg64(dd,
			dd->ipath_kregs->kr_errorstatus);
		if (!estat)
			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
				 "but no error bits set!\n",
				 (unsigned long long) istat);
		else if (estat == -1LL)
			/*
			 * should we try clearing all, or hope next read
			 * works?
			 */
			ipath_dev_err(dd, "Read of error status failed "
				      "(all bits set); ignoring\n");
		else
			chk0rcv |= handle_errors(dd, estat);
	}

	if (istat & INFINIPATH_I_GPIO) {
		/*
		 * GPIO interrupts fall in two broad classes:
		 * GPIO_2 indicates (on some HT4xx boards) that a packet
		 *        has arrived for Port 0.  Checking for this
		 *        is controlled by flag IPATH_GPIO_INTR.
		 * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
		 *        errors that we need to count.  Checking for this
		 *        is controlled by flag IPATH_GPIO_ERRINTRS.
		 */
		u32 gpiostatus;
		u32 to_clear = 0;

		gpiostatus = ipath_read_kreg32(
			dd, dd->ipath_kregs->kr_gpio_status);
		/* First the error-counter case. */
		if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
		    (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
			/* want to clear the bits we see asserted. */
			to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);

			/*
			 * Count appropriately, clear bits out of our copy,
			 * as they have been "handled".
			 */
			if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
				ipath_dbg("FlowCtl on UnsupVL\n");
				dd->ipath_rxfc_unsupvl_errs++;
			}
			if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
				ipath_dbg("Overrun Threshold exceeded\n");
				dd->ipath_overrun_thresh_errs++;
			}
			if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
				ipath_dbg("Local Link Integrity error\n");
				dd->ipath_lli_errs++;
			}
			gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
		}
		/* Now the Port0 Receive case */
		if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
		    (dd->ipath_flags & IPATH_GPIO_INTR)) {
			/*
			 * GPIO status bit 2 is set, and we expected it.
			 * Clear it and note it in chk0rcv.
			 * This probably only happens if a Port0 pkt
			 * arrives at _just_ the wrong time, and we
			 * handle that by setting chk0rcv;
			 */
			to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
			gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
			chk0rcv = 1;
		}
		if (gpiostatus) {
			/*
			 * Some unexpected bits remain.  If they could have
			 * caused the interrupt, complain and clear.
			 * To avoid repetition of this condition, also clear
			 * the mask.  It is almost certainly due to error.
			 */
			const u32 mask = (u32) dd->ipath_gpio_mask;

			if (mask & gpiostatus) {
				ipath_dbg("Unexpected GPIO IRQ bits %x\n",
					  gpiostatus & mask);
				to_clear |= (gpiostatus & mask);
				dd->ipath_gpio_mask &= ~(gpiostatus & mask);
				ipath_write_kreg(dd,
					dd->ipath_kregs->kr_gpio_mask,
					dd->ipath_gpio_mask);
			}
		}
		if (to_clear) {
			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
					 (u64) to_clear);
		}
	}

	/*
	 * Clear the interrupt bits we found set, unless they are receive
	 * related, in which case we already cleared them above, and don't
	 * want to clear them again, because we might lose an interrupt.
	 * Clear it early, so we "know" the chip will have seen this by
	 * the time we process the queue, and will re-interrupt if necessary.
	 * The processor itself won't take the interrupt again until we
	 * return.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway, and users
	 * waiting for receive are at the bottom.
	 */
	kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
		(1ULL << dd->ipath_i_rcvurg_shift);
	if (chk0rcv || (istat & kportrbits)) {
		istat &= ~kportrbits;
		ipath_kreceive(dd->ipath_pd[0]);
	}

	if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
		     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
		handle_urcv(dd, istat);

	if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
		handle_sdma_intr(dd, istat);

	if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
		unsigned long flags;

		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

		/* always process; sdma verbs uses PIO for acks and VL15 */
		handle_layer_pioavail(dd);
	}

	ret = IRQ_HANDLED;

bail:
	return ret;
}