/* drivers/infiniband/hw/qib/qib_iba6120.c */
/*1* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.2* All rights reserved.3* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.4*5* This software is available to you under a choice of one of two6* licenses. You may choose to be licensed under the terms of the GNU7* General Public License (GPL) Version 2, available from the file8* COPYING in the main directory of this source tree, or the9* OpenIB.org BSD license below:10*11* Redistribution and use in source and binary forms, with or12* without modification, are permitted provided that the following13* conditions are met:14*15* - Redistributions of source code must retain the above16* copyright notice, this list of conditions and the following17* disclaimer.18*19* - Redistributions in binary form must reproduce the above20* copyright notice, this list of conditions and the following21* disclaimer in the documentation and/or other materials22* provided with the distribution.23*24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND27* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE31* SOFTWARE.32*/33/*34* This file contains all of the code that is specific to the35* QLogic_IB 6120 PCIe chip.36*/3738#include <linux/interrupt.h>39#include <linux/pci.h>40#include <linux/delay.h>41#include <rdma/ib_verbs.h>4243#include "qib.h"44#include "qib_6120_regs.h"4546static void qib_6120_setup_setextled(struct qib_pportdata *, u32);47static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);48static u8 qib_6120_phys_portstate(u64);49static u32 qib_6120_iblink_state(u64);5051/*52* This file contains all the chip-specific register information and53* access functions for the QLogic QLogic_IB PCI-Express chip.54*55*/5657/* KREG_IDX uses machine-generated #defines */58#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))5960/* Use defines to tie machine-generated names to lower-case names */61#define kr_extctrl KREG_IDX(EXTCtrl)62#define kr_extstatus KREG_IDX(EXTStatus)63#define kr_gpio_clear KREG_IDX(GPIOClear)64#define kr_gpio_mask KREG_IDX(GPIOMask)65#define kr_gpio_out KREG_IDX(GPIOOut)66#define kr_gpio_status KREG_IDX(GPIOStatus)67#define kr_rcvctrl KREG_IDX(RcvCtrl)68#define kr_sendctrl KREG_IDX(SendCtrl)69#define kr_partitionkey KREG_IDX(RcvPartitionKey)70#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)71#define kr_ibcstatus KREG_IDX(IBCStatus)72#define kr_ibcctrl KREG_IDX(IBCCtrl)73#define kr_sendbuffererror KREG_IDX(SendBufErr0)74#define kr_rcvbthqp KREG_IDX(RcvBTHQP)75#define kr_counterregbase KREG_IDX(CntrRegBase)76#define kr_palign KREG_IDX(PageAlign)77#define kr_rcvegrbase KREG_IDX(RcvEgrBase)78#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)79#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)80#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)81#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)82#define 
kr_rcvtidbase KREG_IDX(RcvTIDBase)83#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)84#define kr_scratch KREG_IDX(Scratch)85#define kr_sendctrl KREG_IDX(SendCtrl)86#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)87#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)88#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)89#define kr_sendpiosize KREG_IDX(SendPIOSize)90#define kr_sendregbase KREG_IDX(SendRegBase)91#define kr_userregbase KREG_IDX(UserRegBase)92#define kr_control KREG_IDX(Control)93#define kr_intclear KREG_IDX(IntClear)94#define kr_intmask KREG_IDX(IntMask)95#define kr_intstatus KREG_IDX(IntStatus)96#define kr_errclear KREG_IDX(ErrClear)97#define kr_errmask KREG_IDX(ErrMask)98#define kr_errstatus KREG_IDX(ErrStatus)99#define kr_hwerrclear KREG_IDX(HwErrClear)100#define kr_hwerrmask KREG_IDX(HwErrMask)101#define kr_hwerrstatus KREG_IDX(HwErrStatus)102#define kr_revision KREG_IDX(Revision)103#define kr_portcnt KREG_IDX(PortCnt)104#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)105#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)106#define kr_serdes_stat KREG_IDX(SerdesStat)107#define kr_xgxs_cfg KREG_IDX(XGXSCfg)108109/* These must only be written via qib_write_kreg_ctxt() */110#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)111#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)112113#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \114QIB_6120_LBIntCnt_OFFS) / sizeof(u64))115116#define cr_badformat CREG_IDX(RxBadFormatCnt)117#define cr_erricrc CREG_IDX(RxICRCErrCnt)118#define cr_errlink CREG_IDX(RxLinkProblemCnt)119#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)120#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)121#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)122#define cr_err_rlen CREG_IDX(RxLenErrCnt)123#define cr_errslen CREG_IDX(TxLenErrCnt)124#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)125#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)126#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)127#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)128#define cr_lbint 
CREG_IDX(LBIntCnt)129#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)130#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)131#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)132#define cr_pktrcv CREG_IDX(RxDataPktCnt)133#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)134#define cr_pktsend CREG_IDX(TxDataPktCnt)135#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)136#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)137#define cr_rcvebp CREG_IDX(RxEBPCnt)138#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)139#define cr_senddropped CREG_IDX(TxDroppedPktCnt)140#define cr_sendstall CREG_IDX(TxFlowStallCnt)141#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)142#define cr_wordrcv CREG_IDX(RxDwordCnt)143#define cr_wordsend CREG_IDX(TxDwordCnt)144#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)145#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)146#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)147#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)148#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)149150#define SYM_RMASK(regname, fldname) ((u64) \151QIB_6120_##regname##_##fldname##_RMASK)152#define SYM_MASK(regname, fldname) ((u64) \153QIB_6120_##regname##_##fldname##_RMASK << \154QIB_6120_##regname##_##fldname##_LSB)155#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)156157#define SYM_FIELD(value, regname, fldname) ((u64) \158(((value) >> SYM_LSB(regname, fldname)) & \159SYM_RMASK(regname, fldname)))160#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)161#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)162163/* link training states, from IBC */164#define IB_6120_LT_STATE_DISABLED 0x00165#define IB_6120_LT_STATE_LINKUP 0x01166#define IB_6120_LT_STATE_POLLACTIVE 0x02167#define IB_6120_LT_STATE_POLLQUIET 0x03168#define IB_6120_LT_STATE_SLEEPDELAY 0x04169#define IB_6120_LT_STATE_SLEEPQUIET 0x05170#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08171#define IB_6120_LT_STATE_CFGRCVFCFG 0x09172#define IB_6120_LT_STATE_CFGWAITRMT 0x0a173#define 
#define IB_6120_LT_STATE_CFGIDLE 0x0b
#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
#define IB_6120_LT_STATE_RECOVERIDLE 0x0f

/* link state machine states from IBC */
#define IB_6120_L_STATE_DOWN 0x0
#define IB_6120_L_STATE_INIT 0x1
#define IB_6120_L_STATE_ARM 0x2
#define IB_6120_L_STATE_ACTIVE 0x3
#define IB_6120_L_STATE_ACT_DEFER 0x4

/*
 * Map from the IBC link training states above to the generic
 * IB_PHYSPORTSTATE_* values.  Entries 0x10-0x17 have no named LT
 * state and are reported as config/train.
 */
static const u8 qib_6120_physportstate[0x20] = {
	[IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_6120_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_6120_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_6120_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};


/* 6120-specific private state, hung off qib_devdata for this chip */
struct qib_chip_specific {
	u64 __iomem *cregbase;
	u64 *cntrs;
	u64 *portcntrs;
	void *dummy_hdrq; /* used after ctxt close */
	dma_addr_t dummy_hdrq_phys;
	spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
	spinlock_t user_tid_lock; /* no back to back user TID writes */
	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
	u64 hwerrmask;
	u64 errormask;
	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
	u64 gpio_mask; /* shadow the gpio mask register */
	u64 extctrl; /* shadow the gpio output enable, etc... */
	/*
	 * these 5 fields are used to establish deltas for IB symbol
	 * errors and linkrecovery errors.  They can be reported on
	 * some chips during link negotiation prior to INIT, and with
	 * DDR when faking DDR negotiations with non-IBTA switches.
	 * The chip counters are adjusted at driver unload if there is
	 * a non-zero delta.
	 */
	u64 ibdeltainprog;
	u64 ibsymdelta;
	u64 ibsymsnap;
	u64 iblnkerrdelta;
	u64 iblnkerrsnap;
	u64 ibcctrl; /* shadow for kr_ibcctrl */
	u32 lastlinkrecov; /* link recovery issue */
	int irq;
	u32 cntrnamelen;
	u32 portcntrnamelen;
	u32 ncntrs;
	u32 nportcntrs;
	/* used with gpio interrupts to implement IB counters */
	u32 rxfc_unsupvl_errs;
	u32 overrun_thresh_errs;
	/*
	 * these count only cases where _successive_ LocalLinkIntegrity
	 * errors were seen in the receive headers of IB standard packets
	 */
	u32 lli_errs;
	u32 lli_counter;
	u64 lli_thresh;
	u64 sword; /* total dwords sent (sample result) */
	u64 rword; /* total dwords received (sample result) */
	u64 spkts; /* total packets sent (sample result) */
	u64 rpkts; /* total packets received (sample result) */
	u64 xmit_wait; /* # of ticks no data sent (sample result) */
	struct timer_list pma_timer;
	char emsgbuf[128];
	char bitsmsgbuf[64];
	u8 pma_sample_status;
};

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3	/* move to 0x31 */
*/283#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18284285/*286* We could have a single register get/put routine, that takes a group type,287* but this is somewhat clearer and cleaner. It also gives us some error288* checking. 64 bit register reads should always work, but are inefficient289* on opteron (the northbridge always generates 2 separate HT 32 bit reads),290* so we use kreg32 wherever possible. User register and counter register291* reads are always 32 bit reads, so only one form of those routines.292*/293294/**295* qib_read_ureg32 - read 32-bit virtualized per-context register296* @dd: device297* @regno: register number298* @ctxt: context number299*300* Return the contents of a register that is virtualized to be per context.301* Returns -1 on errors (not distinguishable from valid contents at302* runtime; we may add a separate error variable at some point).303*/304static inline u32 qib_read_ureg32(const struct qib_devdata *dd,305enum qib_ureg regno, int ctxt)306{307if (!dd->kregbase || !(dd->flags & QIB_PRESENT))308return 0;309310if (dd->userbase)311return readl(regno + (u64 __iomem *)312((char __iomem *)dd->userbase +313dd->ureg_align * ctxt));314else315return readl(regno + (u64 __iomem *)316(dd->uregbase +317(char __iomem *)dd->kregbase +318dd->ureg_align * ctxt));319}320321/**322* qib_write_ureg - write 32-bit virtualized per-context register323* @dd: device324* @regno: register number325* @value: value326* @ctxt: context327*328* Write the contents of a register that is virtualized to be per context.329*/330static inline void qib_write_ureg(const struct qib_devdata *dd,331enum qib_ureg regno, u64 value, int ctxt)332{333u64 __iomem *ubase;334if (dd->userbase)335ubase = (u64 __iomem *)336((char __iomem *) dd->userbase +337dd->ureg_align * ctxt);338else339ubase = (u64 __iomem *)340(dd->uregbase +341(char __iomem *) dd->kregbase +342dd->ureg_align * ctxt);343344if (dd->kregbase && (dd->flags & QIB_PRESENT))345writeq(value, &ubase[regno]);346}347348static inline 
u32 qib_read_kreg32(const struct qib_devdata *dd,349const u16 regno)350{351if (!dd->kregbase || !(dd->flags & QIB_PRESENT))352return -1;353return readl((u32 __iomem *)&dd->kregbase[regno]);354}355356static inline u64 qib_read_kreg64(const struct qib_devdata *dd,357const u16 regno)358{359if (!dd->kregbase || !(dd->flags & QIB_PRESENT))360return -1;361362return readq(&dd->kregbase[regno]);363}364365static inline void qib_write_kreg(const struct qib_devdata *dd,366const u16 regno, u64 value)367{368if (dd->kregbase && (dd->flags & QIB_PRESENT))369writeq(value, &dd->kregbase[regno]);370}371372/**373* qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register374* @dd: the qlogic_ib device375* @regno: the register number to write376* @ctxt: the context containing the register377* @value: the value to write378*/379static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,380const u16 regno, unsigned ctxt,381u64 value)382{383qib_write_kreg(dd, regno + ctxt, value);384}385386static inline void write_6120_creg(const struct qib_devdata *dd,387u16 regno, u64 value)388{389if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))390writeq(value, &dd->cspec->cregbase[regno]);391}392393static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)394{395if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))396return 0;397return readq(&dd->cspec->cregbase[regno]);398}399400static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)401{402if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))403return 0;404return readl(&dd->cspec->cregbase[regno]);405}406407/* kr_control bits */408#define QLOGIC_IB_C_RESET 1U409410/* kr_intstatus, kr_intclear, kr_intmask bits */411#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)412#define QLOGIC_IB_I_RCVURG_SHIFT 0413#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)414#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12415416#define QLOGIC_IB_C_FREEZEMODE 0x00000002417#define QLOGIC_IB_C_LINKENABLE 0x00000004418#define 
QLOGIC_IB_I_ERROR 0x0000000080000000ULL419#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL420#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL421#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL422#define QLOGIC_IB_I_BITSEXTANT \423((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \424(QLOGIC_IB_I_RCVAVAIL_MASK << \425QLOGIC_IB_I_RCVAVAIL_SHIFT) | \426QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \427QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)428429/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */430#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL431#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0432#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL433#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL434#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL435#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL436#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL437#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL438#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL439#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL440#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL441#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL442443444/* kr_extstatus bits */445#define QLOGIC_IB_EXTS_FREQSEL 0x2446#define QLOGIC_IB_EXTS_SERDESSEL 0x4447#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000448#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000449450/* kr_xgxsconfig bits */451#define QLOGIC_IB_XGXS_RESET 0x5ULL452453#define _QIB_GPIO_SDA_NUM 1454#define _QIB_GPIO_SCL_NUM 0455456/* Bits in GPIO for the added IB link interrupts */457#define GPIO_RXUVL_BIT 3458#define GPIO_OVRUN_BIT 4459#define GPIO_LLI_BIT 5460#define GPIO_ERRINTR_MASK 0x38461462463#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL464#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \465((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)466#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << 
QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
#define QLOGIC_IB_RT_IS_VALID(tid) \
	(((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
	 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
#define QLOGIC_IB_RT_ADDR_SHIFT 10

#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
#define QLOGIC_IB_R_TAILUPD_SHIFT 31
#define IBA6120_R_PKEY_DIS_SHIFT 30

#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */

#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

/* select a single bit within a multi-bit register field */
#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
	((1ULL << (SYM_LSB(regname, fldname) + (bit)))))

#define TXEMEMPARITYERR_PIOBUF \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)

/* 6120 specific hardware errors: bit masks paired with message text */
static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
	/* generic hardware errors */
	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
			  "TXE PIOBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
			  "TXE PIOPBC Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
			  "TXE PIOLAUNCHFIFO Memory Parity"),

	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
			  "RXE RCVBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
			  "RXE LOOKUPQ Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
			  "RXE EAGERTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
			  "RXE EXPTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
			  "RXE FLAGBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
			  "RXE DATAINFO Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
			  "RXE HDRINFO Memory Parity"),

	/* chip-specific hardware errors */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
			  "PCIe Poisoned TLP"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
			  "PCIe completion timeout"),
	/*
	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
	 * parity or memory parity error failures, because most likely we
	 * won't be able to talk to the core of the chip.  Nonetheless, we
	 * might see them, if they are in parts of the PCIe core that aren't
	 * essential.
	 */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
			  "PCIePLL1"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
			  "PCIePLL0"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
			  "PCIe XTLH core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
			  "PCIe ADM TX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
			  "PCIe ADM RX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
			  "SerDes PLL"),
};

#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
		       QLOGIC_IB_HWE_COREPLL_RFSLIP)

/* variables for sanity checking interrupt and errors */
#define IB_HWE_BITSEXTANT \
	(HWE_MASK(RXEMemParityErr) | \
	 HWE_MASK(TXEMemParityErr) | \
	 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
	  QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
	 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
	 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
	 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
	 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
	 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
	 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
	 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
	 HWE_MASK(PowerOnBISTFailed) | \
	 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
	 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
	 QLOGIC_IB_HWE_SERDESPLLFAILED | \
	 HWE_MASK(IBCBusToSPCParityErr) | \
	 HWE_MASK(IBCBusFromSPCParityErr))

#define IB_E_BITSEXTANT \
	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
\595ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \596ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \597ERR_MASK(SendDroppedSmpPktErr) | \598ERR_MASK(SendDroppedDataPktErr) | \599ERR_MASK(SendPioArmLaunchErr) | \600ERR_MASK(SendUnexpectedPktNumErr) | \601ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \602ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \603ERR_MASK(HardwareErr))604605#define QLOGIC_IB_E_PKTERRS ( \606ERR_MASK(SendPktLenErr) | \607ERR_MASK(SendDroppedDataPktErr) | \608ERR_MASK(RcvVCRCErr) | \609ERR_MASK(RcvICRCErr) | \610ERR_MASK(RcvShortPktLenErr) | \611ERR_MASK(RcvEBPErr))612613/* These are all rcv-related errors which we want to count for stats */614#define E_SUM_PKTERRS \615(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \616ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \617ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \618ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \619ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \620ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))621622/* These are all send-related errors which we want to count for stats */623#define E_SUM_ERRS \624(ERR_MASK(SendPioArmLaunchErr) | \625ERR_MASK(SendUnexpectedPktNumErr) | \626ERR_MASK(SendDroppedDataPktErr) | \627ERR_MASK(SendDroppedSmpPktErr) | \628ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \629ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \630ERR_MASK(InvalidAddrErr))631632/*633* this is similar to E_SUM_ERRS, but can't ignore armlaunch, don't ignore634* errors not related to freeze and cancelling buffers. 
Can't ignore635* armlaunch because could get more while still cleaning up, and need636* to cancel those as they happen.637*/638#define E_SPKT_ERRS_IGNORE \639(ERR_MASK(SendDroppedDataPktErr) | \640ERR_MASK(SendDroppedSmpPktErr) | \641ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \642ERR_MASK(SendPktLenErr))643644/*645* these are errors that can occur when the link changes state while646* a packet is being sent or received. This doesn't cover things647* like EBP or VCRC that can be the result of a sending having the648* link change state, so we receive a "known bad" packet.649*/650#define E_SUM_LINK_PKTERRS \651(ERR_MASK(SendDroppedDataPktErr) | \652ERR_MASK(SendDroppedSmpPktErr) | \653ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \654ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \655ERR_MASK(RcvUnexpectedCharErr))656657static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,658u32, unsigned long);659660/*661* On platforms using this chip, and not having ordered WC stores, we662* can get TXE parity errors due to speculative reads to the PIO buffers,663* and this, due to a chip issue can result in (many) false parity error664* reports. So it's a debug print on those, and an info print on systems665* where the speculative reads don't occur.666*/667static void qib_6120_txe_recover(struct qib_devdata *dd)668{669if (!qib_unordered_wc())670qib_devinfo(dd->pcidev,671"Recovering from TXE PIO parity error\n");672}673674/* enable/disable chip from delivering interrupts */675static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)676{677if (enable) {678if (dd->flags & QIB_BADINTR)679return;680qib_write_kreg(dd, kr_intmask, ~0ULL);681/* force re-interrupt of any pending interrupts. 
*/682qib_write_kreg(dd, kr_intclear, 0ULL);683} else684qib_write_kreg(dd, kr_intmask, 0ULL);685}686687/*688* Try to cleanup as much as possible for anything that might have gone689* wrong while in freeze mode, such as pio buffers being written by user690* processes (causing armlaunch), send errors due to going into freeze mode,691* etc., and try to avoid causing extra interrupts while doing so.692* Forcibly update the in-memory pioavail register copies after cleanup693* because the chip won't do it while in freeze mode (the register values694* themselves are kept correct).695* Make sure that we don't lose any important interrupts by using the chip696* feature that says that writing 0 to a bit in *clear that is set in697* *status will cause an interrupt to be generated again (if allowed by698* the *mask value).699* This is in chip-specific code because of all of the register accesses,700* even though the details are similar on most chips701*/702static void qib_6120_clear_freeze(struct qib_devdata *dd)703{704/* disable error interrupts, to avoid confusion */705qib_write_kreg(dd, kr_errmask, 0ULL);706707/* also disable interrupts; errormask is sometimes overwriten */708qib_6120_set_intr_state(dd, 0);709710qib_cancel_sends(dd->pport);711712/* clear the freeze, and be sure chip saw it */713qib_write_kreg(dd, kr_control, dd->control);714qib_read_kreg32(dd, kr_scratch);715716/* force in-memory update now we are out of freeze */717qib_force_pio_avail_update(dd);718719/*720* force new interrupt if any hwerr, error or interrupt bits are721* still set, and clear "safe" send packet errors related to freeze722* and cancelling sends. 
Re-enable error interrupts before possible723* force of re-interrupt on pending interrupts.724*/725qib_write_kreg(dd, kr_hwerrclear, 0ULL);726qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);727qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);728qib_6120_set_intr_state(dd, 1);729}730731/**732* qib_handle_6120_hwerrors - display hardware errors.733* @dd: the qlogic_ib device734* @msg: the output buffer735* @msgl: the size of the output buffer736*737* Use same msg buffer as regular errors to avoid excessive stack738* use. Most hardware errors are catastrophic, but for right now,739* we'll print them and continue. Reuse the same message buffer as740* handle_6120_errors() to avoid excessive stack usage.741*/742static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,743size_t msgl)744{745u64 hwerrs;746u32 bits, ctrl;747int isfatal = 0;748char *bitsmsg;749int log_idx;750751hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);752if (!hwerrs)753return;754if (hwerrs == ~0ULL) {755qib_dev_err(dd, "Read of hardware error status failed "756"(all bits set); ignoring\n");757return;758}759qib_stats.sps_hwerrs++;760761/* Always clear the error status register, except MEMBISTFAIL,762* regardless of whether we continue or stop using the chip.763* We want that set so we know it failed, even across driver reload.764* We'll still ignore it in the hwerrmask. We do this partly for765* diagnostics, but also for support */766qib_write_kreg(dd, kr_hwerrclear,767hwerrs & ~HWE_MASK(PowerOnBISTFailed));768769hwerrs &= dd->cspec->hwerrmask;770771/* We log some errors to EEPROM, check if we have any of those. 
*/772for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)773if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)774qib_inc_eeprom_err(dd, log_idx, 1);775776/*777* Make sure we get this much out, unless told to be quiet,778* or it's occurred within the last 5 seconds.779*/780if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))781qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "782"(cleared)\n", (unsigned long long) hwerrs);783784if (hwerrs & ~IB_HWE_BITSEXTANT)785qib_dev_err(dd, "hwerror interrupt with unknown errors "786"%llx set\n", (unsigned long long)787(hwerrs & ~IB_HWE_BITSEXTANT));788789ctrl = qib_read_kreg32(dd, kr_control);790if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {791/*792* Parity errors in send memory are recoverable,793* just cancel the send (if indicated in * sendbuffererror),794* count the occurrence, unfreeze (if no other handled795* hardware error bits are set), and continue. They can796* occur if a processor speculative read is done to the PIO797* buffer while we are sending a packet, for example.798*/799if (hwerrs & TXE_PIO_PARITY) {800qib_6120_txe_recover(dd);801hwerrs &= ~TXE_PIO_PARITY;802}803804if (!hwerrs) {805static u32 freeze_cnt;806807freeze_cnt++;808qib_6120_clear_freeze(dd);809} else810isfatal = 1;811}812813*msg = '\0';814815if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {816isfatal = 1;817strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"818" unusable]", msgl);819/* ignore from now on, so disable until driver reloaded */820dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);821qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);822}823824qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,825ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);826827bitsmsg = dd->cspec->bitsmsgbuf;828if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<829QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {830bits = (u32) ((hwerrs >>831QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &832QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);833snprintf(bitsmsg, 
sizeof dd->cspec->bitsmsgbuf,834"[PCIe Mem Parity Errs %x] ", bits);835strlcat(msg, bitsmsg, msgl);836}837838if (hwerrs & _QIB_PLL_FAIL) {839isfatal = 1;840snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,841"[PLL failed (%llx), InfiniPath hardware unusable]",842(unsigned long long) hwerrs & _QIB_PLL_FAIL);843strlcat(msg, bitsmsg, msgl);844/* ignore from now on, so disable until driver reloaded */845dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);846qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);847}848849if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {850/*851* If it occurs, it is left masked since the external852* interface is unused853*/854dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;855qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);856}857858if (hwerrs)859/*860* if any set that we aren't ignoring; only861* make the complaint once, in case it's stuck862* or recurring, and we get here multiple863* times.864*/865qib_dev_err(dd, "%s hardware error\n", msg);866else867*msg = 0; /* recovered from all of them */868869if (isfatal && !dd->diag_client) {870qib_dev_err(dd, "Fatal Hardware Error, no longer"871" usable, SN %.16s\n", dd->serial);872/*873* for /sys status file and user programs to print; if no874* trailing brace is copied, we'll know it was truncated.875*/876if (dd->freezemsg)877snprintf(dd->freezemsg, dd->freezelen,878"{%s}", msg);879qib_disable_after_error(dd);880}881}882883/*884* Decode the error status into strings, deciding whether to always885* print * it or not depending on "normal packet errors" vs everything886* else. 
 * Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		/* If only packet-class errors are set, not a "real" error */
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		/* Report ICRC only when not accompanied by VCRC or EBP */
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	/* One short mnemonic appended per set error bit */
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
		strlcat(buf, "riblostlink ", blen);
	if (err & ERR_MASK(HardwareErr))
		strlcat(buf, "hardware ", blen);
	if (err & ERR_MASK(ResetNegated))
		strlcat(buf, "reset ", blen);
done:
	return iserr;
}

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
 */
static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
{
	unsigned long sbuf[2];
	struct qib_devdata *dd = ppd->dd;

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);

	if (sbuf[0] || sbuf[1])
		qib_disarm_piobufs_set(dd, sbuf,
				       dd->piobcnt2k + dd->piobcnt4k);
}

/*
 * Check the link-error-recovery counter against the last snapshot;
 * if it changed, force the link down and return 0 so the caller skips
 * normal state handling.  Re-arms the snapshot once the port is ACTIVE.
 */
static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
{
	int ret = 1;
	u32 ibstate = qib_6120_iblink_state(ibcs);
	u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);

	if (linkrecov != dd->cspec->lastlinkrecov) {
		/* and no more until active again */
		dd->cspec->lastlinkrecov = 0;
		qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
		ret = 0;
	}
	if (ibstate == IB_PORT_ACTIVE)
		dd->cspec->lastlinkrecov =
			read_6120_creg32(dd, cr_iblinkerrrecov);
	return ret;
}

/*
 * Main chip error-interrupt handler: decode, count, and react to
 * everything reported in the ErrStatus register.
 */
static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
{
	char *msg;
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int log_idx;
	struct qib_pportdata *ppd = dd->pport;
	u64 mask;

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & 
ERR_MASK(HardwareErr))
		qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & ~IB_E_BITSEXTANT)
		qib_dev_err(dd, "error interrupt with unknown errors "
			    "%llx set\n",
			    (unsigned long long) (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_6120_senderrbufs(ppd);
		if ((errs & E_SUM_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time. The IB logic then complains that the packet
			 * isn't valid. We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
		}
	} else if ((errs & E_SUM_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid. We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	/* acknowledge everything the chip reported */
	qib_write_kreg(dd, kr_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	/*
	 * The ones we mask off are handled specially below
	 * or above.
	 */
	mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
		ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
	qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);

	if (errs & E_SUM_PKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & E_SUM_ERRS)
		qib_stats.sps_txerrs++;

	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);

	if (errs & ERR_MASK(IBStatusChanged)) {
		u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
		u32 ibstate = qib_6120_iblink_state(ibcs);
		int handle = 1;

		if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
			handle = chk_6120_linkrecovery(dd, ibcs);
		/*
		 * Since going into a recovery state causes the link state
		 * to go down and since recovery is transitory, it is better
		 * if we "miss" ever seeing the link training state go into
		 * recovery (i.e., ignore this transition for link state
		 * special handling purposes) without updating lastibcstat.
		 */
		if (handle && qib_6120_phys_portstate(ibcs) ==
		    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
			handle = 0;
		if (handle)
			qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (errs & ERR_MASK(ResetNegated)) {
		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll. We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}
done:
	return;
}

/**
 * qib_6120_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those errors bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask
 */
static void qib_6120_init_hwerrors(struct qib_devdata *dd)
{
	u64 val;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);

	if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
		qib_dev_err(dd, "MemBIST did not complete!\n");

	/* init so all hwerrors interrupt, and enter freeze, adjust below */
	val = ~0ULL;
	if (dd->minrev < 2) {
		/*
		 * Avoid problem with internal interface bus parity
		 * checking. Fixed in Rev2.
		 */
		val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
	}
	/* avoid some intel cpu's speculative read freeze mode issue */
	val &= ~TXEMEMPARITYERR_PIOBUF;

	dd->cspec->hwerrmask = val;

	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. 
*/
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	/* shadow what the chip actually accepted */
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* clear any interrupts up to this point (ints still not enabled) */
	qib_write_kreg(dd, kr_intclear, ~0ULL);

	qib_write_kreg(dd, kr_rcvbthqp,
		       dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
		       QIB_KD_QP);
}

/*
 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based. There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		/* clear any stale armlaunch error before re-enabling */
		qib_write_kreg(dd, kr_errclear,
			       ERR_MASK(SendPioArmLaunchErr));
		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
	} else
		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= 
~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
	/* write to chip to prevent back-to-back writes of control reg */
	qib_write_kreg(dd, kr_scratch, 0);
}

/**
 * qib_6120_bringup_serdes - bring up the serdes
 * @ppd: physical port of the qlogic_ib device
 */
static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, config1, prev_val, hwstat, ibc;

	/* Put IBC in reset, sends disabled */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control, 0ULL);

	/* snapshot error counters so shutdown can back out our delta */
	dd->cspec->ibdeltainprog = 1;
	dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
	dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
	/*
	 * How often flowctrl sent. More or less in usecs; balance against
	 * watermark value, so that in theory senders always get a flow
	 * control update in time to not let the IB link go idle.
	 */
	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
	/* max error tolerance */
	dd->cspec->lli_thresh = 0xf;
	ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
	/* use "real" buffer space for */
	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
	dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */

	/* initially come up waiting for TS1, without sending anything. */
	val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg(dd, kr_ibcctrl, val);

	val = qib_read_kreg64(dd, kr_serdes_cfg0);
	config1 = qib_read_kreg64(dd, kr_serdes_cfg1);

	/*
	 * Force reset on, also set rxdetect enable. Must do before reading
	 * serdesstatus at least for simulation, or some of the bits in
	 * serdes status will come back as undefined and cause simulation
	 * failures
	 */
	val |= SYM_MASK(SerdesCfg0, ResetPLL) |
		SYM_MASK(SerdesCfg0, RxDetEnX) |
		(SYM_MASK(SerdesCfg0, L1PwrDnA) |
		 SYM_MASK(SerdesCfg0, L1PwrDnB) |
		 SYM_MASK(SerdesCfg0, L1PwrDnC) |
		 SYM_MASK(SerdesCfg0, L1PwrDnD));
	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* be sure chip saw it */
	qib_read_kreg64(dd, kr_scratch);
	udelay(5); /* need pll reset set at least for a bit */
	/*
	 * after PLL is reset, set the per-lane Resets and TxIdle and
	 * clear the PLL reset and rxdetect (to get falling edge).
	 * Leave L1PWR bits set (permanently)
	 */
	val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
		 SYM_MASK(SerdesCfg0, ResetPLL) |
		 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
		  SYM_MASK(SerdesCfg0, L1PwrDnB) |
		  SYM_MASK(SerdesCfg0, L1PwrDnC) |
		  SYM_MASK(SerdesCfg0, L1PwrDnD)));
	val |= (SYM_MASK(SerdesCfg0, ResetA) |
		SYM_MASK(SerdesCfg0, ResetB) |
		SYM_MASK(SerdesCfg0, ResetC) |
		SYM_MASK(SerdesCfg0, ResetD)) |
		SYM_MASK(SerdesCfg0, TxIdeEnX);
	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* be sure chip saw it */
	(void) qib_read_kreg64(dd, kr_scratch);
	/* need PLL reset clear for at least 11 usec before lane
	 * resets cleared; give it a few more to be sure */
	udelay(15);
	val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
		  SYM_MASK(SerdesCfg0, ResetB) |
		  SYM_MASK(SerdesCfg0, ResetC) |
		  SYM_MASK(SerdesCfg0, ResetD)) |
		 SYM_MASK(SerdesCfg0, TxIdeEnX));

	qib_write_kreg(dd, kr_serdes_cfg0, val);
	/* be sure chip saw it */
	(void) qib_read_kreg64(dd, 
kr_scratch);

	val = qib_read_kreg64(dd, kr_xgxs_cfg);
	prev_val = val;
	if (val & QLOGIC_IB_XGXS_RESET)
		val &= ~QLOGIC_IB_XGXS_RESET;
	if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
		/* need to compensate for Tx inversion in partner */
		val &= ~SYM_MASK(XGXSCfg, polarity_inv);
		val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
	}
	/* only touch the register if something actually changed */
	if (val != prev_val)
		qib_write_kreg(dd, kr_xgxs_cfg, val);

	val = qib_read_kreg64(dd, kr_serdes_cfg0);

	/* clear current and de-emphasis bits */
	config1 &= ~0x0ffffffff00ULL;
	/* set current to 20ma */
	config1 |= 0x00000000000ULL;
	/* set de-emphasis to -5.68dB */
	config1 |= 0x0cccc000000ULL;
	qib_write_kreg(dd, kr_serdes_cfg1, config1);

	/* base and port guid same for single port */
	ppd->guid = dd->base_guid;

	/*
	 * the process of setting and un-resetting the serdes normally
	 * causes a serdes PLL error, so check for that and clear it
	 * here. Also clear hwerr bit in errstatus, but not others.
	 */
	hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
	if (hwstat) {
		/* should just have PLL, clear all set, in any case */
		qib_write_kreg(dd, kr_hwerrclear, hwstat);
		qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
	}

	dd->control |= QLOGIC_IB_C_LINKENABLE;
	dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
	qib_write_kreg(dd, kr_control, dd->control);

	return 0;
}

/**
 * qib_6120_quiet_serdes - set serdes to txidle
 * @ppd: physical port of the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val;

	qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	/* disable IBC */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control,
		       dd->control | QLOGIC_IB_C_FREEZEMODE);

	if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
	    dd->cspec->ibdeltainprog) {
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
			val = read_6120_creg32(dd, cr_ibsymbolerr);
			/* val -= (val - snap) == restore bringup snapshot */
			if (dd->cspec->ibdeltainprog)
				val -= val - dd->cspec->ibsymsnap;
			val -= dd->cspec->ibsymdelta;
			write_6120_creg(dd, cr_ibsymbolerr, val);
		}
		if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
			val = read_6120_creg32(dd, cr_iblinkerrrecov);
			if (dd->cspec->ibdeltainprog)
				val -= val - dd->cspec->iblnkerrsnap;
			val -= dd->cspec->iblnkerrdelta;
			write_6120_creg(dd, cr_iblinkerrrecov, val);
		}

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}

	/* finally, park the transmitters in TxIdle */
	val = qib_read_kreg64(dd, kr_serdes_cfg0);
	val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
	qib_write_kreg(dd, kr_serdes_cfg0, val);
}

/**
 * qib_6120_setup_setextled - set the state of the two external LEDs
 * @dd: the qlogic_ib device
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs if on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state),
 *
 * Note: We try to match the Mellanox HCA LED behavior as best
 * we can. Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate. 
That's1438* visible overhead, so not something we will do.1439*1440*/1441static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)1442{1443u64 extctl, val, lst, ltst;1444unsigned long flags;1445struct qib_devdata *dd = ppd->dd;14461447/*1448* The diags use the LED to indicate diag info, so we leave1449* the external LED alone when the diags are running.1450*/1451if (dd->diag_client)1452return;14531454/* Allow override of LED display for, e.g. Locating system in rack */1455if (ppd->led_override) {1456ltst = (ppd->led_override & QIB_LED_PHYS) ?1457IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,1458lst = (ppd->led_override & QIB_LED_LOG) ?1459IB_PORT_ACTIVE : IB_PORT_DOWN;1460} else if (on) {1461val = qib_read_kreg64(dd, kr_ibcstatus);1462ltst = qib_6120_phys_portstate(val);1463lst = qib_6120_iblink_state(val);1464} else {1465ltst = 0;1466lst = 0;1467}14681469spin_lock_irqsave(&dd->cspec->gpio_lock, flags);1470extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |1471SYM_MASK(EXTCtrl, LEDPriPortYellowOn));14721473if (ltst == IB_PHYSPORTSTATE_LINKUP)1474extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);1475if (lst == IB_PORT_ACTIVE)1476extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);1477dd->cspec->extctrl = extctl;1478qib_write_kreg(dd, kr_extctrl, extctl);1479spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);1480}14811482static void qib_6120_free_irq(struct qib_devdata *dd)1483{1484if (dd->cspec->irq) {1485free_irq(dd->cspec->irq, dd);1486dd->cspec->irq = 0;1487}1488qib_nomsi(dd);1489}14901491/**1492* qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff1493* @dd: the qlogic_ib device1494*1495* This is called during driver unload.1496*/1497static void qib_6120_setup_cleanup(struct qib_devdata *dd)1498{1499qib_6120_free_irq(dd);1500kfree(dd->cspec->cntrs);1501kfree(dd->cspec->portcntrs);1502if (dd->cspec->dummy_hdrq) {1503dma_free_coherent(&dd->pcidev->dev,1504ALIGN(dd->rcvhdrcnt *1505dd->rcvhdrentsize *1506sizeof(u32), 
PAGE_SIZE),
				  dd->cspec->dummy_hdrq,
				  dd->cspec->dummy_hdrq_phys);
		dd->cspec->dummy_hdrq = NULL;
	}
}

/*
 * Enable or disable the "PIO buffer available" interrupt source,
 * under the sendctrl lock; scratch write flushes the update.
 */
static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint)
		dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
	else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/*
 * handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling
 */
static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
{
	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
		qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
			    istat & ~QLOGIC_IB_I_BITSEXTANT);

	if (istat & QLOGIC_IB_I_ERROR) {
		u64 estat = 0;

		qib_stats.sps_errints++;
		estat = qib_read_kreg64(dd, kr_errstatus);
		if (!estat)
			qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
				    "but no error bits set!\n", istat);
		handle_6120_errors(dd, estat);
	}

	if (istat & QLOGIC_IB_I_GPIO) {
		u32 gpiostatus;
		u32 to_clear = 0;

		/*
		 * GPIO_3..5 on IBA6120 Rev2 chips indicate
		 * errors that we need to count.
		 */
		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
		/* First the error-counter case. */
		if (gpiostatus & GPIO_ERRINTR_MASK) {
			/* want to clear the bits we see asserted. */
			to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);

			/*
			 * Count appropriately, clear bits out of our copy,
			 * as they have been "handled".
			 */
			if (gpiostatus & (1 << GPIO_RXUVL_BIT))
				dd->cspec->rxfc_unsupvl_errs++;
			if (gpiostatus & (1 << GPIO_OVRUN_BIT))
				dd->cspec->overrun_thresh_errs++;
			if (gpiostatus & (1 << GPIO_LLI_BIT))
				dd->cspec->lli_errs++;
			gpiostatus &= ~GPIO_ERRINTR_MASK;
		}
		if (gpiostatus) {
			/*
			 * Some unexpected bits remain. If they could have
			 * caused the interrupt, complain and clear.
			 * To avoid repetition of this condition, also clear
			 * the mask. It is almost certainly due to error.
			 */
			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);

			/*
			 * Also check that the chip reflects our shadow,
			 * and report issues, If they caused the interrupt.
			 * we will suppress by refreshing from the shadow.
			 */
			if (mask & gpiostatus) {
				to_clear |= (gpiostatus & mask);
				dd->cspec->gpio_mask &= ~(gpiostatus & mask);
				qib_write_kreg(dd, kr_gpio_mask,
					       dd->cspec->gpio_mask);
			}
		}
		if (to_clear)
			qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
	}
}

/* Top-level interrupt handler for the 6120; fast path is receive/pioavail */
static irqreturn_t qib_6120intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u32 istat, ctxtrbits, rmask, crcs = 0;
	unsigned i;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg32(dd, kr_intstatus);

	if (unlikely(!istat)) {
		ret = IRQ_NONE; /* not our interrupt, or already handled */
		goto bail;
	}
	if (unlikely(istat == -1)) {
		qib_bad_intrstatus(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* slow-path conditions handled out of line */
	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
		unlikely_6120_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" know the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary. The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat &
		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
	if (ctxtrbits) {
		rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
			(1U << QLOGIC_IB_I_RCVURG_SHIFT);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				crcs += qib_kreceive(dd->rcd[i],
						     &dd->cspec->lli_counter,
						     NULL);
			}
			rmask <<= 1;
		}
		if (crcs) {
			/* fold CRCs into local-link-integrity accounting */
			u32 cntr = dd->cspec->lli_counter;
			cntr += crcs;
			if (cntr) {
				if (cntr > dd->cspec->lli_thresh) {
					dd->cspec->lli_counter = 0;
					dd->cspec->lli_errs++;
				} else
					dd->cspec->lli_counter += cntr;
			}
		}


		if (ctxtrbits) {
			/* remaining bits belong to user contexts */
			ctxtrbits =
				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

/*
 * Set up our chip-specific interrupt handler
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 */
static void 
qib_setup_6120_interrupt(struct qib_devdata *dd)
{
	/*
	 * If the chip supports added error indication via GPIO pins,
	 * enable interrupts on those bits so the interrupt routine
	 * can count the events. Also set flag so interrupt routine
	 * can know they are expected.
	 */
	if (SYM_FIELD(dd->revision, Revision_R,
		      ChipRevMinor) > 1) {
		/* Rev2+ reports extra errors via internal GPIO pins */
		dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
	}

	if (!dd->cspec->irq)
		qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
			    "work\n");
	else {
		int ret;
		ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
				  QIB_DRV_NAME, dd);
		if (ret)
			qib_dev_err(dd, "Couldn't setup interrupt "
				    "(irq=%d): %d\n", dd->cspec->irq,
				    ret);
	}
}

/**
 * pe_boardname - fill in the board name
 * @dd: the qlogic_ib device
 *
 * info is based on the board revision register
 */
static void pe_boardname(struct qib_devdata *dd)
{
	char *n;
	u32 boardid, namelen;

	boardid = SYM_FIELD(dd->revision, Revision,
			    BoardID);

	switch (boardid) {
	case 2:
		n = "InfiniPath_QLE7140";
		break;
	default:
		qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
		n = "Unknown_InfiniPath_6120";
		break;
	}
	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	/* only chip revs 4.1 and 4.2 are supported by this driver */
	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
		qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
			    "%u.%u!\n", dd->majrev, dd->minrev);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));

}

/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context. If we need interrupt context, we can split
 * it into two routines.
 */
static int qib_6120_setup_reset(struct qib_devdata *dd)
{
	u64 val;
	int i;
	int ret;
	u16 cmdval;
	u8 int_line, clinesz;

	/* save PCI config state so it can be restored after reset */
	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	/* Use ERROR so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	/* no interrupts till re-initted */
	qib_6120_set_intr_state(dd, 0);

	dd->cspec->ibdeltainprog = 0;
	dd->cspec->ibsymdelta = 0;
	dd->cspec->iblnkerrdelta = 0;

	/*
	 * Keep chip from being accessed until we are ready. Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
	dd->int_counter = 0; /* so we check interrupts work again */
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);
	mb(); /* prevent compiler re-ordering around actual reset */

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. 
to complete; longer on each retry.1812* We sometimes get machine checks from bus timeout if no1813* response, so for now, make it *really* long.1814*/1815msleep(1000 + (1 + i) * 2000);18161817qib_pcie_reenable(dd, cmdval, int_line, clinesz);18181819/*1820* Use readq directly, so we don't need to mark it as PRESENT1821* until we get a successful indication that all is well.1822*/1823val = readq(&dd->kregbase[kr_revision]);1824if (val == dd->revision) {1825dd->flags |= QIB_PRESENT; /* it's back */1826ret = qib_reinit_intr(dd);1827goto bail;1828}1829}1830ret = 0; /* failed */18311832bail:1833if (ret) {1834if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))1835qib_dev_err(dd, "Reset failed to setup PCIe or "1836"interrupts; continuing anyway\n");1837/* clear the reset error, init error/hwerror mask */1838qib_6120_init_hwerrors(dd);1839/* for Rev2 error interrupts; nop for rev 1 */1840qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);1841/* clear the reset error, init error/hwerror mask */1842qib_6120_init_hwerrors(dd);1843}1844return ret;1845}18461847/**1848* qib_6120_put_tid - write a TID in chip1849* @dd: the qlogic_ib device1850* @tidptr: pointer to the expected TID (in chip) to update1851* @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)1852* for expected1853* @pa: physical address of in memory buffer; tidinvalid if freeing1854*1855* This exists as a separate routine to allow for special locking etc.1856* It's used for both the full cleanup on exit, as well as the normal1857* setup and teardown.1858*/1859static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,1860u32 type, unsigned long pa)1861{1862u32 __iomem *tidp32 = (u32 __iomem *)tidptr;1863unsigned long flags;1864int tidx;1865spinlock_t *tidlockp; /* select appropriate spinlock */18661867if (!dd->kregbase)1868return;18691870if (pa != dd->tidinvalid) {1871if (pa & ((1U << 11) - 1)) {1872qib_dev_err(dd, "Physaddr %lx not 2KB 
aligned!\n",
				    pa);
			return;
		}
		pa >>= 11;
		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				    "larger than supported\n", pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}

	/*
	 * Avoid chip issue by writing the scratch register
	 * before and after the TID, and with an io write barrier.
	 * We use a spinlock around the writes, so they can't intermix
	 * with other TID (eager or expected) writes (the chip problem
	 * is triggered by back to back TID writes). Unfortunately, this
	 * call can be done from interrupt level for the ctxt 0 eager TIDs,
	 * so we have to use irqsave locks.
	 */
	/*
	 * Assumes tidptr always > egrtidbase
	 * if type == RCVHQ_RCV_TYPE_EAGER.
	 */
	tidx = tidptr - dd->egrtidbase;

	/* kernel-context eager TIDs get their own lock */
	tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
		? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
	spin_lock_irqsave(tidlockp, flags);
	qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
	writel(pa, tidp32);
	qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
	mmiowb();
	spin_unlock_irqrestore(tidlockp, flags);
}

/**
 * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
 * for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 *
 * This exists as a separate routine to allow for selection of the
 * appropriate "flavor". The static calls in cleanup just use the
 * revision-agnostic form, as they are not performance critical.
 */
static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
			       u32 type, unsigned long pa)
{
	u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
	u32 tidx;

	if (!dd->kregbase)
		return;

	if (pa != dd->tidinvalid) {
		if (pa & ((1U << 11) - 1)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		pa >>= 11;
		if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
			qib_dev_err(dd, "Physical page address 0x%lx "
				    "larger than supported\n", pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			pa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			pa |= 2 << 29;
	}
	tidx = tidptr - dd->egrtidbase;
	/* Rev2+ does not need the scratch-write workaround or locking */
	writel(pa, tidp32);
	mmiowb();
}


/**
 * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
 * @dd: the qlogic_ib device
 * @ctxt: the context
 *
 * clear all TID entries for a context, expected and eager.
 * Used from qib_close(). 
 * On this chip, TIDs are only 32 bits,
 * not 64, but they are still on 64 bit boundaries, so tidbase
 * is declared as u64 * for the pointer math, even though we write 32 bits
 */
static void qib_6120_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	/* expected-TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		/* use func pointer because could be one of two funcs */
		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
			      tidinv);

	/* eager-TID array for this context */
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		/* use func pointer because could be one of two funcs */
		dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
			      tidinv);
}

/**
 * qib_6120_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_6120_tidtemplate(struct qib_devdata *dd)
{
	u32 egrsize = dd->rcvegrbufsize;

	/*
	 * For now, we always allocate 4KB buffers (at init) so we can
	 * receive max size packets. We may want a module parameter to
	 * specify 2KB or 4KB and/or make be per ctxt instead of per device
	 * for those who want to reduce memory footprint. Note that the
	 * rcvhdrentsize size must be large enough to hold the largest
	 * IB header (currently 96 bytes) that we expect to handle (plus of
	 * course the 2 dwords of RHF).
	 */
	if (egrsize == 2048)
		dd->tidtemplate = 1U << 29;
	else if (egrsize == 4096)
		dd->tidtemplate = 2U << 29;
	dd->tidinvalid = 0;
}

/*
 * Weak default: write combining is assumed ordered unless an
 * architecture-specific override is linked in elsewhere.
 */
int __attribute__((weak)) qib_unordered_wc(void)
{
	return 0;
}

/**
 * qib_6120_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	if (qib_unordered_wc())
		kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;

	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
		QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
	return 0;
}

/* header follows the 8-byte RHF at the start of the receive entry */
static struct qib_message_header *
qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	return (struct qib_message_header *)
		&rhf_addr[sizeof(u64) / sizeof(u32)];
}

/* partition contexts between kernel receive queues and user contexts */
static void qib_6120_config_ctxts(struct qib_devdata *dd)
{
	dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
	if (qib_n_krcv_queues > 1) {
		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
		if (dd->first_user_ctxt > dd->ctxtcnt)
			dd->first_user_ctxt = dd->ctxtcnt;
		dd->qpn_mask = dd->first_user_ctxt <= 2 ?
			2 : 6;
	} else
		dd->first_user_ctxt = dd->num_pports;
	dd->n_krcv_queues = dd->first_user_ctxt;
}

/*
 * Update the user-visible rcvhdrq head (and optionally the eager index
 * head) for a context.  @npkts is accepted for interface compatibility
 * but unused on this chip.
 */
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
}

/* return nonzero if the context's receive header queue is empty */
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

/*
 * Used when we close any ctxt, for DMA already in flight
 * at close.  Can't be done until we know hdrq size, so not
 * early in chip init.
 */
static void alloc_dummy_hdrq(struct qib_devdata *dd)
{
	dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
					dd->rcd[0]->rcvhdrq_size,
					&dd->cspec->dummy_hdrq_phys,
					GFP_KERNEL | __GFP_COMP);
	if (!dd->cspec->dummy_hdrq) {
		qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
		/* fallback to just 0'ing */
		dd->cspec->dummy_hdrq_phys = 0UL;
	}
}

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location is chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);

	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
	/* ctxt < 0 means "all configured contexts" */
	if (ctxt < 0)
		mask = (1ULL << dd->ctxtcnt) - 1;
	else
		mask = (1ULL << ctxt);
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/* always done for specific ctxt */
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
		if (!(dd->flags & QIB_NODMA_RTAIL))
			dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
				    dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
				    dd->rcd[ctxt]->rcvhdrq_phys);

		if (ctxt == 0 && !dd->cspec->dummy_hdrq)
			alloc_dummy_hdrq(dd);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
			dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		/*
		 * Be paranoid, and never write 0's to these, just use an
		 * unused page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the ctxt, it should stop "soon",
		 * even if a packet or two is in already in flight after we
		 * disabled the ctxt.  Only 6120 has this issue.
		 */
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
					    dd->cspec->dummy_hdrq_phys);
			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
					    dd->cspec->dummy_hdrq_phys);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
					    i, dd->cspec->dummy_hdrq_phys);
				qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
					    i, dd->cspec->dummy_hdrq_phys);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}

/*
 * Modify the SENDCTRL register in chip-specific way. This
 * is a function there may be multiple such registers with
 * slightly different layouts. Only operations actually used
 * are implemented yet.
 * Chip requires no back-back sendctrl writes, so write
 * scratch register after writing sendctrl
 */
static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 tmp_dd_sendctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	/* First the ones that are "sticky", saved in shadow */
	if (op & QIB_SENDCTRL_CLEAR)
		dd->sendctrl = 0;
	if (op & QIB_SENDCTRL_SEND_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
	else if (op & QIB_SENDCTRL_SEND_ENB)
		dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
	if (op & QIB_SENDCTRL_AVAIL_DIS)
		dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
	else if (op & QIB_SENDCTRL_AVAIL_ENB)
		dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);

	if (op & QIB_SENDCTRL_DISARM_ALL) {
		u32 i, last;

		tmp_dd_sendctrl = dd->sendctrl;
		/*
		 * disarm any that are not yet launched, disabling sends
		 * and updates until done.
		 */
		last = dd->piobcnt2k + dd->piobcnt4k;
		tmp_dd_sendctrl &=
			~(SYM_MASK(SendCtrl, PIOEnable) |
			  SYM_MASK(SendCtrl, PIOBufAvailUpd));
		for (i = 0; i < last; i++) {
			qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
				       SYM_MASK(SendCtrl, Disarm) |
				       i);
			/* scratch write: chip forbids back-to-back sendctrl */
			qib_write_kreg(dd, kr_scratch, 0);
		}
	}

	tmp_dd_sendctrl = dd->sendctrl;

	if (op & QIB_SENDCTRL_FLUSH)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
	if (op & QIB_SENDCTRL_DISARM)
		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
			((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
			 SYM_LSB(SendCtrl, DisarmPIOBuf));
	if (op & QIB_SENDCTRL_AVAIL_BLIP)
		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);

	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
		/* restore the shadow value, re-enabling avail updates */
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
		qib_write_kreg(dd, kr_scratch, 0);
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	if (op & QIB_SENDCTRL_FLUSH) {
		u32 v;
		/*
		 * ensure writes have hit chip, then do a few
		 * more reads, to allow DMA of pioavail registers
		 * to occur, so in-memory copy is in sync with
		 * the chip.  Not always safe to sleep.
		 */
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		v = qib_read_kreg32(dd, kr_scratch);
		qib_write_kreg(dd, kr_scratch, v);
		qib_read_kreg32(dd, kr_scratch);
	}
}

/**
 * qib_portcntr_6120 - read a per-port counter
 * @ppd: the qlogic_ib pport data
 * @reg: the counter to snapshot (QIBPORTCNTR_* value)
 */
static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
{
	u64 ret = 0ULL;
	struct qib_devdata *dd = ppd->dd;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u16 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
		[QIBPORTCNTR_PSXMITDATA] = 0xffff,
		[QIBPORTCNTR_PSXMITPKTS] = 0xffff,
		[QIBPORTCNTR_PSXMITWAIT] = 0xffff,
		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
		[QIBPORTCNTR_PSRCVDATA] = 0xffff,
		[QIBPORTCNTR_PSRCVPKTS] = 0xffff,
		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
		[QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
		[QIBPORTCNTR_RXVLERR] = 0xffff,
		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
		[QIBPORTCNTR_ERRLINK] = cr_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = 0xffff,
		[QIBPORTCNTR_PSINTERVAL] = 0xffff,
		[QIBPORTCNTR_PSSTART] = 0xffff,
		[QIBPORTCNTR_PSSTAT] = 0xffff,
		[QIBPORTCNTR_VL15PKTDROP] = 0xffff,
		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg];

	/* handle counters requests not implemented as chip counters */
	if (reg == QIBPORTCNTR_LLI)
		ret = dd->cspec->lli_errs;
	else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
		ret = dd->cspec->overrun_thresh_errs;
	else if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts */
		for (i = 0; i < dd->first_user_ctxt; i++)
			ret += read_6120_creg32(dd, cr_portovfl + i);
	} else if (reg == QIBPORTCNTR_PSSTAT)
		ret = dd->cspec->pma_sample_status;
	if (creg == 0xffff)
		goto done;

	/*
	 * only fast incrementing counters are 64bit; use 32 bit reads to
	 * avoid two independent reads when on opteron
	 */
	if (creg == cr_wordsend || creg == cr_wordrcv ||
	    creg == cr_pktsend || creg == cr_pktrcv)
		ret = read_6120_creg(dd, creg);
	else
		ret = read_6120_creg32(dd, creg);
	if (creg == cr_ibsymbolerr) {
		if (dd->cspec->ibdeltainprog)
			ret -= ret -
				dd->cspec->ibsymsnap;
		ret -= dd->cspec->ibsymdelta;
	} else if (creg == cr_iblinkerrrecov) {
		/*
		 * While a delta snapshot is in progress, "ret -= ret - snap"
		 * reports the snapshot value; the accumulated delta is then
		 * subtracted so link bounces don't inflate the counter.
		 */
		if (dd->cspec->ibdeltainprog)
			ret -= ret - dd->cspec->iblnkerrsnap;
		ret -= dd->cspec->iblnkerrdelta;
	}
	if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
		ret += dd->cspec->rxfc_unsupvl_errs;

done:
	return ret;
}

/*
 * Device counter names (not port-specific), one line per stat,
 * single string. Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility. Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" conters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr6120indices contains the corresponding register indices.
 */
static const char cntr6120names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n";

static const size_t cntr6120indices[] = {
	cr_lbint,
	cr_lbflowstall,
	cr_errtidfull,
	cr_errtidvalid,
	cr_portovfl + 0,
	cr_portovfl + 1,
	cr_portovfl + 2,
	cr_portovfl + 3,
	cr_portovfl + 4,
};

/*
 * same as cntr6120names and cntr6120indices, but for port-specific counters.
 * portcntr6120indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr6120names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"E IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	;

#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
static const size_t portcntr6120indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	cr_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	cr_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	cr_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	cr_rcvflowctrl_err,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	cr_invalidslen,
	cr_senddropped,
	cr_errslen,
	cr_sendunderrun,
	cr_txunsupvl,
};

/* do all the setup to make the counter reads efficient later */
static void init_6120_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
				   * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr6120names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
				       * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->portcntrs)
		qib_dev_err(dd, "Failed allocation for portcounters\n");
}

static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)cntr6120names;
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		/*
		 * NOTE(review): this second pos >= ret check is unreachable
		 * (the branch above already covers it) -- dead code, left
		 * as-is pending confirmation against upstream.
		 */
		if (pos >= ret) {
			ret = 0; /* final read after getting everything */
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			*cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
	}
done:
	return ret;
}

static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (namep) {
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
		else
			*namep = (char *)portcntr6120names;
	} else {
		u64 *cntr = dd->cspec->portcntrs;
		struct qib_pportdata *ppd = &dd->pport[port];
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_6120(ppd,
					portcntr6120indices[i] &
					~_PORT_VIRT_FLAG);
			else
				*cntr++ = read_6120_creg32(dd,
					portcntr6120indices[i]);
		}
	}
done:
	return ret;
}

/*
 * Periodic sanity check: re-write the error mask if the chip's copy
 * has drifted from the shadow (a known chip issue), and clear/re-arm
 * error state if the chip is frozen or has pending hwerrs.
 */
static void qib_chk_6120_errormask(struct qib_devdata *dd)
{
	static u32 fixed;
	u32 ctrl;
	unsigned long errormask;
	unsigned long hwerrs;

	if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
		return;

	errormask = qib_read_kreg64(dd, kr_errmask);

	if (errormask == dd->cspec->errormask)
		return;
	fixed++;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	ctrl = qib_read_kreg32(dd, kr_control);

	qib_write_kreg(dd, kr_errmask,
		       dd->cspec->errormask);

	if ((hwerrs & dd->cspec->hwerrmask) ||
	    (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
		qib_write_kreg(dd, kr_hwerrclear, 0ULL);
		qib_write_kreg(dd, kr_errclear, 0ULL);
		/* force re-interrupt of pending events, just in case */
		qib_write_kreg(dd, kr_intclear, 0ULL);
		qib_devinfo(dd->pcidev,
			"errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
			fixed, errormask, (unsigned long)dd->cspec->errormask,
			ctrl, hwerrs);
	}
}

/**
 * qib_get_6120_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * This needs more work; in particular, decision on whether we really
 * need traffic_wds done the way it is
 * called from add_timer
 */
static void qib_get_6120_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 traffic_wds;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!(dd->flags & QIB_INITTED) ||
	    dd->diag_client)
		/* but re-arm the timer, for diags case; won't hurt other */
		goto done;

	/*
	 * We now try to maintain an activity timer, based on traffic
	 * exceeding a threshold, so we need to check the word-counts
	 * even if they are 64-bit.
	 */
	traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
		qib_portcntr_6120(ppd, cr_wordrcv);
	spin_lock_irqsave(&dd->eep_st_lock, flags);
	traffic_wds -= dd->traffic_wds;
	dd->traffic_wds += traffic_wds;
	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->active_time); /* S/B #define */
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);

	qib_chk_6120_errormask(dd);
done:
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/* no interrupt fallback for these chips */
static int qib_6120_nointr_fallback(struct qib_devdata *dd)
{
	return 0;
}

/*
 * reset the XGXS (between serdes and IBC). Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining. To do this right, we reset IBC
 * as well.
 */
static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
{
	u64 val, prev_val;
	struct qib_devdata *dd = ppd->dd;

	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
	val = prev_val | QLOGIC_IB_XGXS_RESET;
	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
	/* disable the link while the XGXS reset bit is toggled */
	qib_write_kreg(dd, kr_control,
		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
	qib_write_kreg(dd, kr_xgxs_cfg, val);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
	qib_write_kreg(dd, kr_control, dd->control);
}

/* read one of the QIB_IB_CFG_* values for this port */
static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int ret;

	switch (which) {
	case QIB_IB_CFG_LWID:
		ret = ppd->link_width_active;
		break;

	case QIB_IB_CFG_SPD:
		ret = ppd->link_speed_active;
		break;

	case QIB_IB_CFG_LWID_ENB:
		ret = ppd->link_width_enabled;
		break;

	case QIB_IB_CFG_SPD_ENB:
		ret = ppd->link_speed_enabled;
		break;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		break;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 0;
		break;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 0;
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
				OverrunThreshold);
		break;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
				PhyerrThreshold);
		break;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->dd->cspec->ibcctrl &
		       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		break;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		ret = 0; /* no heartbeat on this chip */
		break;

	case QIB_IB_CFG_PMA_TICKS:
		ret = 250; /* 1 usec. */
		break;

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/*
 * We assume range checking is already done, if needed.
 */
static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	u64 val64;
	u16 lcmd, licmd;

	switch (which) {
	case QIB_IB_CFG_LWID_ENB:
		ppd->link_width_enabled = val;
		break;

	case QIB_IB_CFG_SPD_ENB:
		ppd->link_speed_enabled = val;
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
				  OverrunThreshold);
		if (val64 != val) {
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, OverrunThreshold);
			dd->cspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, OverrunThreshold);
			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		break;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
				  PhyerrThreshold);
		if (val64 != val) {
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, PhyerrThreshold);
			dd->cspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, PhyerrThreshold);
			qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		break;

	case QIB_IB_CFG_PKEYS: /* update pkeys */
		val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg(dd, kr_partitionkey, val64);
		break;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			dd->cspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
		else /* SLEEP */
			dd->cspec->ibcctrl |=
				SYM_MASK(IBCCtrl, LinkDownDefaultState);
		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		break;

	case
	     QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
		dd->cspec->ibcctrl |= (u64)val <<
			SYM_LSB(IBCCtrl, MaxPktLen);
		qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		break;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			/* snapshot error counters before taking link down */
			if (!dd->cspec->ibdeltainprog) {
				dd->cspec->ibdeltainprog = 1;
				dd->cspec->ibsymsnap =
					read_6120_creg32(dd, cr_ibsymbolerr);
				dd->cspec->iblnkerrsnap =
					read_6120_creg32(dd, cr_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_6120_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_HRTBT:
		ret = -EINVAL;
		break;

	default:
		ret = -EINVAL;
	}
bail:
	return ret;
}

/* enable/disable IBC loopback; @what is "ibc" or "off" */
static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	if (!strncmp(what, "ibc", 3)) {
		ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
		qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
			    "(normal)\n", ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}

/*
 * PMA sampling timer: advances STARTED -> RUNNING (taking the baseline
 * snapshot) and RUNNING -> DONE (computing the deltas).  Runs in timer
 * context; takes ibp->lock.
 */
static void pma_6120_timer(unsigned long data)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)data;
	struct qib_chip_specific *cs = ppd->dd->cspec;
	struct qib_ibport *ibp = &ppd->ibport_data;
	unsigned long flags;

	spin_lock_irqsave(&ibp->lock, flags);
	if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
		mod_timer(&cs->pma_timer,
		      jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
	} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
		u64 ta, tb, tc, td, te;

		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
		qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);

		cs->sword = ta - cs->sword;
		cs->rword = tb - cs->rword;
		cs->spkts = tc - cs->spkts;
		cs->rpkts = td - cs->rpkts;
		cs->xmit_wait = te - cs->xmit_wait;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
}

/*
 * Note that the caller has the ibp->lock held.
 */
static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	struct qib_chip_specific *cs = ppd->dd->cspec;

	if (start && intv) {
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
	} else if (intv) {
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
		qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
				      &cs->spkts, &cs->rpkts, &cs->xmit_wait);
		mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
	} else {
		cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
		cs->sword = 0;
		cs->rword = 0;
		cs->spkts = 0;
		cs->rpkts = 0;
		cs->xmit_wait = 0;
	}
}

/* map the chip's IBCStatus link state field to an IBTA port state */
static u32 qib_6120_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);

	switch (state) {
	case IB_6120_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_6120_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_6120_L_STATE_ACTIVE:
		/* fall through */
	case IB_6120_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_6120_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_6120_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
	return qib_6120_physportstate[state];
}

/*
 * Link up/down notification: fold the ibsymbolerr/iblinkerrrecov
 * snapshots into the running deltas on link-up, and start a new
 * snapshot on link-down (see qib_portcntr_6120()).
 */
static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (ibup) {
		if (ppd->dd->cspec->ibdeltainprog) {
			ppd->dd->cspec->ibdeltainprog = 0;
			ppd->dd->cspec->ibsymdelta +=
				read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
				ppd->dd->cspec->ibsymsnap;
			ppd->dd->cspec->iblnkerrdelta +=
				read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
				ppd->dd->cspec->iblnkerrsnap;
		}
		qib_hol_init(ppd);
	} else {
		ppd->dd->cspec->lli_counter = 0;
		if (!ppd->dd->cspec->ibdeltainprog) {
			ppd->dd->cspec->ibdeltainprog = 1;
			ppd->dd->cspec->ibsymsnap =
				read_6120_creg32(ppd->dd, cr_ibsymbolerr);
			ppd->dd->cspec->iblnkerrsnap =
				read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
		}
		qib_hol_down(ppd);
	}

	qib_6120_setup_setextled(ppd, ibup);

	return 0;
}

/* Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * these are in their canonical postions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * returns contents of GP Inputs.
 */
static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function. We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}

/*
 * Read fundamental info we need to use the chip. These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_6120_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	/*
	 * NOTE(review): rcvhdrcnt is loaded from kr_rcvegrcnt, not a
	 * rcvhdrcnt register -- presumably deliberate for this chip,
	 * but confirm against the 6120 register map.
	 */
	dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);

	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport->ibmtu = (u32)mtu;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		(((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			(((char __iomem *) dd->kregbase) +
			 (dd->piobufbase >> 32));
		/*
		 * 4K buffers take 2 pages; we use roundup just to be
		 * paranoid; we calculate it once here, rather than on
		 * ever buf allocate
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;

	/* one pioavail bit-pair per buffer, packed into u64 registers */
	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_6120_chip_params(), so split out as separate function
 */
static void set_6120_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;
	cregbase = qib_read_kreg32(dd, kr_counterregbase);
	dd->cspec->cregbase =
(u64 __iomem *)3163((char __iomem *) dd->kregbase + cregbase);31643165dd->egrtidbase = (u64 __iomem *)3166((char __iomem *) dd->kregbase + dd->rcvegrbase);3167}31683169/*3170* Write the final few registers that depend on some of the3171* init setup. Done late in init, just before bringing up3172* the serdes.3173*/3174static int qib_late_6120_initreg(struct qib_devdata *dd)3175{3176int ret = 0;3177u64 val;31783179qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);3180qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);3181qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);3182qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);3183val = qib_read_kreg64(dd, kr_sendpioavailaddr);3184if (val != dd->pioavailregs_phys) {3185qib_dev_err(dd, "Catastrophic software error, "3186"SendPIOAvailAddr written as %lx, "3187"read back as %llx\n",3188(unsigned long) dd->pioavailregs_phys,3189(unsigned long long) val);3190ret = -EINVAL;3191}3192return ret;3193}31943195static int init_6120_variables(struct qib_devdata *dd)3196{3197int ret = 0;3198struct qib_pportdata *ppd;3199u32 sbufs;32003201ppd = (struct qib_pportdata *)(dd + 1);3202dd->pport = ppd;3203dd->num_pports = 1;32043205dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);3206ppd->cpspec = NULL; /* not used in this chip */32073208spin_lock_init(&dd->cspec->kernel_tid_lock);3209spin_lock_init(&dd->cspec->user_tid_lock);3210spin_lock_init(&dd->cspec->rcvmod_lock);3211spin_lock_init(&dd->cspec->gpio_lock);32123213/* we haven't yet set QIB_PRESENT, so use read directly */3214dd->revision = readq(&dd->kregbase[kr_revision]);32153216if ((dd->revision & 0xffffffffU) == 0xffffffffU) {3217qib_dev_err(dd, "Revision register read failure, "3218"giving up initialization\n");3219ret = -ENODEV;3220goto bail;3221}3222dd->flags |= QIB_PRESENT; /* now register routines work */32233224dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,3225ChipRevMajor);3226dd->minrev = (u8) SYM_FIELD(dd->revision, 
Revision_R,3227ChipRevMinor);32283229get_6120_chip_params(dd);3230pe_boardname(dd); /* fill in boardname */32313232/*3233* GPIO bits for TWSI data and clock,3234* used for serial EEPROM.3235*/3236dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;3237dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;3238dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;32393240if (qib_unordered_wc())3241dd->flags |= QIB_PIO_FLUSH_WC;32423243/*3244* EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.3245* 2 is Some Misc, 3 is reserved for future.3246*/3247dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);32483249/* Ignore errors in PIO/PBC on systems with unordered write-combining */3250if (qib_unordered_wc())3251dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;32523253dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);32543255dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);32563257qib_init_pportdata(ppd, dd, 0, 1);3258ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;3259ppd->link_speed_supported = QIB_IB_SDR;3260ppd->link_width_enabled = IB_WIDTH_4X;3261ppd->link_speed_enabled = ppd->link_speed_supported;3262/* these can't change for this chip, so set once */3263ppd->link_width_active = ppd->link_width_enabled;3264ppd->link_speed_active = ppd->link_speed_enabled;3265ppd->vls_supported = IB_VL_VL0;3266ppd->vls_operational = ppd->vls_supported;32673268dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;3269dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;3270dd->rhf_offset = 0;32713272/* we always allocate at least 2048 bytes for eager buffers */3273ret = ib_mtu_enum_to_int(qib_ibmtu);3274dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;32753276qib_6120_tidtemplate(dd);32773278/*3279* We can request a receive interrupt for 1 or3280* more packets from current offset. 
For now, we set this3281* up for a single packet.3282*/3283dd->rhdrhead_intr_off = 1ULL << 32;32843285/* setup the stats timer; the add_timer is done at end of init */3286init_timer(&dd->stats_timer);3287dd->stats_timer.function = qib_get_6120_faststats;3288dd->stats_timer.data = (unsigned long) dd;32893290init_timer(&dd->cspec->pma_timer);3291dd->cspec->pma_timer.function = pma_6120_timer;3292dd->cspec->pma_timer.data = (unsigned long) ppd;32933294dd->ureg_align = qib_read_kreg32(dd, kr_palign);32953296dd->piosize2kmax_dwords = dd->piosize2k >> 2;3297qib_6120_config_ctxts(dd);3298qib_set_ctxtcnt(dd);32993300if (qib_wc_pat) {3301ret = init_chip_wc_pat(dd, 0);3302if (ret)3303goto bail;3304}3305set_6120_baseaddrs(dd); /* set chip access pointers now */33063307ret = 0;3308if (qib_mini_init)3309goto bail;33103311qib_num_cfg_vls = 1; /* if any 6120's, only one VL */33123313ret = qib_create_ctxts(dd);3314init_6120_cntrnames(dd);33153316/* use all of 4KB buffers for the kernel, otherwise 16 */3317sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;33183319dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;3320dd->pbufsctxt = dd->lastctxt_piobuf /3321(dd->cfgctxts - dd->first_user_ctxt);33223323if (ret)3324goto bail;3325bail:3326return ret;3327}33283329/*3330* For this chip, we want to use the same buffer every time3331* when we are trying to bring the link up (they are always VL153332* packets). At that link state the packet should always go out immediately3333* (or at least be discarded at the tx interface if the link is down).3334* If it doesn't, and the buffer isn't available, that means some other3335* sender has gotten ahead of us, and is preventing our packet from going3336* out. In that case, we flush all packets, and try again. 
 If that still
 * fails, we fail the request, and hope things work the next time around.
 *
 * We don't need very complicated heuristics on whether the packet had
 * time to go out or not, since even at SDR 1X, it goes out in very short
 * time periods, covered by the chip reads done here and as part of the
 * flush.
 */
static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
{
	u32 __iomem *buf;
	/* always use the last PIO buffer for link-bringup (VL15) packets */
	u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;

	/*
	 * always blip to get avail list updated, since it's almost
	 * always needed, and is fairly cheap.
	 */
	sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	if (buf)
		goto done;

	/* buffer still busy: disarm and flush everything, then retry once */
	sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
			  QIB_SENDCTRL_AVAIL_BLIP);
	ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
done:
	return buf;
}

/*
 * Pick a PIO send buffer for the packet described by @pbc.  VL15
 * packets while the link is not active (and autoneg is not in
 * progress) use the dedicated link buffer; everything else chooses
 * a 2K or 4K buffer based on the packet length in the PBC.
 */
static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *buf;

	if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
	    !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
		buf = get_6120_link_buf(ppd, pbufnum);
	else {
		/* too big for a 2K buffer? start the search at the 4K pool */
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		/* try 4k if all 2k busy, so same last for both sizes */
		last = dd->piobcnt2k + dd->piobcnt4k - 1;
		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
	}
	return buf;
}

/*
 * The 6120 has no send DMA engine; the following f_sdma_* entries are
 * stubs so the common code's ops table is fully populated.
 */
static int init_sdma_6120_regs(struct qib_pportdata *ppd)
{
	return -ENODEV;
}

static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
{
	return 0;
}

static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
{
	return 0;
}

static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
{
}

static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
}

static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}

/*
 * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
 * The chip ignores the bit if set.
 */
static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
}

/* no special VL15 buffer setup needed on this chip */
static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
{
}

/*
 * Per-context init: each context's eager count equals the header queue
 * count, and eager TIDs are laid out contiguously per context.
 */
static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
{
	rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
	rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
}

/* no per-buffer TX checking hardware on this chip */
static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 avail, struct qib_ctxtdata *rcd)
{
}

/* write the scratch register (used as a chip write-flush) */
static void writescratch(struct qib_devdata *dd, u32 val)
{
	(void) qib_write_kreg(dd, kr_scratch, val);
}

/* 6120 has no on-chip temperature sensor */
static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	return -ENXIO;
}

/* Dummy function, as 6120 boards never disable EEPROM Write */
static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
{
	return 1;
}

/**
 * qib_init_iba6120_funcs - set up the chip-specific function pointers
 * @pdev: pci_dev of the qlogic_ib device
 * @ent: pci_device_id matching this chip
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 *
 * It also allocates/partially-inits the qib_devdata struct for
 * this device.
 */
struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret;

	dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
			       sizeof(struct qib_chip_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes    = qib_6120_bringup_serdes;
	dd->f_cleanup           = qib_6120_setup_cleanup;
	dd->f_clear_tids        = qib_6120_clear_tids;
	dd->f_free_irq          = qib_6120_free_irq;
	dd->f_get_base_info     = qib_6120_get_base_info;
	dd->f_get_msgheader     = qib_6120_get_msgheader;
	dd->f_getsendbuf        = qib_6120_getsendbuf;
	dd->f_gpio_mod          = gpio_6120_mod;
	dd->f_eeprom_wen        = qib_6120_eeprom_wen;
	dd->f_hdrqempty         = qib_6120_hdrqempty;
	dd->f_ib_updown         = qib_6120_ib_updown;
	dd->f_init_ctxt         = qib_6120_init_ctxt;
	dd->f_initvl15_bufs     = qib_6120_initvl15_bufs;
	dd->f_intr_fallback     = qib_6120_nointr_fallback;
	dd->f_late_initreg      = qib_late_6120_initreg;
	dd->f_setpbc_control    = qib_6120_setpbc_control;
	dd->f_portcntr          = qib_portcntr_6120;
	/* rev-2 parts need the workaround-free TID writer */
	dd->f_put_tid           = (dd->minrev >= 2) ?
				      qib_6120_put_tid_2 :
				      qib_6120_put_tid;
	dd->f_quiet_serdes      = qib_6120_quiet_serdes;
	dd->f_rcvctrl           = rcvctrl_6120_mod;
	dd->f_read_cntrs        = qib_read_6120cntrs;
	dd->f_read_portcntrs    = qib_read_6120portcntrs;
	dd->f_reset             = qib_6120_setup_reset;
	dd->f_init_sdma_regs    = init_sdma_6120_regs;
	dd->f_sdma_busy         = qib_sdma_6120_busy;
	dd->f_sdma_gethead      = qib_sdma_6120_gethead;
	dd->f_sdma_sendctrl     = qib_6120_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
	dd->f_sdma_update_tail  = qib_sdma_update_6120_tail;
	dd->f_sendctrl          = sendctrl_6120_mod;
	dd->f_set_armlaunch     = qib_set_6120_armlaunch;
	dd->f_set_cntr_sample   = qib_set_cntr_6120_sample;
	dd->f_iblink_state      = qib_6120_iblink_state;
	dd->f_ibphys_portstate  = qib_6120_phys_portstate;
	dd->f_get_ib_cfg        = qib_6120_get_ib_cfg;
	dd->f_set_ib_cfg        = qib_6120_set_ib_cfg;
	dd->f_set_ib_loopback   = qib_6120_set_loopback;
	dd->f_set_intr_state    = qib_6120_set_intr_state;
	dd->f_setextled         = qib_6120_setup_setextled;
	dd->f_txchk_change      = qib_6120_txchk_change;
	dd->f_update_usrhead    = qib_update_6120_usrhead;
	dd->f_wantpiobuf_intr   = qib_wantpiobuf_6120_intr;
	dd->f_xgxs_reset        = qib_6120_xgxs_reset;
	dd->f_writescratch      = writescratch;
	dd->f_tempsense_rd      = qib_6120_tempsense_rd;
	/*
	 * Do remaining pcie setup and save pcie values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped and accessible,
	 * but chip registers are not set up until start of
	 * init_6120_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = init_6120_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init)
		goto bail;

	if (qib_pcie_params(dd, 8, NULL, NULL))
		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
			    "continuing anyway\n");
	dd->cspec->irq = pdev->irq; /* save IRQ */

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

	/* clear a stale SerDes PLL-failed indication left by reset */
	if (qib_read_kreg64(dd, kr_hwerrstatus) &
	    QLOGIC_IB_HWE_SERDESPLLFAILED)
		qib_write_kreg(dd, kr_hwerrclear,
			       QLOGIC_IB_HWE_SERDESPLLFAILED);

	/* setup interrupt handler (interrupt type handled above) */
	qib_setup_6120_interrupt(dd);
	/* Note that qpn_mask is set by qib_6120_config_ctxts() first */
	qib_6120_init_hwerrors(dd);

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}