drivers/infiniband/hw/qib/qib_iba7220.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * QLogic_IB 7220 chip (except that specific to the SerDes)
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_7220.h"

static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
static u32 qib_7220_iblink_state(u64);
static u8 qib_7220_phys_portstate(u64);
static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic QLogic_IB 7220 PCI-Express chip, with the
 * exception of SerDes support, which is in qib_sd7220.c.
 */

/* Below uses machine-generated qib_chipnum_regs.h file */
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))

/* Use defines to tie machine-generated names to lower-case names */
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcctrl KREG_IDX(IBCCtrl)
#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
#define kr_palign KREG_IDX(PageAlign)
#define kr_partitionkey KREG_IDX(RcvPartitionKey)
#define kr_portcnt KREG_IDX(PortCnt)
#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_senddmabase KREG_IDX(SendDmaBase)
#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
#define kr_senddmahead KREG_IDX(SendDmaHead)
#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
#define kr_senddmastatus KREG_IDX(SendDmaStatus)
#define kr_senddmatail KREG_IDX(SendDmaTail)
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)

/* These must only be written via qib_write_kreg_ctxt() */
#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)


#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
			QIB_7220_LBIntCnt_OFFS) / sizeof(u64))

#define cr_badformat CREG_IDX(RxVersionErrCnt)
#define cr_erricrc CREG_IDX(RxICRCErrCnt)
#define cr_errlink CREG_IDX(RxLinkMalformCnt)
#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
#define cr_err_rlen CREG_IDX(RxLenErrCnt)
#define cr_errslen CREG_IDX(TxLenErrCnt)
#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define cr_lbint CREG_IDX(LBIntCnt)
#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
#define cr_pktrcv CREG_IDX(RxDataPktCnt)
#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define cr_pktsend CREG_IDX(TxDataPktCnt)
#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
#define cr_rcvebp CREG_IDX(RxEBPCnt)
#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
#define cr_sendstall CREG_IDX(TxFlowStallCnt)
#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
#define cr_wordrcv CREG_IDX(RxDwordCnt)
#define cr_wordsend CREG_IDX(TxDwordCnt)
#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define cr_psstat CREG_IDX(PSStat)
#define cr_psstart CREG_IDX(PSStart)
#define cr_psinterval CREG_IDX(PSInterval)
#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7220_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7220_##regname##_##fldname##_RMASK << \
	 QIB_7220_##regname##_##fldname##_LSB)
#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname)))
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
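
/*
 * Illustrative sketch (not part of the original driver): how the SYM_*
 * helpers compose, using made-up values.  If the machine-generated header
 * defined a field Foo in register Bar with
 *	QIB_7220_Bar_Foo_LSB   == 4
 *	QIB_7220_Bar_Foo_RMASK == 0x7	(3-bit field, right-justified)
 * then:
 *	SYM_LSB(Bar, Foo)	  == 4
 *	SYM_MASK(Bar, Foo)	  == 0x7ULL << 4 == 0x70  (in-place mask)
 *	SYM_FIELD(0x50, Bar, Foo) == (0x50 >> 4) & 0x7 == 0x5
 * ERR_MASK()/HWE_MASK() are just SYM_MASK() with "Mask" appended to the
 * field name, matching the naming convention of the ErrMask/HwErrMask
 * register fields.
 */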

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3		/* move to 0x31 */

#define BLOB_7220_IBCHG 0x81

/*
 * We could have a single register get/put routine, that takes a group type,
 * but this is somewhat clearer and cleaner.  It also gives us some error
 * checking.  64 bit register reads should always work, but are inefficient
 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible.  User register and counter register
 * reads are always 32 bit reads, so only one form of those routines.
 */

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns -1 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;

	if (dd->userbase)
		return readl(regno + (u64 __iomem *)
			     ((char __iomem *)dd->userbase +
			      dd->ureg_align * ctxt));
	else
		return readl(regno + (u64 __iomem *)
			     (dd->uregbase +
			      (char __iomem *)dd->kregbase +
			      dd->ureg_align * ctxt));
}

/**
 * qib_write_ureg - write 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}
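
/*
 * Illustrative sketch (not part of the original driver) of the per-context
 * address arithmetic used by qib_read_ureg32()/qib_write_ureg() above.
 * Each context gets a ureg_align-sized window of user registers, and regno
 * indexes 64-bit slots within that window.  With made-up values
 * ureg_align == 0x1000, ctxt == 2, regno == 3, and a separate userbase
 * mapping present:
 *
 *	base = (char __iomem *)dd->userbase + 0x1000 * 2;   context window
 *	slot = (u64 __iomem *)base + 3;                     8-byte slots
 *
 * so the access lands at userbase + 0x2018.  Without a separate userbase
 * mapping, the same window is reached at kregbase + uregbase instead.
 */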

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline void write_7220_creg(const struct qib_devdata *dd,
				   u16 regno, u64 value)
{
	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->cspec->cregbase[regno]);
}

static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

/* kr_revision bits */
#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
#define QLOGIC_IB_R_EMULATORREV_SHIFT 40

/* kr_control bits */
#define QLOGIC_IB_C_RESET (1U << 7)

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVURG_SHIFT 32
#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)

#define QLOGIC_IB_C_FREEZEMODE 0x00000002
#define QLOGIC_IB_C_LINKENABLE 0x00000004

#define QLOGIC_IB_I_SDMAINT		0x8000000000000000ULL
#define QLOGIC_IB_I_SDMADISABLED	0x4000000000000000ULL
#define QLOGIC_IB_I_ERROR		0x0000000080000000ULL
#define QLOGIC_IB_I_SPIOSENT		0x0000000040000000ULL
#define QLOGIC_IB_I_SPIOBUFAVAIL	0x0000000020000000ULL
#define QLOGIC_IB_I_GPIO		0x0000000010000000ULL

/* variables for sanity checking interrupt and errors */
#define QLOGIC_IB_I_BITSEXTANT \
		(QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
		(QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
		(QLOGIC_IB_I_RCVAVAIL_MASK << \
		 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
		QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
		QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
		QLOGIC_IB_I_SERDESTRIMDONE)

#define IB_HWE_BITSEXTANT \
	       (HWE_MASK(RXEMemParityErr) | \
		HWE_MASK(TXEMemParityErr) | \
		(QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
		 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
		QLOGIC_IB_HWE_PCIE1PLLFAILED | \
		QLOGIC_IB_HWE_PCIE0PLLFAILED | \
		QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
		QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
		QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
		QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
		QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
		HWE_MASK(PowerOnBISTFailed) | \
		QLOGIC_IB_HWE_COREPLL_FBSLIP | \
		QLOGIC_IB_HWE_COREPLL_RFSLIP | \
		QLOGIC_IB_HWE_SERDESPLLFAILED | \
		HWE_MASK(IBCBusToSPCParityErr) | \
		HWE_MASK(IBCBusFromSPCParityErr) | \
		QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
		QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
		QLOGIC_IB_HWE_SDMAMEMREADERR | \
		QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
		QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
		QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
		QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
		QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
		QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)

#define IB_E_BITSEXTANT \
	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
	 ERR_MASK(SendSpecialTriggerErr) | \
	 ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
	 ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendDroppedDataPktErr) | \
	 ERR_MASK(SendPioArmLaunchErr) | \
	 ERR_MASK(SendUnexpectedPktNumErr) | \
	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
	 ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
	 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
	 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
	 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
	 ERR_MASK(SDmaUnexpDataErr) | \
	 ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
	 ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
	 ERR_MASK(SDmaDescAddrMisalignErr) | \
	 ERR_MASK(InvalidEEPCmd))

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK  0x00000000000000ffULL
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
#define QLOGIC_IB_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define QLOGIC_IB_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define QLOGIC_IB_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define QLOGIC_IB_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define QLOGIC_IB_HWE_SERDESPLLFAILED      0x1000000000000000ULL
/* specific to this chip */
#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR         0x0000000000000040ULL
#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR          0x0000000000000080ULL
#define QLOGIC_IB_HWE_SDMAMEMREADERR              0x0000000010000000ULL
#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED         0x2000000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT   0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT   0x0200000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT   0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT   0x0800000000000000ULL
#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR      0x0000008000000000ULL
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR       0x0000004000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL

#define IBA7220_IBCC_LINKCMD_SHIFT 19

/* kr_ibcddrctrl bits */
#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
#define IBA7220_IBC_DLIDLMC_SHIFT 32

#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
				SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)

#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
#define IBA7220_IBC_LREV_MASK 1
#define IBA7220_IBC_LREV_SHIFT 8
#define IBA7220_IBC_RXPOL_MASK 1
#define IBA7220_IBC_RXPOL_SHIFT 7
#define IBA7220_IBC_WIDTH_SHIFT 5
#define IBA7220_IBC_WIDTH_MASK 0x3
#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
#define IBA7220_IBC_SPEED_SDR (1 << 2)
#define IBA7220_IBC_SPEED_DDR (1 << 3)
#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
#define IBA7220_IBC_IBTA_1_2_MASK (1)

/* kr_ibcddrstatus */
/* link latency shift is 0, don't bother defining */
#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff

/* kr_extstatus bits */
#define QLOGIC_IB_EXTS_FREQSEL 0x2
#define QLOGIC_IB_EXTS_SERDESSEL 0x4
#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000

/* kr_xgxsconfig bits */
#define QLOGIC_IB_XGXS_RESET 0x5ULL
#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)

/* kr_rcvpktledcnt */
#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
#define QIB_TWSI_TEMP_DEV 0x98

/* HW counter clock is at 4nsec */
#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000

#define IBA7220_R_INTRAVAIL_SHIFT 17
#define IBA7220_R_PKEY_DIS_SHIFT 34
#define IBA7220_R_TAILUPD_SHIFT 35
#define IBA7220_R_CTXTCFG_SHIFT 36

#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
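
/*
 * Illustrative sketch (not part of the original driver): per the
 * definitions above, a TID entry carries a 3-bit size selector at bit 37
 * (1 -> 2KB, 2 -> 4KB) alongside a physical address stored without its
 * low IBA7220_TID_PA_SHIFT bits.  With a made-up, suitably aligned
 * physical address "pa", a 2KB entry would compose roughly as:
 *
 *	u64 tidentry = IBA7220_TID_SZ_2K | (pa >> IBA7220_TID_PA_SHIFT);
 *
 * The driver's actual TID update path (outside this excerpt) performs
 * additional validation before writing such an entry to the chip.
 */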

#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */

/* packet rate matching delay multiplier */
static u8 rate_to_delay[2][2] = {
	/* 1x, 4x */
	{   8, 2 }, /* SDR */
	{   4, 1 }  /* DDR */
};

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 8,
	[IB_RATE_5_GBPS] = 4,
	[IB_RATE_10_GBPS] = 2,
	[IB_RATE_20_GBPS] = 1
};
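
/*
 * Illustrative note (not part of the original driver): both tables map a
 * link's data rate to the same inter-packet delay multiplier, keyed two
 * different ways.  SDR (2.5 Gb/s per lane) at 1X is the slowest case and
 * gets the largest multiplier:
 *
 *	rate_to_delay[0][0]                == 8	(SDR, 1X)
 *	ib_rate_to_delay[IB_RATE_2_5_GBPS] == 8
 *
 * while DDR at 4X (20 Gb/s, the fastest this chip supports) maps to 1.
 */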

#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)

/* link training states, from IBC */
#define IB_7220_LT_STATE_DISABLED        0x00
#define IB_7220_LT_STATE_LINKUP          0x01
#define IB_7220_LT_STATE_POLLACTIVE      0x02
#define IB_7220_LT_STATE_POLLQUIET       0x03
#define IB_7220_LT_STATE_SLEEPDELAY      0x04
#define IB_7220_LT_STATE_SLEEPQUIET      0x05
#define IB_7220_LT_STATE_CFGDEBOUNCE     0x08
#define IB_7220_LT_STATE_CFGRCVFCFG      0x09
#define IB_7220_LT_STATE_CFGWAITRMT      0x0a
#define IB_7220_LT_STATE_CFGIDLE         0x0b
#define IB_7220_LT_STATE_RECOVERRETRAIN  0x0c
#define IB_7220_LT_STATE_RECOVERWAITRMT  0x0e
#define IB_7220_LT_STATE_RECOVERIDLE     0x0f

/* link state machine states from IBC */
#define IB_7220_L_STATE_DOWN             0x0
#define IB_7220_L_STATE_INIT             0x1
#define IB_7220_L_STATE_ARM              0x2
#define IB_7220_L_STATE_ACTIVE           0x3
#define IB_7220_L_STATE_ACT_DEFER        0x4

static const u8 qib_7220_physportstate[0x20] = {
	[IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7220_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7220_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7220_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

int qib_special_trigger;
module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");

#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
	(1ULL << (SYM_LSB(regname, fldname) + (bit))))

#define TXEMEMPARITYERR_PIOBUF \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)

/* 7220 specific hardware errors... */
static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
	/* generic hardware errors */
	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
			  "TXE PIOBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
			  "TXE PIOPBC Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
			  "TXE PIOLAUNCHFIFO Memory Parity"),

	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
			  "RXE RCVBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
			  "RXE LOOKUPQ Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
			  "RXE EAGERTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
			  "RXE EXPTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
			  "RXE FLAGBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
			  "RXE DATAINFO Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
			  "RXE HDRINFO Memory Parity"),

	/* chip-specific hardware errors */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
			  "PCIe Poisoned TLP"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
			  "PCIe completion timeout"),
	/*
	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
	 * parity or memory parity error failures, because most likely we
	 * won't be able to talk to the core of the chip.  Nonetheless, we
	 * might see them, if they are in parts of the PCIe core that aren't
	 * essential.
	 */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
			  "PCIePLL1"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
			  "PCIePLL0"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
			  "PCIe XTLH core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
			  "PCIe ADM TX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
			  "PCIe ADM RX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
			  "SerDes PLL"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
			  "PCIe cpl header queue"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
			  "PCIe cpl data queue"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
			  "Send DMA memory read"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
			  "uC PLL clock not locked"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
			  "PCIe serdes Q0 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
			  "PCIe serdes Q1 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
			  "PCIe serdes Q2 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
			  "PCIe serdes Q3 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
			  "DDS RXEQ memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
			  "IB uC memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
			  "PCIe uC oct0 memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
			  "PCIe uC oct1 memory parity"),
};

#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)

#define QLOGIC_IB_E_PKTERRS (\
		ERR_MASK(SendPktLenErr) | \
		ERR_MASK(SendDroppedDataPktErr) | \
		ERR_MASK(RcvVCRCErr) | \
		ERR_MASK(RcvICRCErr) | \
		ERR_MASK(RcvShortPktLenErr) | \
		ERR_MASK(RcvEBPErr))

/* Convenience for decoding Send DMA errors */
#define QLOGIC_IB_E_SDMAERRS ( \
		ERR_MASK(SDmaGenMismatchErr) | \
		ERR_MASK(SDmaOutOfBoundErr) | \
		ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
		ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
		ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
		ERR_MASK(SDmaUnexpDataErr) | \
		ERR_MASK(SDmaDescAddrMisalignErr) | \
		ERR_MASK(SDmaDisabledErr) | \
		ERR_MASK(SendBufMisuseErr))

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
	 ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
	 ERR_MASK(InvalidAddrErr))

/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we
 * don't ignore errors that are unrelated to freeze and cancelling buffers.
 * Armlaunch can't be ignored because more could arrive while we are still
 * cleaning up, and those need to be cancelled as they happen.
 */
#define E_SPKT_ERRS_IGNORE \
	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
	 ERR_MASK(SendPktLenErr))

/*
 * These are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of a send occurring while
 * the link changes state, so we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvUnexpectedCharErr))

static void autoneg_7220_work(struct work_struct *);
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
 * because we don't need to force the update of pioavail.
 */
static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
{
	unsigned long sbuf[3];
	struct qib_devdata *dd = ppd->dd;

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	/* read these before writing errorclear */
	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
	sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

	if (sbuf[0] || sbuf[1] || sbuf[2])
		qib_disarm_piobufs_set(dd, sbuf,
				       dd->piobcnt2k + dd->piobcnt4k);
}

static void qib_7220_txe_recover(struct qib_devdata *dd)
{
	qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
	qib_disarm_7220_senderrbufs(dd->pport);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);

	spin_lock(&dd->sendctrl_lock);

	dd->sendctrl |= set_sendctrl;
	dd->sendctrl &= ~clr_sendctrl;

	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	spin_unlock(&dd->sendctrl_lock);
}

static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
				      u64 err, char *buf, size_t blen)
{
	static const struct {
		u64 err;
		const char *msg;
	} errs[] = {
		{ ERR_MASK(SDmaGenMismatchErr),
		  "SDmaGenMismatch" },
		{ ERR_MASK(SDmaOutOfBoundErr),
		  "SDmaOutOfBound" },
		{ ERR_MASK(SDmaTailOutOfBoundErr),
		  "SDmaTailOutOfBound" },
		{ ERR_MASK(SDmaBaseErr),
		  "SDmaBase" },
		{ ERR_MASK(SDma1stDescErr),
		  "SDma1stDesc" },
		{ ERR_MASK(SDmaRpyTagErr),
		  "SDmaRpyTag" },
		{ ERR_MASK(SDmaDwEnErr),
		  "SDmaDwEn" },
		{ ERR_MASK(SDmaMissingDwErr),
		  "SDmaMissingDw" },
		{ ERR_MASK(SDmaUnexpDataErr),
		  "SDmaUnexpData" },
		{ ERR_MASK(SDmaDescAddrMisalignErr),
		  "SDmaDescAddrMisalign" },
		{ ERR_MASK(SendBufMisuseErr),
		  "SendBufMisuse" },
		{ ERR_MASK(SDmaDisabledErr),
		  "SDmaDisabled" },
	};
	int i;
	size_t bidx = 0;

	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		if (err & errs[i].err)
			bidx += scnprintf(buf + bidx, blen - bidx,
					  "%s ", errs[i].msg);
	}
}
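
/*
 * Illustrative note (not part of the original driver): the table-driven
 * decoder above appends one space-terminated name per set bit, relying on
 * scnprintf() returning the number of characters actually written, so the
 * output never overruns blen.  For example, with
 * err == (ERR_MASK(SDmaBaseErr) | ERR_MASK(SDmaDisabledErr)) it leaves
 * "SDmaBase SDmaDisabled " in buf.
 */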

/*
 * This is called as part of link down clean up so disarm and flush
 * all send buffers so that SMP packets can be sent.
 */
static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	/* This will trigger the Abort interrupt */
	sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
			  QIB_SENDCTRL_AVAIL_BLIP);
	ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
}

static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and clear and set
	 * the MSB of the generation count to enable generation checking
	 * and load the internal generation counter.
	 */
	qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg(ppd->dd, kr_senddmalengen,
		       ppd->sdma_descq_cnt |
		       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
}

static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
}

#define DISABLES_SDMA (      \
	ERR_MASK(SDmaDisabledErr) | \
	ERR_MASK(SDmaBaseErr) | \
	ERR_MASK(SDmaTailOutOfBoundErr) | \
	ERR_MASK(SDmaOutOfBoundErr) | \
	ERR_MASK(SDma1stDescErr) | \
	ERR_MASK(SDmaRpyTagErr) | \
	ERR_MASK(SDmaGenMismatchErr) | \
	ERR_MASK(SDmaDescAddrMisalignErr) | \
	ERR_MASK(SDmaMissingDwErr) | \
	ERR_MASK(SDmaDwEnErr))

static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;
	char *msg;

	errs &= QLOGIC_IB_E_SDMAERRS;

	msg = dd->cspec->sdmamsgbuf;
	qib_decode_7220_sdma_errs(ppd, errs, msg,
				  sizeof dd->cspec->sdmamsgbuf);
	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs & ERR_MASK(SendBufMisuseErr)) {
		unsigned long sbuf[3];

		sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
		sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
		sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

		qib_dev_err(ppd->dd,
			    "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
			    ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
			    sbuf[0]);
	}

	if (errs & ERR_MASK(SDmaUnexpDataErr))
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
			    ppd->port);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s20_idle:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & ERR_MASK(SDmaDisabledErr))
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s99_running:
		if (errs & DISABLES_SDMA)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e7220_err_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(SendSpecialTriggerErr))
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
		strlcat(buf, "riblostlink ", blen);
	if (err & ERR_MASK(HardwareErr))
		strlcat(buf, "hardware ", blen);
	if (err & ERR_MASK(ResetNegated))
		strlcat(buf, "reset ", blen);
	if (err & QLOGIC_IB_E_SDMAERRS)
		qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
	if (err & ERR_MASK(InvalidEEPCmd))
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}
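
/*
 * Illustrative note (not part of the original driver): the return value
 * of qib_decode_7220_err() lets a caller decide how loudly to report:
 *
 *	if (qib_decode_7220_err(dd, buf, blen, errs))
 *		report buf as a real error;
 *	else
 *		treat buf as "normal" packet errors only (quiet/debug);
 *
 * handle_7220_errors() below uses the decoded string but derives its own
 * significance mask (iserr) rather than consuming this return value.
 */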

static void reenable_7220_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	u64 tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link.  If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7220_LT_STATE_CFGRCVFCFG:
	case IB_7220_LT_STATE_CFGWAITRMT:
	case IB_7220_LT_STATE_TXREVLANES:
	case IB_7220_LT_STATE_CFGENH:
		tnow = get_jiffies_64();
		if (ppd->cpspec->chase_end &&
		    time_after64(tnow, ppd->cpspec->chase_end)) {
			ppd->cpspec->chase_end = 0;
			qib_set_ib_7220_lstate(ppd,
				QLOGIC_IB_IBCC_LINKCMD_DOWN,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
			ppd->cpspec->chase_timer.expires = jiffies +
				QIB_CHASE_DIS_TIME;
			add_timer(&ppd->cpspec->chase_timer);
		} else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;

	default:
		ppd->cpspec->chase_end = 0;
		break;
	}
}

static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
{
	char *msg;
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int log_idx;
	struct qib_pportdata *ppd = dd->pport;
	u64 mask;

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & ERR_MASK(HardwareErr))
		qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QLOGIC_IB_E_SDMAERRS)
		sdma_7220_errors(ppd, errs);

	if (errs & ~IB_E_BITSEXTANT)
		qib_dev_err(dd, "error interrupt with unknown errors "
			    "%llx set\n", (unsigned long long)
			    (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_7220_senderrbufs(ppd);
		if ((errs & E_SUM_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time.  The IB logic then complains that the packet
			 * isn't valid.  We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
		}
	} else if ((errs & E_SUM_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	qib_write_kreg(dd, kr_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	/*
	 * The ones we mask off are handled specially below
	 * or above.  Also mask SDMADISABLED by default as it
	 * is too chatty.
	 */
	mask = ERR_MASK(IBStatusChanged) |
		ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
		ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);

	qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);

	if (errs & E_SUM_PKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & E_SUM_ERRS)
		qib_stats.sps_txerrs++;
	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
			 ERR_MASK(SDmaDisabledErr));

	if (errs & ERR_MASK(IBStatusChanged)) {
		u64 ibcs;

		ibcs = qib_read_kreg64(dd, kr_ibcstatus);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_7220_chase(ppd, ibcs);

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
			    IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active =
			((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
			    QIB_IB_DDR : QIB_IB_SDR;

		/*
		 * Since going into a recovery state causes the link state
		 * to go down and since recovery is transitory, it is better
		 * if we "miss" ever seeing the link training state go into
		 * recovery (i.e., ignore this transition for link state
		 * special handling purposes) without updating lastibcstat.
		 */
		if (qib_7220_phys_portstate(ibcs) !=
					    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
			qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (errs & ERR_MASK(ResetNegated)) {
		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll.  We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}
done:
	return;
}

/* enable/disable chip from delivering interrupts */
static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, ~0ULL);
		/* force re-interrupt of any pending interrupts. */
		qib_write_kreg(dd, kr_intclear, 0ULL);
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}
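
/*
 * Illustrative note (not part of the original driver): the 0ULL write to
 * kr_intclear above exploits the chip's clear-register semantics, also
 * described before qib_7220_clear_freeze() below: writing a 0 to a bit in
 * IntClear whose IntStatus bit is still set re-generates that interrupt
 * (subject to IntMask).  So the enable path
 *
 *	qib_write_kreg(dd, kr_intmask, ~0ULL);
 *	qib_write_kreg(dd, kr_intclear, 0ULL);
 *
 * both unmasks everything and replays anything that arrived while masked.
 */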

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7220_clear_freeze(struct qib_devdata *dd)
{
	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7220_set_intr_state(dd, 0);

	qib_cancel_sends(dd->pport);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/* force in-memory update now we are out of freeze */
	qib_force_pio_avail_update(dd);

	/*
	 * force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	qib_7220_set_intr_state(dd, 1);
}

/**
 * qib_7220_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Use same msg buffer as regular errors to avoid excessive stack
 * use.  Most hardware errors are catastrophic, but for right now,
 * we'll print them and continue.  We reuse the same message buffer as
 * handle_7220_errors() to avoid excessive stack usage.
 */
static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
				     size_t msgl)
{
	u64 hwerrs;
	u32 bits, ctrl;
	int isfatal = 0;
	char *bitsmsg;
	int log_idx;

	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
	if (!hwerrs)
		goto bail;
	if (hwerrs == ~0ULL) {
		qib_dev_err(dd, "Read of hardware error status failed "
			    "(all bits set); ignoring\n");
		goto bail;
	}
	qib_stats.sps_hwerrs++;

	/*
	 * Always clear the error status register, except MEMBISTFAIL,
	 * regardless of whether we continue or stop using the chip.
	 * We want that set so we know it failed, even across driver reload.
	 * We'll still ignore it in the hwerrmask.  We do this partly for
	 * diagnostics, but also for support.
	 */
	qib_write_kreg(dd, kr_hwerrclear,
		       hwerrs & ~HWE_MASK(PowerOnBISTFailed));

	hwerrs &= dd->cspec->hwerrmask;

	/* We log some errors to EEPROM, check if we have any of those. */
	for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
		if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
			qib_inc_eeprom_err(dd, log_idx, 1);
	if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
		       RXE_PARITY))
		qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
			    "(cleared)\n", (unsigned long long) hwerrs);

	if (hwerrs & ~IB_HWE_BITSEXTANT)
		qib_dev_err(dd, "hwerror interrupt with unknown errors "
			    "%llx set\n", (unsigned long long)
			    (hwerrs & ~IB_HWE_BITSEXTANT));

	if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
		qib_sd7220_clr_ibpar(dd);

	ctrl = qib_read_kreg32(dd, kr_control);
	if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
		/*
		 * Parity errors in send memory are recoverable by h/w
		 * just do housekeeping, exit freeze mode and continue.
		 */
		if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
			      TXEMEMPARITYERR_PIOPBC)) {
			qib_7220_txe_recover(dd);
			hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
				    TXEMEMPARITYERR_PIOPBC);
		}
		if (hwerrs)
			isfatal = 1;
		else
			qib_7220_clear_freeze(dd);
	}

	*msg = '\0';

	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
		isfatal = 1;
		strlcat(msg, "[Memory BIST test failed, "
			"InfiniPath hardware unusable]", msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
			    ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);

	bitsmsg = dd->cspec->bitsmsgbuf;
	if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
		      QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
		bits = (u32) ((hwerrs >>
			       QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
			      QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PCIe Mem Parity Errs %x] ", bits);
		strlcat(msg, bitsmsg, msgl);
	}

#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
		       QLOGIC_IB_HWE_COREPLL_RFSLIP)

	if (hwerrs & _QIB_PLL_FAIL) {
		isfatal = 1;
		snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
			 "[PLL failed (%llx), InfiniPath hardware unusable]",
			 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
		strlcat(msg, bitsmsg, msgl);
		/* ignore from now on, so disable until driver reloaded */
		dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
		/*
		 * If it occurs, it is left masked since the external
		 * interface is unused.
		 */
		dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

	qib_dev_err(dd, "%s hardware error\n", msg);

	if (isfatal && !dd->diag_client) {
		qib_dev_err(dd, "Fatal Hardware Error, no longer"
			    " usable, SN %.16s\n", dd->serial);
		/*
		 * For /sys status file and user programs to print; if no
		 * trailing brace is copied, we'll know it was truncated.
		 */
		if (dd->freezemsg)
			snprintf(dd->freezemsg, dd->freezelen,
				 "{%s}", msg);
		qib_disable_after_error(dd);
	}
bail:;
}

/**
 * qib_7220_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occurred,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
 */
static void qib_7220_init_hwerrors(struct qib_devdata *dd)
{
	u64 val;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);

	if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
			 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
		qib_dev_err(dd, "MemBIST did not complete!\n");
	if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");

	val = ~0ULL;	/* default to all hwerrors become interrupts, */

	val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
	dd->cspec->hwerrmask = val;

	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* clear any interrupts up to this point (ints still not enabled) */
	qib_write_kreg(dd, kr_intclear, ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
	} else
		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

/*
 * Formerly took parameter <which> in pre-shifted,
 * pre-merged form with LinkCmd and LinkInitCmd
 * together, and assuming the zero was NOP.
 */
static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
				   u16 linitcmd)
{
	u64 mod_wd;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
		 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);

	qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
	/* write to chip to prevent back-to-back writes of ibc reg */
	qib_write_kreg(dd, kr_scratch, 0);
}
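
/*
 * Illustrative note (not part of the original driver): with the shift
 * values defined above (LINKCMD at bit 19, LINKINITCMD at bit 16), asking
 * the IBC to arm the link while polling would compose as
 *
 *	mod_wd = (QLOGIC_IB_IBCC_LINKCMD_ARMED << 19) |
 *		 (QLOGIC_IB_IBCC_LINKINITCMD_POLL << 16);
 *
 * i.e. 2 << 19 | 2 << 16, OR'd into the shadowed ibcctrl value for a
 * single write.  The scratch-register write afterwards is the spacer the
 * chip requires between back-to-back IBC register writes.
 */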

/*
 * All detailed interaction with the SerDes has been moved to qib_sd7220.c
 *
 * The portion of IBA7220-specific bringup_serdes() that actually deals with
 * registers and memory within the SerDes itself is qib_sd7220_init().
 */

/**
 * qib_7220_bringup_serdes - bring up the serdes
 * @ppd: physical port on the qlogic_ib device
 */
static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 val, prev_val, guid, ibc;
	int ret = 0;

	/* Put IBC in reset, sends disabled */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control, 0ULL);

	if (qib_compat_ddr_negotiate) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap =
			read_7220_creg32(dd, cr_iblinkerrrecov);
	}

	/* flowcontrolwatermark is in units of KBytes */
	ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
	/*
	 * How often flowctrl sent.  More or less in usecs; balance against
	 * watermark value, so that in theory senders always get a flow
	 * control update in time to not let the IB link go idle.
	 */
	ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
	/* max error tolerance */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
	/* use "real" buffer space for */
	ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
	/* IB credit flow control. */
	ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
	/*
	 * set initial max size pkt IBC will send, including ICRC; it's the
	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
	 */
	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
	ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */

	/* initially come up waiting for TS1, without sending anything. */
	val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
		QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
	qib_write_kreg(dd, kr_ibcctrl, val);

	if (!ppd->cpspec->ibcddrctrl) {
		/* not on re-init after reset */
		ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);

		if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
			ppd->cpspec->ibcddrctrl |=
				IBA7220_IBC_SPEED_AUTONEG_MASK |
				IBA7220_IBC_IBTA_1_2_MASK;
		else
			ppd->cpspec->ibcddrctrl |=
				ppd->link_speed_enabled == QIB_IB_DDR ?
				IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
		    (IB_WIDTH_1X | IB_WIDTH_4X))
			ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
		else
			ppd->cpspec->ibcddrctrl |=
				ppd->link_width_enabled == IB_WIDTH_4X ?
				IBA7220_IBC_WIDTH_4X_ONLY :
				IBA7220_IBC_WIDTH_1X_ONLY;

		/* always enable these on driver reload, not sticky */
		ppd->cpspec->ibcddrctrl |=
			IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
		ppd->cpspec->ibcddrctrl |=
			IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;

		/* enable automatic lane reversal detection for receive */
		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
	} else
		/* write to chip to prevent back-to-back writes of ibc reg */
		qib_write_kreg(dd, kr_scratch, 0);

	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
	qib_write_kreg(dd, kr_scratch, 0);

	ret = qib_sd7220_init(dd);

	val = qib_read_kreg64(dd, kr_xgxs_cfg);
	prev_val = val;
	val |= QLOGIC_IB_XGXS_FC_SAFE;
	if (val != prev_val) {
		qib_write_kreg(dd, kr_xgxs_cfg, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	if (val & QLOGIC_IB_XGXS_RESET)
		val &= ~QLOGIC_IB_XGXS_RESET;
	if (val != prev_val)
		qib_write_kreg(dd, kr_xgxs_cfg, val);

	/* first time through, set port guid */
	if (!ppd->guid)
		ppd->guid = dd->base_guid;
	guid = be64_to_cpu(ppd->guid);

	qib_write_kreg(dd, kr_hrtbt_guid, guid);
	if (!ret) {
		dd->control |= QLOGIC_IB_C_LINKENABLE;
		qib_write_kreg(dd, kr_control, dd->control);
	} else
		/* write to chip to prevent back-to-back writes of ibc reg */
		qib_write_kreg(dd, kr_scratch, 0);
	return ret;
}
	if (!ret) {
		dd->control |= QLOGIC_IB_C_LINKENABLE;
		qib_write_kreg(dd, kr_control, dd->control);
	} else
		/* write to chip to prevent back-to-back writes of ibc reg */
		qib_write_kreg(dd, kr_scratch, 0);
	return ret;
}

/**
 * qib_7220_quiet_serdes - set serdes to txidle
 * @ppd: physical port of the qlogic_ib device
 * Called when driver is being unloaded
 */
static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
{
	u64 val;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	/* disable IBC */
	dd->control &= ~QLOGIC_IB_C_LINKENABLE;
	qib_write_kreg(dd, kr_control,
		       dd->control | QLOGIC_IB_C_FREEZEMODE);

	ppd->cpspec->chase_end = 0;
	if (ppd->cpspec->chase_timer.data) /* if initted */
		del_timer_sync(&ppd->cpspec->chase_timer);
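	/*
	 * If a compat DDR speed negotiation left counter adjustments
	 * pending, back the symbol-error and link-error-recovery counters
	 * out to their pre-negotiation snapshots, apparently so errors
	 * incurred during negotiation are not charged to the link.
	 * Counter writes must be enabled in HwDiagCtrl first.
	 */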
	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
	    ppd->cpspec->ibdeltainprog) {
		u64 diagc;

		/* enable counter writes */
		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
		qib_write_kreg(dd, kr_hwdiagctrl,
			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));

		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7220_creg32(dd, cr_ibsymbolerr);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->ibsymsnap;
			val -= ppd->cpspec->ibsymdelta;
			write_7220_creg(dd, cr_ibsymbolerr, val);
		}
		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
			val = read_7220_creg32(dd, cr_iblinkerrrecov);
			if (ppd->cpspec->ibdeltainprog)
				val -= val - ppd->cpspec->iblnkerrsnap;
			val -= ppd->cpspec->iblnkerrdelta;
			write_7220_creg(dd, cr_iblinkerrrecov, val);
		}

		/* and disable counter writes */
		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
	}
	qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wake_up(&ppd->cpspec->autoneg_wait);
	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);

	shutdown_7220_relock_poll(ppd->dd);
	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
	val |= QLOGIC_IB_XGXS_RESET;
	qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
}

/**
 * qib_setup_7220_setextled - set the state of the two external LEDs
 * @ppd: the qlogic_ib port
 * @on: whether the link is up or not
 *
 * The exact combo of LEDs when on is true is determined by looking
 * at the ibcstatus.
 *
 * These LEDs indicate the physical and logical state of IB link.
 * For this chip (at least with recommended board pinouts), LED1
 * is Yellow (logical state) and LED2 is Green (physical state).
 *
 * Note:  We try to match the Mellanox HCA LED behavior as best
 * we can.  Green indicates physical link state is OK (something is
 * plugged in, and we can train).
 * Amber indicates the link is logically up (ACTIVE).
 * Mellanox further blinks the amber LED to indicate data packet
 * activity, but we have no hardware support for that, so it would
 * require waking up every 10-20 msecs and checking the counters
 * on the chip, and then turning the LED off if appropriate.  That's
 * visible overhead, so not something we will do.
 */
static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
{
	struct qib_devdata *dd = ppd->dd;
	u64 extctl, ledblink = 0, val, lst, ltst;
	unsigned long flags;

	/*
	 * The diags use the LED to indicate diag info, so we leave
	 * the external LED alone when the diags are running.
	 */
	if (dd->diag_client)
		return;

	if (ppd->led_override) {
		ltst = (ppd->led_override & QIB_LED_PHYS) ?
			IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
		lst = (ppd->led_override & QIB_LED_LOG) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	} else if (on) {
		val = qib_read_kreg64(dd, kr_ibcstatus);
		ltst = qib_7220_phys_portstate(val);
		lst = qib_7220_iblink_state(val);
	} else {
		ltst = 0;
		lst = 0;
	}

	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
	extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
					SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
	if (ltst == IB_PHYSPORTSTATE_LINKUP) {
		extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
		/*
		 * Counts are in chip clock (4ns) periods.
		 * This is 1/16 sec (66.6 ms) on,
		 * 3/16 sec (187.5 ms) off, with packets rcvd.
		 */
		ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
			| ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
	}
	if (lst == IB_PORT_ACTIVE)
		extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
	dd->cspec->extctrl = extctl;
	qib_write_kreg(dd, kr_extctrl, extctl);
	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);

	if (ledblink) /* blink the LED on packet receive */
		qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
}
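/*
 * Release our IRQ, if one was registered, and disable MSI so the next
 * qib_setup_7220_interrupt() starts from a clean state.
 */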
static void qib_7220_free_irq(struct qib_devdata *dd)
{
	if (dd->cspec->irq) {
		free_irq(dd->cspec->irq, dd);
		dd->cspec->irq = 0;
	}
	qib_nomsi(dd);
}

/*
 * qib_setup_7220_cleanup - clean up any chip-specific stuff
 * @dd: the qlogic_ib device
 *
 * This is called during driver unload.
 */
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
{
	qib_7220_free_irq(dd);
	kfree(dd->cspec->cntrs);
	kfree(dd->cspec->portcntrs);
}

/*
 * This is only called for SDmaInt.
 * SDmaDisabled is handled on the error path.
 */
static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
{
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		__qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
		break;

	case qib_sdma_state_s20_idle:
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
		break;

	case qib_sdma_state_s99_running:
		/* too chatty to print here */
		__qib_sdma_intr(ppd);
		break;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
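/*
 * Turn the "send buffer available" interrupt on or off.  When enabling,
 * briefly blip SendBufAvailUpd off and back on so the chip generates a
 * fresh avail update (and thus an interrupt), regardless of threshold
 * state or of buffers becoming free in the meantime.
 */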
static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if (needint) {
		if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
			goto done;
		/*
		 * blip the availupd off, next write will be on, so
		 * we ensure an avail update, regardless of threshold or
		 * buffers becoming free, whenever we want an interrupt
		 */
		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
			       ~SYM_MASK(SendCtrl, SendBufAvailUpd));
		qib_write_kreg(dd, kr_scratch, 0ULL);
		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
	} else
		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0ULL);
done:
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}

/*
 * Handle errors and unusual events first, separate function
 * to improve cache hits for fast path interrupt handling.
 */
static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
{
	if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
		qib_dev_err(dd,
			    "interrupt with unknown interrupts %Lx set\n",
			    istat & ~QLOGIC_IB_I_BITSEXTANT);

	if (istat & QLOGIC_IB_I_GPIO) {
		u32 gpiostatus;

		/*
		 * Boards for this chip currently don't use GPIO interrupts,
		 * so clear by writing GPIOstatus to GPIOclear, and complain
		 * to alert developer. To avoid endless repeats, clear
		 * the bits in the mask, since there is some kind of
		 * programming error or chip problem.
		 */
		gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
		/*
		 * In theory, writing GPIOstatus to GPIOclear could
		 * have a bad side-effect on some diagnostic that wanted
		 * to poll for a status-change, but the various shadows
		 * make that problematic at best. Diags will just suppress
		 * all GPIO interrupts during such tests.
		 */
		qib_write_kreg(dd, kr_gpio_clear, gpiostatus);

		if (gpiostatus) {
			const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
			u32 gpio_irq = mask & gpiostatus;

			/*
			 * A bit set in status and (chip) Mask register
			 * would cause an interrupt. Since we are not
			 * expecting any, report it. Also check that the
			 * chip reflects our shadow, report issues,
			 * and refresh from the shadow.
			 */
			/*
			 * Clear any troublemakers, and update chip
			 * from shadow
			 */
			dd->cspec->gpio_mask &= ~gpio_irq;
			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
		}
	}

	if (istat & QLOGIC_IB_I_ERROR) {
		u64 estat;

		qib_stats.sps_errints++;
		estat = qib_read_kreg64(dd, kr_errstatus);
		if (!estat)
			qib_devinfo(dd->pcidev,
				    "error interrupt (%Lx), but no error bits set!\n",
				    istat);
		else
			handle_7220_errors(dd, estat);
	}
}
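/*
 * General interrupt handler for the 7220.  Fast-path work (kernel receive
 * queues, PIO-buffer-available, SDMA) is handled inline; anything unusual
 * is punted to unlikely_7220_intr() above.
 */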
static irqreturn_t qib_7220intr(int irq, void *data)
{
	struct qib_devdata *dd = data;
	irqreturn_t ret;
	u64 istat;
	u64 ctxtrbits;
	u64 rmask;
	unsigned i;

	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
		/*
		 * This return value is not great, but we do not want the
		 * interrupt core code to remove our interrupt handler
		 * because we don't appear to be handling an interrupt
		 * during a chip reset.
		 */
		ret = IRQ_HANDLED;
		goto bail;
	}

	istat = qib_read_kreg64(dd, kr_intstatus);

	if (unlikely(!istat)) {
		ret = IRQ_NONE; /* not our interrupt, or already handled */
		goto bail;
	}
	if (unlikely(istat == -1)) {
		qib_bad_intrstatus(dd);
		/* don't know if it was our interrupt or not */
		ret = IRQ_NONE;
		goto bail;
	}

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
			      QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
		unlikely_7220_intr(dd, istat);

	/*
	 * Clear the interrupt bits we found set, relatively early, so we
	 * "know" the chip will have seen this by the time we process
	 * the queue, and will re-interrupt if necessary.  The processor
	 * itself won't take the interrupt again until we return.
	 */
	qib_write_kreg(dd, kr_intclear, istat);

	/*
	 * Handle kernel receive queues before checking for pio buffers
	 * available since receives can overflow; piobuf waiters can afford
	 * a few extra cycles, since they were waiting anyway.
	 */
	ctxtrbits = istat &
		((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
		 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
	if (ctxtrbits) {
		rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
			(1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
		for (i = 0; i < dd->first_user_ctxt; i++) {
			if (ctxtrbits & rmask) {
				ctxtrbits &= ~rmask;
				qib_kreceive(dd->rcd[i], NULL, NULL);
			}
			rmask <<= 1;
		}
		if (ctxtrbits) {
			ctxtrbits =
				(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
				(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
			qib_handle_urcv(dd, ctxtrbits);
		}
	}

	/* only call for SDmaInt */
	if (istat & QLOGIC_IB_I_SDMAINT)
		sdma_7220_intr(dd->pport, istat);

	if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
		qib_ib_piobufavail(dd);

	ret = IRQ_HANDLED;
bail:
	return ret;
}

/*
 * Set up our chip-specific interrupt handler.
 * The interrupt type has already been setup, so
 * we just need to do the registration and error checking.
 * If we are using MSI interrupts, we may fall back to
 * INTx later, if the interrupt handler doesn't get called
 * within 1/2 second (see verify_interrupt()).
 */
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
{
	if (!dd->cspec->irq)
		qib_dev_err(dd,
			    "irq is 0, BIOS error?  Interrupts won't work\n");
	else {
		int ret = request_irq(dd->cspec->irq, qib_7220intr,
				      dd->msi_lo ? 0 : IRQF_SHARED,
				      QIB_DRV_NAME, dd);

		if (ret)
			qib_dev_err(dd,
				    "Couldn't setup %s interrupt (irq=%d): %d\n",
				    dd->msi_lo ? "MSI" : "INTx",
				    dd->cspec->irq, ret);
	}
}

/**
 * qib_7220_boardname - fill in the board name
 * @dd: the qlogic_ib device
 *
 * info is based on the board revision register
 */
static void qib_7220_boardname(struct qib_devdata *dd)
{
	char *n;
	u32 boardid, namelen;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);

	switch (boardid) {
	case 1:
		n = "InfiniPath_QLE7240";
		break;
	case 2:
		n = "InfiniPath_QLE7280";
		break;
	default:
		qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
		n = "Unknown_InfiniPath_7220";
		break;
	}

	namelen = strlen(n) + 1;
	dd->boardname = kmalloc(namelen, GFP_KERNEL);
	if (!dd->boardname)
		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
	else
		snprintf(dd->boardname, namelen, "%s", n);

	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
		qib_dev_err(dd,
			    "Unsupported InfiniPath hardware revision %u.%u!\n",
			    dd->majrev, dd->minrev);

	snprintf(dd->boardversion, sizeof(dd->boardversion),
		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
		 dd->majrev, dd->minrev,
		 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
}
/*
 * This routine sleeps, so it can only be called from user context, not
 * from interrupt context.
 */
static int qib_setup_7220_reset(struct qib_devdata *dd)
{
	u64 val;
	int i;
	int ret;
	u16 cmdval;
	u8 int_line, clinesz;
	unsigned long flags;

	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);

	/* Use dev_err so it shows up in logs, etc. */
	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);

	/* no interrupts till re-initted */
	qib_7220_set_intr_state(dd, 0);

	dd->pport->cpspec->ibdeltainprog = 0;
	dd->pport->cpspec->ibsymdelta = 0;
	dd->pport->cpspec->iblnkerrdelta = 0;

	/*
	 * Keep chip from being accessed until we are ready.  Use
	 * writeq() directly, to allow the write even though QIB_PRESENT
	 * isn't set.
	 */
	dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
	dd->int_counter = 0; /* so we check interrupts work again */
	val = dd->control | QLOGIC_IB_C_RESET;
	writeq(val, &dd->kregbase[kr_control]);
	mb(); /* prevent compiler reordering around actual reset */

	for (i = 1; i <= 5; i++) {
		/*
		 * Allow MBIST, etc. to complete; longer on each retry.
		 * We sometimes get machine checks from bus timeout if no
		 * response, so for now, make it *really* long.
		 */
		msleep(1000 + (1 + i) * 2000);

		qib_pcie_reenable(dd, cmdval, int_line, clinesz);

		/*
		 * Use readq directly, so we don't need to mark it as PRESENT
		 * until we get a successful indication that all is well.
		 */
		val = readq(&dd->kregbase[kr_revision]);
		if (val == dd->revision) {
			dd->flags |= QIB_PRESENT; /* it's back */
			ret = qib_reinit_intr(dd);
			goto bail;
		}
	}
	ret = 0; /* failed */

bail:
	if (ret) {
		if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
			qib_dev_err(dd,
				    "Reset failed to setup PCIe or interrupts; continuing anyway\n");

		/* hold IBC in reset, no sends, etc till later */
		qib_write_kreg(dd, kr_control, 0ULL);

		/* clear the reset error, init error/hwerror mask */
		qib_7220_init_hwerrors(dd);

		/* do setup similar to speed or link-width changes */
		if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
			dd->cspec->presets_needed = 1;
		spin_lock_irqsave(&dd->pport->lflags_lock, flags);
		dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
		dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
		spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
	}

	return ret;
}

/**
 * qib_7220_put_tid - write a TID to the chip
 * @dd: the qlogic_ib device
 * @tidptr: pointer to the expected TID (in chip) to update
 * @type: 0 for eager, 1 for expected
 * @pa: physical address of in memory buffer; tidinvalid if freeing
 */
static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
			     u32 type, unsigned long pa)
{
	if (pa != dd->tidinvalid) {
		u64 chippa = pa >> IBA7220_TID_PA_SHIFT;

		/* paranoia checks */
		if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
				    pa);
			return;
		}
		if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
			qib_dev_err(dd,
				    "Physical page address 0x%lx larger than supported\n",
				    pa);
			return;
		}

		if (type == RCVHQ_RCV_TYPE_EAGER)
			chippa |= dd->tidtemplate;
		else /* for now, always full 4KB page */
			chippa |= IBA7220_TID_SZ_4K;
		pa = chippa;
	}
	writeq(pa, tidptr);
	mmiowb();
}
/**
 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
 * @dd: the qlogic_ib device
 * @rcd: the ctxt
 *
 * clear all TID entries for a ctxt, expected and eager.
 * Used from qib_close().  On this chip, TIDs are only 32 bits,
 * not 64, but they are still on 64 bit boundaries, so tidbase
 * is declared as u64 * for the pointer math, even though we write 32 bits
 */
static void qib_7220_clear_tids(struct qib_devdata *dd,
				struct qib_ctxtdata *rcd)
{
	u64 __iomem *tidbase;
	unsigned long tidinv;
	u32 ctxt;
	int i;

	if (!dd->kregbase || !rcd)
		return;

	ctxt = rcd->ctxt;

	tidinv = dd->tidinvalid;
	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvtidbase +
		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));

	for (i = 0; i < dd->rcvtidcnt; i++)
		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
				 tidinv);

	tidbase = (u64 __iomem *)
		((char __iomem *)(dd->kregbase) +
		 dd->rcvegrbase +
		 rcd->rcvegr_tid_base * sizeof(*tidbase));

	for (i = 0; i < rcd->rcvegrcnt; i++)
		qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
				 tidinv);
}

/**
 * qib_7220_tidtemplate - setup constants for TID updates
 * @dd: the qlogic_ib device
 *
 * We setup stuff that we use a lot, to avoid calculating each time
 */
static void qib_7220_tidtemplate(struct qib_devdata *dd)
{
	if (dd->rcvegrbufsize == 2048)
		dd->tidtemplate = IBA7220_TID_SZ_2K;
	else if (dd->rcvegrbufsize == 4096)
		dd->tidtemplate = IBA7220_TID_SZ_4K;
	dd->tidinvalid = 0;
}

/**
 * qib_7220_get_base_info - set chip-specific flags for user code
 * @rcd: the qlogic_ib ctxt
 * @kinfo: qib_base_info pointer
 *
 * We set the PCIE flag because the lower bandwidth on PCIe vs
 * HyperTransport can affect some user packet algorithms.
 */
static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
				  struct qib_base_info *kinfo)
{
	kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
		QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;

	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;

	return 0;
}
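/*
 * Given the address of the receive-header-flags (RHF) words for a packet,
 * return a pointer to the start of the message header; the RHF encodes
 * the header's offset in 32-bit words relative to the start of the entry.
 */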
static struct qib_message_header *
qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
{
	u32 offset = qib_hdrget_offset(rhf_addr);

	return (struct qib_message_header *)
		(rhf_addr - dd->rhf_offset + offset);
}

static void qib_7220_config_ctxts(struct qib_devdata *dd)
{
	unsigned long flags;
	u32 nchipctxts;

	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
	dd->cspec->numctxts = nchipctxts;
	if (qib_n_krcv_queues > 1) {
		dd->qpn_mask = 0x3e;
		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
		if (dd->first_user_ctxt > nchipctxts)
			dd->first_user_ctxt = nchipctxts;
	} else
		dd->first_user_ctxt = dd->num_pports;
	dd->n_krcv_queues = dd->first_user_ctxt;

	if (!qib_cfgctxts) {
		int nctxts = dd->first_user_ctxt + num_online_cpus();

		if (nctxts <= 5)
			dd->ctxtcnt = 5;
		else if (nctxts <= 9)
			dd->ctxtcnt = 9;
		else if (nctxts <= nchipctxts)
			dd->ctxtcnt = nchipctxts;
	} else if (qib_cfgctxts <= nchipctxts)
		dd->ctxtcnt = qib_cfgctxts;
	if (!dd->ctxtcnt) /* none of the above, set to max */
		dd->ctxtcnt = nchipctxts;

	/*
	 * Chip can be configured for 5, 9, or 17 ctxts, and choice
	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
	 * Lock to be paranoid about later motion, etc.
	 */
	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (dd->ctxtcnt > 9)
		dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
	else if (dd->ctxtcnt > 5)
		dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
	/* else configure for default 5 receive ctxts */
	if (dd->qpn_mask)
		dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);

	/* kr_rcvegrcnt changes based on the number of contexts enabled */
	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
}

static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
{
	int lsb, ret = 0;
	u64 maskr; /* right-justified mask */

	switch (which) {
	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
		ret = ppd->link_width_enabled;
		goto done;

	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
		ret = ppd->link_width_active;
		goto done;

	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
		ret = ppd->link_speed_enabled;
		goto done;

	case QIB_IB_CFG_SPD: /* Get current Link speed */
		ret = ppd->link_speed_active;
		goto done;

	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
		lsb = IBA7220_IBC_RXPOL_SHIFT;
		maskr = IBA7220_IBC_RXPOL_MASK;
		break;

	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
		lsb = IBA7220_IBC_LREV_SHIFT;
		maskr = IBA7220_IBC_LREV_MASK;
		break;

	case QIB_IB_CFG_LINKLATENCY:
		ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
			& IBA7220_DDRSTAT_LINKLAT_MASK;
		goto done;

	case QIB_IB_CFG_OP_VLS:
		ret = ppd->vls_operational;
		goto done;

	case QIB_IB_CFG_VL_HIGH_CAP:
		ret = 0;
		goto done;

	case QIB_IB_CFG_VL_LOW_CAP:
		ret = 0;
		goto done;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
				OverrunThreshold);
		goto done;

	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
				PhyerrThreshold);
		goto done;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		ret = (ppd->cpspec->ibcctrl &
		       SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
		goto done;

	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
		lsb = IBA7220_IBC_HRTBT_SHIFT;
		maskr = IBA7220_IBC_HRTBT_MASK;
		break;

	case QIB_IB_CFG_PMA_TICKS:
		/*
		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
		 * Since the clock is always 250MHz, the value is 1 or 0.
		 */
		ret = (ppd->link_speed_active == QIB_IB_DDR);
		goto done;

	default:
		ret = -EINVAL;
		goto done;
	}
	ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
done:
	return ret;
}
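/*
 * Set one of the QIB_IB_CFG_* values.  Most cases just compute a
 * right-justified mask and lsb and fall through to a single
 * read-modify-write of the ibcddrctrl shadow at the bottom; the
 * remaining cases write their target register directly and bail.
 */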
static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
{
	struct qib_devdata *dd = ppd->dd;
	u64 maskr; /* right-justified mask */
	int lsb, ret = 0, setforce = 0;
	u16 lcmd, licmd;
	unsigned long flags;

	switch (which) {
	case QIB_IB_CFG_LIDLMC:
		/*
		 * Set LID and LMC.  Combined to avoid possible hazard;
		 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
		 */
		lsb = IBA7220_IBC_DLIDLMC_SHIFT;
		maskr = IBA7220_IBC_DLIDLMC_MASK;
		break;

	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
		/*
		 * As with speed, only write the actual register if
		 * the link is currently down, otherwise takes effect
		 * on next link change.
		 */
		ppd->link_width_enabled = val;
		if (!(ppd->lflags & QIBL_LINKDOWN))
			goto bail;
		/*
		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
		 * will get called because we want to update
		 * link_width_active, and the change may not take
		 * effect for some time (if we are in POLL), so this
		 * flag will force the updown routine to be called
		 * on the next ibstatuschange down interrupt, even
		 * if it's not a down->up transition.
		 */
		val--; /* convert from IB to chip */
		maskr = IBA7220_IBC_WIDTH_MASK;
		lsb = IBA7220_IBC_WIDTH_SHIFT;
		setforce = 1;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		break;

	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
		/*
		 * If we turn off IB1.2, need to preset SerDes defaults,
		 * but not right now. Set a flag for the next time
		 * we command the link down. As with width, only write the
		 * actual register if the link is currently down, otherwise
		 * takes effect on next link change. Since setting is being
		 * explicitly requested (via MAD or sysfs), clear autoneg
		 * failure status if speed autoneg is enabled.
		 */
		ppd->link_speed_enabled = val;
		if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
		    !(val & (val - 1)))
			dd->cspec->presets_needed = 1;
		if (!(ppd->lflags & QIBL_LINKDOWN))
			goto bail;
		/*
		 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
		 * will get called because we want to update
		 * link_speed_active, and the change may not take
		 * effect for some time (if we are in POLL), so this
		 * flag will force the updown routine to be called
		 * on the next ibstatuschange down interrupt, even
		 * if it's not a down->up transition.
		 */
		if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
			val = IBA7220_IBC_SPEED_AUTONEG_MASK |
				IBA7220_IBC_IBTA_1_2_MASK;
			spin_lock_irqsave(&ppd->lflags_lock, flags);
			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		} else
			val = val == QIB_IB_DDR ?
				IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
		maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
			IBA7220_IBC_IBTA_1_2_MASK;
		/* IBTA 1.2 mode + speed bits are contiguous */
		lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
		setforce = 1;
		break;

	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
		lsb = IBA7220_IBC_RXPOL_SHIFT;
		maskr = IBA7220_IBC_RXPOL_MASK;
		break;

	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
		lsb = IBA7220_IBC_LREV_SHIFT;
		maskr = IBA7220_IBC_LREV_MASK;
		break;

	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
				  OverrunThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, OverrunThreshold);
			ppd->cpspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, OverrunThreshold);
			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		goto bail;
	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
				  PhyerrThreshold);
		if (maskr != val) {
			ppd->cpspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, PhyerrThreshold);
			ppd->cpspec->ibcctrl |= (u64) val <<
				SYM_LSB(IBCCtrl, PhyerrThreshold);
			qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
			qib_write_kreg(dd, kr_scratch, 0);
		}
		goto bail;

	case QIB_IB_CFG_PKEYS: /* update pkeys */
		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
			((u64) ppd->pkeys[2] << 32) |
			((u64) ppd->pkeys[3] << 48);
		qib_write_kreg(dd, kr_partitionkey, maskr);
		goto bail;

	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* will only take effect when the link state changes */
		if (val == IB_LINKINITCMD_POLL)
			ppd->cpspec->ibcctrl &=
				~SYM_MASK(IBCCtrl, LinkDownDefaultState);
		else /* SLEEP */
			ppd->cpspec->ibcctrl |=
				SYM_MASK(IBCCtrl, LinkDownDefaultState);
		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		goto bail;

	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
		/*
		 * Update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
		 * Set even if it's unchanged, print debug message only
		 * on changes.
		 */
		val = (ppd->ibmaxlen >> 2) + 1;
		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
		ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
		qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
		qib_write_kreg(dd, kr_scratch, 0);
		goto bail;

	case QIB_IB_CFG_LSTATE: /* set the IB link state */
		switch (val & 0xffff0000) {
		case IB_LINKCMD_DOWN:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
			if (!ppd->cpspec->ibdeltainprog &&
			    qib_compat_ddr_negotiate) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap =
					read_7220_creg32(dd, cr_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap =
					read_7220_creg32(dd, cr_iblinkerrrecov);
			}
			break;

		case IB_LINKCMD_ARMED:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
			break;

		case IB_LINKCMD_ACTIVE:
			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
			goto bail;
		}
		switch (val & 0xffff) {
		case IB_LINKINITCMD_NOP:
			licmd = 0;
			break;

		case IB_LINKINITCMD_POLL:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
			break;

		case IB_LINKINITCMD_SLEEP:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
			break;

		case IB_LINKINITCMD_DISABLE:
			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
			ppd->cpspec->chase_end = 0;
			/*
			 * stop state chase counter and timer, if running.
			 * wait for pending timer, but don't clear .data (ppd)!
			 */
			if (ppd->cpspec->chase_timer.expires) {
				del_timer_sync(&ppd->cpspec->chase_timer);
				ppd->cpspec->chase_timer.expires = 0;
			}
			break;

		default:
			ret = -EINVAL;
			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
				    val & 0xffff);
			goto bail;
		}
		qib_set_ib_7220_lstate(ppd, lcmd, licmd);
		goto bail;

	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
		if (val > IBA7220_IBC_HRTBT_MASK) {
			ret = -EINVAL;
			goto bail;
		}
		lsb = IBA7220_IBC_HRTBT_SHIFT;
		maskr = IBA7220_IBC_HRTBT_MASK;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
	ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
	qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(dd, kr_scratch, 0);
	if (setforce) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}
bail:
	return ret;
}

static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
{
	int ret = 0;
	u64 val, ddr;

	if (!strncmp(what, "ibc", 3)) {
		ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
		val = 0; /* disable heart beat, so link will come up */
		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
			    ppd->dd->unit, ppd->port);
	} else if (!strncmp(what, "off", 3)) {
		ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
		/* enable heart beat again */
		val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
		qib_devinfo(ppd->dd->pcidev,
			    "Disabling IB%u:%u IBC loopback (normal)\n",
			    ppd->dd->unit, ppd->port);
	} else
		ret = -EINVAL;
	if (!ret) {
		qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
		ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
						  << IBA7220_IBC_HRTBT_SHIFT);
		ppd->cpspec->ibcddrctrl = ddr | val;
		qib_write_kreg(ppd->dd, kr_ibcddrctrl,
			       ppd->cpspec->ibcddrctrl);
		qib_write_kreg(ppd->dd, kr_scratch, 0);
	}
	return ret;
}

static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
				    u32 updegr, u32 egrhd, u32 npkts)
{
	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
	if (updegr)
		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
}
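/*
 * Return nonzero if the receive header queue is empty: compare the
 * software head against the tail, using the DMA'ed tail copy when one is
 * available, else reading the tail register directly.
 */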
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
{
	u32 head, tail;

	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
	if (rcd->rcvhdrtail_kvaddr)
		tail = qib_get_rcvhdrtail(rcd);
	else
		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
	return head == tail;
}

/*
 * Modify the RCVCTRL register in chip-specific way. This
 * is a function because bit positions and (future) register
 * location are chip-specific, but the needed operations are
 * generic. <op> is a bit-mask because we often want to
 * do multiple modifications.
 */
static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
			     int ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	u64 mask, val;
	unsigned long flags;

	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
	if (op & QIB_RCVCTRL_TAILUPD_ENB)
		dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_TAILUPD_DIS)
		dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_ENB)
		dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
	if (op & QIB_RCVCTRL_PKEY_DIS)
		dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
	if (ctxt < 0)
		mask = (1ULL << dd->ctxtcnt) - 1;
	else
		mask = (1ULL << ctxt);
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/* always done for specific ctxt */
		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
		if (!(dd->flags & QIB_NODMA_RTAIL))
			dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
		/* Write these registers before the context is enabled. */
		qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
				    dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
		qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
				    dd->rcd[ctxt]->rcvhdrq_phys);
		dd->rcd[ctxt]->seq_cnt = 1;
	}
	if (op & QIB_RCVCTRL_CTXT_DIS)
		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
		dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
		dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
	if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
		/* arm rcv interrupt */
		val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
			dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_ENB) {
		/*
		 * Init the context registers also; if we were
		 * disabled, tail and head should both be zero
		 * already from the enable, but since we don't
		 * know, we have to do it explicitly.
		 */
		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);

		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
		dd->rcd[ctxt]->head = val;
		/* If kctxt, interrupt on next receive. */
		if (ctxt < dd->first_user_ctxt)
			val |= dd->rhdrhead_intr_off;
		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
	}
	if (op & QIB_RCVCTRL_CTXT_DIS) {
		if (ctxt >= 0) {
			qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
			qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
		} else {
			unsigned i;

			for (i = 0; i < dd->cfgctxts; i++) {
				qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
						    i, 0);
				qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
			}
		}
	}
	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
}
To start, we assume the2813* "canonical" register layout of the first chips.2814* Chip requires no back-back sendctrl writes, so write2815* scratch register after writing sendctrl2816*/2817static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)2818{2819struct qib_devdata *dd = ppd->dd;2820u64 tmp_dd_sendctrl;2821unsigned long flags;28222823spin_lock_irqsave(&dd->sendctrl_lock, flags);28242825/* First the ones that are "sticky", saved in shadow */2826if (op & QIB_SENDCTRL_CLEAR)2827dd->sendctrl = 0;2828if (op & QIB_SENDCTRL_SEND_DIS)2829dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);2830else if (op & QIB_SENDCTRL_SEND_ENB) {2831dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);2832if (dd->flags & QIB_USE_SPCL_TRIG)2833dd->sendctrl |= SYM_MASK(SendCtrl,2834SSpecialTriggerEn);2835}2836if (op & QIB_SENDCTRL_AVAIL_DIS)2837dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);2838else if (op & QIB_SENDCTRL_AVAIL_ENB)2839dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);28402841if (op & QIB_SENDCTRL_DISARM_ALL) {2842u32 i, last;28432844tmp_dd_sendctrl = dd->sendctrl;2845/*2846* disarm any that are not yet launched, disabling sends2847* and updates until done.2848*/2849last = dd->piobcnt2k + dd->piobcnt4k;2850tmp_dd_sendctrl &=2851~(SYM_MASK(SendCtrl, SPioEnable) |2852SYM_MASK(SendCtrl, SendBufAvailUpd));2853for (i = 0; i < last; i++) {2854qib_write_kreg(dd, kr_sendctrl,2855tmp_dd_sendctrl |2856SYM_MASK(SendCtrl, Disarm) | i);2857qib_write_kreg(dd, kr_scratch, 0);2858}2859}28602861tmp_dd_sendctrl = dd->sendctrl;28622863if (op & QIB_SENDCTRL_FLUSH)2864tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);2865if (op & QIB_SENDCTRL_DISARM)2866tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |2867((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<2868SYM_LSB(SendCtrl, DisarmPIOBuf));2869if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&2870(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))2871tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);28722873qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);2874qib_write_kreg(dd, kr_scratch, 0);28752876if (op & QIB_SENDCTRL_AVAIL_BLIP) {2877qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);2878qib_write_kreg(dd, kr_scratch, 0);2879}28802881spin_unlock_irqrestore(&dd->sendctrl_lock, flags);28822883if (op & QIB_SENDCTRL_FLUSH) {2884u32 v;2885/*2886* ensure writes have hit chip, then do a few2887* more reads, to allow DMA of pioavail registers2888* to occur, so in-memory copy is in sync with2889* the chip. 
/**
 * qib_portcntr_7220 - read a per-port counter
 * @ppd: the qlogic_ib port
 * @reg: the counter to snapshot
 */
static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
{
	u64 ret = 0ULL;
	struct qib_devdata *dd = ppd->dd;
	u16 creg;
	/* 0xffff for unimplemented or synthesized counters */
	static const u16 xlator[] = {
		[QIBPORTCNTR_PKTSEND] = cr_pktsend,
		[QIBPORTCNTR_WORDSEND] = cr_wordsend,
		[QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
		[QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
		[QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
		[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
		[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
		[QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
		[QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
		[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
		[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
		[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
		[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
		[QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
		[QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
		[QIBPORTCNTR_ERRICRC] = cr_erricrc,
		[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
		[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
		[QIBPORTCNTR_BADFORMAT] = cr_badformat,
		[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
		[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
		[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
		[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
		[QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
		[QIBPORTCNTR_ERRLINK] = cr_errlink,
		[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
		[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
		[QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
		[QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
		[QIBPORTCNTR_PSSTART] = cr_psstart,
		[QIBPORTCNTR_PSSTAT] = cr_psstat,
		[QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
		[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
		[QIBPORTCNTR_KHDROVFL] = 0xffff,
	};

	if (reg >= ARRAY_SIZE(xlator)) {
		qib_devinfo(ppd->dd->pcidev,
			    "Unimplemented portcounter %u\n", reg);
		goto done;
	}
	creg = xlator[reg];

	if (reg == QIBPORTCNTR_KHDROVFL) {
		int i;

		/* sum over all kernel contexts */
		for (i = 0; i < dd->first_user_ctxt; i++)
			ret += read_7220_creg32(dd, cr_portovfl + i);
	}
	if (creg == 0xffff)
		goto done;

	/*
	 * only fast incrementing counters are 64bit; use 32 bit reads to
	 * avoid two independent reads when on opteron
	 */
	if ((creg == cr_wordsend || creg == cr_wordrcv ||
	     creg == cr_pktsend || creg == cr_pktrcv))
		ret = read_7220_creg(dd, creg);
	else
		ret = read_7220_creg32(dd, creg);
	if (creg == cr_ibsymbolerr) {
		if (dd->pport->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->ibsymsnap;
		ret -= dd->pport->cpspec->ibsymdelta;
	} else if (creg == cr_iblinkerrrecov) {
		if (dd->pport->cpspec->ibdeltainprog)
			ret -= ret - ppd->cpspec->iblnkerrsnap;
		ret -= dd->pport->cpspec->iblnkerrdelta;
	}
done:
	return ret;
}
/*
 * Device counter names (not port-specific), one line per stat,
 * single string.  Used by utilities like ipathstats to print the stats
 * in a way which works for different versions of drivers, without changing
 * the utility.  Names need to be 12 chars or less (w/o newline), for proper
 * display by utility.
 * Non-error counters are first.
 * Start of "error" counters is indicated by a leading "E " on the first
 * "error" counter, and doesn't count in label length.
 * The EgrOvfl list needs to be last so we truncate them at the configured
 * context count for the device.
 * cntr7220indices contains the corresponding register indices.
 */
static const char cntr7220names[] =
	"Interrupts\n"
	"HostBusStall\n"
	"E RxTIDFull\n"
	"RxTIDInvalid\n"
	"Ctxt0EgrOvfl\n"
	"Ctxt1EgrOvfl\n"
	"Ctxt2EgrOvfl\n"
	"Ctxt3EgrOvfl\n"
	"Ctxt4EgrOvfl\n"
	"Ctxt5EgrOvfl\n"
	"Ctxt6EgrOvfl\n"
	"Ctxt7EgrOvfl\n"
	"Ctxt8EgrOvfl\n"
	"Ctxt9EgrOvfl\n"
	"Ctx10EgrOvfl\n"
	"Ctx11EgrOvfl\n"
	"Ctx12EgrOvfl\n"
	"Ctx13EgrOvfl\n"
	"Ctx14EgrOvfl\n"
	"Ctx15EgrOvfl\n"
	"Ctx16EgrOvfl\n";

static const size_t cntr7220indices[] = {
	cr_lbint,
	cr_lbflowstall,
	cr_errtidfull,
	cr_errtidvalid,
	cr_portovfl + 0,
	cr_portovfl + 1,
	cr_portovfl + 2,
	cr_portovfl + 3,
	cr_portovfl + 4,
	cr_portovfl + 5,
	cr_portovfl + 6,
	cr_portovfl + 7,
	cr_portovfl + 8,
	cr_portovfl + 9,
	cr_portovfl + 10,
	cr_portovfl + 11,
	cr_portovfl + 12,
	cr_portovfl + 13,
	cr_portovfl + 14,
	cr_portovfl + 15,
	cr_portovfl + 16,
};

/*
 * same as cntr7220names and cntr7220indices, but for port-specific counters.
 * portcntr7220indices is somewhat complicated by some registers needing
 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
 */
static const char portcntr7220names[] =
	"TxPkt\n"
	"TxFlowPkt\n"
	"TxWords\n"
	"RxPkt\n"
	"RxFlowPkt\n"
	"RxWords\n"
	"TxFlowStall\n"
	"TxDmaDesc\n"  /* 7220 and 7322-only */
	"E RxDlidFltr\n"  /* 7220 and 7322-only */
	"IBStatusChng\n"
	"IBLinkDown\n"
	"IBLnkRecov\n"
	"IBRxLinkErr\n"
	"IBSymbolErr\n"
	"RxLLIErr\n"
	"RxBadFormat\n"
	"RxBadLen\n"
	"RxBufOvrfl\n"
	"RxEBP\n"
	"RxFlowCtlErr\n"
	"RxICRCerr\n"
	"RxLPCRCerr\n"
	"RxVCRCerr\n"
	"RxInvalLen\n"
	"RxInvalPKey\n"
	"RxPktDropped\n"
	"TxBadLength\n"
	"TxDropped\n"
	"TxInvalLen\n"
	"TxUnderrun\n"
	"TxUnsupVL\n"
	"RxLclPhyErr\n" /* 7220 and 7322-only */
	"RxVL15Drop\n" /* 7220 and 7322-only */
	"RxVlErr\n" /* 7220 and 7322-only */
	"XcessBufOvfl\n" /* 7220 and 7322-only */
	;

#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
static const size_t portcntr7220indices[] = {
	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
	cr_pktsendflow,
	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
	cr_pktrcvflowctrl,
	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
	cr_txsdmadesc,
	cr_rxdlidfltr,
	cr_ibstatuschange,
	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
	cr_rcvflowctrl_err,
	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
	cr_invalidslen,
	cr_senddropped,
	cr_errslen,
	cr_sendunderrun,
	cr_txunsupvl,
	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
};
/* do all the setup to make the counter reads efficient later */
static void init_7220_cntrnames(struct qib_devdata *dd)
{
	int i, j = 0;
	char *s;

	for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
	     i++) {
		/* we always have at least one counter before the egrovfl */
		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
			j = 1;
		s = strchr(s + 1, '\n');
		if (s && j)
			j++;
	}
	dd->cspec->ncntrs = i;
	if (!s)
		/* full list; size is without terminating null */
		dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
	else
		dd->cspec->cntrnamelen = 1 + s - cntr7220names;
	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
				   * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->cntrs)
		qib_dev_err(dd, "Failed allocation for counters\n");

	for (i = 0, s = (char *)portcntr7220names; s; i++)
		s = strchr(s + 1, '\n');
	dd->cspec->nportcntrs = i - 1;
	dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
				       * sizeof(u64), GFP_KERNEL);
	if (!dd->cspec->portcntrs)
		qib_dev_err(dd, "Failed allocation for portcounters\n");
}

static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
			      u64 **cntrp)
{
	u32 ret;

	if (!dd->cspec->cntrs) {
		ret = 0;
		goto done;
	}

	if (namep) {
		*namep = (char *)cntr7220names;
		ret = dd->cspec->cntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
	} else {
		u64 *cntr = dd->cspec->cntrs;
		int i;

		ret = dd->cspec->ncntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}

		*cntrp = cntr;
		for (i = 0; i < dd->cspec->ncntrs; i++)
			*cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
	}
done:
	return ret;
}
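/*
 * Same two-phase protocol as qib_read_7220cntrs() above, but for the
 * per-port counters: called with namep non-NULL to fetch the name string,
 * then with cntrp non-NULL to fetch a snapshot of the values.
 */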
static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
				  char **namep, u64 **cntrp)
{
	u32 ret;

	if (!dd->cspec->portcntrs) {
		ret = 0;
		goto done;
	}
	if (namep) {
		*namep = (char *)portcntr7220names;
		ret = dd->cspec->portcntrnamelen;
		if (pos >= ret)
			ret = 0; /* final read after getting everything */
	} else {
		u64 *cntr = dd->cspec->portcntrs;
		struct qib_pportdata *ppd = &dd->pport[port];
		int i;

		ret = dd->cspec->nportcntrs * sizeof(u64);
		if (!cntr || pos >= ret) {
			/* everything read, or couldn't get memory */
			ret = 0;
			goto done;
		}
		*cntrp = cntr;
		for (i = 0; i < dd->cspec->nportcntrs; i++) {
			if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
				*cntr++ = qib_portcntr_7220(ppd,
					portcntr7220indices[i] &
					~_PORT_VIRT_FLAG);
			else
				*cntr++ = read_7220_creg32(dd,
					portcntr7220indices[i]);
		}
	}
done:
	return ret;
}

/**
 * qib_get_7220_faststats - get word counters from chip before they overflow
 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
 *
 * This needs more work; in particular, a decision on whether we really
 * need traffic_wds done the way it is.
 * Called from add_timer.
 */
static void qib_get_7220_faststats(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;
	struct qib_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 traffic_wds;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!(dd->flags & QIB_INITTED) || dd->diag_client)
		/* but re-arm the timer, for diags case; won't hurt other */
		goto done;

	/*
	 * We now try to maintain an activity timer, based on traffic
	 * exceeding a threshold, so we need to check the word-counts
	 * even if they are 64-bit.
	 */
	traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
		qib_portcntr_7220(ppd, cr_wordrcv);
	spin_lock_irqsave(&dd->eep_st_lock, flags);
	traffic_wds -= dd->traffic_wds;
	dd->traffic_wds += traffic_wds;
	if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->active_time); /* S/B #define */
	spin_unlock_irqrestore(&dd->eep_st_lock, flags);
done:
	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
}

/*
 * If we are using MSI, try to fall back to INTx.
 */
static int qib_7220_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->msi_lo)
		return 0;

	qib_devinfo(dd->pcidev,
		    "MSI interrupt not detected, trying INTx interrupts\n");
	qib_7220_free_irq(dd);
	qib_enable_intx(dd->pcidev);
	/*
	 * Some newer kernels require free_irq before disable_msi,
	 * and irq can be changed during disable and INTx enable
	 * and we need to therefore use the pcidev->irq value,
	 * not our saved MSI value.
	 */
	dd->cspec->irq = dd->pcidev->irq;
	qib_setup_7220_interrupt(dd);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well.
 */
static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
{
	u64 val, prev_val;
	struct qib_devdata *dd = ppd->dd;

	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
	val = prev_val | QLOGIC_IB_XGXS_RESET;
	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
	qib_write_kreg(dd, kr_control,
		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
	qib_write_kreg(dd, kr_xgxs_cfg, val);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
	qib_write_kreg(dd, kr_control, dd->control);
}
/*
 * For this chip, we want to use the same buffer every time
 * when we are trying to bring the link up (they are always VL15
 * packets).  At that link state the packet should always go out immediately
 * (or at least be discarded at the tx interface if the link is down).
 * If it doesn't, and the buffer isn't available, that means some other
 * sender has gotten ahead of us, and is preventing our packet from going
 * out.  In that case, we flush all packets, and try again.  If that still
 * fails, we fail the request, and hope things work the next time around.
 *
 * We don't need very complicated heuristics on whether the packet had
 * time to go out or not, since even at SDR 1X, it goes out in very short
 * time periods, covered by the chip reads done here and as part of the
 * flush.
 */
static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
{
	u32 __iomem *buf;
	u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
	int do_cleanup;
	unsigned long flags;

	/*
	 * always blip to get avail list updated, since it's almost
	 * always needed, and is fairly cheap.
	 */
	sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	if (buf)
		goto done;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
	    ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
		__qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
		do_cleanup = 0;
	} else {
		do_cleanup = 1;
		qib_7220_sdma_hw_clean_up(ppd);
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	if (do_cleanup) {
		qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
		buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	}
done:
	return buf;
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware.  It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	pbc |= PBC_7220_VL15_SEND;
	while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
		if (i++ > 5)
			return;
		udelay(2);
	}
	sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
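	/*
	 * The PBC qword written first carries the packet length in dwords
	 * (7-dword header + payload + ICRC) plus the VL15-send flag set
	 * above; header and payload are then copied in after the two PBC
	 * dwords.
	 */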
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
}

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
	};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
	};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and autoneg is enabled, or when autoneg
 * has failed and we give up until next time, we set both speeds, and
 * then we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
				     IBA7220_IBC_IBTA_1_2_MASK);

	if (speed == (QIB_IB_SDR | QIB_IB_DDR))
		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
			IBA7220_IBC_IBTA_1_2_MASK;
	else
		ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;

	qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}
"Magic constant"3505*/3506qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);35073508spin_lock_irqsave(&ppd->lflags_lock, flags);3509ppd->lflags |= QIBL_IB_AUTONEG_INPROG;3510spin_unlock_irqrestore(&ppd->lflags_lock, flags);3511autoneg_7220_send(ppd, 0);3512set_7220_ibspeed_fast(ppd, QIB_IB_DDR);35133514toggle_7220_rclkrls(ppd->dd);3515/* 2 msec is minimum length of a poll cycle */3516queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,3517msecs_to_jiffies(2));3518}35193520/*3521* Handle the empirically determined mechanism for auto-negotiation3522* of DDR speed with switches.3523*/3524static void autoneg_7220_work(struct work_struct *work)3525{3526struct qib_pportdata *ppd;3527struct qib_devdata *dd;3528u64 startms;3529u32 i;3530unsigned long flags;35313532ppd = &container_of(work, struct qib_chippport_specific,3533autoneg_work.work)->pportdata;3534dd = ppd->dd;35353536startms = jiffies_to_msecs(jiffies);35373538/*3539* Busy wait for this first part, it should be at most a3540* few hundred usec, since we scheduled ourselves for 2msec.3541*/3542for (i = 0; i < 25; i++) {3543if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)3544== IB_7220_LT_STATE_POLLQUIET) {3545qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);3546break;3547}3548udelay(100);3549}35503551if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))3552goto done; /* we got there early or told to stop */35533554/* we expect this to timeout */3555if (wait_event_timeout(ppd->cpspec->autoneg_wait,3556!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),3557msecs_to_jiffies(90)))3558goto done;35593560toggle_7220_rclkrls(dd);35613562/* we expect this to timeout */3563if (wait_event_timeout(ppd->cpspec->autoneg_wait,3564!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),3565msecs_to_jiffies(1700)))3566goto done;35673568set_7220_ibspeed_fast(ppd, QIB_IB_SDR);3569toggle_7220_rclkrls(dd);35703571/*3572* Wait up to 250 msec for link to train and get to INIT at DDR;3573* this should terminate early.3574*/3575wait_event_timeout(ppd->cpspec->autoneg_wait,3576!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),3577msecs_to_jiffies(250));3578done:3579if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {3580spin_lock_irqsave(&ppd->lflags_lock, flags);3581ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;3582if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {3583ppd->lflags |= QIBL_IB_AUTONEG_FAILED;3584dd->cspec->autoneg_tries = 0;3585}3586spin_unlock_irqrestore(&ppd->lflags_lock, flags);3587set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);3588}3589}35903591static u32 qib_7220_iblink_state(u64 ibcs)3592{3593u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);35943595switch (state) {3596case IB_7220_L_STATE_INIT:3597state = IB_PORT_INIT;3598break;3599case IB_7220_L_STATE_ARM:3600state = IB_PORT_ARMED;3601break;3602case IB_7220_L_STATE_ACTIVE:3603/* fall through */3604case IB_7220_L_STATE_ACT_DEFER:3605state = IB_PORT_ACTIVE;3606break;3607default: /* fall through */3608case IB_7220_L_STATE_DOWN:3609state = IB_PORT_DOWN;3610break;3611}3612return state;3613}36143615/* returns the IBTA port state, rather than the IBC link training state */3616static u8 qib_7220_phys_portstate(u64 ibcs)3617{3618u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);3619return qib_7220_physportstate[state];3620}36213622static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)3623{3624int ret = 0, symadj = 0;3625struct qib_devdata *dd = ppd->dd;3626unsigned long flags;36273628spin_lock_irqsave(&ppd->lflags_lock, flags);3629ppd->lflags &= 
/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7220_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = &container_of(work, struct qib_chippport_specific,
			    autoneg_work.work)->pportdata;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
		    == IB_7220_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;

	toggle_7220_rclkrls(dd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;

	set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
	toggle_7220_rclkrls(dd);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			dd->cspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

static u32 qib_7220_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);

	switch (state) {
	case IB_7220_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7220_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7220_L_STATE_ACTIVE:
		/* fall through */
	case IB_7220_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7220_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7220_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);

	return qib_7220_physportstate[state];
}
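/*
 * Note on the ibsymsnap/ibsymdelta handling in qib_7220_ib_updown()
 * below: when DDR autoneg starts (or the link drops with autoneg
 * compatibility enabled), the symbol-error and link-error-recovery
 * counters are snapshotted and ibdeltainprog is set; once the link
 * settles, the difference is folded into ibsymdelta/iblnkerrdelta so
 * the counter-reporting code can discount errors incurred during the
 * intentionally noisy training window.
 */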
static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!ibup) {
		/*
		 * When the link goes down we don't want AEQ running, so it
		 * won't interfere with IBC training, etc., and we need
		 * to go back to the static SerDes preset values.
		 */
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			qib_sd7220_presets(dd);
			qib_cancel_sends(ppd); /* initial disarm, etc. */
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		/* this might be better done in qib_sd7220_presets() */
		set_7220_relock_poll(dd, ibup);
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
		    (QIB_IB_DDR | QIB_IB_SDR) &&
		    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and DDR auto-negotiation enabled */
			++dd->cspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
					cr_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
					cr_iblinkerrrecov);
			}
			try_7220_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			autoneg_7220_send(ppd, 1);
			set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
			udelay(2);
			toggle_7220_rclkrls(dd);
			ret = 1; /* no other IB status change processing */
		} else {
			if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			    (ppd->link_speed_active & QIB_IB_DDR)) {
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
						 QIBL_IB_AUTONEG_FAILED);
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				dd->cspec->autoneg_tries = 0;
				/* re-enable SDR, for next link down */
				set_7220_ibspeed_fast(ppd,
						      ppd->link_speed_enabled);
				wake_up(&ppd->cpspec->autoneg_wait);
				symadj = 1;
			} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
				/*
				 * Clear autoneg failure flag, and do setup
				 * so we'll try next time link goes down and
				 * back to INIT (possibly connected to a
				 * different device).
				 */
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				ppd->cpspec->ibcddrctrl |=
					IBA7220_IBC_IBTA_1_2_MASK;
				qib_write_kreg(dd, kr_ncmodectrl, 0);
				symadj = 1;
			}
		}

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			symadj = 1;

		if (!ret) {
			ppd->delay_mult = rate_to_delay
			    [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
			    [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];

			set_7220_relock_poll(dd, ibup);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			/*
			 * Unlike 7322, the 7220 needs this, due to lack of
			 * interrupt in some cases when we have sdma active
			 * when the link goes down.
			 */
			if (ppd->sdma_state.current_state !=
			    qib_sdma_state_s20_idle)
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e00_go_hw_down);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
	}

	if (symadj) {
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
				cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
				cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
							  cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
							     cr_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7220_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function.  We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
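/*
 * Usage sketch (hypothetical call, following the semantics above): to
 * drive the TWSI SDA pin low, a caller could use
 *	gpio_7220_mod(dd, 0, 1 << dd->gpio_sda_num, 1 << dd->gpio_sda_num);
 * i.e. select the pin in mask, make it an output via dir, and drive 0.
 * With mask == 0 the call is a pure read of the GPIO input pins.
 */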
/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7220_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport->ibmtu = (u32)mtu;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			((char __iomem *) dd->kregbase +
			 (dd->piobufbase >> 32));
		/*
		 * 4K buffers take 2 pages; we use roundup just to be
		 * paranoid; we calculate it once here, rather than on
		 * every buf allocate
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;

	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
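/*
 * The pioavregs computation above: each 64-bit PIO-avail register
 * carries 2 bits of state per send buffer, so one register covers
 * sizeof(u64) * BITS_PER_BYTE / 2 == 32 buffers; e.g. (hypothetical
 * count) 144 total buffers need ALIGN(144, 32) / 32 == 5 registers.
 */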
/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7220_chip_params(), so split out as separate function.
 */
static void set_7220_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;

	/* init after possible re-map in init_chip_wc_pat() */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);
	dd->cspec->cregbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + cregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);
}

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |	\
			   SYM_MASK(SendCtrl, SPioEnable) |		\
			   SYM_MASK(SendCtrl, SSpecialTriggerEn) |	\
			   SYM_MASK(SendCtrl, SendBufAvailUpd) |	\
			   SYM_MASK(SendCtrl, AvailUpdThld) |		\
			   SYM_MASK(SendCtrl, SDmaEnable) |		\
			   SYM_MASK(SendCtrl, SDmaIntEnable) |		\
			   SYM_MASK(SendCtrl, SDmaHalt) |		\
			   SYM_MASK(SendCtrl, SDmaSingleDescriptor))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op,
			 u32 offs, u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx = offs / sizeof(u64);
	u64 local_data, all_bits;

	if (idx != kr_sendctrl) {
		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
			    offs, only_32 ? "32" : "64");
		return 0;
	}

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if ((mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read.  The judgement call is whether from
		 * reg or shadow.  First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
			    (u32)local_data, (u32)dd->sendctrl);
		if ((local_data & SENDCTRL_SHADOWED) !=
		    (dd->sendctrl & SENDCTRL_SHADOWED))
			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
				    (u32)local_data, (u32)dd->sendctrl);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		sval = (dd->sendctrl & ~mask);
		sval |= *data & SENDCTRL_SHADOWED & mask;
		dd->sendctrl = sval;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
			    (u32)tval, (u32)sval);
		qib_write_kreg(dd, kr_sendctrl, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	return only_32 ? 4 : 8;
}

static const struct diag_observer sendctrl_observer = {
	sendctrl_hook, kr_sendctrl * sizeof(u64),
	kr_sendctrl * sizeof(u64)
};
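/*
 * The two offsets in sendctrl_observer are the bottom and top of the
 * observed register range; using kr_sendctrl's offset for both watches
 * that single register, so diag reads and writes of SendCtrl are routed
 * through sendctrl_hook() and stay coherent with the dd->sendctrl
 * shadow.
 */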
/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7220_initreg(struct qib_devdata *dd)
{
	int ret = 0;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd, "Catastrophic software error, "
			    "SendPIOAvailAddr written as %lx, "
			    "read back as %llx\n",
			    (unsigned long) dd->pioavailregs_phys,
			    (unsigned long long) val);
		ret = -EINVAL;
	}
	qib_register_observer(dd, &sendctrl_observer);
	return ret;
}

static int qib_init_7220_variables(struct qib_devdata *dd)
{
	struct qib_chippport_specific *cpspec;
	struct qib_pportdata *ppd;
	int ret = 0;
	u32 sbufs, updthresh;

	cpspec = (struct qib_chippport_specific *)(dd + 1);
	ppd = &cpspec->pportdata;
	dd->pport = ppd;
	dd->num_pports = 1;

	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
	ppd->cpspec = cpspec;

	spin_lock_init(&dd->cspec->sdepb_lock);
	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd, "Revision register read failure, "
			    "giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT;	/* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
				    ChipRevMinor);

	get_7220_chip_params(dd);
	qib_7220_boardname(dd);

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * EEPROM error log 0 is TXE Parity errors.  1 is RXE Parity.
	 * 2 is Some Misc, 3 is reserved for future.
	 */
	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);

	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);

	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);

	init_waitqueue_head(&cpspec->autoneg_wait);
	INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);

	qib_init_pportdata(ppd, dd, 0, 1);
	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;

	ppd->link_width_enabled = ppd->link_width_supported;
	ppd->link_speed_enabled = ppd->link_speed_supported;
	/*
	 * Set the initial values to reasonable default, will be set
	 * for real when link is up.
	 */
	ppd->link_width_active = IB_WIDTH_4X;
	ppd->link_speed_active = QIB_IB_SDR;
	ppd->delay_mult = rate_to_delay[0][1];
	ppd->vls_supported = IB_VL_VL0;
	ppd->vls_operational = ppd->vls_supported;

	if (!qib_mini_init)
		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);

	init_timer(&ppd->cpspec->chase_timer);
	ppd->cpspec->chase_timer.function = reenable_7220_chase;
	ppd->cpspec->chase_timer.data = (unsigned long)ppd;

	qib_num_cfg_vls = 1; /* if any 7220's, only one VL */

	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset =
		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	ret = ib_mtu_enum_to_int(qib_ibmtu);
	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;

	qib_7220_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.  For now, we set this
	 * up for a single packet.
	 */
	dd->rhdrhead_intr_off = 1ULL << 32;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7220_faststats;
	dd->stats_timer.data = (unsigned long) dd;
	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;

	/*
	 * Control[4] has been added to change the arbitration within
	 * the SDMA engine between favoring data fetches over descriptor
	 * fetches.  qib_sdma_fetch_arb==0 gives data fetches priority.
	 */
	if (qib_sdma_fetch_arb)
		dd->control |= 1 << 4;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = (dd->piosize2k >> 2) - 1;
	qib_7220_config_ctxts(dd);
	qib_set_ctxtcnt(dd);  /* needed for PAT setup */

	if (qib_wc_pat) {
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;
	}
	set_7220_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;

	ret = qib_create_ctxts(dd);
	init_7220_cntrnames(dd);

	/* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * if this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	updthresh = 8U; /* update threshold */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}

	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->pbufsctxt = dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt);

	/*
	 * If we are at 16 user contexts, we will have only 7 sbufs
	 * per context, so drop the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt,
	 * so give ourselves some margin.
	 */
	if ((dd->pbufsctxt - 2) < updthresh)
		updthresh = dd->pbufsctxt - 2;

	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
		<< SYM_LSB(SendCtrl, AvailUpdThld);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
bail:
	return ret;
}
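/*
 * Worked example of the buffer accounting above (counts hypothetical):
 * with piobcnt2k == 128, piobcnt4k == 32 and SDMA enabled, sdmabufcnt
 * is 32 and sbufs is 8, so lastctxt_piobuf == 128 + 32 - 32 - 8 == 120
 * buffers are left to divide among the user contexts, and
 * lastbuf_for_pio ends up as 128 + 32 - 32 - 1 == 127 (decremented
 * because the range is inclusive).
 */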
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *buf;

	if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
	    !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
		buf = get_7220_link_buf(ppd, pbufnum);
	else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		/* try 4k if all 2k busy, so same last for both sizes */
		last = dd->cspec->lastbuf_for_pio;
		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
	}
	return buf;
}

/* these 2 "counters" are really control registers, and are always RW */
static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	write_7220_creg(ppd->dd, cr_psinterval, intv);
	write_7220_creg(ppd->dd, cr_psstart, start);
}

/*
 * NOTE: no real attempt is made to generalize the SDMA stuff.
 * At some point "soon" we will have a new more generalized
 * set of sdma interfaces, and then we'll clean this up.
 */

/* Must be called with sdma_lock held, or before init finished */
static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
}

/* intentionally a no-op on the 7220 */
static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}

static struct sdma_set_state_action sdma_7220_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.go_s99_running_tofalse = 1,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7220_action_table;
}

static int init_sdma_7220_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned i, n;
	u64 senddmabufmask[3] = { 0 };

	/* Set SendDmaBase */
	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	/* Set SendDmaHeadAddr */
	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible
	 */
	n = dd->piobcnt2k + dd->piobcnt4k;
	i = n - dd->cspec->sdmabufcnt;

	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);

	ppd->sdma_state.first_sendbuf = i;
	ppd->sdma_state.last_sendbuf = n;

	return 0;
}
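/*
 * Buffer-mask layout above: send buffer i sets bit (i % 64) of
 * senddmabufmask[i / 64].  For example (counts hypothetical), with
 * n == 160 buffers and sdmabufcnt == 32, buffers 128..159 belong to
 * SDMA, so words 0 and 1 stay zero and word 2 gets bits 0..31 set.
 */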
/* sdma_lock must be held */
static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*ppd->sdma_head_dma) :
		(u16)qib_read_kreg32(dd, kr_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail) {
		/* not wrapped */
		sane = (hwhead >= swhead) && (hwhead <= swtail);
	} else if (swhead > swtail) {
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	} else {
		/* empty */
		sane = (hwhead == swhead);
	}

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* assume no progress */
		hwhead = swhead;
	}

	return hwhead;
}

static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * Since the delay affects this packet but the amount of the delay is
 * based on the length of the previous packet, use the last delay computed
 * and save the delay count for this packet to be used next time
 * we get here.
 */
static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret = ppd->cpspec->last_delay_mult;

	ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;

	/* Indicate VL15, if necessary */
	if (vl == 15)
		ret |= PBC_7220_VL15_SEND_CTRL;
	return ret;
}
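/*
 * Worked example for the delay computation above (multipliers
 * hypothetical): with the port at a rate giving snd_mult == 1 and a QP
 * static rate mapping to rcv_mult == 2, a packet of plen == 256 dwords
 * yields last_delay_mult = (256 * (2 - 1) + 1) >> 1 == 128, which is
 * returned and applied on the *next* call, per the comment above.
 */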
static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
{
	/* intentionally a no-op: no VL15 buffer setup is needed on the 7220 */
}

static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (!rcd->ctxt) {
		rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
		rcd->rcvegr_tid_base = 0;
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
			(rcd->ctxt - 1) * rcd->rcvegrcnt;
	}
}

static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	unsigned long flags;

	switch (which) {
	case TXCHK_CHG_TYPE_KERN:
		/* see if we need to raise avail update threshold */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			    ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			    < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;
	case TXCHK_CHG_TYPE_USER:
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
		    / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;
	}
}

static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

#define VALID_TS_RD_REG_MASK 0xBF

/**
 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
 * @dd: the qlogic_ib device
 * @regnum: register to read from
 *
 * returns reg contents (0..255) or < 0 for error
 */
static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	int ret;
	u8 rdata;

	if (regnum > 7) {
		ret = -EINVAL;
		goto bail;
	}

	/* return a bogus value for (the one) register we do not have */
	if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
		ret = 0;
		goto bail;
	}

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto bail;

	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
	if (!ret)
		ret = rdata;

	mutex_unlock(&dd->eep_lock);

	/*
	 * There are three possibilities here:
	 * ret is actual value (0..255)
	 * ret is -ENXIO or -EINVAL from twsi code or this file
	 * ret is -EINTR from mutex_lock_interruptible.
	 */
bail:
	return ret;
}
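/*
 * Note on VALID_TS_RD_REG_MASK above: 0xBF is 0b10111111, so bit 6 is
 * clear and register 6 is the one sensor register not implemented;
 * qib_7220_tempsense_rd() returns the harmless value 0 for it rather
 * than issuing a TWSI read.
 */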
/* Dummy function, as 7220 boards never disable EEPROM Write */
static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
{
	return 1;
}

/**
 * qib_init_iba7220_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for the qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct qib_devdata *dd;
	int ret;
	u32 boardid, minwidth;

	dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
			       sizeof(struct qib_chippport_specific));
	if (IS_ERR(dd))
		goto bail;

	dd->f_bringup_serdes = qib_7220_bringup_serdes;
	dd->f_cleanup = qib_setup_7220_cleanup;
	dd->f_clear_tids = qib_7220_clear_tids;
	dd->f_free_irq = qib_7220_free_irq;
	dd->f_get_base_info = qib_7220_get_base_info;
	dd->f_get_msgheader = qib_7220_get_msgheader;
	dd->f_getsendbuf = qib_7220_getsendbuf;
	dd->f_gpio_mod = gpio_7220_mod;
	dd->f_eeprom_wen = qib_7220_eeprom_wen;
	dd->f_hdrqempty = qib_7220_hdrqempty;
	dd->f_ib_updown = qib_7220_ib_updown;
	dd->f_init_ctxt = qib_7220_init_ctxt;
	dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
	dd->f_intr_fallback = qib_7220_intr_fallback;
	dd->f_late_initreg = qib_late_7220_initreg;
	dd->f_setpbc_control = qib_7220_setpbc_control;
	dd->f_portcntr = qib_portcntr_7220;
	dd->f_put_tid = qib_7220_put_tid;
	dd->f_quiet_serdes = qib_7220_quiet_serdes;
	dd->f_rcvctrl = rcvctrl_7220_mod;
	dd->f_read_cntrs = qib_read_7220cntrs;
	dd->f_read_portcntrs = qib_read_7220portcntrs;
	dd->f_reset = qib_setup_7220_reset;
	dd->f_init_sdma_regs = init_sdma_7220_regs;
	dd->f_sdma_busy = qib_sdma_7220_busy;
	dd->f_sdma_gethead = qib_sdma_7220_gethead;
	dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
	dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
	dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
	dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
	dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
	dd->f_sdma_init_early = qib_7220_sdma_init_early;
	dd->f_sendctrl = sendctrl_7220_mod;
	dd->f_set_armlaunch = qib_set_7220_armlaunch;
	dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
	dd->f_iblink_state = qib_7220_iblink_state;
	dd->f_ibphys_portstate = qib_7220_phys_portstate;
	dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
	dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
	dd->f_set_ib_loopback = qib_7220_set_loopback;
	dd->f_set_intr_state = qib_7220_set_intr_state;
	dd->f_setextled = qib_setup_7220_setextled;
	dd->f_txchk_change = qib_7220_txchk_change;
	dd->f_update_usrhead = qib_update_7220_usrhead;
	dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
	dd->f_xgxs_reset = qib_7220_xgxs_reset;
	dd->f_writescratch = writescratch;
	dd->f_tempsense_rd = qib_7220_tempsense_rd;
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped, but chip registers
	 * are not set up until start of qib_init_7220_variables.
	 */
	ret = qib_pcie_ddinit(dd, pdev, ent);
	if (ret < 0)
		goto bail_free;

	/* initialize chip-specific variables */
	ret = qib_init_7220_variables(dd);
	if (ret)
		goto bail_cleanup;

	if (qib_mini_init)
		goto bail;

	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
	switch (boardid) {
	case 0:
	case 2:
	case 10:
	case 12:
		minwidth = 16; /* x16 capable boards */
		break;
	default:
		minwidth = 8; /* x8 capable boards */
		break;
	}
	if (qib_pcie_params(dd, minwidth, NULL, NULL))
		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
			    "continuing anyway\n");

	/* save IRQ for possible later use */
	dd->cspec->irq = pdev->irq;

	if (qib_read_kreg64(dd, kr_hwerrstatus) &
	    QLOGIC_IB_HWE_SERDESPLLFAILED)
		qib_write_kreg(dd, kr_hwerrclear,
			       QLOGIC_IB_HWE_SERDESPLLFAILED);

	/* setup interrupt handler (interrupt type handled above) */
	qib_setup_7220_interrupt(dd);
	qib_7220_init_hwerrors(dd);

	/* clear diagctrl register, in case diags were running and crashed */
	qib_write_kreg(dd, kr_hwdiagctrl, 0);

	goto bail;

bail_cleanup:
	qib_pcie_ddcleanup(dd);
bail_free:
	qib_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;
}