GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/qib/qib_iba7220.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * This file contains all of the code that is specific to the
 * QLogic_IB 7220 chip (except that specific to the SerDes)
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_7220.h"

static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
static u32 qib_7220_iblink_state(u64);
static u8 qib_7220_phys_portstate(u64);
static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);

/*
 * This file contains almost all the chip-specific register information and
 * access functions for the QLogic_IB 7220 PCI-Express chip, with the
 * exception of SerDes support, which is in qib_sd7220.c.
 */

/* Below uses machine-generated qib_chipnum_regs.h file */
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
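/*
 * Illustrative expansion (editor's note, not in the original source):
 * KREG_IDX(Control) becomes QIB_7220_Control_OFFS / sizeof(u64), i.e. the
 * byte offset from the machine-generated header converted to an index into
 * the chip's kernel registers viewed as an array of u64, so e.g.
 * qib_read_kreg32(dd, kr_control) reads register KREG_IDX(Control).
 */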

/* Use defines to tie machine-generated names to lower-case names */
#define kr_control KREG_IDX(Control)
#define kr_counterregbase KREG_IDX(CntrRegBase)
#define kr_errclear KREG_IDX(ErrClear)
#define kr_errmask KREG_IDX(ErrMask)
#define kr_errstatus KREG_IDX(ErrStatus)
#define kr_extctrl KREG_IDX(EXTCtrl)
#define kr_extstatus KREG_IDX(EXTStatus)
#define kr_gpio_clear KREG_IDX(GPIOClear)
#define kr_gpio_mask KREG_IDX(GPIOMask)
#define kr_gpio_out KREG_IDX(GPIOOut)
#define kr_gpio_status KREG_IDX(GPIOStatus)
#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
#define kr_hwerrclear KREG_IDX(HwErrClear)
#define kr_hwerrmask KREG_IDX(HwErrMask)
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
#define kr_ibcctrl KREG_IDX(IBCCtrl)
#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
#define kr_ibcstatus KREG_IDX(IBCStatus)
#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
#define kr_intclear KREG_IDX(IntClear)
#define kr_intmask KREG_IDX(IntMask)
#define kr_intstatus KREG_IDX(IntStatus)
#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
#define kr_palign KREG_IDX(PageAlign)
#define kr_partitionkey KREG_IDX(RcvPartitionKey)
#define kr_portcnt KREG_IDX(PortCnt)
#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
#define kr_rcvctrl KREG_IDX(RcvCtrl)
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
#define kr_revision KREG_IDX(Revision)
#define kr_scratch KREG_IDX(Scratch)
#define kr_sendbuffererror KREG_IDX(SendBufErr0)
#define kr_sendctrl KREG_IDX(SendCtrl)
#define kr_senddmabase KREG_IDX(SendDmaBase)
#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
#define kr_senddmahead KREG_IDX(SendDmaHead)
#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
#define kr_senddmastatus KREG_IDX(SendDmaStatus)
#define kr_senddmatail KREG_IDX(SendDmaTail)
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
#define kr_sendpiosize KREG_IDX(SendBufSize)
#define kr_sendregbase KREG_IDX(SendRegBase)
#define kr_userregbase KREG_IDX(UserRegBase)
#define kr_xgxs_cfg KREG_IDX(XGXSCfg)

/* These must only be written via qib_write_kreg_ctxt() */
#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)


#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
			QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
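/*
 * Editor's note (illustrative): counter registers are indexed relative to
 * LBIntCnt, the first counter register, so CREG_IDX(LBIntCnt) is 0 and
 * read_7220_creg32(dd, cr_lbint) below reads the first 32-bit counter.
 */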

#define cr_badformat CREG_IDX(RxVersionErrCnt)
#define cr_erricrc CREG_IDX(RxICRCErrCnt)
#define cr_errlink CREG_IDX(RxLinkMalformCnt)
#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
#define cr_err_rlen CREG_IDX(RxLenErrCnt)
#define cr_errslen CREG_IDX(TxLenErrCnt)
#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
#define cr_lbint CREG_IDX(LBIntCnt)
#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
#define cr_pktrcv CREG_IDX(RxDataPktCnt)
#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
#define cr_pktsend CREG_IDX(TxDataPktCnt)
#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
#define cr_rcvebp CREG_IDX(RxEBPCnt)
#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
#define cr_sendstall CREG_IDX(TxFlowStallCnt)
#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
#define cr_wordrcv CREG_IDX(RxDwordCnt)
#define cr_wordsend CREG_IDX(TxDwordCnt)
#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
#define cr_psstat CREG_IDX(PSStat)
#define cr_psstart CREG_IDX(PSStart)
#define cr_psinterval CREG_IDX(PSInterval)
#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)

#define SYM_RMASK(regname, fldname) ((u64) \
	QIB_7220_##regname##_##fldname##_RMASK)
#define SYM_MASK(regname, fldname) ((u64) \
	QIB_7220_##regname##_##fldname##_RMASK << \
	QIB_7220_##regname##_##fldname##_LSB)
#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
#define SYM_FIELD(value, regname, fldname) ((u64) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	SYM_RMASK(regname, fldname)))
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
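/*
 * Worked example (editor's sketch): given a raw IBCStatus value ibcs,
 *
 *	ibclt = SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
 *
 * shifts ibcs right by the field's LSB and masks it with the field's
 * RMASK, leaving just the link training state bits (as handle_7220_chase()
 * does below); SYM_MASK() yields the same mask left-shifted into register
 * position, for testing bits in place.
 */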

/* ibcctrl bits */
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
/* cycle through TS1/TS2 till OK */
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16

#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3	/* move to 0x31 */

#define BLOB_7220_IBCHG 0x81

/*
 * We could have a single register get/put routine, that takes a group type,
 * but this is somewhat clearer and cleaner.  It also gives us some error
 * checking.  64 bit register reads should always work, but are inefficient
 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
 * so we use kreg32 wherever possible.  User register and counter register
 * reads are always 32 bit reads, so only one form of those routines.
 */

/**
 * qib_read_ureg32 - read 32-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @ctxt: context number
 *
 * Return the contents of a register that is virtualized to be per context.
 * Returns 0 on errors (not distinguishable from valid contents at
 * runtime; we may add a separate error variable at some point).
 */
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
				  enum qib_ureg regno, int ctxt)
{
	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
		return 0;

	if (dd->userbase)
		return readl(regno + (u64 __iomem *)
			     ((char __iomem *)dd->userbase +
			      dd->ureg_align * ctxt));
	else
		return readl(regno + (u64 __iomem *)
			     (dd->uregbase +
			      (char __iomem *)dd->kregbase +
			      dd->ureg_align * ctxt));
}

/**
 * qib_write_ureg - write 64-bit virtualized per-context register
 * @dd: device
 * @regno: register number
 * @value: value
 * @ctxt: context
 *
 * Write the contents of a register that is virtualized to be per context.
 */
static inline void qib_write_ureg(const struct qib_devdata *dd,
				  enum qib_ureg regno, u64 value, int ctxt)
{
	u64 __iomem *ubase;

	if (dd->userbase)
		ubase = (u64 __iomem *)
			((char __iomem *) dd->userbase +
			 dd->ureg_align * ctxt);
	else
		ubase = (u64 __iomem *)
			(dd->uregbase +
			 (char __iomem *) dd->kregbase +
			 dd->ureg_align * ctxt);

	if (dd->kregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &ubase[regno]);
}

/**
 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
 * @dd: the qlogic_ib device
 * @regno: the register number to write
 * @ctxt: the context containing the register
 * @value: the value to write
 */
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
				       const u16 regno, unsigned ctxt,
				       u64 value)
{
	qib_write_kreg(dd, regno + ctxt, value);
}

static inline void write_7220_creg(const struct qib_devdata *dd,
				   u16 regno, u64 value)
{
	if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
		writeq(value, &dd->cspec->cregbase[regno]);
}

static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readq(&dd->cspec->cregbase[regno]);
}

static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
{
	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
		return 0;
	return readl(&dd->cspec->cregbase[regno]);
}

/* kr_revision bits */
#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
#define QLOGIC_IB_R_EMULATORREV_SHIFT 40

/* kr_control bits */
#define QLOGIC_IB_C_RESET (1U << 7)

/* kr_intstatus, kr_intclear, kr_intmask bits */
#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVURG_SHIFT 32
#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)

#define QLOGIC_IB_C_FREEZEMODE 0x00000002
#define QLOGIC_IB_C_LINKENABLE 0x00000004

#define QLOGIC_IB_I_SDMAINT		0x8000000000000000ULL
#define QLOGIC_IB_I_SDMADISABLED	0x4000000000000000ULL
#define QLOGIC_IB_I_ERROR		0x0000000080000000ULL
#define QLOGIC_IB_I_SPIOSENT		0x0000000040000000ULL
#define QLOGIC_IB_I_SPIOBUFAVAIL	0x0000000020000000ULL
#define QLOGIC_IB_I_GPIO		0x0000000010000000ULL

/* variables for sanity checking interrupt and errors */
#define QLOGIC_IB_I_BITSEXTANT \
		(QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
		(QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
		(QLOGIC_IB_I_RCVAVAIL_MASK << \
		 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
		QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
		QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
		QLOGIC_IB_I_SERDESTRIMDONE)
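/*
 * Editor's note (illustrative usage): the *_BITSEXTANT masks collect every
 * interrupt/error bit the driver knows about, so unknown bits can be
 * reported, as the error handlers below do:
 *
 *	if (errs & ~IB_E_BITSEXTANT)
 *		qib_dev_err(dd, "error interrupt with unknown errors ...");
 */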

#define IB_HWE_BITSEXTANT \
	       (HWE_MASK(RXEMemParityErr) | \
		HWE_MASK(TXEMemParityErr) | \
		(QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
		 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
		QLOGIC_IB_HWE_PCIE1PLLFAILED | \
		QLOGIC_IB_HWE_PCIE0PLLFAILED | \
		QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
		QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
		QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
		QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
		QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
		HWE_MASK(PowerOnBISTFailed) | \
		QLOGIC_IB_HWE_COREPLL_FBSLIP | \
		QLOGIC_IB_HWE_COREPLL_RFSLIP | \
		QLOGIC_IB_HWE_SERDESPLLFAILED | \
		HWE_MASK(IBCBusToSPCParityErr) | \
		HWE_MASK(IBCBusFromSPCParityErr) | \
		QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
		QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
		QLOGIC_IB_HWE_SDMAMEMREADERR | \
		QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
		QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
		QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
		QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
		QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
		QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
		QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)

#define IB_E_BITSEXTANT \
	(ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
	 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
	 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
	 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
	 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
	 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
	 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
	 ERR_MASK(SendSpecialTriggerErr) | \
	 ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
	 ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendDroppedDataPktErr) | \
	 ERR_MASK(SendPioArmLaunchErr) | \
	 ERR_MASK(SendUnexpectedPktNumErr) | \
	 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
	 ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
	 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
	 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
	 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
	 ERR_MASK(SDmaUnexpDataErr) | \
	 ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
	 ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
	 ERR_MASK(SDmaDescAddrMisalignErr) | \
	 ERR_MASK(InvalidEEPCmd))

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
/* specific to this chip */
#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL

#define IBA7220_IBCC_LINKCMD_SHIFT 19

/* kr_ibcddrctrl bits */
#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
#define IBA7220_IBC_DLIDLMC_SHIFT 32

#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
				SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)

#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
#define IBA7220_IBC_LREV_MASK 1
#define IBA7220_IBC_LREV_SHIFT 8
#define IBA7220_IBC_RXPOL_MASK 1
#define IBA7220_IBC_RXPOL_SHIFT 7
#define IBA7220_IBC_WIDTH_SHIFT 5
#define IBA7220_IBC_WIDTH_MASK 0x3
#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
#define IBA7220_IBC_SPEED_SDR (1 << 2)
#define IBA7220_IBC_SPEED_DDR (1 << 3)
#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
#define IBA7220_IBC_IBTA_1_2_MASK (1)

/* kr_ibcddrstatus */
/* link latency shift is 0, don't bother defining */
#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff

/* kr_extstatus bits */
#define QLOGIC_IB_EXTS_FREQSEL 0x2
#define QLOGIC_IB_EXTS_SERDESSEL 0x4
#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000

/* kr_xgxsconfig bits */
#define QLOGIC_IB_XGXS_RESET 0x5ULL
#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)

/* kr_rcvpktledcnt */
#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */

#define _QIB_GPIO_SDA_NUM 1
#define _QIB_GPIO_SCL_NUM 0
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
#define QIB_TWSI_TEMP_DEV 0x98

/* HW counter clock is at 4nsec */
#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000

#define IBA7220_R_INTRAVAIL_SHIFT 17
#define IBA7220_R_PKEY_DIS_SHIFT 34
#define IBA7220_R_TAILUPD_SHIFT 35
#define IBA7220_R_CTXTCFG_SHIFT 36

#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */

/*
 * the size bits give us 2^N, in KB units.  0 marks as invalid,
 * and 7 is reserved.  We currently use only 2KB and 4KB
 */
#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
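/*
 * Worked example (editor's note): the 3-bit selector in bits 39:37 encodes
 * 2^N KB, so IBA7220_TID_SZ_2K places N=1 (2^1 KB = 2KB) and
 * IBA7220_TID_SZ_4K places N=2 (2^2 KB = 4KB) in a TID entry.
 */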
#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
#define PBC_7220_VL15_SEND (1ULL << 63)	/* pbc; VL15, no credit check */
#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */

#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */

/* packet rate matching delay multiplier */
static u8 rate_to_delay[2][2] = {
	/* 1x, 4x */
	{   8, 2 },	/* SDR */
	{   4, 1 }	/* DDR */
};

static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
	[IB_RATE_2_5_GBPS] = 8,
	[IB_RATE_5_GBPS] = 4,
	[IB_RATE_10_GBPS] = 2,
	[IB_RATE_20_GBPS] = 1
};
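/*
 * Editor's note (illustrative): both tables express the same inter-packet
 * delay multiplier, indexed two different ways; a 1x SDR link (2.5 Gbps)
 * needs 8x the delay of a 4x DDR link (20 Gbps), so
 *
 *	rate_to_delay[0][0] == ib_rate_to_delay[IB_RATE_2_5_GBPS] == 8
 *	rate_to_delay[1][1] == ib_rate_to_delay[IB_RATE_20_GBPS] == 1
 */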

#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)

/* link training states, from IBC */
#define IB_7220_LT_STATE_DISABLED	0x00
#define IB_7220_LT_STATE_LINKUP		0x01
#define IB_7220_LT_STATE_POLLACTIVE	0x02
#define IB_7220_LT_STATE_POLLQUIET	0x03
#define IB_7220_LT_STATE_SLEEPDELAY	0x04
#define IB_7220_LT_STATE_SLEEPQUIET	0x05
#define IB_7220_LT_STATE_CFGDEBOUNCE	0x08
#define IB_7220_LT_STATE_CFGRCVFCFG	0x09
#define IB_7220_LT_STATE_CFGWAITRMT	0x0a
#define IB_7220_LT_STATE_CFGIDLE	0x0b
#define IB_7220_LT_STATE_RECOVERRETRAIN	0x0c
#define IB_7220_LT_STATE_RECOVERWAITRMT	0x0e
#define IB_7220_LT_STATE_RECOVERIDLE	0x0f

/* link state machine states from IBC */
#define IB_7220_L_STATE_DOWN		0x0
#define IB_7220_L_STATE_INIT		0x1
#define IB_7220_L_STATE_ARM		0x2
#define IB_7220_L_STATE_ACTIVE		0x3
#define IB_7220_L_STATE_ACT_DEFER	0x4

static const u8 qib_7220_physportstate[0x20] = {
	[IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
	[IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
	[IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
	[IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
	[IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
	[IB_7220_LT_STATE_CFGDEBOUNCE] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGRCVFCFG] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGWAITRMT] =
		IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[IB_7220_LT_STATE_RECOVERRETRAIN] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7220_LT_STATE_RECOVERWAITRMT] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[IB_7220_LT_STATE_RECOVERIDLE] =
		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
	[0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

int qib_special_trigger;
module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");

#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)

#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
	(1ULL << (SYM_LSB(regname, fldname) + (bit))))
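/*
 * Editor's note (illustrative): SYM_MASK_BIT() selects a single bit within
 * a multi-bit field, so TXEMEMPARITYERR_PIOBUF below is bit 0 of the
 * TXEMemParityErr field of HwErrMask, already shifted into register
 * position.
 */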

#define TXEMEMPARITYERR_PIOBUF \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
#define TXEMEMPARITYERR_PIOPBC \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
	SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)

#define RXEMEMPARITYERR_RCVBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
#define RXEMEMPARITYERR_LOOKUPQ \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
#define RXEMEMPARITYERR_EXPTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
#define RXEMEMPARITYERR_EAGERTID \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
#define RXEMEMPARITYERR_FLAGBUF \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
#define RXEMEMPARITYERR_DATAINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
#define RXEMEMPARITYERR_HDRINFO \
	SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)

/* 7220 specific hardware errors... */
static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
	/* generic hardware errors */
	QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
	QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),

	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
			  "TXE PIOBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
			  "TXE PIOPBC Memory Parity"),
	QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
			  "TXE PIOLAUNCHFIFO Memory Parity"),

	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
			  "RXE RCVBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
			  "RXE LOOKUPQ Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
			  "RXE EAGERTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
			  "RXE EXPTID Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
			  "RXE FLAGBUF Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
			  "RXE DATAINFO Memory Parity"),
	QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
			  "RXE HDRINFO Memory Parity"),

	/* chip-specific hardware errors */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
			  "PCIe Poisoned TLP"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
			  "PCIe completion timeout"),
	/*
	 * In practice, it's unlikely that we'll see PCIe PLL, or bus
	 * parity or memory parity error failures, because most likely we
	 * won't be able to talk to the core of the chip.  Nonetheless, we
	 * might see them, if they are in parts of the PCIe core that aren't
	 * essential.
	 */
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
			  "PCIePLL1"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
			  "PCIePLL0"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
			  "PCIe XTLH core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
			  "PCIe ADM TX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
			  "PCIe ADM RX core parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
			  "SerDes PLL"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
			  "PCIe cpl data queue"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
			  "PCIe cpl header queue"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
			  "Send DMA memory read"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
			  "uC PLL clock not locked"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
			  "PCIe serdes Q0 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
			  "PCIe serdes Q1 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
			  "PCIe serdes Q2 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
			  "PCIe serdes Q3 no clock"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
			  "DDS RXEQ memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
			  "IB uC memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
			  "PCIe uC oct0 memory parity"),
	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
			  "PCIe uC oct1 memory parity"),
};

#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)

#define QLOGIC_IB_E_PKTERRS (\
		ERR_MASK(SendPktLenErr) | \
		ERR_MASK(SendDroppedDataPktErr) | \
		ERR_MASK(RcvVCRCErr) | \
		ERR_MASK(RcvICRCErr) | \
		ERR_MASK(RcvShortPktLenErr) | \
		ERR_MASK(RcvEBPErr))

/* Convenience for decoding Send DMA errors */
#define QLOGIC_IB_E_SDMAERRS ( \
		ERR_MASK(SDmaGenMismatchErr) | \
		ERR_MASK(SDmaOutOfBoundErr) | \
		ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
		ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
		ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
		ERR_MASK(SDmaUnexpDataErr) | \
		ERR_MASK(SDmaDescAddrMisalignErr) | \
		ERR_MASK(SDmaDisabledErr) | \
		ERR_MASK(SendBufMisuseErr))

/* These are all rcv-related errors which we want to count for stats */
#define E_SUM_PKTERRS \
	(ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
	 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
	 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
	 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
	 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))

/* These are all send-related errors which we want to count for stats */
#define E_SUM_ERRS \
	(ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
	 ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
	 ERR_MASK(InvalidAddrErr))

/*
 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we
 * don't ignore errors that aren't related to freeze and cancelling
 * buffers.  Armlaunch can't be ignored because more could arrive while
 * we are still cleaning up, and those need to be cancelled as they
 * happen.
 */
#define E_SPKT_ERRS_IGNORE \
	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
	 ERR_MASK(SendPktLenErr))

/*
 * these are errors that can occur when the link changes state while
 * a packet is being sent or received.  This doesn't cover things
 * like EBP or VCRC that can be the result of a send having the
 * link change state, so we receive a "known bad" packet.
 */
#define E_SUM_LINK_PKTERRS \
	(ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
	 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
	 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
	 ERR_MASK(RcvUnexpectedCharErr))

static void autoneg_7220_work(struct work_struct *);
static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);

/*
 * Called when we might have an error that is specific to a particular
 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
 * we don't need to force the update of pioavail.
 */
static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
{
	unsigned long sbuf[3];
	struct qib_devdata *dd = ppd->dd;

	/*
	 * It's possible that sendbuffererror could have bits set; might
	 * have already done this as a result of hardware error handling.
	 */
	/* read these before writing errorclear */
	sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
	sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
	sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

	if (sbuf[0] || sbuf[1] || sbuf[2])
		qib_disarm_piobufs_set(dd, sbuf,
				       dd->piobcnt2k + dd->piobcnt4k);
}

static void qib_7220_txe_recover(struct qib_devdata *dd)
{
	qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
	qib_disarm_7220_senderrbufs(dd->pport);
}

/*
 * This is called with interrupts disabled and sdma_lock held.
 */
static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
{
	struct qib_devdata *dd = ppd->dd;
	u64 set_sendctrl = 0;
	u64 clr_sendctrl = 0;

	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);

	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
		set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
	else
		clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);

	spin_lock(&dd->sendctrl_lock);

	dd->sendctrl |= set_sendctrl;
	dd->sendctrl &= ~clr_sendctrl;

	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
	qib_write_kreg(dd, kr_scratch, 0);

	spin_unlock(&dd->sendctrl_lock);
}

static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
				      u64 err, char *buf, size_t blen)
{
	static const struct {
		u64 err;
		const char *msg;
	} errs[] = {
		{ ERR_MASK(SDmaGenMismatchErr),
		  "SDmaGenMismatch" },
		{ ERR_MASK(SDmaOutOfBoundErr),
		  "SDmaOutOfBound" },
		{ ERR_MASK(SDmaTailOutOfBoundErr),
		  "SDmaTailOutOfBound" },
		{ ERR_MASK(SDmaBaseErr),
		  "SDmaBase" },
		{ ERR_MASK(SDma1stDescErr),
		  "SDma1stDesc" },
		{ ERR_MASK(SDmaRpyTagErr),
		  "SDmaRpyTag" },
		{ ERR_MASK(SDmaDwEnErr),
		  "SDmaDwEn" },
		{ ERR_MASK(SDmaMissingDwErr),
		  "SDmaMissingDw" },
		{ ERR_MASK(SDmaUnexpDataErr),
		  "SDmaUnexpData" },
		{ ERR_MASK(SDmaDescAddrMisalignErr),
		  "SDmaDescAddrMisalign" },
		{ ERR_MASK(SendBufMisuseErr),
		  "SendBufMisuse" },
		{ ERR_MASK(SDmaDisabledErr),
		  "SDmaDisabled" },
	};
	int i;
	size_t bidx = 0;

	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		if (err & errs[i].err)
			bidx += scnprintf(buf + bidx, blen - bidx,
					  "%s ", errs[i].msg);
	}
}

/*
 * This is called as part of link down clean up, so disarm and flush
 * all send buffers so that SMP packets can be sent.
 */
static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
{
	/* This will trigger the Abort interrupt */
	sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
			  QIB_SENDCTRL_AVAIL_BLIP);
	ppd->dd->upd_pio_shadow  = 1; /* update our idea of what's busy */
}

static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
{
	/*
	 * Set SendDmaLenGen and clear and set
	 * the MSB of the generation count to enable generation checking
	 * and load the internal generation counter.
	 */
	qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
	qib_write_kreg(ppd->dd, kr_senddmalengen,
		       ppd->sdma_descq_cnt |
		       (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
}

static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
{
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	ppd->sdma_head_dma[0] = 0;
}

#define DISABLES_SDMA (							\
		ERR_MASK(SDmaDisabledErr) |				\
		ERR_MASK(SDmaBaseErr) |					\
		ERR_MASK(SDmaTailOutOfBoundErr) |			\
		ERR_MASK(SDmaOutOfBoundErr) |				\
		ERR_MASK(SDma1stDescErr) |				\
		ERR_MASK(SDmaRpyTagErr) |				\
		ERR_MASK(SDmaGenMismatchErr) |				\
		ERR_MASK(SDmaDescAddrMisalignErr) |			\
		ERR_MASK(SDmaMissingDwErr) |				\
		ERR_MASK(SDmaDwEnErr))

static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
{
	unsigned long flags;
	struct qib_devdata *dd = ppd->dd;
	char *msg;

	errs &= QLOGIC_IB_E_SDMAERRS;

	msg = dd->cspec->sdmamsgbuf;
	qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
	spin_lock_irqsave(&ppd->sdma_lock, flags);

	if (errs & ERR_MASK(SendBufMisuseErr)) {
		unsigned long sbuf[3];

		sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
		sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
		sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);

		qib_dev_err(ppd->dd,
			    "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
			    ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
			    sbuf[0]);
	}

	if (errs & ERR_MASK(SDmaUnexpDataErr))
		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
			    ppd->port);

	switch (ppd->sdma_state.current_state) {
	case qib_sdma_state_s00_hw_down:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s10_hw_start_up_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s20_idle:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s30_sw_clean_up_wait:
		/* not expecting any interrupts */
		break;

	case qib_sdma_state_s40_hw_clean_up_wait:
		if (errs & ERR_MASK(SDmaDisabledErr))
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e50_hw_cleaned);
		break;

	case qib_sdma_state_s50_hw_halt_wait:
		/* handled in intr path */
		break;

	case qib_sdma_state_s99_running:
		if (errs & DISABLES_SDMA)
			__qib_sdma_process_event(ppd,
				qib_sdma_event_e7220_err_halted);
		break;
	}

	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */
static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
			       u64 err)
{
	int iserr = 1;

	*buf = '\0';
	if (err & QLOGIC_IB_E_PKTERRS) {
		if (!(err & ~QLOGIC_IB_E_PKTERRS))
			iserr = 0;
		if ((err & ERR_MASK(RcvICRCErr)) &&
		    !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & ERR_MASK(RcvHdrLenErr))
		strlcat(buf, "rhdrlen ", blen);
	if (err & ERR_MASK(RcvBadTidErr))
		strlcat(buf, "rbadtid ", blen);
	if (err & ERR_MASK(RcvBadVersionErr))
		strlcat(buf, "rbadversion ", blen);
	if (err & ERR_MASK(RcvHdrErr))
		strlcat(buf, "rhdr ", blen);
	if (err & ERR_MASK(SendSpecialTriggerErr))
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & ERR_MASK(RcvLongPktLenErr))
		strlcat(buf, "rlongpktlen ", blen);
	if (err & ERR_MASK(RcvMaxPktLenErr))
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & ERR_MASK(RcvMinPktLenErr))
		strlcat(buf, "rminpktlen ", blen);
	if (err & ERR_MASK(SendMinPktLenErr))
		strlcat(buf, "sminpktlen ", blen);
	if (err & ERR_MASK(RcvFormatErr))
		strlcat(buf, "rformaterr ", blen);
	if (err & ERR_MASK(RcvUnsupportedVLErr))
		strlcat(buf, "runsupvl ", blen);
	if (err & ERR_MASK(RcvUnexpectedCharErr))
		strlcat(buf, "runexpchar ", blen);
	if (err & ERR_MASK(RcvIBFlowErr))
		strlcat(buf, "ribflow ", blen);
	if (err & ERR_MASK(SendUnderRunErr))
		strlcat(buf, "sunderrun ", blen);
	if (err & ERR_MASK(SendPioArmLaunchErr))
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & ERR_MASK(SendUnexpectedPktNumErr))
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & ERR_MASK(SendDroppedSmpPktErr))
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & ERR_MASK(SendMaxPktLenErr))
		strlcat(buf, "smaxpktlen ", blen);
	if (err & ERR_MASK(SendUnsupportedVLErr))
		strlcat(buf, "sunsupVL ", blen);
	if (err & ERR_MASK(InvalidAddrErr))
		strlcat(buf, "invalidaddr ", blen);
	if (err & ERR_MASK(RcvEgrFullErr))
		strlcat(buf, "rcvegrfull ", blen);
	if (err & ERR_MASK(RcvHdrFullErr))
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & ERR_MASK(IBStatusChanged))
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & ERR_MASK(RcvIBLostLinkErr))
		strlcat(buf, "riblostlink ", blen);
	if (err & ERR_MASK(HardwareErr))
		strlcat(buf, "hardware ", blen);
	if (err & ERR_MASK(ResetNegated))
		strlcat(buf, "reset ", blen);
	if (err & QLOGIC_IB_E_SDMAERRS)
		qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
	if (err & ERR_MASK(InvalidEEPCmd))
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}

static void reenable_7220_chase(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	ppd->cpspec->chase_timer.expires = 0;
	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}

static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
	u8 ibclt;
	u64 tnow;

	ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);

	/*
	 * Detect and handle the state chase issue, where we can
	 * get stuck if we are unlucky on timing on both sides of
	 * the link.  If we are, we disable, set a timer, and
	 * then re-enable.
	 */
	switch (ibclt) {
	case IB_7220_LT_STATE_CFGRCVFCFG:
	case IB_7220_LT_STATE_CFGWAITRMT:
	case IB_7220_LT_STATE_TXREVLANES:
	case IB_7220_LT_STATE_CFGENH:
		tnow = get_jiffies_64();
		if (ppd->cpspec->chase_end &&
		    time_after64(tnow, ppd->cpspec->chase_end)) {
			ppd->cpspec->chase_end = 0;
			qib_set_ib_7220_lstate(ppd,
				QLOGIC_IB_IBCC_LINKCMD_DOWN,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
			ppd->cpspec->chase_timer.expires = jiffies +
				QIB_CHASE_DIS_TIME;
			add_timer(&ppd->cpspec->chase_timer);
		} else if (!ppd->cpspec->chase_end)
			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
		break;

	default:
		ppd->cpspec->chase_end = 0;
		break;
	}
}

static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
{
	char *msg;
	u64 ignore_this_time = 0;
	u64 iserr = 0;
	int log_idx;
	struct qib_pportdata *ppd = dd->pport;
	u64 mask;

	/* don't report errors that are masked */
	errs &= dd->cspec->errormask;
	msg = dd->cspec->emsgbuf;

	/* do these first, they are most important */
	if (errs & ERR_MASK(HardwareErr))
		qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
	else
		for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
			if (errs & dd->eep_st_masks[log_idx].errs_to_log)
				qib_inc_eeprom_err(dd, log_idx, 1);

	if (errs & QLOGIC_IB_E_SDMAERRS)
		sdma_7220_errors(ppd, errs);

	if (errs & ~IB_E_BITSEXTANT)
		qib_dev_err(dd, "error interrupt with unknown errors "
			    "%llx set\n", (unsigned long long)
			    (errs & ~IB_E_BITSEXTANT));

	if (errs & E_SUM_ERRS) {
		qib_disarm_7220_senderrbufs(ppd);
		if ((errs & E_SUM_LINK_PKTERRS) &&
		    !(ppd->lflags & QIBL_LINKACTIVE)) {
			/*
			 * This can happen when trying to bring the link
			 * up, but the IB link changes state at the "wrong"
			 * time.  The IB logic then complains that the packet
			 * isn't valid.  We don't want to confuse people, so
			 * we just don't print them, except at debug
			 */
			ignore_this_time = errs & E_SUM_LINK_PKTERRS;
		}
	} else if ((errs & E_SUM_LINK_PKTERRS) &&
		   !(ppd->lflags & QIBL_LINKACTIVE)) {
		/*
		 * This can happen when SMA is trying to bring the link
		 * up, but the IB link changes state at the "wrong" time.
		 * The IB logic then complains that the packet isn't
		 * valid.  We don't want to confuse people, so we just
		 * don't print them, except at debug
		 */
		ignore_this_time = errs & E_SUM_LINK_PKTERRS;
	}

	qib_write_kreg(dd, kr_errclear, errs);

	errs &= ~ignore_this_time;
	if (!errs)
		goto done;

	/*
	 * The ones we mask off are handled specially below
	 * or above.  Also mask SDMADISABLED by default as it
	 * is too chatty.
	 */
	mask = ERR_MASK(IBStatusChanged) |
		ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
		ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);

	qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);

	if (errs & E_SUM_PKTERRS)
		qib_stats.sps_rcverrs++;
	if (errs & E_SUM_ERRS)
		qib_stats.sps_txerrs++;
	iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
			 ERR_MASK(SDmaDisabledErr));

	if (errs & ERR_MASK(IBStatusChanged)) {
		u64 ibcs;

		ibcs = qib_read_kreg64(dd, kr_ibcstatus);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			handle_7220_chase(ppd, ibcs);

		/* Update our picture of width and speed from chip */
		ppd->link_width_active =
			((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
			IB_WIDTH_4X : IB_WIDTH_1X;
		ppd->link_speed_active =
			((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
			QIB_IB_DDR : QIB_IB_SDR;

		/*
		 * Since going into a recovery state causes the link state
		 * to go down and since recovery is transitory, it is better
		 * if we "miss" ever seeing the link training state go into
		 * recovery (i.e., ignore this transition for link state
		 * special handling purposes) without updating lastibcstat.
		 */
		if (qib_7220_phys_portstate(ibcs) !=
		    IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
			qib_handle_e_ibstatuschanged(ppd, ibcs);
	}

	if (errs & ERR_MASK(ResetNegated)) {
		qib_dev_err(dd, "Got reset, requires re-init "
			    "(unload and reload driver)\n");
		dd->flags &= ~QIB_INITTED;  /* needs re-init */
		/* mark as having had error */
		*dd->devstatusp |= QIB_STATUS_HWERROR;
		*dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
	}

	if (*msg && iserr)
		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);

	if (ppd->state_wanted & ppd->lflags)
		wake_up_interruptible(&ppd->state_wait);

	/*
	 * If there were hdrq or egrfull errors, wake up any processes
	 * waiting in poll.  We used to try to check which contexts had
	 * the overflow, but given the cost of that and the chip reads
	 * to support it, it's better to just wake everybody up if we
	 * get an overflow; waiters can poll again if it's not them.
	 */
	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
		qib_handle_urcv(dd, ~0U);
		if (errs & ERR_MASK(RcvEgrFullErr))
			qib_stats.sps_buffull++;
		else
			qib_stats.sps_hdrfull++;
	}
done:
	return;
}

/* enable/disable chip from delivering interrupts */
static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		if (dd->flags & QIB_BADINTR)
			return;
		qib_write_kreg(dd, kr_intmask, ~0ULL);
		/* force re-interrupt of any pending interrupts. */
		qib_write_kreg(dd, kr_intclear, 0ULL);
	} else
		qib_write_kreg(dd, kr_intmask, 0ULL);
}

/*
 * Try to cleanup as much as possible for anything that might have gone
 * wrong while in freeze mode, such as pio buffers being written by user
 * processes (causing armlaunch), send errors due to going into freeze mode,
 * etc., and try to avoid causing extra interrupts while doing so.
 * Forcibly update the in-memory pioavail register copies after cleanup
 * because the chip won't do it while in freeze mode (the register values
 * themselves are kept correct).
 * Make sure that we don't lose any important interrupts by using the chip
 * feature that says that writing 0 to a bit in *clear that is set in
 * *status will cause an interrupt to be generated again (if allowed by
 * the *mask value).
 * This is in chip-specific code because of all of the register accesses,
 * even though the details are similar on most chips.
 */
static void qib_7220_clear_freeze(struct qib_devdata *dd)
{
	/* disable error interrupts, to avoid confusion */
	qib_write_kreg(dd, kr_errmask, 0ULL);

	/* also disable interrupts; errormask is sometimes overwritten */
	qib_7220_set_intr_state(dd, 0);

	qib_cancel_sends(dd->pport);

	/* clear the freeze, and be sure chip saw it */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);

	/* force in-memory update now we are out of freeze */
	qib_force_pio_avail_update(dd);

	/*
	 * force new interrupt if any hwerr, error or interrupt bits are
	 * still set, and clear "safe" send packet errors related to freeze
	 * and cancelling sends.  Re-enable error interrupts before possible
	 * force of re-interrupt on pending interrupts.
	 */
	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	qib_7220_set_intr_state(dd, 1);
}

/**
 * qib_7220_handle_hwerrors - display hardware errors.
 * @dd: the qlogic_ib device
 * @msg: the output buffer
 * @msgl: the size of the output buffer
 *
 * Most hardware errors are catastrophic, but for right now we'll print
 * them and continue.  We reuse the same message buffer as
 * handle_7220_errors() to avoid excessive stack usage.
 */
1294
static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1295
size_t msgl)
1296
{
1297
u64 hwerrs;
1298
u32 bits, ctrl;
1299
int isfatal = 0;
1300
char *bitsmsg;
1301
int log_idx;
1302
1303
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1304
if (!hwerrs)
1305
goto bail;
1306
if (hwerrs == ~0ULL) {
1307
qib_dev_err(dd, "Read of hardware error status failed "
1308
"(all bits set); ignoring\n");
1309
goto bail;
1310
}
1311
qib_stats.sps_hwerrs++;
1312
1313
/*
1314
* Always clear the error status register, except MEMBISTFAIL,
1315
* regardless of whether we continue or stop using the chip.
1316
* We want that set so we know it failed, even across driver reload.
1317
* We'll still ignore it in the hwerrmask. We do this partly for
1318
* diagnostics, but also for support.
1319
*/
1320
qib_write_kreg(dd, kr_hwerrclear,
1321
hwerrs & ~HWE_MASK(PowerOnBISTFailed));
1322
1323
hwerrs &= dd->cspec->hwerrmask;
1324
1325
/* We log some errors to EEPROM, check if we have any of those. */
1326
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1327
if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
1328
qib_inc_eeprom_err(dd, log_idx, 1);
1329
if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
1330
RXE_PARITY))
1331
qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
1332
"(cleared)\n", (unsigned long long) hwerrs);
1333
1334
if (hwerrs & ~IB_HWE_BITSEXTANT)
1335
qib_dev_err(dd, "hwerror interrupt with unknown errors "
1336
"%llx set\n", (unsigned long long)
1337
(hwerrs & ~IB_HWE_BITSEXTANT));
1338
1339
if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
1340
qib_sd7220_clr_ibpar(dd);
1341
1342
ctrl = qib_read_kreg32(dd, kr_control);
1343
if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
1344
/*
1345
* Parity errors in send memory are recoverable by h/w
1346
* just do housekeeping, exit freeze mode and continue.
1347
*/
1348
if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
1349
TXEMEMPARITYERR_PIOPBC)) {
1350
qib_7220_txe_recover(dd);
1351
hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
1352
TXEMEMPARITYERR_PIOPBC);
1353
}
1354
if (hwerrs)
1355
isfatal = 1;
1356
else
1357
qib_7220_clear_freeze(dd);
1358
}
1359
1360
*msg = '\0';
1361
1362
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
1363
isfatal = 1;
1364
strlcat(msg, "[Memory BIST test failed, "
1365
"InfiniPath hardware unusable]", msgl);
1366
/* ignore from now on, so disable until driver reloaded */
1367
dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
1368
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1369
}
1370
1371
qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
1372
ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
1373
1374
bitsmsg = dd->cspec->bitsmsgbuf;
1375
if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
1376
QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
1377
bits = (u32) ((hwerrs >>
1378
QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1379
QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1380
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1381
"[PCIe Mem Parity Errs %x] ", bits);
1382
strlcat(msg, bitsmsg, msgl);
1383
}
1384
1385
#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
1386
QLOGIC_IB_HWE_COREPLL_RFSLIP)
1387
1388
if (hwerrs & _QIB_PLL_FAIL) {
1389
isfatal = 1;
1390
snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1391
"[PLL failed (%llx), InfiniPath hardware unusable]",
1392
(unsigned long long) hwerrs & _QIB_PLL_FAIL);
1393
strlcat(msg, bitsmsg, msgl);
1394
/* ignore from now on, so disable until driver reloaded */
1395
dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
1396
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1397
}
1398
1399
if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
1400
/*
1401
* If it occurs, it is left masked since the eternal
1402
* interface is unused.
1403
*/
1404
dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
1405
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1406
}
1407
1408
qib_dev_err(dd, "%s hardware error\n", msg);
1409
1410
if (isfatal && !dd->diag_client) {
1411
qib_dev_err(dd, "Fatal Hardware Error, no longer"
1412
" usable, SN %.16s\n", dd->serial);
1413
/*
1414
* For /sys status file and user programs to print; if no
1415
* trailing brace is copied, we'll know it was truncated.
1416
*/
1417
if (dd->freezemsg)
1418
snprintf(dd->freezemsg, dd->freezelen,
1419
"{%s}", msg);
1420
qib_disable_after_error(dd);
1421
}
1422
bail:;
1423
}
1424
1425
/**
 * qib_7220_init_hwerrors - enable hardware errors
 * @dd: the qlogic_ib device
 *
 * Now that we have finished initializing everything that might reasonably
 * cause a hardware error, and cleared those error bits as they occur,
 * we can enable hardware errors in the mask (potentially enabling
 * freeze mode), and enable hardware errors as errors (along with
 * everything else) in errormask.
 */
static void qib_7220_init_hwerrors(struct qib_devdata *dd)
{
	u64 val;
	u64 extsval;

	extsval = qib_read_kreg64(dd, kr_extstatus);

	if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
			 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
		qib_dev_err(dd, "MemBIST did not complete!\n");
	if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
		qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");

	val = ~0ULL;    /* default to all hwerrors become interrupts, */

	val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
	dd->cspec->hwerrmask = val;

	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);

	/* clear all */
	qib_write_kreg(dd, kr_errclear, ~0ULL);
	/* enable errors that are masked, at least this first time. */
	qib_write_kreg(dd, kr_errmask, ~0ULL);
	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
	/* clear any interrupts up to this point (ints still not enabled) */
	qib_write_kreg(dd, kr_intclear, ~0ULL);
}

/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
 * on chips that are count-based, rather than trigger-based.  There is no
 * reference counting, but that's also fine, given the intended use.
 * Only chip-specific because it's all register accesses
 */
static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
{
	if (enable) {
		qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
		dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
	} else
		dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

/*
1482
* Formerly took parameter <which> in pre-shifted,
1483
* pre-merged form with LinkCmd and LinkInitCmd
1484
* together, and assuming the zero was NOP.
1485
*/
1486
static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1487
u16 linitcmd)
1488
{
1489
u64 mod_wd;
1490
struct qib_devdata *dd = ppd->dd;
1491
unsigned long flags;
1492
1493
if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1494
/*
1495
* If we are told to disable, note that so link-recovery
1496
* code does not attempt to bring us back up.
1497
*/
1498
spin_lock_irqsave(&ppd->lflags_lock, flags);
1499
ppd->lflags |= QIBL_IB_LINK_DISABLED;
1500
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1501
} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1502
/*
1503
* Any other linkinitcmd will lead to LINKDOWN and then
1504
* to INIT (if all is well), so clear flag to let
1505
* link-recovery code attempt to bring us back up.
1506
*/
1507
spin_lock_irqsave(&ppd->lflags_lock, flags);
1508
ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1509
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1510
}
1511
1512
mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
1513
(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1514
1515
qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
1516
/* write to chip to prevent back-to-back writes of ibc reg */
1517
qib_write_kreg(dd, kr_scratch, 0);
1518
}
1519
1520
/*
1521
* All detailed interaction with the SerDes has been moved to qib_sd7220.c
1522
*
1523
* The portion of IBA7220-specific bringup_serdes() that actually deals with
1524
* registers and memory within the SerDes itself is qib_sd7220_init().
1525
*/
1526
1527
/**
1528
* qib_7220_bringup_serdes - bring up the serdes
1529
* @ppd: physical port on the qlogic_ib device
1530
*/
1531
static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
1532
{
1533
struct qib_devdata *dd = ppd->dd;
1534
u64 val, prev_val, guid, ibc;
1535
int ret = 0;
1536
1537
/* Put IBC in reset, sends disabled */
1538
dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1539
qib_write_kreg(dd, kr_control, 0ULL);
1540
1541
if (qib_compat_ddr_negotiate) {
1542
ppd->cpspec->ibdeltainprog = 1;
1543
ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
1544
ppd->cpspec->iblnkerrsnap =
1545
read_7220_creg32(dd, cr_iblinkerrrecov);
1546
}
1547
1548
/* flowcontrolwatermark is in units of KBytes */
1549
ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1550
/*
1551
* How often flowctrl is sent. More or less in usecs; balance against
1552
* watermark value, so that in theory senders always get a flow
1553
* control update in time to not let the IB link go idle.
1554
*/
1555
ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1556
/* max error tolerance */
1557
ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
1558
/* use "real" buffer space for */
1559
ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1560
/* IB credit flow control. */
1561
ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1562
/*
1563
* set initial max size pkt IBC will send, including ICRC; it's the
1564
* PIO buffer size in dwords, less 1; also see qib_set_mtu()
1565
*/
1566
ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1567
ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1568
1569
/* initially come up waiting for TS1, without sending anything. */
1570
val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1571
QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1572
qib_write_kreg(dd, kr_ibcctrl, val);
1573
1574
if (!ppd->cpspec->ibcddrctrl) {
1575
/* not on re-init after reset */
1576
ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
1577
1578
if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
1579
ppd->cpspec->ibcddrctrl |=
1580
IBA7220_IBC_SPEED_AUTONEG_MASK |
1581
IBA7220_IBC_IBTA_1_2_MASK;
1582
else
1583
ppd->cpspec->ibcddrctrl |=
1584
ppd->link_speed_enabled == QIB_IB_DDR ?
1585
IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
1586
if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
1587
(IB_WIDTH_1X | IB_WIDTH_4X))
1588
ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
1589
else
1590
ppd->cpspec->ibcddrctrl |=
1591
ppd->link_width_enabled == IB_WIDTH_4X ?
1592
IBA7220_IBC_WIDTH_4X_ONLY :
1593
IBA7220_IBC_WIDTH_1X_ONLY;
1594
1595
/* always enable these on driver reload, not sticky */
1596
ppd->cpspec->ibcddrctrl |=
1597
IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
1598
ppd->cpspec->ibcddrctrl |=
1599
IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
1600
1601
/* enable automatic lane reversal detection for receive */
1602
ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
1603
} else
1604
/* write to chip to prevent back-to-back writes of ibc reg */
1605
qib_write_kreg(dd, kr_scratch, 0);
1606
1607
qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
1608
qib_write_kreg(dd, kr_scratch, 0);
1609
1610
qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
1611
qib_write_kreg(dd, kr_scratch, 0);
1612
1613
ret = qib_sd7220_init(dd);
1614
1615
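/*
 * Make sure FC_SAFE is set and the XGXS block is out of reset,
 * writing (and flushing via a scratch read) only when a bit
 * actually changes.
 */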
val = qib_read_kreg64(dd, kr_xgxs_cfg);
1616
prev_val = val;
1617
val |= QLOGIC_IB_XGXS_FC_SAFE;
1618
if (val != prev_val) {
1619
qib_write_kreg(dd, kr_xgxs_cfg, val);
1620
qib_read_kreg32(dd, kr_scratch);
1621
}
1622
if (val & QLOGIC_IB_XGXS_RESET)
1623
val &= ~QLOGIC_IB_XGXS_RESET;
1624
if (val != prev_val)
1625
qib_write_kreg(dd, kr_xgxs_cfg, val);
1626
1627
/* first time through, set port guid */
1628
if (!ppd->guid)
1629
ppd->guid = dd->base_guid;
1630
guid = be64_to_cpu(ppd->guid);
1631
1632
qib_write_kreg(dd, kr_hrtbt_guid, guid);
1633
if (!ret) {
1634
dd->control |= QLOGIC_IB_C_LINKENABLE;
1635
qib_write_kreg(dd, kr_control, dd->control);
1636
} else
1637
/* write to chip to prevent back-to-back writes of ibc reg */
1638
qib_write_kreg(dd, kr_scratch, 0);
1639
return ret;
1640
}
1641
1642
/**
1643
* qib_7220_quiet_serdes - set serdes to txidle
1644
* @ppd: physical port of the qlogic_ib device
1645
* Called when driver is being unloaded
1646
*/
1647
static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1648
{
1649
u64 val;
1650
struct qib_devdata *dd = ppd->dd;
1651
unsigned long flags;
1652
1653
/* disable IBC */
1654
dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1655
qib_write_kreg(dd, kr_control,
1656
dd->control | QLOGIC_IB_C_FREEZEMODE);
1657
1658
ppd->cpspec->chase_end = 0;
1659
if (ppd->cpspec->chase_timer.data) /* if initted */
1660
del_timer_sync(&ppd->cpspec->chase_timer);
1661
1662
if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
1663
ppd->cpspec->ibdeltainprog) {
1664
u64 diagc;
1665
1666
/* enable counter writes */
1667
diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1668
qib_write_kreg(dd, kr_hwdiagctrl,
1669
diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1670
1671
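/*
 * Restore the counter values the chip should report: if a delta
 * measurement is still in progress, roll back to the snapshot taken
 * at its start ("val -= val - snap" is simply "val = snap"), then
 * subtract any previously accumulated delta before writing back.
 */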
if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
1672
val = read_7220_creg32(dd, cr_ibsymbolerr);
1673
if (ppd->cpspec->ibdeltainprog)
1674
val -= val - ppd->cpspec->ibsymsnap;
1675
val -= ppd->cpspec->ibsymdelta;
1676
write_7220_creg(dd, cr_ibsymbolerr, val);
1677
}
1678
if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
1679
val = read_7220_creg32(dd, cr_iblinkerrrecov);
1680
if (ppd->cpspec->ibdeltainprog)
1681
val -= val - ppd->cpspec->iblnkerrsnap;
1682
val -= ppd->cpspec->iblnkerrdelta;
1683
write_7220_creg(dd, cr_iblinkerrrecov, val);
1684
}
1685
1686
/* and disable counter writes */
1687
qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1688
}
1689
qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1690
1691
spin_lock_irqsave(&ppd->lflags_lock, flags);
1692
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694
wake_up(&ppd->cpspec->autoneg_wait);
1695
cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
1696
1697
shutdown_7220_relock_poll(ppd->dd);
1698
val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
1699
val |= QLOGIC_IB_XGXS_RESET;
1700
qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
1701
}
1702
1703
/**
1704
* qib_setup_7220_setextled - set the state of the two external LEDs
1705
* @ppd: the qlogic_ib port
1706
* @on: whether the link is up or not
1707
*
1708
* The exact combo of LEDs if on is true is determined by looking
1709
* at the ibcstatus.
1710
*
1711
* These LEDs indicate the physical and logical state of IB link.
1712
* For this chip (at least with recommended board pinouts), LED1
1713
* is Yellow (logical state) and LED2 is Green (physical state).
1714
*
1715
* Note: We try to match the Mellanox HCA LED behavior as best
1716
* we can. Green indicates physical link state is OK (something is
1717
* plugged in, and we can train).
1718
* Amber indicates the link is logically up (ACTIVE).
1719
* Mellanox further blinks the amber LED to indicate data packet
1720
* activity, but we have no hardware support for that, so it would
1721
* require waking up every 10-20 msecs and checking the counters
1722
* on the chip, and then turning the LED off if appropriate. That's
1723
* visible overhead, so not something we will do.
1724
*
1725
*/
1726
static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
1727
{
1728
struct qib_devdata *dd = ppd->dd;
1729
u64 extctl, ledblink = 0, val, lst, ltst;
1730
unsigned long flags;
1731
1732
/*
1733
* The diags use the LED to indicate diag info, so we leave
1734
* the external LED alone when the diags are running.
1735
*/
1736
if (dd->diag_client)
1737
return;
1738
1739
if (ppd->led_override) {
1740
ltst = (ppd->led_override & QIB_LED_PHYS) ?
1741
IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1742
lst = (ppd->led_override & QIB_LED_LOG) ?
1743
IB_PORT_ACTIVE : IB_PORT_DOWN;
1744
} else if (on) {
1745
val = qib_read_kreg64(dd, kr_ibcstatus);
1746
ltst = qib_7220_phys_portstate(val);
1747
lst = qib_7220_iblink_state(val);
1748
} else {
1749
ltst = 0;
1750
lst = 0;
1751
}
1752
1753
spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1754
extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1755
SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1756
if (ltst == IB_PHYSPORTSTATE_LINKUP) {
1757
extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1758
/*
1759
* counts are in chip clock (4ns) periods.
1760
* This is about 1/15 sec (66.6 ms) on,
1761
* 3/16 sec (187.5 ms) off, with packets rcvd
1762
*/
1763
ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
1764
| ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
1765
}
1766
if (lst == IB_PORT_ACTIVE)
1767
extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1768
dd->cspec->extctrl = extctl;
1769
qib_write_kreg(dd, kr_extctrl, extctl);
1770
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1771
1772
if (ledblink) /* blink the LED on packet receive */
1773
qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
1774
}
1775
1776
static void qib_7220_free_irq(struct qib_devdata *dd)
1777
{
1778
if (dd->cspec->irq) {
1779
free_irq(dd->cspec->irq, dd);
1780
dd->cspec->irq = 0;
1781
}
1782
qib_nomsi(dd);
1783
}
1784
1785
/*
1786
* qib_setup_7220_cleanup - clean up any chip-specific stuff
1787
* @dd: the qlogic_ib device
1788
*
1789
* This is called during driver unload.
1790
*
1791
*/
1792
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
1793
{
1794
qib_7220_free_irq(dd);
1795
kfree(dd->cspec->cntrs);
1796
kfree(dd->cspec->portcntrs);
1797
}
1798
1799
/*
1800
* This is only called for SDmaInt.
1801
* SDmaDisabled is handled on the error path.
1802
*/
1803
static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
1804
{
1805
unsigned long flags;
1806
1807
spin_lock_irqsave(&ppd->sdma_lock, flags);
1808
1809
switch (ppd->sdma_state.current_state) {
1810
case qib_sdma_state_s00_hw_down:
1811
break;
1812
1813
case qib_sdma_state_s10_hw_start_up_wait:
1814
__qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
1815
break;
1816
1817
case qib_sdma_state_s20_idle:
1818
break;
1819
1820
case qib_sdma_state_s30_sw_clean_up_wait:
1821
break;
1822
1823
case qib_sdma_state_s40_hw_clean_up_wait:
1824
break;
1825
1826
case qib_sdma_state_s50_hw_halt_wait:
1827
__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1828
break;
1829
1830
case qib_sdma_state_s99_running:
1831
/* too chatty to print here */
1832
__qib_sdma_intr(ppd);
1833
break;
1834
}
1835
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1836
}
1837
1838
static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
1839
{
1840
unsigned long flags;
1841
1842
spin_lock_irqsave(&dd->sendctrl_lock, flags);
1843
if (needint) {
1844
if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
1845
goto done;
1846
/*
1847
* blip the availupd off, next write will be on, so
1848
* we ensure an avail update, regardless of threshold or
1849
* buffers becoming free, whenever we want an interrupt
1850
*/
1851
qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
1852
~SYM_MASK(SendCtrl, SendBufAvailUpd));
1853
qib_write_kreg(dd, kr_scratch, 0ULL);
1854
dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
1855
} else
1856
dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
1857
qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1858
qib_write_kreg(dd, kr_scratch, 0ULL);
1859
done:
1860
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1861
}
1862
1863
/*
1864
* Handle errors and unusual events first, separate function
1865
* to improve cache hits for fast path interrupt handling.
1866
*/
1867
static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
1868
{
1869
if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1870
qib_dev_err(dd,
1871
"interrupt with unknown interrupts %Lx set\n",
1872
istat & ~QLOGIC_IB_I_BITSEXTANT);
1873
1874
if (istat & QLOGIC_IB_I_GPIO) {
1875
u32 gpiostatus;
1876
1877
/*
1878
* Boards for this chip currently don't use GPIO interrupts,
1879
* so clear by writing GPIOstatus to GPIOclear, and complain
1880
* to alert the developer. To avoid endless repeats, clear
1881
* the bits in the mask, since there is some kind of
1882
* programming error or chip problem.
1883
*/
1884
gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1885
/*
1886
* In theory, writing GPIOstatus to GPIOclear could
1887
* have a bad side-effect on some diagnostic that wanted
1888
* to poll for a status-change, but the various shadows
1889
* make that problematic at best. Diags will just suppress
1890
* all GPIO interrupts during such tests.
1891
*/
1892
qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
1893
1894
if (gpiostatus) {
1895
const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1896
u32 gpio_irq = mask & gpiostatus;
1897
1898
/*
1899
* A bit set in status and (chip) Mask register
1900
* would cause an interrupt. Since we are not
1901
* expecting any, report it. Also check that the
1902
* chip reflects our shadow, report issues,
1903
* and refresh from the shadow.
1904
*/
1905
/*
1906
* Clear any troublemakers, and update chip
1907
* from shadow
1908
*/
1909
dd->cspec->gpio_mask &= ~gpio_irq;
1910
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1911
}
1912
}
1913
1914
if (istat & QLOGIC_IB_I_ERROR) {
1915
u64 estat;
1916
1917
qib_stats.sps_errints++;
1918
estat = qib_read_kreg64(dd, kr_errstatus);
1919
if (!estat)
1920
qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
1921
"but no error bits set!\n", istat);
1922
else
1923
handle_7220_errors(dd, estat);
1924
}
1925
}
1926
1927
static irqreturn_t qib_7220intr(int irq, void *data)
1928
{
1929
struct qib_devdata *dd = data;
1930
irqreturn_t ret;
1931
u64 istat;
1932
u64 ctxtrbits;
1933
u64 rmask;
1934
unsigned i;
1935
1936
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1937
/*
1938
* This return value is not great, but we do not want the
1939
* interrupt core code to remove our interrupt handler
1940
* because we don't appear to be handling an interrupt
1941
* during a chip reset.
1942
*/
1943
ret = IRQ_HANDLED;
1944
goto bail;
1945
}
1946
1947
istat = qib_read_kreg64(dd, kr_intstatus);
1948
1949
if (unlikely(!istat)) {
1950
ret = IRQ_NONE; /* not our interrupt, or already handled */
1951
goto bail;
1952
}
1953
if (unlikely(istat == -1)) {
1954
qib_bad_intrstatus(dd);
1955
/* don't know if it was our interrupt or not */
1956
ret = IRQ_NONE;
1957
goto bail;
1958
}
1959
1960
qib_stats.sps_ints++;
1961
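/* saturate rather than wrap; reset code zeroes this to re-check */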
if (dd->int_counter != (u32) -1)
1962
dd->int_counter++;
1963
1964
if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1965
QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1966
unlikely_7220_intr(dd, istat);
1967
1968
/*
1969
* Clear the interrupt bits we found set, relatively early, so we
1970
* "know" know the chip will have seen this by the time we process
1971
* the queue, and will re-interrupt if necessary. The processor
1972
* itself won't take the interrupt again until we return.
1973
*/
1974
qib_write_kreg(dd, kr_intclear, istat);
1975
1976
/*
1977
* Handle kernel receive queues before checking for pio buffers
1978
* available since receives can overflow; piobuf waiters can afford
1979
* a few extra cycles, since they were waiting anyway.
1980
*/
1981
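/*
 * RcvAvail and RcvUrg are per-context bit ranges within istat; the
 * loop below tests one (avail, urg) bit pair per kernel context,
 * shifting the pair up one bit each pass. Any bits left over belong
 * to user contexts and are handed to qib_handle_urcv().
 */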
ctxtrbits = istat &
1982
((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1983
(QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1984
if (ctxtrbits) {
1985
rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1986
(1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
1987
for (i = 0; i < dd->first_user_ctxt; i++) {
1988
if (ctxtrbits & rmask) {
1989
ctxtrbits &= ~rmask;
1990
qib_kreceive(dd->rcd[i], NULL, NULL);
1991
}
1992
rmask <<= 1;
1993
}
1994
if (ctxtrbits) {
1995
ctxtrbits =
1996
(ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1997
(ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
1998
qib_handle_urcv(dd, ctxtrbits);
1999
}
2000
}
2001
2002
/* only call for SDmaInt */
2003
if (istat & QLOGIC_IB_I_SDMAINT)
2004
sdma_7220_intr(dd->pport, istat);
2005
2006
if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2007
qib_ib_piobufavail(dd);
2008
2009
ret = IRQ_HANDLED;
2010
bail:
2011
return ret;
2012
}
2013
2014
/*
2015
* Set up our chip-specific interrupt handler.
2016
* The interrupt type has already been set up, so
2017
* we just need to do the registration and error checking.
2018
* If we are using MSI interrupts, we may fall back to
2019
* INTx later, if the interrupt handler doesn't get called
2020
* within 1/2 second (see verify_interrupt()).
2021
*/
2022
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
2023
{
2024
if (!dd->cspec->irq)
2025
qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
2026
"work\n");
2027
else {
2028
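/* MSI is not shared (no IRQF_SHARED); legacy INTx may be */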
int ret = request_irq(dd->cspec->irq, qib_7220intr,
2029
dd->msi_lo ? 0 : IRQF_SHARED,
2030
QIB_DRV_NAME, dd);
2031
2032
if (ret)
2033
qib_dev_err(dd, "Couldn't setup %s interrupt "
2034
"(irq=%d): %d\n", dd->msi_lo ?
2035
"MSI" : "INTx", dd->cspec->irq, ret);
2036
}
2037
}
2038
2039
/**
2040
* qib_7220_boardname - fill in the board name
2041
* @dd: the qlogic_ib device
2042
*
2043
* info is based on the board revision register
2044
*/
2045
static void qib_7220_boardname(struct qib_devdata *dd)
2046
{
2047
char *n;
2048
u32 boardid, namelen;
2049
2050
boardid = SYM_FIELD(dd->revision, Revision,
2051
BoardID);
2052
2053
switch (boardid) {
2054
case 1:
2055
n = "InfiniPath_QLE7240";
2056
break;
2057
case 2:
2058
n = "InfiniPath_QLE7280";
2059
break;
2060
default:
2061
qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
2062
n = "Unknown_InfiniPath_7220";
2063
break;
2064
}
2065
2066
namelen = strlen(n) + 1;
2067
dd->boardname = kmalloc(namelen, GFP_KERNEL);
2068
if (!dd->boardname)
2069
qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
2070
else
2071
snprintf(dd->boardname, namelen, "%s", n);
2072
2073
if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
2074
qib_dev_err(dd, "Unsupported InfiniPath hardware "
2075
"revision %u.%u!\n",
2076
dd->majrev, dd->minrev);
2077
2078
snprintf(dd->boardversion, sizeof(dd->boardversion),
2079
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
2080
QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
2081
(unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
2082
dd->majrev, dd->minrev,
2083
(unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
2084
}
2085
2086
/*
2087
* This routine sleeps, so it can only be called from user context, not
2088
* from interrupt context.
2089
*/
2090
static int qib_setup_7220_reset(struct qib_devdata *dd)
2091
{
2092
u64 val;
2093
int i;
2094
int ret;
2095
u16 cmdval;
2096
u8 int_line, clinesz;
2097
unsigned long flags;
2098
2099
qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
2100
2101
/* Use dev_err so it shows up in logs, etc. */
2102
qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
2103
2104
/* no interrupts till re-initted */
2105
qib_7220_set_intr_state(dd, 0);
2106
2107
dd->pport->cpspec->ibdeltainprog = 0;
2108
dd->pport->cpspec->ibsymdelta = 0;
2109
dd->pport->cpspec->iblnkerrdelta = 0;
2110
2111
/*
2112
* Keep chip from being accessed until we are ready. Use
2113
* writeq() directly, to allow the write even though QIB_PRESENT
2114
* isn't set.
2115
*/
2116
dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
2117
dd->int_counter = 0; /* so we check interrupts work again */
2118
val = dd->control | QLOGIC_IB_C_RESET;
2119
writeq(val, &dd->kregbase[kr_control]);
2120
mb(); /* prevent compiler reordering around actual reset */
2121
2122
for (i = 1; i <= 5; i++) {
2123
/*
2124
* Allow MBIST, etc. to complete; longer on each retry.
2125
* We sometimes get machine checks from bus timeout if no
2126
* response, so for now, make it *really* long.
2127
*/
2128
msleep(1000 + (1 + i) * 2000);
2129
2130
qib_pcie_reenable(dd, cmdval, int_line, clinesz);
2131
2132
/*
2133
* Use readq directly, so we don't need to mark it as PRESENT
2134
* until we get a successful indication that all is well.
2135
*/
2136
val = readq(&dd->kregbase[kr_revision]);
2137
if (val == dd->revision) {
2138
dd->flags |= QIB_PRESENT; /* it's back */
2139
ret = qib_reinit_intr(dd);
2140
goto bail;
2141
}
2142
}
2143
ret = 0; /* failed */
2144
2145
bail:
2146
if (ret) {
2147
if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
2148
qib_dev_err(dd, "Reset failed to setup PCIe or "
2149
"interrupts; continuing anyway\n");
2150
2151
/* hold IBC in reset, no sends, etc till later */
2152
qib_write_kreg(dd, kr_control, 0ULL);
2153
2154
/* clear the reset error, init error/hwerror mask */
2155
qib_7220_init_hwerrors(dd);
2156
2157
/* do setup similar to speed or link-width changes */
2158
if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
2159
dd->cspec->presets_needed = 1;
2160
spin_lock_irqsave(&dd->pport->lflags_lock, flags);
2161
dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
2162
dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2163
spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
2164
}
2165
2166
return ret;
2167
}
2168
2169
/**
2170
* qib_7220_put_tid - write a TID to the chip
2171
* @dd: the qlogic_ib device
2172
* @tidptr: pointer to the expected TID (in chip) to update
2173
* @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
2174
* @pa: physical address of in memory buffer; tidinvalid if freeing
2175
*/
2176
static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
2177
u32 type, unsigned long pa)
2178
{
2179
if (pa != dd->tidinvalid) {
2180
u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
2181
2182
/* paranoia checks */
2183
if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
2184
qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
2185
pa);
2186
return;
2187
}
2188
if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
2189
qib_dev_err(dd, "Physical page address 0x%lx "
2190
"larger than supported\n", pa);
2191
return;
2192
}
2193
2194
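/*
 * chippa now holds the 2KB-aligned physical address, pre-shifted;
 * OR in the buffer-size bits the chip expects: tidtemplate encodes
 * the eager buffer size, expected TIDs always use full 4KB pages.
 */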
if (type == RCVHQ_RCV_TYPE_EAGER)
2195
chippa |= dd->tidtemplate;
2196
else /* for now, always full 4KB page */
2197
chippa |= IBA7220_TID_SZ_4K;
2198
pa = chippa;
2199
}
2200
writeq(pa, tidptr);
2201
mmiowb();
2202
}
2203
2204
/**
2205
* qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2206
* @dd: the qlogic_ib device
2207
* @rcd: the context data
2208
*
2209
* clear all TID entries for a ctxt, expected and eager.
2210
* Used from qib_close(). On this chip, TIDs are only 32 bits,
2211
* not 64, but they are still on 64 bit boundaries, so tidbase
2212
* is declared as u64 * for the pointer math, even though we write 32 bits
2213
*/
2214
static void qib_7220_clear_tids(struct qib_devdata *dd,
2215
struct qib_ctxtdata *rcd)
2216
{
2217
u64 __iomem *tidbase;
2218
unsigned long tidinv;
2219
u32 ctxt;
2220
int i;
2221
2222
if (!dd->kregbase || !rcd)
2223
return;
2224
2225
ctxt = rcd->ctxt;
2226
2227
tidinv = dd->tidinvalid;
2228
tidbase = (u64 __iomem *)
2229
((char __iomem *)(dd->kregbase) +
2230
dd->rcvtidbase +
2231
ctxt * dd->rcvtidcnt * sizeof(*tidbase));
2232
2233
for (i = 0; i < dd->rcvtidcnt; i++)
2234
qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
2235
tidinv);
2236
2237
tidbase = (u64 __iomem *)
2238
((char __iomem *)(dd->kregbase) +
2239
dd->rcvegrbase +
2240
rcd->rcvegr_tid_base * sizeof(*tidbase));
2241
2242
for (i = 0; i < rcd->rcvegrcnt; i++)
2243
qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2244
tidinv);
2245
}
2246
2247
/**
2248
* qib_7220_tidtemplate - setup constants for TID updates
2249
* @dd: the qlogic_ib device
2250
*
2251
* We set up stuff that we use a lot, to avoid recalculating each time
2252
*/
2253
static void qib_7220_tidtemplate(struct qib_devdata *dd)
2254
{
2255
if (dd->rcvegrbufsize == 2048)
2256
dd->tidtemplate = IBA7220_TID_SZ_2K;
2257
else if (dd->rcvegrbufsize == 4096)
2258
dd->tidtemplate = IBA7220_TID_SZ_4K;
2259
dd->tidinvalid = 0;
2260
}
2261
2262
/**
2263
* qib_init_7220_get_base_info - set chip-specific flags for user code
2264
* @rcd: the qlogic_ib ctxt
2265
* @kbase: qib_base_info pointer
2266
*
2267
* We set the PCIE flag because the lower bandwidth on PCIe vs
2268
* HyperTransport can affect some user packet algorithms.
2269
*/
2270
static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
2271
struct qib_base_info *kinfo)
2272
{
2273
kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2274
QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
2275
2276
if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
2277
kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
2278
2279
return 0;
2280
}
2281
2282
static struct qib_message_header *
2283
qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2284
{
2285
u32 offset = qib_hdrget_offset(rhf_addr);
2286
2287
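/*
 * rhf_addr points at the RHF within the rcvhdrq entry; back up to
 * the start of the entry, then advance by the header offset the
 * RHF supplies.
 */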
return (struct qib_message_header *)
2288
(rhf_addr - dd->rhf_offset + offset);
2289
}
2290
2291
static void qib_7220_config_ctxts(struct qib_devdata *dd)
2292
{
2293
unsigned long flags;
2294
u32 nchipctxts;
2295
2296
nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2297
dd->cspec->numctxts = nchipctxts;
2298
if (qib_n_krcv_queues > 1) {
2299
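/*
 * Spread QPs across the kernel receive contexts by low-order QPN
 * bits; 0x3e selects QPN bits 1-5 for the mapping (enabled via
 * RcvQPMapEnable below).
 */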
dd->qpn_mask = 0x3e;
2300
dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2301
if (dd->first_user_ctxt > nchipctxts)
2302
dd->first_user_ctxt = nchipctxts;
2303
} else
2304
dd->first_user_ctxt = dd->num_pports;
2305
dd->n_krcv_queues = dd->first_user_ctxt;
2306
2307
if (!qib_cfgctxts) {
2308
int nctxts = dd->first_user_ctxt + num_online_cpus();
2309
2310
if (nctxts <= 5)
2311
dd->ctxtcnt = 5;
2312
else if (nctxts <= 9)
2313
dd->ctxtcnt = 9;
2314
else if (nctxts <= nchipctxts)
2315
dd->ctxtcnt = nchipctxts;
2316
} else if (qib_cfgctxts <= nchipctxts)
2317
dd->ctxtcnt = qib_cfgctxts;
2318
if (!dd->ctxtcnt) /* none of the above, set to max */
2319
dd->ctxtcnt = nchipctxts;
2320
2321
/*
2322
* Chip can be configured for 5, 9, or 17 ctxts, and choice
2323
* affects number of eager TIDs per ctxt (1K, 2K, 4K).
2324
* Lock to be paranoid about later motion, etc.
2325
*/
2326
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2327
if (dd->ctxtcnt > 9)
2328
dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
2329
else if (dd->ctxtcnt > 5)
2330
dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
2331
/* else configure for default 5 receive ctxts */
2332
if (dd->qpn_mask)
2333
dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
2334
qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2335
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2336
2337
/* kr_rcvegrcnt changes based on the number of contexts enabled */
2338
dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
2339
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
2340
}
2341
2342
static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
2343
{
2344
int lsb, ret = 0;
2345
u64 maskr; /* right-justified mask */
2346
2347
switch (which) {
2348
case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2349
ret = ppd->link_width_enabled;
2350
goto done;
2351
2352
case QIB_IB_CFG_LWID: /* Get currently active Link-width */
2353
ret = ppd->link_width_active;
2354
goto done;
2355
2356
case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2357
ret = ppd->link_speed_enabled;
2358
goto done;
2359
2360
case QIB_IB_CFG_SPD: /* Get current Link spd */
2361
ret = ppd->link_speed_active;
2362
goto done;
2363
2364
case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2365
lsb = IBA7220_IBC_RXPOL_SHIFT;
2366
maskr = IBA7220_IBC_RXPOL_MASK;
2367
break;
2368
2369
case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2370
lsb = IBA7220_IBC_LREV_SHIFT;
2371
maskr = IBA7220_IBC_LREV_MASK;
2372
break;
2373
2374
case QIB_IB_CFG_LINKLATENCY:
2375
ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
2376
& IBA7220_DDRSTAT_LINKLAT_MASK;
2377
goto done;
2378
2379
case QIB_IB_CFG_OP_VLS:
2380
ret = ppd->vls_operational;
2381
goto done;
2382
2383
case QIB_IB_CFG_VL_HIGH_CAP:
2384
ret = 0;
2385
goto done;
2386
2387
case QIB_IB_CFG_VL_LOW_CAP:
2388
ret = 0;
2389
goto done;
2390
2391
case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2392
ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2393
OverrunThreshold);
2394
goto done;
2395
2396
case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2397
ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2398
PhyerrThreshold);
2399
goto done;
2400
2401
case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2402
/* will only take effect when the link state changes */
2403
ret = (ppd->cpspec->ibcctrl &
2404
SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2405
IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2406
goto done;
2407
2408
case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2409
lsb = IBA7220_IBC_HRTBT_SHIFT;
2410
maskr = IBA7220_IBC_HRTBT_MASK;
2411
break;
2412
2413
case QIB_IB_CFG_PMA_TICKS:
2414
/*
2415
* 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
2416
* Since the clock is always 250MHz, the value is 1 or 0.
2417
*/
2418
ret = (ppd->link_speed_active == QIB_IB_DDR);
2419
goto done;
2420
2421
default:
2422
ret = -EINVAL;
2423
goto done;
2424
}
2425
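/*
 * Cases that broke out of the switch (rather than goto done) have
 * set lsb and maskr; extract the right-justified field from the
 * ibcddrctrl shadow.
 */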
ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
2426
done:
2427
return ret;
2428
}
2429
2430
static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2431
{
2432
struct qib_devdata *dd = ppd->dd;
2433
u64 maskr; /* right-justified mask */
2434
int lsb, ret = 0, setforce = 0;
2435
u16 lcmd, licmd;
2436
unsigned long flags;
2437
2438
switch (which) {
2439
case QIB_IB_CFG_LIDLMC:
2440
/*
2441
* Set LID and LMC. Combined to avoid possible hazard
2442
* caller puts LMC in 16MSbits, DLID in 16LSbits of val
2443
*/
2444
lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2445
maskr = IBA7220_IBC_DLIDLMC_MASK;
2446
break;
2447
2448
case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
2449
/*
2450
* As with speed, only write the actual register if
2451
* the link is currently down, otherwise takes effect
2452
* on next link change.
2453
*/
2454
ppd->link_width_enabled = val;
2455
if (!(ppd->lflags & QIBL_LINKDOWN))
2456
goto bail;
2457
/*
2458
* We set the QIBL_IB_FORCE_NOTIFY bit so updown
2459
* will get called because we want to update
2460
* link_width_active, and the change may not take
2461
* effect for some time (if we are in POLL), so this
2462
* flag will force the updown routine to be called
2463
* on the next ibstatuschange down interrupt, even
2464
* if it's not an down->up transition.
2465
*/
2466
val--; /* convert from IB to chip */
2467
maskr = IBA7220_IBC_WIDTH_MASK;
2468
lsb = IBA7220_IBC_WIDTH_SHIFT;
2469
setforce = 1;
2470
spin_lock_irqsave(&ppd->lflags_lock, flags);
2471
ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2472
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2473
break;
2474
2475
case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2476
/*
2477
* If we turn off IB1.2, need to preset SerDes defaults,
2478
* but not right now. Set a flag for the next time
2479
* we command the link down. As with width, only write the
2480
* actual register if the link is currently down, otherwise
2481
* takes effect on next link change. Since setting is being
2482
* explicitly requested (via MAD or sysfs), clear autoneg
2483
* failure status if speed autoneg is enabled.
2484
*/
2485
ppd->link_speed_enabled = val;
2486
if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
2487
!(val & (val - 1)))
2488
dd->cspec->presets_needed = 1;
2489
if (!(ppd->lflags & QIBL_LINKDOWN))
2490
goto bail;
2491
/*
2492
* We set the QIBL_IB_FORCE_NOTIFY bit so updown
2493
* will get called because we want to update
2494
* link_speed_active, and the change may not take
2495
* effect for some time (if we are in POLL), so this
2496
* flag will force the updown routine to be called
2497
* on the next ibstatuschange down interrupt, even
2498
* if it's not an down->up transition.
2499
*/
2500
if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
2501
val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2502
IBA7220_IBC_IBTA_1_2_MASK;
2503
spin_lock_irqsave(&ppd->lflags_lock, flags);
2504
ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2505
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2506
} else
2507
val = val == QIB_IB_DDR ?
2508
IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2509
maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2510
IBA7220_IBC_IBTA_1_2_MASK;
2511
/* IBTA 1.2 mode + speed bits are contiguous */
2512
lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
2513
setforce = 1;
2514
break;
2515
2516
case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2517
lsb = IBA7220_IBC_RXPOL_SHIFT;
2518
maskr = IBA7220_IBC_RXPOL_MASK;
2519
break;
2520
2521
case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2522
lsb = IBA7220_IBC_LREV_SHIFT;
2523
maskr = IBA7220_IBC_LREV_MASK;
2524
break;
2525
2526
case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2527
maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2528
OverrunThreshold);
2529
if (maskr != val) {
2530
ppd->cpspec->ibcctrl &=
2531
~SYM_MASK(IBCCtrl, OverrunThreshold);
2532
ppd->cpspec->ibcctrl |= (u64) val <<
2533
SYM_LSB(IBCCtrl, OverrunThreshold);
2534
qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2535
qib_write_kreg(dd, kr_scratch, 0);
2536
}
2537
goto bail;
2538
2539
case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2540
maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2541
PhyerrThreshold);
2542
if (maskr != val) {
2543
ppd->cpspec->ibcctrl &=
2544
~SYM_MASK(IBCCtrl, PhyerrThreshold);
2545
ppd->cpspec->ibcctrl |= (u64) val <<
2546
SYM_LSB(IBCCtrl, PhyerrThreshold);
2547
qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2548
qib_write_kreg(dd, kr_scratch, 0);
2549
}
2550
goto bail;
2551
2552
case QIB_IB_CFG_PKEYS: /* update pkeys */
2553
maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2554
((u64) ppd->pkeys[2] << 32) |
2555
((u64) ppd->pkeys[3] << 48);
2556
qib_write_kreg(dd, kr_partitionkey, maskr);
2557
goto bail;
2558
2559
case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2560
/* will only take effect when the link state changes */
2561
if (val == IB_LINKINITCMD_POLL)
2562
ppd->cpspec->ibcctrl &=
2563
~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2564
else /* SLEEP */
2565
ppd->cpspec->ibcctrl |=
2566
SYM_MASK(IBCCtrl, LinkDownDefaultState);
2567
qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2568
qib_write_kreg(dd, kr_scratch, 0);
2569
goto bail;
2570
2571
case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2572
/*
2573
* Update our housekeeping variables, and set IBC max
2574
* size, same as init code; max IBC is max we allow in
2575
* buffer, less the qword pbc, plus 1 for ICRC, in dwords
2576
* Set even if it's unchanged, print debug message only
2577
* on changes.
2578
*/
2579
val = (ppd->ibmaxlen >> 2) + 1;
2580
ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2581
ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
2582
qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2583
qib_write_kreg(dd, kr_scratch, 0);
2584
goto bail;
2585
2586
case QIB_IB_CFG_LSTATE: /* set the IB link state */
2587
switch (val & 0xffff0000) {
2588
case IB_LINKCMD_DOWN:
2589
lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2590
if (!ppd->cpspec->ibdeltainprog &&
2591
qib_compat_ddr_negotiate) {
2592
ppd->cpspec->ibdeltainprog = 1;
2593
ppd->cpspec->ibsymsnap =
2594
read_7220_creg32(dd, cr_ibsymbolerr);
2595
ppd->cpspec->iblnkerrsnap =
2596
read_7220_creg32(dd, cr_iblinkerrrecov);
2597
}
2598
break;
2599
2600
case IB_LINKCMD_ARMED:
2601
lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2602
break;
2603
2604
case IB_LINKCMD_ACTIVE:
2605
lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2606
break;
2607
2608
default:
2609
ret = -EINVAL;
2610
qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2611
goto bail;
2612
}
2613
switch (val & 0xffff) {
2614
case IB_LINKINITCMD_NOP:
2615
licmd = 0;
2616
break;
2617
2618
case IB_LINKINITCMD_POLL:
2619
licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2620
break;
2621
2622
case IB_LINKINITCMD_SLEEP:
2623
licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2624
break;
2625
2626
case IB_LINKINITCMD_DISABLE:
2627
licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2628
ppd->cpspec->chase_end = 0;
2629
/*
2630
* stop state chase counter and timer, if running.
2631
* wait for pending timer, but don't clear .data (ppd)!
2632
*/
2633
if (ppd->cpspec->chase_timer.expires) {
2634
del_timer_sync(&ppd->cpspec->chase_timer);
2635
ppd->cpspec->chase_timer.expires = 0;
2636
}
2637
break;
2638
2639
default:
2640
ret = -EINVAL;
2641
qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2642
val & 0xffff);
2643
goto bail;
2644
}
2645
qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2646
goto bail;
2647
2648
case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2649
if (val > IBA7220_IBC_HRTBT_MASK) {
2650
ret = -EINVAL;
2651
goto bail;
2652
}
2653
lsb = IBA7220_IBC_HRTBT_SHIFT;
2654
maskr = IBA7220_IBC_HRTBT_MASK;
2655
break;
2656
2657
default:
2658
ret = -EINVAL;
2659
goto bail;
2660
}
2661
ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2662
ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
2663
qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
2664
qib_write_kreg(dd, kr_scratch, 0);
2665
if (setforce) {
2666
spin_lock_irqsave(&ppd->lflags_lock, flags);
2667
ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2668
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2669
}
2670
bail:
2671
return ret;
2672
}
2673
2674
static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2675
{
2676
int ret = 0;
2677
u64 val, ddr;
2678
2679
if (!strncmp(what, "ibc", 3)) {
2680
ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2681
val = 0; /* disable heart beat, so link will come up */
2682
qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2683
ppd->dd->unit, ppd->port);
2684
} else if (!strncmp(what, "off", 3)) {
2685
ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2686
/* enable heart beat again */
2687
val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
2688
qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2689
"(normal)\n", ppd->dd->unit, ppd->port);
2690
} else
2691
ret = -EINVAL;
2692
if (!ret) {
2693
qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2694
ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
2695
<< IBA7220_IBC_HRTBT_SHIFT);
2696
ppd->cpspec->ibcddrctrl = ddr | val;
2697
qib_write_kreg(ppd->dd, kr_ibcddrctrl,
2698
ppd->cpspec->ibcddrctrl);
2699
qib_write_kreg(ppd->dd, kr_scratch, 0);
2700
}
2701
return ret;
2702
}
2703
2704
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2705
u32 updegr, u32 egrhd, u32 npkts)
2706
{
2707
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2708
if (updegr)
2709
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2710
}
2711
2712
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
2713
{
2714
u32 head, tail;
2715
2716
head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2717
if (rcd->rcvhdrtail_kvaddr)
2718
tail = qib_get_rcvhdrtail(rcd);
2719
else
2720
tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2721
return head == tail;
2722
}
2723
2724
/*
2725
* Modify the RCVCTRL register in a chip-specific way. This
2726
* is a function because bit positions and (future) register
2727
* location is chip-specific, but the needed operations are
2728
* generic. <op> is a bit-mask because we often want to
2729
* do multiple modifications.
2730
*/
2731
static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
2732
int ctxt)
2733
{
2734
struct qib_devdata *dd = ppd->dd;
2735
u64 mask, val;
2736
unsigned long flags;
2737
2738
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2739
if (op & QIB_RCVCTRL_TAILUPD_ENB)
2740
dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
2741
if (op & QIB_RCVCTRL_TAILUPD_DIS)
2742
dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
2743
if (op & QIB_RCVCTRL_PKEY_ENB)
2744
dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2745
if (op & QIB_RCVCTRL_PKEY_DIS)
2746
dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2747
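/* a negative ctxt means "apply to all configured contexts" */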
if (ctxt < 0)
2748
mask = (1ULL << dd->ctxtcnt) - 1;
2749
else
2750
mask = (1ULL << ctxt);
2751
if (op & QIB_RCVCTRL_CTXT_ENB) {
2752
/* always done for specific ctxt */
2753
dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2754
if (!(dd->flags & QIB_NODMA_RTAIL))
2755
dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
2756
/* Write these registers before the context is enabled. */
2757
qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2758
dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2759
qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2760
dd->rcd[ctxt]->rcvhdrq_phys);
2761
dd->rcd[ctxt]->seq_cnt = 1;
2762
}
2763
if (op & QIB_RCVCTRL_CTXT_DIS)
2764
dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2765
if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2766
dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
2767
if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2768
dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
2769
qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2770
if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2771
/* arm rcv interrupt */
2772
val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2773
dd->rhdrhead_intr_off;
2774
qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2775
}
2776
if (op & QIB_RCVCTRL_CTXT_ENB) {
2777
/*
2778
* Init the context registers also; if we were
2779
* disabled, tail and head should both be zero
2780
* already from the enable, but since we don't
2781
* know, we have to do it explicitly.
2782
*/
2783
val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2784
qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2785
2786
val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2787
dd->rcd[ctxt]->head = val;
2788
/* If kctxt, interrupt on next receive. */
2789
if (ctxt < dd->first_user_ctxt)
2790
val |= dd->rhdrhead_intr_off;
2791
qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2792
}
2793
if (op & QIB_RCVCTRL_CTXT_DIS) {
2794
if (ctxt >= 0) {
2795
qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
2796
qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
2797
} else {
2798
unsigned i;
2799
2800
for (i = 0; i < dd->cfgctxts; i++) {
2801
qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2802
i, 0);
2803
qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
2804
}
2805
}
2806
}
2807
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2808
}
2809
2810
/*
2811
* Modify the SENDCTRL register in a chip-specific way. This
2812
* is a function because there may be multiple such registers with
2813
* slightly different layouts. To start, we assume the
2814
* "canonical" register layout of the first chips.
2815
* Chip requires no back-to-back sendctrl writes, so write
2816
* scratch register after writing sendctrl
2817
*/
2818
static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
2819
{
2820
struct qib_devdata *dd = ppd->dd;
2821
u64 tmp_dd_sendctrl;
2822
unsigned long flags;
2823
2824
spin_lock_irqsave(&dd->sendctrl_lock, flags);
2825
2826
/* First the ones that are "sticky", saved in shadow */
2827
if (op & QIB_SENDCTRL_CLEAR)
2828
dd->sendctrl = 0;
2829
if (op & QIB_SENDCTRL_SEND_DIS)
2830
dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
2831
else if (op & QIB_SENDCTRL_SEND_ENB) {
2832
dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
2833
if (dd->flags & QIB_USE_SPCL_TRIG)
2834
dd->sendctrl |= SYM_MASK(SendCtrl,
2835
SSpecialTriggerEn);
2836
}
2837
if (op & QIB_SENDCTRL_AVAIL_DIS)
2838
dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2839
else if (op & QIB_SENDCTRL_AVAIL_ENB)
2840
dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
2841
2842
if (op & QIB_SENDCTRL_DISARM_ALL) {
2843
u32 i, last;
2844
2845
tmp_dd_sendctrl = dd->sendctrl;
2846
/*
2847
* disarm any that are not yet launched, disabling sends
2848
* and updates until done.
2849
*/
2850
last = dd->piobcnt2k + dd->piobcnt4k;
2851
tmp_dd_sendctrl &=
2852
~(SYM_MASK(SendCtrl, SPioEnable) |
2853
SYM_MASK(SendCtrl, SendBufAvailUpd));
2854
for (i = 0; i < last; i++) {
2855
qib_write_kreg(dd, kr_sendctrl,
2856
tmp_dd_sendctrl |
2857
SYM_MASK(SendCtrl, Disarm) | i);
2858
qib_write_kreg(dd, kr_scratch, 0);
2859
}
2860
}
2861
2862
tmp_dd_sendctrl = dd->sendctrl;
2863
2864
if (op & QIB_SENDCTRL_FLUSH)
2865
tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2866
if (op & QIB_SENDCTRL_DISARM)
2867
tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2868
((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
2869
SYM_LSB(SendCtrl, DisarmPIOBuf));
2870
if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
2871
(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
2872
tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2873
2874
qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2875
qib_write_kreg(dd, kr_scratch, 0);
2876
2877
if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2878
qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2879
qib_write_kreg(dd, kr_scratch, 0);
2880
}
2881
2882
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2883
2884
if (op & QIB_SENDCTRL_FLUSH) {
2885
u32 v;
2886
/*
2887
* ensure writes have hit chip, then do a few
2888
* more reads, to allow DMA of pioavail registers
2889
* to occur, so in-memory copy is in sync with
2890
* the chip. Not always safe to sleep.
2891
*/
2892
v = qib_read_kreg32(dd, kr_scratch);
2893
qib_write_kreg(dd, kr_scratch, v);
2894
v = qib_read_kreg32(dd, kr_scratch);
2895
qib_write_kreg(dd, kr_scratch, v);
2896
qib_read_kreg32(dd, kr_scratch);
2897
}
2898
}
2899
2900
/**
2901
* qib_portcntr_7220 - read a per-port counter
2902
* @ppd: the qlogic_ib port data
2903
* @reg: the counter to read
2904
*/
2905
static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
2906
{
2907
u64 ret = 0ULL;
2908
struct qib_devdata *dd = ppd->dd;
2909
u16 creg;
2910
/* 0xffff for unimplemented or synthesized counters */
2911
static const u16 xlator[] = {
2912
[QIBPORTCNTR_PKTSEND] = cr_pktsend,
2913
[QIBPORTCNTR_WORDSEND] = cr_wordsend,
2914
[QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
2915
[QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
2916
[QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
2917
[QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2918
[QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2919
[QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
2920
[QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
2921
[QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2922
[QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2923
[QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2924
[QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2925
[QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
2926
[QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
2927
[QIBPORTCNTR_ERRICRC] = cr_erricrc,
2928
[QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2929
[QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2930
[QIBPORTCNTR_BADFORMAT] = cr_badformat,
2931
[QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2932
[QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2933
[QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2934
[QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2935
[QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
2936
[QIBPORTCNTR_ERRLINK] = cr_errlink,
2937
[QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2938
[QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2939
[QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
2940
[QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
2941
[QIBPORTCNTR_PSSTART] = cr_psstart,
2942
[QIBPORTCNTR_PSSTAT] = cr_psstat,
2943
[QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
2944
[QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2945
[QIBPORTCNTR_KHDROVFL] = 0xffff,
2946
};
2947
2948
if (reg >= ARRAY_SIZE(xlator)) {
2949
qib_devinfo(ppd->dd->pcidev,
2950
"Unimplemented portcounter %u\n", reg);
2951
goto done;
2952
}
2953
creg = xlator[reg];
2954
2955
if (reg == QIBPORTCNTR_KHDROVFL) {
2956
int i;
2957
2958
/* sum over all kernel contexts */
2959
for (i = 0; i < dd->first_user_ctxt; i++)
2960
ret += read_7220_creg32(dd, cr_portovfl + i);
2961
}
2962
if (creg == 0xffff)
2963
goto done;
2964
2965
/*
2966
* only fast-incrementing counters are 64-bit; use 32-bit reads to
2967
* avoid two independent reads when on Opteron
2968
*/
2969
if ((creg == cr_wordsend || creg == cr_wordrcv ||
2970
creg == cr_pktsend || creg == cr_pktrcv))
2971
ret = read_7220_creg(dd, creg);
2972
else
2973
ret = read_7220_creg32(dd, creg);
2974
if (creg == cr_ibsymbolerr) {
2975
if (dd->pport->cpspec->ibdeltainprog)
2976
ret -= ret - ppd->cpspec->ibsymsnap;
2977
ret -= dd->pport->cpspec->ibsymdelta;
2978
} else if (creg == cr_iblinkerrrecov) {
2979
if (dd->pport->cpspec->ibdeltainprog)
2980
ret -= ret - ppd->cpspec->iblnkerrsnap;
2981
ret -= dd->pport->cpspec->iblnkerrdelta;
2982
}
2983
done:
2984
return ret;
2985
}
2986
2987
/*
2988
* Device counter names (not port-specific), one line per stat,
2989
* single string. Used by utilities like ipathstats to print the stats
2990
* in a way which works for different versions of drivers, without changing
2991
* the utility. Names need to be 12 chars or less (w/o newline), for proper
2992
* display by utility.
2993
* Non-error counters are first.
2994
* Start of "error" conters is indicated by a leading "E " on the first
2995
* "error" counter, and doesn't count in label length.
2996
* The EgrOvfl list needs to be last so we truncate them at the configured
2997
* context count for the device.
2998
* cntr7220indices contains the corresponding register indices.
2999
*/
3000
static const char cntr7220names[] =
3001
"Interrupts\n"
3002
"HostBusStall\n"
3003
"E RxTIDFull\n"
3004
"RxTIDInvalid\n"
3005
"Ctxt0EgrOvfl\n"
3006
"Ctxt1EgrOvfl\n"
3007
"Ctxt2EgrOvfl\n"
3008
"Ctxt3EgrOvfl\n"
3009
"Ctxt4EgrOvfl\n"
3010
"Ctxt5EgrOvfl\n"
3011
"Ctxt6EgrOvfl\n"
3012
"Ctxt7EgrOvfl\n"
3013
"Ctxt8EgrOvfl\n"
3014
"Ctxt9EgrOvfl\n"
3015
"Ctx10EgrOvfl\n"
3016
"Ctx11EgrOvfl\n"
3017
"Ctx12EgrOvfl\n"
3018
"Ctx13EgrOvfl\n"
3019
"Ctx14EgrOvfl\n"
3020
"Ctx15EgrOvfl\n"
3021
"Ctx16EgrOvfl\n";
3022
3023
static const size_t cntr7220indices[] = {
3024
cr_lbint,
3025
cr_lbflowstall,
3026
cr_errtidfull,
3027
cr_errtidvalid,
3028
cr_portovfl + 0,
3029
cr_portovfl + 1,
3030
cr_portovfl + 2,
3031
cr_portovfl + 3,
3032
cr_portovfl + 4,
3033
cr_portovfl + 5,
3034
cr_portovfl + 6,
3035
cr_portovfl + 7,
3036
cr_portovfl + 8,
3037
cr_portovfl + 9,
3038
cr_portovfl + 10,
3039
cr_portovfl + 11,
3040
cr_portovfl + 12,
3041
cr_portovfl + 13,
3042
cr_portovfl + 14,
3043
cr_portovfl + 15,
3044
cr_portovfl + 16,
3045
};
3046
3047
/*
3048
* same as cntr7220names and cntr7220indices, but for port-specific counters.
3049
* portcntr7220indices is somewhat complicated by some registers needing
3050
* adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
3051
*/
3052
static const char portcntr7220names[] =
3053
"TxPkt\n"
3054
"TxFlowPkt\n"
3055
"TxWords\n"
3056
"RxPkt\n"
3057
"RxFlowPkt\n"
3058
"RxWords\n"
3059
"TxFlowStall\n"
3060
"TxDmaDesc\n" /* 7220 and 7322-only */
3061
"E RxDlidFltr\n" /* 7220 and 7322-only */
3062
"IBStatusChng\n"
3063
"IBLinkDown\n"
3064
"IBLnkRecov\n"
3065
"IBRxLinkErr\n"
3066
"IBSymbolErr\n"
3067
"RxLLIErr\n"
3068
"RxBadFormat\n"
3069
"RxBadLen\n"
3070
"RxBufOvrfl\n"
3071
"RxEBP\n"
3072
"RxFlowCtlErr\n"
3073
"RxICRCerr\n"
3074
"RxLPCRCerr\n"
3075
"RxVCRCerr\n"
3076
"RxInvalLen\n"
3077
"RxInvalPKey\n"
3078
"RxPktDropped\n"
3079
"TxBadLength\n"
3080
"TxDropped\n"
3081
"TxInvalLen\n"
3082
"TxUnderrun\n"
3083
"TxUnsupVL\n"
3084
"RxLclPhyErr\n" /* 7220 and 7322-only */
3085
"RxVL15Drop\n" /* 7220 and 7322-only */
3086
"RxVlErr\n" /* 7220 and 7322-only */
3087
"XcessBufOvfl\n" /* 7220 and 7322-only */
3088
;
3089
3090
#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
3091
static const size_t portcntr7220indices[] = {
3092
QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
3093
cr_pktsendflow,
3094
QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
3095
QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
3096
cr_pktrcvflowctrl,
3097
QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
3098
QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
3099
cr_txsdmadesc,
3100
cr_rxdlidfltr,
3101
cr_ibstatuschange,
3102
QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
3103
QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
3104
QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
3105
QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
3106
QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
3107
QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
3108
QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
3109
QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
3110
QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
3111
cr_rcvflowctrl_err,
3112
QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
3113
QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
3114
QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
3115
QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
3116
QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
3117
QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
3118
cr_invalidslen,
3119
cr_senddropped,
3120
cr_errslen,
3121
cr_sendunderrun,
3122
cr_txunsupvl,
3123
QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
3124
QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
3125
QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
3126
QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
3127
};
3128
3129
/* do all the setup to make the counter reads efficient later */
3130
static void init_7220_cntrnames(struct qib_devdata *dd)
3131
{
3132
int i, j = 0;
3133
char *s;
3134
3135
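/*
 * Count names in the newline-separated string; once the first
 * per-context EgrOvfl name is seen, count only as many more as
 * there are configured contexts, truncating the rest of the list.
 */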
for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
3136
i++) {
3137
/* we always have at least one counter before the egrovfl */
3138
if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
3139
j = 1;
3140
s = strchr(s + 1, '\n');
3141
if (s && j)
3142
j++;
3143
}
3144
dd->cspec->ncntrs = i;
3145
if (!s)
3146
/* full list; size is without terminating null */
3147
dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
3148
else
3149
dd->cspec->cntrnamelen = 1 + s - cntr7220names;
3150
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
3151
* sizeof(u64), GFP_KERNEL);
3152
if (!dd->cspec->cntrs)
3153
qib_dev_err(dd, "Failed allocation for counters\n");
3154
3155
for (i = 0, s = (char *)portcntr7220names; s; i++)
3156
s = strchr(s + 1, '\n');
3157
dd->cspec->nportcntrs = i - 1;
3158
dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
3159
dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
3160
* sizeof(u64), GFP_KERNEL);
3161
if (!dd->cspec->portcntrs)
3162
qib_dev_err(dd, "Failed allocation for portcounters\n");
3163
}
3164
3165
static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
3166
u64 **cntrp)
3167
{
3168
u32 ret;
3169
3170
if (!dd->cspec->cntrs) {
3171
ret = 0;
3172
goto done;
3173
}
3174
3175
if (namep) {
3176
*namep = (char *)cntr7220names;
3177
ret = dd->cspec->cntrnamelen;
3178
if (pos >= ret)
3179
ret = 0; /* final read after getting everything */
3180
} else {
3181
u64 *cntr = dd->cspec->cntrs;
3182
int i;
3183
3184
ret = dd->cspec->ncntrs * sizeof(u64);
3185
if (!cntr || pos >= ret) {
3186
/* everything read, or couldn't get memory */
3187
ret = 0;
3188
goto done;
3189
}
3190
3191
*cntrp = cntr;
3192
for (i = 0; i < dd->cspec->ncntrs; i++)
3193
*cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
3194
}
3195
done:
3196
return ret;
3197
}
3198
3199
static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
3200
char **namep, u64 **cntrp)
3201
{
3202
u32 ret;
3203
3204
if (!dd->cspec->portcntrs) {
3205
ret = 0;
3206
goto done;
3207
}
3208
if (namep) {
3209
*namep = (char *)portcntr7220names;
3210
ret = dd->cspec->portcntrnamelen;
3211
if (pos >= ret)
3212
ret = 0; /* final read after getting everything */
3213
} else {
3214
u64 *cntr = dd->cspec->portcntrs;
3215
struct qib_pportdata *ppd = &dd->pport[port];
3216
int i;
3217
3218
ret = dd->cspec->nportcntrs * sizeof(u64);
3219
if (!cntr || pos >= ret) {
3220
/* everything read, or couldn't get memory */
3221
ret = 0;
3222
goto done;
3223
}
3224
*cntrp = cntr;
3225
for (i = 0; i < dd->cspec->nportcntrs; i++) {
3226
if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
3227
*cntr++ = qib_portcntr_7220(ppd,
3228
portcntr7220indices[i] &
3229
~_PORT_VIRT_FLAG);
3230
else
3231
*cntr++ = read_7220_creg32(dd,
3232
portcntr7220indices[i]);
3233
}
3234
}
3235
done:
3236
return ret;
3237
}
3238
3239
/**
3240
* qib_get_7220_faststats - get word counters from chip before they overflow
3241
* @opaque: contains a pointer to the qlogic_ib device qib_devdata
3242
*
3243
* This needs more work; in particular, decision on whether we really
3244
* need traffic_wds done the way it is.
3245
* Called from add_timer.
3246
*/
3247
static void qib_get_7220_faststats(unsigned long opaque)
3248
{
3249
struct qib_devdata *dd = (struct qib_devdata *) opaque;
3250
struct qib_pportdata *ppd = dd->pport;
3251
unsigned long flags;
3252
u64 traffic_wds;
3253
3254
/*
3255
* don't access the chip while running diags, or memory diags can
3256
* fail
3257
*/
3258
if (!(dd->flags & QIB_INITTED) || dd->diag_client)
3259
/* but re-arm the timer, for diags case; won't hurt other */
3260
goto done;
3261
3262
/*
3263
* We now try to maintain an activity timer, based on traffic
3264
* exceeding a threshold, so we need to check the word-counts
3265
* even if they are 64-bit.
3266
*/
3267
traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
3268
qib_portcntr_7220(ppd, cr_wordrcv);
3269
spin_lock_irqsave(&dd->eep_st_lock, flags);
3270
traffic_wds -= dd->traffic_wds;
3271
dd->traffic_wds += traffic_wds;
3272
if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3273
atomic_add(5, &dd->active_time); /* S/B #define */
3274
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3275
done:
3276
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
3277
}
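
/*
 * Worked example of the delta bookkeeping above (illustrative numbers):
 * if the summed send+recv word counters read 1,000,000 on the previous
 * tick and 1,600,000 now, traffic_wds becomes 600,000 after the
 * subtraction, dd->traffic_wds advances to the new running total, and if
 * that delta meets QIB_TRAFFIC_ACTIVE_THRESHOLD the port is credited
 * with 5 more seconds of active time.
 */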

/*
 * If we are using MSI, try to fall back to INTx.
 */
static int qib_7220_intr_fallback(struct qib_devdata *dd)
{
	if (!dd->msi_lo)
		return 0;

	qib_devinfo(dd->pcidev,
		"MSI interrupt not detected, trying INTx interrupts\n");
	qib_7220_free_irq(dd);
	qib_enable_intx(dd->pcidev);
	/*
	 * Some newer kernels require free_irq before disable_msi,
	 * and irq can be changed during disable and INTx enable
	 * and we need to therefore use the pcidev->irq value,
	 * not our saved MSI value.
	 */
	dd->cspec->irq = dd->pcidev->irq;
	qib_setup_7220_interrupt(dd);
	return 1;
}

/*
 * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
 * than resetting the IBC or external link state, and useful in some
 * cases to cause some retraining.  To do this right, we reset IBC
 * as well.
 */
static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
{
	u64 val, prev_val;
	struct qib_devdata *dd = ppd->dd;

	prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
	val = prev_val | QLOGIC_IB_XGXS_RESET;
	prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
	qib_write_kreg(dd, kr_control,
		       dd->control & ~QLOGIC_IB_C_LINKENABLE);
	qib_write_kreg(dd, kr_xgxs_cfg, val);
	qib_read_kreg32(dd, kr_scratch);
	qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
	qib_write_kreg(dd, kr_control, dd->control);
}

/*
 * For this chip, we want to use the same buffer every time
 * when we are trying to bring the link up (they are always VL15
 * packets).  At that link state the packet should always go out immediately
 * (or at least be discarded at the tx interface if the link is down).
 * If it doesn't, and the buffer isn't available, that means some other
 * sender has gotten ahead of us, and is preventing our packet from going
 * out.  In that case, we flush all packets, and try again.  If that still
 * fails, we fail the request, and hope things work the next time around.
 *
 * We don't need very complicated heuristics on whether the packet had
 * time to go out or not, since even at SDR 1X, it goes out in very short
 * time periods, covered by the chip reads done here and as part of the
 * flush.
 */
static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
{
	u32 __iomem *buf;
	u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
	int do_cleanup;
	unsigned long flags;

	/*
	 * always blip to get avail list updated, since it's almost
	 * always needed, and is fairly cheap.
	 */
	sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
	qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
	buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	if (buf)
		goto done;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
	    ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
		__qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
		do_cleanup = 0;
	} else {
		do_cleanup = 1;
		qib_7220_sdma_hw_clean_up(ppd);
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	if (do_cleanup) {
		qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
		buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
	}
done:
	return buf;
}

/*
 * This code for non-IBTA-compliant IB speed negotiation is only known to
 * work for the SDR to DDR transition, and only between an HCA and a switch
 * with recent firmware.  It is based on observed heuristics, rather than
 * actual knowledge of the non-compliant speed negotiation.
 * It has a number of hard-coded fields, since the hope is to rewrite this
 * when a spec is available on how the negotiation is intended to work.
 */
static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
				 u32 dcnt, u32 *data)
{
	int i;
	u64 pbc;
	u32 __iomem *piobuf;
	u32 pnum;
	struct qib_devdata *dd = ppd->dd;

	i = 0;
	pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
	pbc |= PBC_7220_VL15_SEND;
	while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
		if (i++ > 5)
			return;
		udelay(2);
	}
	sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
	writeq(pbc, piobuf);
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, hdr, 7);
	qib_pio_copy(piobuf + 9, data, dcnt);
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}
	qib_flush_wc();
	qib_sendbuf_done(dd, pnum);
}
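
/*
 * Worked example for the PBC length math above (illustrative only): with
 * dcnt = 0x40 payload dwords, pbc = 7 + 0x40 + 1 = 0x48 dwords total --
 * the 7-dword header, the payload, and one trailing ICRC dword -- before
 * PBC_7220_VL15_SEND is OR'd in to mark it as a VL15 send.
 */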

/*
 * _start packet gets sent twice at start, _done gets sent twice at end
 */
static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
{
	struct qib_devdata *dd = ppd->dd;
	static u32 swapped;
	u32 dw, i, hcnt, dcnt, *data;
	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
	static u32 madpayload_start[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
	};
	static u32 madpayload_done[0x40] = {
		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		0x40000001, 0x1388, 0x15e, /* rest 0's */
	};

	dcnt = ARRAY_SIZE(madpayload_start);
	hcnt = ARRAY_SIZE(hdr);
	if (!swapped) {
		/* for maintainability, do it at runtime */
		for (i = 0; i < hcnt; i++) {
			dw = (__force u32) cpu_to_be32(hdr[i]);
			hdr[i] = dw;
		}
		for (i = 0; i < dcnt; i++) {
			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
			madpayload_start[i] = dw;
			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
			madpayload_done[i] = dw;
		}
		swapped = 1;
	}

	data = which ? madpayload_done : madpayload_start;

	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
	autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
	qib_read_kreg64(dd, kr_scratch);
	udelay(2);
}
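
/*
 * Note on the one-time swap above: the header and MAD payloads are
 * declared in host order for readability and converted to wire
 * (big-endian) order on first use.  For example, on a little-endian host
 * cpu_to_be32(0x12345678) yields the byte-swapped 0x78563412, while on a
 * big-endian host it is a no-op; doing it at runtime keeps the tables
 * correct either way.
 */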

/*
 * Do the absolute minimum to cause an IB speed change, and make it
 * ready, but don't actually trigger the change.  The caller will
 * do that when ready (if link is in Polling training state, it will
 * happen immediately, otherwise when link next goes down).
 *
 * This routine should only be used as part of the DDR autonegotiation
 * code for devices that are not compliant with IB 1.2 (or code that
 * fixes things up for same).
 *
 * When the link has gone down and autoneg is enabled, or autoneg has
 * failed and we give up until next time, we set both speeds, and then
 * we want IBTA enabled as well as "use max enabled speed".
 */
static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
{
	ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
		IBA7220_IBC_IBTA_1_2_MASK);

	if (speed == (QIB_IB_SDR | QIB_IB_DDR))
		ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
			IBA7220_IBC_IBTA_1_2_MASK;
	else
		ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
			IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;

	qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
	qib_write_kreg(ppd->dd, kr_scratch, 0);
}

/*
 * This routine is only used when we are not talking to another
 * IB 1.2-compliant device that we think can do DDR.
 * (This includes all existing switch chips as of Oct 2007.)
 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
 */
static void try_7220_autoneg(struct qib_pportdata *ppd)
{
	unsigned long flags;

	/*
	 * Required for older non-IB1.2 DDR switches.  Newer
	 * non-IB-compliant switches don't need it, but so far,
	 * aren't bothered by it either.  "Magic constant".
	 */
	qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	autoneg_7220_send(ppd, 0);
	set_7220_ibspeed_fast(ppd, QIB_IB_DDR);

	toggle_7220_rclkrls(ppd->dd);
	/* 2 msec is minimum length of a poll cycle */
	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
			   msecs_to_jiffies(2));
}

/*
 * Handle the empirically determined mechanism for auto-negotiation
 * of DDR speed with switches.
 */
static void autoneg_7220_work(struct work_struct *work)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd;
	u64 startms;
	u32 i;
	unsigned long flags;

	ppd = &container_of(work, struct qib_chippport_specific,
			    autoneg_work.work)->pportdata;
	dd = ppd->dd;

	startms = jiffies_to_msecs(jiffies);

	/*
	 * Busy wait for this first part, it should be at most a
	 * few hundred usec, since we scheduled ourselves for 2msec.
	 */
	for (i = 0; i < 25; i++) {
		if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
		     == IB_7220_LT_STATE_POLLQUIET) {
			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
			break;
		}
		udelay(100);
	}

	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
		goto done; /* we got there early or told to stop */

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(90)))
		goto done;

	toggle_7220_rclkrls(dd);

	/* we expect this to timeout */
	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			       msecs_to_jiffies(1700)))
		goto done;

	set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
	toggle_7220_rclkrls(dd);

	/*
	 * Wait up to 250 msec for link to train and get to INIT at DDR;
	 * this should terminate early.
	 */
	wait_event_timeout(ppd->cpspec->autoneg_wait,
			   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
			   msecs_to_jiffies(250));
done:
	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
		if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
			dd->cspec->autoneg_tries = 0;
		}
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
	}
}

static u32 qib_7220_iblink_state(u64 ibcs)
{
	u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);

	switch (state) {
	case IB_7220_L_STATE_INIT:
		state = IB_PORT_INIT;
		break;
	case IB_7220_L_STATE_ARM:
		state = IB_PORT_ARMED;
		break;
	case IB_7220_L_STATE_ACTIVE:
		/* fall through */
	case IB_7220_L_STATE_ACT_DEFER:
		state = IB_PORT_ACTIVE;
		break;
	default: /* fall through */
	case IB_7220_L_STATE_DOWN:
		state = IB_PORT_DOWN;
		break;
	}
	return state;
}

/* returns the IBTA port state, rather than the IBC link training state */
static u8 qib_7220_phys_portstate(u64 ibcs)
{
	u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
	return qib_7220_physportstate[state];
}

static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
{
	int ret = 0, symadj = 0;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!ibup) {
		/*
		 * When the link goes down we don't want AEQ running, so it
		 * won't interfere with IBC training, etc., and we need
		 * to go back to the static SerDes preset values.
		 */
		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)))
			set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
			qib_sd7220_presets(dd);
			qib_cancel_sends(ppd); /* initial disarm, etc. */
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			if (__qib_sdma_running(ppd))
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e70_go_idle);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
		/* this might be better in qib_sd7220_presets() */
		set_7220_relock_poll(dd, ibup);
	} else {
		if (qib_compat_ddr_negotiate &&
		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
				     QIBL_IB_AUTONEG_INPROG)) &&
		    ppd->link_speed_active == QIB_IB_SDR &&
		    (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
		    (QIB_IB_DDR | QIB_IB_SDR) &&
		    dd->cspec->autoneg_tries < AUTONEG_TRIES) {
			/* we are SDR, and DDR auto-negotiation enabled */
			++dd->cspec->autoneg_tries;
			if (!ppd->cpspec->ibdeltainprog) {
				ppd->cpspec->ibdeltainprog = 1;
				ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
					cr_ibsymbolerr);
				ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
					cr_iblinkerrrecov);
			}
			try_7220_autoneg(ppd);
			ret = 1; /* no other IB status change processing */
		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			   ppd->link_speed_active == QIB_IB_SDR) {
			autoneg_7220_send(ppd, 1);
			set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
			udelay(2);
			toggle_7220_rclkrls(dd);
			ret = 1; /* no other IB status change processing */
		} else {
			if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
			    (ppd->link_speed_active & QIB_IB_DDR)) {
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
						 QIBL_IB_AUTONEG_FAILED);
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				dd->cspec->autoneg_tries = 0;
				/* re-enable SDR, for next link down */
				set_7220_ibspeed_fast(ppd,
						      ppd->link_speed_enabled);
				wake_up(&ppd->cpspec->autoneg_wait);
				symadj = 1;
			} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
				/*
				 * Clear autoneg failure flag, and do setup
				 * so we'll try next time link goes down and
				 * back to INIT (possibly connected to a
				 * different device).
				 */
				spin_lock_irqsave(&ppd->lflags_lock, flags);
				ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
				spin_unlock_irqrestore(&ppd->lflags_lock,
						       flags);
				ppd->cpspec->ibcddrctrl |=
					IBA7220_IBC_IBTA_1_2_MASK;
				qib_write_kreg(dd, kr_ncmodectrl, 0);
				symadj = 1;
			}
		}

		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
			symadj = 1;

		if (!ret) {
			ppd->delay_mult = rate_to_delay
			    [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
			    [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];

			set_7220_relock_poll(dd, ibup);
			spin_lock_irqsave(&ppd->sdma_lock, flags);
			/*
			 * Unlike 7322, the 7220 needs this, due to lack of
			 * interrupt in some cases when we have sdma active
			 * when the link goes down.
			 */
			if (ppd->sdma_state.current_state !=
			    qib_sdma_state_s20_idle)
				__qib_sdma_process_event(ppd,
					qib_sdma_event_e00_go_hw_down);
			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
		}
	}

	if (symadj) {
		if (ppd->cpspec->ibdeltainprog) {
			ppd->cpspec->ibdeltainprog = 0;
			ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
				cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
			ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
				cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
		}
	} else if (!ibup && qib_compat_ddr_negotiate &&
		   !ppd->cpspec->ibdeltainprog &&
		   !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
		ppd->cpspec->ibdeltainprog = 1;
		ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
			cr_ibsymbolerr);
		ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
			cr_iblinkerrrecov);
	}

	if (!ret)
		qib_setup_7220_setextled(ppd, ibup);
	return ret;
}

/*
 * Does read/modify/write to appropriate registers to
 * set output and direction bits selected by mask.
 * These are in their canonical positions (e.g. lsb of
 * dir will end up in D48 of extctrl on existing chips).
 * Returns contents of GP Inputs.
 */
static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
{
	u64 read_val, new_out;
	unsigned long flags;

	if (mask) {
		/* some bits being written, lock access to GPIO */
		dir &= mask;
		out &= mask;
		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
		new_out = (dd->cspec->gpio_out & ~mask) | out;

		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
		qib_write_kreg(dd, kr_gpio_out, new_out);
		dd->cspec->gpio_out = new_out;
		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
	}
	/*
	 * It is unlikely that a read at this time would get valid
	 * data on a pin whose direction line was set in the same
	 * call to this function.  We include the read here because
	 * that allows us to potentially combine a change on one pin with
	 * a read on another, and because the old code did something like
	 * this.
	 */
	read_val = qib_read_kreg64(dd, kr_extstatus);
	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
}
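
/*
 * Illustrative sketch (compiled out): driving and sampling a single GPIO
 * pin through gpio_7220_mod() above.  The pin number is an arbitrary
 * example; the call shapes follow from the out/dir/mask semantics.
 */
#if 0
static void example_gpio_usage(struct qib_devdata *dd)
{
	const u32 pin = 1 << 5;	/* hypothetical example pin */
	int gpio_in;

	/* make the pin an output and drive it high */
	gpio_7220_mod(dd, pin, pin, pin);
	/* mask of 0 changes nothing; just sample the GP inputs */
	gpio_in = gpio_7220_mod(dd, 0, 0, 0);
}
#endif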

/*
 * Read fundamental info we need to use the chip.  These are
 * the registers that describe chip capabilities, and are
 * saved in shadow registers.
 */
static void get_7220_chip_params(struct qib_devdata *dd)
{
	u64 val;
	u32 piobufs;
	int mtu;

	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);

	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
	dd->palign = qib_read_kreg32(dd, kr_palign);
	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;

	val = qib_read_kreg64(dd, kr_sendpiosize);
	dd->piosize2k = val & ~0U;
	dd->piosize4k = val >> 32;

	mtu = ib_mtu_enum_to_int(qib_ibmtu);
	if (mtu == -1)
		mtu = QIB_DEFAULT_MTU;
	dd->pport->ibmtu = (u32)mtu;

	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
	dd->piobcnt2k = val & ~0U;
	dd->piobcnt4k = val >> 32;
	/* these may be adjusted in init_chip_wc_pat() */
	dd->pio2kbase = (u32 __iomem *)
		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
	if (dd->piobcnt4k) {
		dd->pio4kbase = (u32 __iomem *)
			((char __iomem *) dd->kregbase +
			 (dd->piobufbase >> 32));
		/*
		 * 4K buffers take 2 pages; we use roundup just to be
		 * paranoid; we calculate it once here, rather than on
		 * every buf allocate.
		 */
		dd->align4k = ALIGN(dd->piosize4k, dd->palign);
	}

	piobufs = dd->piobcnt4k + dd->piobcnt2k;

	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
		(sizeof(u64) * BITS_PER_BYTE / 2);
}
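
/*
 * Worked example for the pioavregs math above (illustrative counts): each
 * 64-bit pioavail register shadows 32 buffers (2 bits per buffer), so
 * sizeof(u64) * BITS_PER_BYTE / 2 == 32.  With, say, 176 2K buffers and
 * 16 4K buffers, piobufs = 192 and pioavregs = ALIGN(192, 32) / 32 = 6.
 */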

/*
 * The chip base addresses in cspec and cpspec have to be set
 * after possible init_chip_wc_pat(), rather than in
 * get_7220_chip_params(), so split out as separate function.
 */
static void set_7220_baseaddrs(struct qib_devdata *dd)
{
	u32 cregbase;
	/* init after possible re-map in init_chip_wc_pat() */
	cregbase = qib_read_kreg32(dd, kr_counterregbase);
	dd->cspec->cregbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + cregbase);

	dd->egrtidbase = (u64 __iomem *)
		((char __iomem *) dd->kregbase + dd->rcvegrbase);
}

#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) |	\
			   SYM_MASK(SendCtrl, SPioEnable) |		\
			   SYM_MASK(SendCtrl, SSpecialTriggerEn) |	\
			   SYM_MASK(SendCtrl, SendBufAvailUpd) |	\
			   SYM_MASK(SendCtrl, AvailUpdThld) |		\
			   SYM_MASK(SendCtrl, SDmaEnable) |		\
			   SYM_MASK(SendCtrl, SDmaIntEnable) |		\
			   SYM_MASK(SendCtrl, SDmaHalt) |		\
			   SYM_MASK(SendCtrl, SDmaSingleDescriptor))

static int sendctrl_hook(struct qib_devdata *dd,
			 const struct diag_observer *op,
			 u32 offs, u64 *data, u64 mask, int only_32)
{
	unsigned long flags;
	unsigned idx = offs / sizeof(u64);
	u64 local_data, all_bits;

	if (idx != kr_sendctrl) {
		qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
			    offs, only_32 ? "32" : "64");
		return 0;
	}

	all_bits = ~0ULL;
	if (only_32)
		all_bits >>= 32;
	spin_lock_irqsave(&dd->sendctrl_lock, flags);
	if ((mask & all_bits) != all_bits) {
		/*
		 * At least some mask bits are zero, so we need
		 * to read.  The judgement call is whether from
		 * reg or shadow.  First-cut: read reg, and complain
		 * if any bits which should be shadowed are different
		 * from their shadowed value.
		 */
		if (only_32)
			local_data = (u64)qib_read_kreg32(dd, idx);
		else
			local_data = qib_read_kreg64(dd, idx);
		qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
			    (u32)local_data, (u32)dd->sendctrl);
		if ((local_data & SENDCTRL_SHADOWED) !=
		    (dd->sendctrl & SENDCTRL_SHADOWED))
			qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
				    (u32)local_data, (u32)dd->sendctrl);
		*data = (local_data & ~mask) | (*data & mask);
	}
	if (mask) {
		/*
		 * At least some mask bits are one, so we need
		 * to write, but only shadow some bits.
		 */
		u64 sval, tval; /* Shadowed, transient */

		/*
		 * New shadow val is bits we don't want to touch,
		 * ORed with bits we do, that are intended for shadow.
		 */
		sval = (dd->sendctrl & ~mask);
		sval |= *data & SENDCTRL_SHADOWED & mask;
		dd->sendctrl = sval;
		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
		qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
			    (u32)tval, (u32)sval);
		qib_write_kreg(dd, kr_sendctrl, tval);
		qib_write_kreg(dd, kr_scratch, 0ULL);
	}
	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);

	return only_32 ? 4 : 8;
}

static const struct diag_observer sendctrl_observer = {
	sendctrl_hook, kr_sendctrl * sizeof(u64),
	kr_sendctrl * sizeof(u64)
};

/*
 * Write the final few registers that depend on some of the
 * init setup.  Done late in init, just before bringing up
 * the serdes.
 */
static int qib_late_7220_initreg(struct qib_devdata *dd)
{
	int ret = 0;
	u64 val;

	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
	if (val != dd->pioavailregs_phys) {
		qib_dev_err(dd,
			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
			(unsigned long) dd->pioavailregs_phys,
			(unsigned long long) val);
		ret = -EINVAL;
	}
	qib_register_observer(dd, &sendctrl_observer);
	return ret;
}

static int qib_init_7220_variables(struct qib_devdata *dd)
{
	struct qib_chippport_specific *cpspec;
	struct qib_pportdata *ppd;
	int ret = 0;
	u32 sbufs, updthresh;

	cpspec = (struct qib_chippport_specific *)(dd + 1);
	ppd = &cpspec->pportdata;
	dd->pport = ppd;
	dd->num_pports = 1;

	dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
	ppd->cpspec = cpspec;

	spin_lock_init(&dd->cspec->sdepb_lock);
	spin_lock_init(&dd->cspec->rcvmod_lock);
	spin_lock_init(&dd->cspec->gpio_lock);

	/* we haven't yet set QIB_PRESENT, so use read directly */
	dd->revision = readq(&dd->kregbase[kr_revision]);

	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
		qib_dev_err(dd,
			"Revision register read failure, giving up initialization\n");
		ret = -ENODEV;
		goto bail;
	}
	dd->flags |= QIB_PRESENT; /* now register routines work */

	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
		    ChipRevMajor);
	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
		    ChipRevMinor);

	get_7220_chip_params(dd);
	qib_7220_boardname(dd);

	/*
	 * GPIO bits for TWSI data and clock,
	 * used for serial EEPROM.
	 */
	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;

	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
		QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
	dd->flags |= qib_special_trigger ?
		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;

	/*
	 * EEPROM error log 0 is TXE Parity errors.  1 is RXE Parity.
	 * 2 is Some Misc, 3 is reserved for future.
	 */
	dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);

	dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);

	dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);

	init_waitqueue_head(&cpspec->autoneg_wait);
	INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);

	qib_init_pportdata(ppd, dd, 0, 1);
	ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
	ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;

	ppd->link_width_enabled = ppd->link_width_supported;
	ppd->link_speed_enabled = ppd->link_speed_supported;
	/*
	 * Set the initial values to reasonable default, will be set
	 * for real when link is up.
	 */
	ppd->link_width_active = IB_WIDTH_4X;
	ppd->link_speed_active = QIB_IB_SDR;
	ppd->delay_mult = rate_to_delay[0][1];
	ppd->vls_supported = IB_VL_VL0;
	ppd->vls_operational = ppd->vls_supported;

	if (!qib_mini_init)
		qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);

	init_timer(&ppd->cpspec->chase_timer);
	ppd->cpspec->chase_timer.function = reenable_7220_chase;
	ppd->cpspec->chase_timer.data = (unsigned long)ppd;

	qib_num_cfg_vls = 1; /* if any 7220's, only one VL */

	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
	dd->rhf_offset =
		dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);

	/* we always allocate at least 2048 bytes for eager buffers */
	ret = ib_mtu_enum_to_int(qib_ibmtu);
	dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;

	qib_7220_tidtemplate(dd);

	/*
	 * We can request a receive interrupt for 1 or
	 * more packets from current offset.  For now, we set this
	 * up for a single packet.
	 */
	dd->rhdrhead_intr_off = 1ULL << 32;

	/* setup the stats timer; the add_timer is done at end of init */
	init_timer(&dd->stats_timer);
	dd->stats_timer.function = qib_get_7220_faststats;
	dd->stats_timer.data = (unsigned long) dd;
	dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;

	/*
	 * Control[4] has been added to change the arbitration within
	 * the SDMA engine between favoring data fetches over descriptor
	 * fetches.  qib_sdma_fetch_arb==0 gives data fetches priority.
	 */
	if (qib_sdma_fetch_arb)
		dd->control |= 1 << 4;

	dd->ureg_align = 0x10000;  /* 64KB alignment */

	dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
	qib_7220_config_ctxts(dd);
	qib_set_ctxtcnt(dd);  /* needed for PAT setup */

	if (qib_wc_pat) {
		ret = init_chip_wc_pat(dd, 0);
		if (ret)
			goto bail;
	}
	set_7220_baseaddrs(dd); /* set chip access pointers now */

	ret = 0;
	if (qib_mini_init)
		goto bail;

	ret = qib_create_ctxts(dd);
	init_7220_cntrnames(dd);

	/* Use all of the 4KB buffers for the kernel SDMA, zero if !SDMA.
	 * Reserve the update threshold amount for other kernel use, such
	 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
	 * unless we aren't enabling SDMA, in which case we want to use
	 * all the 4k bufs for the kernel.
	 * If this was less than the update threshold, we could wait
	 * a long time for an update.  Coded this way because we
	 * sometimes change the update threshold for various reasons,
	 * and we want this to remain robust.
	 */
	updthresh = 8U; /* update threshold */
	if (dd->flags & QIB_HAS_SEND_DMA) {
		dd->cspec->sdmabufcnt = dd->piobcnt4k;
		sbufs = updthresh > 3 ? updthresh : 3;
	} else {
		dd->cspec->sdmabufcnt = 0;
		sbufs = dd->piobcnt4k;
	}

	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
		dd->cspec->sdmabufcnt;
	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
	dd->pbufsctxt = dd->lastctxt_piobuf /
		(dd->cfgctxts - dd->first_user_ctxt);

	/*
	 * If we are at 16 user contexts, we will have only 7 sbufs
	 * per context, so drop the update threshold to match.  We
	 * want to update before we actually run out, at low pbufs/ctxt,
	 * so give ourselves some margin.
	 */
	if ((dd->pbufsctxt - 2) < updthresh)
		updthresh = dd->pbufsctxt - 2;

	dd->cspec->updthresh_dflt = updthresh;
	dd->cspec->updthresh = updthresh;

	/* before full enable, no interrupts, no locking needed */
	dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
		<< SYM_LSB(SendCtrl, AvailUpdThld);

	dd->psxmitwait_supported = 1;
	dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
bail:
	return ret;
}
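
/*
 * Worked example of the send-buffer accounting above (illustrative
 * counts, not a statement about any particular board): with piobcnt2k =
 * 176, piobcnt4k = 16, SDMA enabled (sdmabufcnt = 16) and sbufs = 8,
 * lastbuf_for_pio starts at 176 + 16 - 16 = 176, lastctxt_piobuf =
 * 176 - 8 = 168, and lastbuf_for_pio is then decremented to 175 because
 * the range check is <=, not <.
 */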

static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
					u32 *pbufnum)
{
	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
	struct qib_devdata *dd = ppd->dd;
	u32 __iomem *buf;

	if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
	    !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
		buf = get_7220_link_buf(ppd, pbufnum);
	else {
		if ((plen + 1) > dd->piosize2kmax_dwords)
			first = dd->piobcnt2k;
		else
			first = 0;
		/* try 4k if all 2k busy, so same last for both sizes */
		last = dd->cspec->lastbuf_for_pio;
		buf = qib_getsendbuf_range(dd, pbufnum, first, last);
	}
	return buf;
}

/* these 2 "counters" are really control registers, and are always RW */
static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
				     u32 start)
{
	write_7220_creg(ppd->dd, cr_psinterval, intv);
	write_7220_creg(ppd->dd, cr_psstart, start);
}

/*
 * NOTE: no real attempt is made to generalize the SDMA stuff.
 * At some point "soon" we will have a new, more generalized
 * sdma interface, and then we'll clean this up.
 */

/* Must be called with sdma_lock held, or before init finished */
static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
{
	/* Commit writes to memory and advance the tail on the chip */
	wmb();
	ppd->sdma_descq_tail = tail;
	qib_write_kreg(ppd->dd, kr_senddmatail, tail);
}

/* no-op on the 7220; hook required by the generic sdma interface */
static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
{
}

static struct sdma_set_state_action sdma_7220_action_table[] = {
	[qib_sdma_state_s00_hw_down] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.go_s99_running_tofalse = 1,
	},
	[qib_sdma_state_s10_hw_start_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s20_idle] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
	},
	[qib_sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s50_hw_halt_wait] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 1,
	},
	[qib_sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.go_s99_running_totrue = 1,
	},
};

static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
{
	ppd->sdma_state.set_state_action = sdma_7220_action_table;
}

static int init_sdma_7220_regs(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	unsigned i, n;
	u64 senddmabufmask[3] = { 0 };

	/* Set SendDmaBase */
	qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
	qib_sdma_7220_setlengen(ppd);
	qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
	/* Set SendDmaHeadAddr */
	qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);

	/*
	 * Reserve all the former "kernel" piobufs, using high number range
	 * so we get as many 4K buffers as possible.
	 */
	n = dd->piobcnt2k + dd->piobcnt4k;
	i = n - dd->cspec->sdmabufcnt;

	for (; i < n; ++i) {
		unsigned word = i / 64;
		unsigned bit = i & 63;

		BUG_ON(word >= 3);
		senddmabufmask[word] |= 1ULL << bit;
	}
	qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
	qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
	qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);

	ppd->sdma_state.first_sendbuf = i;
	ppd->sdma_state.last_sendbuf = n;

	return 0;
}
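
/*
 * Worked example for the bufmask loop above (illustrative counts): with
 * piobcnt2k = 176, piobcnt4k = 16 and sdmabufcnt = 16, buffers 176..191
 * are reserved for SDMA, so word = 176/64 = 2 for all of them and
 * senddmabufmask[2] gets bits 48..63 set; masks 0 and 1 stay zero.
 */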

/* sdma_lock must be held */
static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	int sane;
	int use_dmahead;
	u16 swhead;
	u16 swtail;
	u16 cnt;
	u16 hwhead;

	use_dmahead = __qib_sdma_running(ppd) &&
		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
retry:
	hwhead = use_dmahead ?
		(u16)le64_to_cpu(*ppd->sdma_head_dma) :
		(u16)qib_read_kreg32(dd, kr_senddmahead);

	swhead = ppd->sdma_descq_head;
	swtail = ppd->sdma_descq_tail;
	cnt = ppd->sdma_descq_cnt;

	if (swhead < swtail) {
		/* not wrapped */
		sane = (hwhead >= swhead) & (hwhead <= swtail);
	} else if (swhead > swtail) {
		/* wrapped around */
		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
			(hwhead <= swtail);
	} else {
		/* empty */
		sane = (hwhead == swhead);
	}

	if (unlikely(!sane)) {
		if (use_dmahead) {
			/* try one more time, directly from the register */
			use_dmahead = 0;
			goto retry;
		}
		/* assume no progress */
		hwhead = swhead;
	}

	return hwhead;
}
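
/*
 * The sanity check above treats the descriptor queue as a ring.  For
 * example (illustrative values), with cnt = 256, swhead = 250 and
 * swtail = 10 the valid region wraps, so a hwhead of 253 or of 5 is
 * sane, while 100 is not; an insane value falls back first to the
 * register read and then to "no progress" (hwhead = swhead).
 */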

static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
{
	u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);

	return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
	       (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
	       !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
}

/*
 * Compute the amount of delay before sending the next packet if the
 * port's send rate differs from the static rate set for the QP.
 * Since the delay affects this packet but the amount of the delay is
 * based on the length of the previous packet, use the last delay computed
 * and save the delay count for this packet to be used next time
 * we get here.
 */
static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
				   u8 srate, u8 vl)
{
	u8 snd_mult = ppd->delay_mult;
	u8 rcv_mult = ib_rate_to_delay[srate];
	u32 ret = ppd->cpspec->last_delay_mult;

	ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;

	/* Indicate VL15, if necessary */
	if (vl == 15)
		ret |= PBC_7220_VL15_SEND_CTRL;
	return ret;
}
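
/*
 * Worked example for the rate-matching delay above (illustrative
 * values): if the receiver's static-rate delay multiplier is 4 and the
 * port's is 2, a previous packet of plen = 100 dwords stores
 * (100 * (4 - 2) + 1) >> 1 = 100 as the delay count applied to the
 * next packet, while the current packet goes out with the delay
 * computed from the one before it.
 */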

/* nothing to do for VL15 buffers on the 7220; hook kept for the interface */
static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
{
}

static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
{
	if (!rcd->ctxt) {
		rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
		rcd->rcvegr_tid_base = 0;
	} else {
		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
		rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
			(rcd->ctxt - 1) * rcd->rcvegrcnt;
	}
}

static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
				  u32 len, u32 which, struct qib_ctxtdata *rcd)
{
	int i;
	unsigned long flags;

	switch (which) {
	case TXCHK_CHG_TYPE_KERN:
		/* see if we need to raise avail update threshold */
		spin_lock_irqsave(&dd->uctxt_lock, flags);
		for (i = dd->first_user_ctxt;
		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
		     && i < dd->cfgctxts; i++)
			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
			    ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
			    < dd->cspec->updthresh_dflt)
				break;
		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
		if (i == dd->cfgctxts) {
			spin_lock_irqsave(&dd->sendctrl_lock, flags);
			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
					SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		}
		break;
	case TXCHK_CHG_TYPE_USER:
		spin_lock_irqsave(&dd->sendctrl_lock, flags);
		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
		    / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
			dd->cspec->updthresh = (rcd->piocnt /
						rcd->subctxt_cnt) - 1;
			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
			dd->sendctrl |= (dd->cspec->updthresh &
					 SYM_RMASK(SendCtrl, AvailUpdThld))
					<< SYM_LSB(SendCtrl, AvailUpdThld);
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
			sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
		} else
			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
		break;
	}
}

static void writescratch(struct qib_devdata *dd, u32 val)
{
	qib_write_kreg(dd, kr_scratch, val);
}

#define VALID_TS_RD_REG_MASK 0xBF
/**
 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
 * @dd: the qlogic_ib device
 * @regnum: register to read from
 *
 * returns reg contents (0..255) or < 0 for error
 */
static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
{
	int ret;
	u8 rdata;

	if (regnum > 7) {
		ret = -EINVAL;
		goto bail;
	}

	/* return a bogus value for (the one) register we do not have */
	if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
		ret = 0;
		goto bail;
	}

	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto bail;

	ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
	if (!ret)
		ret = rdata;

	mutex_unlock(&dd->eep_lock);

	/*
	 * There are three possibilities here:
	 * ret is actual value (0..255)
	 * ret is -ENXIO or -EINVAL from twsi code or this file
	 * ret is -EINTR from mutex_lock_interruptible.
	 */
bail:
	return ret;
}
4483
4484
/* Dummy function, as 7220 boards never disable EEPROM Write */
4485
static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
4486
{
4487
return 1;
4488
}
4489
4490
/**
4491
* qib_init_iba7220_funcs - set up the chip-specific function pointers
4492
* @dev: the pci_dev for qlogic_ib device
4493
* @ent: pci_device_id struct for this dev
4494
*
4495
* This is global, and is called directly at init to set up the
4496
* chip-specific function pointers for later use.
4497
*/
4498
struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
4499
const struct pci_device_id *ent)
4500
{
4501
struct qib_devdata *dd;
4502
int ret;
4503
u32 boardid, minwidth;
4504
4505
dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
4506
sizeof(struct qib_chippport_specific));
4507
if (IS_ERR(dd))
4508
goto bail;
4509
4510
dd->f_bringup_serdes = qib_7220_bringup_serdes;
4511
dd->f_cleanup = qib_setup_7220_cleanup;
4512
dd->f_clear_tids = qib_7220_clear_tids;
4513
dd->f_free_irq = qib_7220_free_irq;
4514
dd->f_get_base_info = qib_7220_get_base_info;
4515
dd->f_get_msgheader = qib_7220_get_msgheader;
4516
dd->f_getsendbuf = qib_7220_getsendbuf;
4517
dd->f_gpio_mod = gpio_7220_mod;
4518
dd->f_eeprom_wen = qib_7220_eeprom_wen;
4519
dd->f_hdrqempty = qib_7220_hdrqempty;
4520
dd->f_ib_updown = qib_7220_ib_updown;
4521
dd->f_init_ctxt = qib_7220_init_ctxt;
4522
dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
4523
dd->f_intr_fallback = qib_7220_intr_fallback;
4524
dd->f_late_initreg = qib_late_7220_initreg;
4525
dd->f_setpbc_control = qib_7220_setpbc_control;
4526
dd->f_portcntr = qib_portcntr_7220;
4527
dd->f_put_tid = qib_7220_put_tid;
4528
dd->f_quiet_serdes = qib_7220_quiet_serdes;
4529
dd->f_rcvctrl = rcvctrl_7220_mod;
4530
dd->f_read_cntrs = qib_read_7220cntrs;
4531
dd->f_read_portcntrs = qib_read_7220portcntrs;
4532
dd->f_reset = qib_setup_7220_reset;
4533
dd->f_init_sdma_regs = init_sdma_7220_regs;
4534
dd->f_sdma_busy = qib_sdma_7220_busy;
4535
dd->f_sdma_gethead = qib_sdma_7220_gethead;
4536
dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
4537
dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
4538
dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
4539
dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
4540
dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
4541
dd->f_sdma_init_early = qib_7220_sdma_init_early;
4542
dd->f_sendctrl = sendctrl_7220_mod;
4543
dd->f_set_armlaunch = qib_set_7220_armlaunch;
4544
dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
4545
dd->f_iblink_state = qib_7220_iblink_state;
4546
dd->f_ibphys_portstate = qib_7220_phys_portstate;
4547
dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
4548
dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
4549
dd->f_set_ib_loopback = qib_7220_set_loopback;
4550
dd->f_set_intr_state = qib_7220_set_intr_state;
4551
dd->f_setextled = qib_setup_7220_setextled;
4552
dd->f_txchk_change = qib_7220_txchk_change;
4553
dd->f_update_usrhead = qib_update_7220_usrhead;
4554
dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
4555
dd->f_xgxs_reset = qib_7220_xgxs_reset;
4556
dd->f_writescratch = writescratch;
4557
dd->f_tempsense_rd = qib_7220_tempsense_rd;
4558
/*
4559
* Do remaining pcie setup and save pcie values in dd.
4560
* Any error printing is already done by the init code.
4561
* On return, we have the chip mapped, but chip registers
4562
* are not set up until start of qib_init_7220_variables.
4563
*/
4564
ret = qib_pcie_ddinit(dd, pdev, ent);
4565
if (ret < 0)
4566
goto bail_free;
4567
4568
/* initialize chip-specific variables */
4569
ret = qib_init_7220_variables(dd);
4570
if (ret)
4571
goto bail_cleanup;
4572
4573
if (qib_mini_init)
4574
goto bail;
4575
4576
boardid = SYM_FIELD(dd->revision, Revision,
4577
BoardID);
4578
switch (boardid) {
4579
case 0:
4580
case 2:
4581
case 10:
4582
case 12:
4583
minwidth = 16; /* x16 capable boards */
4584
break;
4585
default:
4586
minwidth = 8; /* x8 capable boards */
4587
break;
4588
}
4589
if (qib_pcie_params(dd, minwidth, NULL, NULL))
4590
qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
4591
"continuing anyway\n");
4592
4593
/* save IRQ for possible later use */
4594
dd->cspec->irq = pdev->irq;
4595
4596
if (qib_read_kreg64(dd, kr_hwerrstatus) &
4597
QLOGIC_IB_HWE_SERDESPLLFAILED)
4598
qib_write_kreg(dd, kr_hwerrclear,
4599
QLOGIC_IB_HWE_SERDESPLLFAILED);
4600
4601
/* setup interrupt handler (interrupt type handled above) */
4602
qib_setup_7220_interrupt(dd);
4603
qib_7220_init_hwerrors(dd);
4604
4605
/* clear diagctrl register, in case diags were running and crashed */
4606
qib_write_kreg(dd, kr_hwdiagctrl, 0);
4607
4608
goto bail;
4609
4610
bail_cleanup:
4611
qib_pcie_ddcleanup(dd);
4612
bail_free:
4613
qib_free_devdata(dd);
4614
dd = ERR_PTR(ret);
4615
bail:
4616
return dd;
4617
}
4618
4619