GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/qib/qib_iba7322.c
1
/*
2
* Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3
*
4
* This software is available to you under a choice of one of two
5
* licenses. You may choose to be licensed under the terms of the GNU
6
* General Public License (GPL) Version 2, available from the file
7
* COPYING in the main directory of this source tree, or the
8
* OpenIB.org BSD license below:
9
*
10
* Redistribution and use in source and binary forms, with or
11
* without modification, are permitted provided that the following
12
* conditions are met:
13
*
14
* - Redistributions of source code must retain the above
15
* copyright notice, this list of conditions and the following
16
* disclaimer.
17
*
18
* - Redistributions in binary form must reproduce the above
19
* copyright notice, this list of conditions and the following
20
* disclaimer in the documentation and/or other materials
21
* provided with the distribution.
22
*
23
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30
* SOFTWARE.
31
*/
32
33
/*
34
* This file contains all of the code that is specific to the
35
* InfiniPath 7322 chip
36
*/
37
38
#include <linux/interrupt.h>
39
#include <linux/pci.h>
40
#include <linux/delay.h>
41
#include <linux/io.h>
42
#include <linux/jiffies.h>
43
#include <rdma/ib_verbs.h>
44
#include <rdma/ib_smi.h>
45
46
#include "qib.h"
47
#include "qib_7322_regs.h"
48
#include "qib_qsfp.h"
49
50
#include "qib_mad.h"
51
52
static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
53
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
54
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
55
static irqreturn_t qib_7322intr(int irq, void *data);
56
static irqreturn_t qib_7322bufavail(int irq, void *data);
57
static irqreturn_t sdma_intr(int irq, void *data);
58
static irqreturn_t sdma_idle_intr(int irq, void *data);
59
static irqreturn_t sdma_progress_intr(int irq, void *data);
60
static irqreturn_t sdma_cleanup_intr(int irq, void *data);
61
static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
62
struct qib_ctxtdata *rcd);
63
static u8 qib_7322_phys_portstate(u64);
64
static u32 qib_7322_iblink_state(u64);
65
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
66
u16 linitcmd);
67
static void force_h1(struct qib_pportdata *);
68
static void adj_tx_serdes(struct qib_pportdata *);
69
static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
70
static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71
72
static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73
static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
74
static void serdes_7322_los_enable(struct qib_pportdata *, int);
75
static int serdes_7322_init_old(struct qib_pportdata *);
76
static int serdes_7322_init_new(struct qib_pportdata *);
77
78
#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
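/*
 * For example, BMASK(7, 4) expands to ((1 << 4) - 1) << 4 == 0xf0,
 * i.e. a mask covering bits 7..4 inclusive.
 */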
79
80
/* LE2 serdes values for different cases */
81
#define LE2_DEFAULT 5
82
#define LE2_5m 4
83
#define LE2_QME 0
84
85
/* Below is special-purpose, so only really works for the IB SerDes blocks. */
86
#define IBSD(hw_pidx) (hw_pidx + 2)
87
88
/* these are variables for documentation and experimentation purposes */
89
static const unsigned rcv_int_timeout = 375;
90
static const unsigned rcv_int_count = 16;
91
static const unsigned sdma_idle_cnt = 64;
92
93
/* Time to stop altering Rx Equalization parameters, after link up. */
94
#define RXEQ_DISABLE_MSECS 2500
95
96
/*
97
* Number of VLs we are configured to use (to allow for more
98
* credits per vl, etc.)
99
*/
100
ushort qib_num_cfg_vls = 2;
101
module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
102
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
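/*
 * Illustrative usage, assuming the driver module is loaded as ib_qib:
 *     modprobe ib_qib num_vls=4
 * selects 4 VLs, within the 1-8 range described above.
 */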
103
104
static ushort qib_chase = 1;
105
module_param_named(chase, qib_chase, ushort, S_IRUGO);
106
MODULE_PARM_DESC(chase, "Enable state chase handling");
107
108
static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
109
module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
110
MODULE_PARM_DESC(long_attenuation, \
111
"attenuation cutoff (dB) for long copper cable setup");
112
113
static ushort qib_singleport;
114
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
115
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
116
117
/*
118
* Receive header queue sizes
119
*/
120
static unsigned qib_rcvhdrcnt;
121
module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
122
MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
123
124
static unsigned qib_rcvhdrsize;
125
module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
126
MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
127
128
static unsigned qib_rcvhdrentsize;
129
module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
130
MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
131
132
#define MAX_ATTEN_LEN 64 /* plenty for any real system */
133
/* for read back, default index is ~5m copper cable */
134
static char txselect_list[MAX_ATTEN_LEN] = "10";
135
static struct kparam_string kp_txselect = {
136
.string = txselect_list,
137
.maxlen = MAX_ATTEN_LEN
138
};
139
static int setup_txselect(const char *, struct kernel_param *);
140
module_param_call(txselect, setup_txselect, param_get_string,
141
&kp_txselect, S_IWUSR | S_IRUGO);
142
MODULE_PARM_DESC(txselect, \
143
"Tx serdes indices (for no QSFP or invalid QSFP data)");
144
145
#define BOARD_QME7342 5
146
#define BOARD_QMH7342 6
147
#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
148
BOARD_QMH7342)
149
#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
150
BOARD_QME7342)
151
152
#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
153
154
#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
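/*
 * Both macros above convert a byte offset from the generated register header
 * into an index into the u64-mapped kregbase array (see qib_read_kreg64()
 * below); KREG_IBPORT_IDX uses the port-0 instance as the base of each
 * per-port register block.
 */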
155
156
#define MASK_ACROSS(lsb, msb) \
157
(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
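/*
 * For example, MASK_ACROSS(0, 17) == 0x3ffff (18 contiguous bits), which is
 * how the per-context RcvUrg/RcvAvail interrupt masks below are built.
 */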
158
159
#define SYM_RMASK(regname, fldname) ((u64) \
160
QIB_7322_##regname##_##fldname##_RMASK)
161
162
#define SYM_MASK(regname, fldname) ((u64) \
163
QIB_7322_##regname##_##fldname##_RMASK << \
164
QIB_7322_##regname##_##fldname##_LSB)
165
166
#define SYM_FIELD(value, regname, fldname) ((u64) \
167
(((value) >> SYM_LSB(regname, fldname)) & \
168
SYM_RMASK(regname, fldname)))
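/*
 * Worked example with made-up field parameters: for a field whose LSB is 4
 * and whose RMASK is 0x3, SYM_FIELD(0x37, reg, fld) == (0x37 >> 4) & 0x3
 * == 0x3.
 */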
169
170
/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
171
#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
172
(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
173
174
#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
175
#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
176
#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
177
#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
178
#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
179
/* Below because most, but not all, fields of IntMask have that full suffix */
180
#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
181
182
183
#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
184
185
/*
186
* the size bits give us 2^N, in KB units; 0 is marked as invalid,
187
* and 7 is reserved. We currently use only 2KB and 4KB
188
*/
189
#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
190
#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
191
#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
192
#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
193
194
#define SendIBSLIDAssignMask \
195
QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
196
#define SendIBSLMCMask \
197
QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
198
199
#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
200
#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
201
#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
202
#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
203
#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
204
#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
205
206
#define _QIB_GPIO_SDA_NUM 1
207
#define _QIB_GPIO_SCL_NUM 0
208
#define QIB_EEPROM_WEN_NUM 14
209
#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
210
211
/* HW counter clock is at 4nsec */
212
#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
213
214
/* full speed IB port 1 only */
215
#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
216
#define PORT_SPD_CAP_SHIFT 3
217
218
/* full speed featuremask, both ports */
219
#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
220
221
/*
222
* This file contains almost all the chip-specific register information and
223
* access functions for the FAKED QLogic InfiniPath 7322 PCI-Express chip.
224
*/
225
226
/* Use defines to tie machine-generated names to lower-case names */
227
#define kr_contextcnt KREG_IDX(ContextCnt)
228
#define kr_control KREG_IDX(Control)
229
#define kr_counterregbase KREG_IDX(CntrRegBase)
230
#define kr_errclear KREG_IDX(ErrClear)
231
#define kr_errmask KREG_IDX(ErrMask)
232
#define kr_errstatus KREG_IDX(ErrStatus)
233
#define kr_extctrl KREG_IDX(EXTCtrl)
234
#define kr_extstatus KREG_IDX(EXTStatus)
235
#define kr_gpio_clear KREG_IDX(GPIOClear)
236
#define kr_gpio_mask KREG_IDX(GPIOMask)
237
#define kr_gpio_out KREG_IDX(GPIOOut)
238
#define kr_gpio_status KREG_IDX(GPIOStatus)
239
#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
240
#define kr_debugportval KREG_IDX(DebugPortValueReg)
241
#define kr_fmask KREG_IDX(feature_mask)
242
#define kr_act_fmask KREG_IDX(active_feature_mask)
243
#define kr_hwerrclear KREG_IDX(HwErrClear)
244
#define kr_hwerrmask KREG_IDX(HwErrMask)
245
#define kr_hwerrstatus KREG_IDX(HwErrStatus)
246
#define kr_intclear KREG_IDX(IntClear)
247
#define kr_intmask KREG_IDX(IntMask)
248
#define kr_intredirect KREG_IDX(IntRedirect0)
249
#define kr_intstatus KREG_IDX(IntStatus)
250
#define kr_pagealign KREG_IDX(PageAlign)
251
#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
252
#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
253
#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
254
#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
255
#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
256
#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
257
#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
258
#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
259
#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
260
#define kr_revision KREG_IDX(Revision)
261
#define kr_scratch KREG_IDX(Scratch)
262
#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
263
#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
264
#define kr_sendctrl KREG_IDX(SendCtrl)
265
#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
266
#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
267
#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
268
#define kr_sendpiobufbase KREG_IDX(SendBufBase)
269
#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
270
#define kr_sendpiosize KREG_IDX(SendBufSize)
271
#define kr_sendregbase KREG_IDX(SendRegBase)
272
#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
273
#define kr_userregbase KREG_IDX(UserRegBase)
274
#define kr_intgranted KREG_IDX(Int_Granted)
275
#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
276
#define kr_intblocked KREG_IDX(IntBlocked)
277
#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
278
279
/*
280
* per-port kernel registers. Access only with qib_read_kreg_port()
281
* or qib_write_kreg_port()
282
*/
283
#define krp_errclear KREG_IBPORT_IDX(ErrClear)
284
#define krp_errmask KREG_IBPORT_IDX(ErrMask)
285
#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
286
#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
287
#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
288
#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
289
#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
290
#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
291
#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
292
#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
293
#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
294
#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
295
#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
296
#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
297
#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
298
#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
299
#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
300
#define krp_psstart KREG_IBPORT_IDX(PSStart)
301
#define krp_psstat KREG_IBPORT_IDX(PSStat)
302
#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
303
#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
304
#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
305
#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
306
#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
307
#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
308
#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
309
#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
310
#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
311
#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
312
#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
313
#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
314
#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
315
#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
316
#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
317
#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
318
#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
319
#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
320
#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
321
#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
322
#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
323
#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
324
#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
325
#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
326
#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
327
#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
328
#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
329
#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
330
#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
331
#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
332
#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
333
334
/*
335
* Per-context kernel registers. Access only with qib_read_kreg_ctxt()
336
* or qib_write_kreg_ctxt()
337
*/
338
#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
339
#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
340
341
/*
342
* TID Flow table, per context. Reduces
343
* number of hdrq updates to one per flow (or on errors).
344
* context 0 and 1 share same memory, but have distinct
345
* addresses. Since for now, we never use expected sends
346
* on kernel contexts, we don't worry about that (we initialize
347
* those entries for ctxt 0/1 on driver load twice, for example).
348
*/
349
#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
350
#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
351
352
/* these are the error bits in the tid flows, and are W1C */
353
#define TIDFLOW_ERRBITS ( \
354
(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
355
SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
356
(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
357
SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
358
359
/* Most (not all) Counters are per-IBport.
360
* Requires LBIntCnt is at offset 0 in the group
361
*/
362
#define CREG_IDX(regname) \
363
((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
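/*
 * As with KREG_IDX above, this turns a byte offset into a u64 index, but
 * relative to LBIntCnt, which (per the comment above) must be the first
 * counter in the group.
 */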
364
365
#define crp_badformat CREG_IDX(RxVersionErrCnt)
366
#define crp_err_rlen CREG_IDX(RxLenErrCnt)
367
#define crp_erricrc CREG_IDX(RxICRCErrCnt)
368
#define crp_errlink CREG_IDX(RxLinkMalformCnt)
369
#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
370
#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
371
#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
372
#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
373
#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
374
#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
375
#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
376
#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
377
#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
378
#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
379
#define crp_pktrcv CREG_IDX(RxDataPktCnt)
380
#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
381
#define crp_pktsend CREG_IDX(TxDataPktCnt)
382
#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
383
#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
384
#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
385
#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
386
#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
387
#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
388
#define crp_rcvebp CREG_IDX(RxEBPCnt)
389
#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
390
#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
391
#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
392
#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
393
#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
394
#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
395
#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
396
#define crp_sendstall CREG_IDX(TxFlowStallCnt)
397
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
398
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
399
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
400
401
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
402
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
403
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
404
#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
405
#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
406
#define crp_wordrcv CREG_IDX(RxDwordCnt)
407
#define crp_wordsend CREG_IDX(TxDwordCnt)
408
#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
409
410
/* these are the (few) counters that are not port-specific */
411
#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
412
QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
413
#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
414
#define cr_lbint CREG_DEVIDX(LBIntCnt)
415
#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
416
#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
417
#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
418
#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
419
#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
420
421
/* no chip register for # of IB ports supported, so define */
422
#define NUM_IB_PORTS 2
423
424
/* 1 VL15 buffer per hardware IB port, no register for this, so define */
425
#define NUM_VL15_BUFS NUM_IB_PORTS
426
427
/*
428
* context 0 and 1 are special, and there is no chip register that
429
* defines this value, so we have to define it here.
430
* These are all allocated to either 0 or 1 for single port
431
* hardware configuration, otherwise each gets half
432
*/
433
#define KCTXT0_EGRCNT 2048
434
435
/* values for vl and port fields in PBC, 7322-specific */
436
#define PBC_PORT_SEL_LSB 26
437
#define PBC_PORT_SEL_RMASK 1
438
#define PBC_VL_NUM_LSB 27
439
#define PBC_VL_NUM_RMASK 7
440
#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
441
#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
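/*
 * Illustrative PBC construction (flush_fifo() below builds one this way):
 *     pbc = PBC_7322_VL15_SEND |
 *           ((u64)hw_pidx << (PBC_PORT_SEL_LSB + 32)) |
 *           length_in_dwords;
 */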
442
443
static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
444
[IB_RATE_2_5_GBPS] = 16,
445
[IB_RATE_5_GBPS] = 8,
446
[IB_RATE_10_GBPS] = 4,
447
[IB_RATE_20_GBPS] = 2,
448
[IB_RATE_30_GBPS] = 2,
449
[IB_RATE_40_GBPS] = 1
450
};
451
452
#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
453
#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
454
455
/* link training states, from IBC */
456
#define IB_7322_LT_STATE_DISABLED 0x00
457
#define IB_7322_LT_STATE_LINKUP 0x01
458
#define IB_7322_LT_STATE_POLLACTIVE 0x02
459
#define IB_7322_LT_STATE_POLLQUIET 0x03
460
#define IB_7322_LT_STATE_SLEEPDELAY 0x04
461
#define IB_7322_LT_STATE_SLEEPQUIET 0x05
462
#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
463
#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
464
#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
465
#define IB_7322_LT_STATE_CFGIDLE 0x0b
466
#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
467
#define IB_7322_LT_STATE_TXREVLANES 0x0d
468
#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
469
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
470
#define IB_7322_LT_STATE_CFGENH 0x10
471
#define IB_7322_LT_STATE_CFGTEST 0x11
472
#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
473
#define IB_7322_LT_STATE_CFGWAITENH 0x13
474
475
/* link state machine states from IBC */
476
#define IB_7322_L_STATE_DOWN 0x0
477
#define IB_7322_L_STATE_INIT 0x1
478
#define IB_7322_L_STATE_ARM 0x2
479
#define IB_7322_L_STATE_ACTIVE 0x3
480
#define IB_7322_L_STATE_ACT_DEFER 0x4
481
482
static const u8 qib_7322_physportstate[0x20] = {
483
[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
484
[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
485
[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
486
[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
487
[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
488
[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
489
[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
490
[IB_7322_LT_STATE_CFGRCVFCFG] =
491
IB_PHYSPORTSTATE_CFG_TRAIN,
492
[IB_7322_LT_STATE_CFGWAITRMT] =
493
IB_PHYSPORTSTATE_CFG_TRAIN,
494
[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
495
[IB_7322_LT_STATE_RECOVERRETRAIN] =
496
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
497
[IB_7322_LT_STATE_RECOVERWAITRMT] =
498
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
499
[IB_7322_LT_STATE_RECOVERIDLE] =
500
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
501
[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
502
[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
503
[IB_7322_LT_STATE_CFGWAITRMTTEST] =
504
IB_PHYSPORTSTATE_CFG_TRAIN,
505
[IB_7322_LT_STATE_CFGWAITENH] =
506
IB_PHYSPORTSTATE_CFG_WAIT_ENH,
507
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
508
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
509
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
510
[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
511
};
512
513
struct qib_chip_specific {
514
u64 __iomem *cregbase;
515
u64 *cntrs;
516
spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
517
spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
518
u64 main_int_mask; /* clear bits which have dedicated handlers */
519
u64 int_enable_mask; /* for per port interrupts in single port mode */
520
u64 errormask;
521
u64 hwerrmask;
522
u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
523
u64 gpio_mask; /* shadow the gpio mask register */
524
u64 extctrl; /* shadow the gpio output enable, etc... */
525
u32 ncntrs;
526
u32 nportcntrs;
527
u32 cntrnamelen;
528
u32 portcntrnamelen;
529
u32 numctxts;
530
u32 rcvegrcnt;
531
u32 updthresh; /* current AvailUpdThld */
532
u32 updthresh_dflt; /* default AvailUpdThld */
533
u32 r1;
534
int irq;
535
u32 num_msix_entries;
536
u32 sdmabufcnt;
537
u32 lastbuf_for_pio;
538
u32 stay_in_freeze;
539
u32 recovery_ports_initted;
540
struct msix_entry *msix_entries;
541
void **msix_arg;
542
unsigned long *sendchkenable;
543
unsigned long *sendgrhchk;
544
unsigned long *sendibchk;
545
u32 rcvavail_timeout[18];
546
char emsgbuf[128]; /* for device error interrupt msg buffer */
547
};
548
549
/* Table of Tx emphasis entries, in "human readable" form. */
550
struct txdds_ent {
551
u8 amp;
552
u8 pre;
553
u8 main;
554
u8 post;
555
};
556
557
struct vendor_txdds_ent {
558
u8 oui[QSFP_VOUI_LEN];
559
u8 *partnum;
560
struct txdds_ent sdr;
561
struct txdds_ent ddr;
562
struct txdds_ent qdr;
563
};
564
565
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
566
567
#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
568
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
569
#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
570
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
571
572
#define H1_FORCE_VAL 8
573
#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
574
#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
575
576
/* The static and dynamic registers are paired, and the pairs indexed by spd */
577
#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
578
+ ((spd) * 2))
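/*
 * Illustration, assuming spd 0/1/2 correspond to SDR/DDR/QDR:
 * krp_static_adapt_dis(2) addresses the QDR static-adaptation register,
 * two register pairs past the SDR one.
 */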
579
580
#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
581
#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
582
#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
583
#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
584
#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
585
586
struct qib_chippport_specific {
587
u64 __iomem *kpregbase;
588
u64 __iomem *cpregbase;
589
u64 *portcntrs;
590
struct qib_pportdata *ppd;
591
wait_queue_head_t autoneg_wait;
592
struct delayed_work autoneg_work;
593
struct delayed_work ipg_work;
594
struct timer_list chase_timer;
595
/*
596
* these 5 fields are used to establish deltas for IB symbol
597
* errors and linkrecovery errors. They can be reported on
598
* some chips during link negotiation prior to INIT, and with
599
* DDR when faking DDR negotiations with non-IBTA switches.
600
* The chip counters are adjusted at driver unload if there is
601
* a non-zero delta.
602
*/
603
u64 ibdeltainprog;
604
u64 ibsymdelta;
605
u64 ibsymsnap;
606
u64 iblnkerrdelta;
607
u64 iblnkerrsnap;
608
u64 iblnkdownsnap;
609
u64 iblnkdowndelta;
610
u64 ibmalfdelta;
611
u64 ibmalfsnap;
612
u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
613
u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
614
u64 qdr_dfe_time;
615
u64 chase_end;
616
u32 autoneg_tries;
617
u32 recovery_init;
618
u32 qdr_dfe_on;
619
u32 qdr_reforce;
620
/*
621
* Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
622
* entry zero is unused, to simplify indexing
623
*/
624
u8 h1_val;
625
u8 no_eep; /* txselect table index to use if no qsfp info */
626
u8 ipg_tries;
627
u8 ibmalfusesnap;
628
struct qib_qsfp_data qsfp_data;
629
char epmsgbuf[192]; /* for port error interrupt msg buffer */
630
};
631
632
static struct {
633
const char *name;
634
irq_handler_t handler;
635
int lsb;
636
int port; /* 0 if not port-specific, else port # */
637
} irq_table[] = {
638
{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
639
{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
640
SYM_LSB(IntStatus, SendBufAvail), 0 },
641
{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
642
SYM_LSB(IntStatus, SDmaInt_0), 1 },
643
{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
644
SYM_LSB(IntStatus, SDmaInt_1), 2 },
645
{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
646
SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
647
{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
648
SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
649
{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
650
SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
651
{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
652
SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
653
{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
654
SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
655
{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
656
SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
657
};
658
659
/* ibcctrl bits */
660
#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
661
/* cycle through TS1/TS2 till OK */
662
#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
663
/* wait for TS1, then go on */
664
#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
665
#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
666
667
#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
668
#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
669
#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
670
671
#define BLOB_7322_IBCHG 0x101
672
673
static inline void qib_write_kreg(const struct qib_devdata *dd,
674
const u32 regno, u64 value);
675
static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
676
static void write_7322_initregs(struct qib_devdata *);
677
static void write_7322_init_portregs(struct qib_pportdata *);
678
static void setup_7322_link_recovery(struct qib_pportdata *, u32);
679
static void check_7322_rxe_status(struct qib_pportdata *);
680
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
681
682
/**
683
* qib_read_ureg32 - read 32-bit virtualized per-context register
684
* @dd: device
685
* @regno: register number
686
* @ctxt: context number
687
*
688
* Return the contents of a register that is virtualized to be per context.
689
* Returns 0 on errors (not distinguishable from valid contents at
690
* runtime; we may add a separate error variable at some point).
691
*/
692
static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
693
enum qib_ureg regno, int ctxt)
694
{
695
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
696
return 0;
697
return readl(regno + (u64 __iomem *)(
698
(dd->ureg_align * ctxt) + (dd->userbase ?
699
(char __iomem *)dd->userbase :
700
(char __iomem *)dd->kregbase + dd->uregbase)));
701
}
702
703
/**
704
* qib_read_ureg - read virtualized per-context register
705
* @dd: device
706
* @regno: register number
707
* @ctxt: context number
708
*
709
* Return the contents of a register that is virtualized to be per context.
710
* Returns 0 on errors (not distinguishable from valid contents at
711
* runtime; we may add a separate error variable at some point).
712
*/
713
static inline u64 qib_read_ureg(const struct qib_devdata *dd,
714
enum qib_ureg regno, int ctxt)
715
{
716
717
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
718
return 0;
719
return readq(regno + (u64 __iomem *)(
720
(dd->ureg_align * ctxt) + (dd->userbase ?
721
(char __iomem *)dd->userbase :
722
(char __iomem *)dd->kregbase + dd->uregbase)));
723
}
724
725
/**
726
* qib_write_ureg - write virtualized per-context register
727
* @dd: device
728
* @regno: register number
729
* @value: value
730
* @ctxt: context
731
*
732
* Write the contents of a register that is virtualized to be per context.
733
*/
734
static inline void qib_write_ureg(const struct qib_devdata *dd,
735
enum qib_ureg regno, u64 value, int ctxt)
736
{
737
u64 __iomem *ubase;
738
if (dd->userbase)
739
ubase = (u64 __iomem *)
740
((char __iomem *) dd->userbase +
741
dd->ureg_align * ctxt);
742
else
743
ubase = (u64 __iomem *)
744
(dd->uregbase +
745
(char __iomem *) dd->kregbase +
746
dd->ureg_align * ctxt);
747
748
if (dd->kregbase && (dd->flags & QIB_PRESENT))
749
writeq(value, &ubase[regno]);
750
}
751
752
static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
753
const u32 regno)
754
{
755
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
756
return -1;
757
return readl((u32 __iomem *) &dd->kregbase[regno]);
758
}
759
760
static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
761
const u32 regno)
762
{
763
if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
764
return -1;
765
return readq(&dd->kregbase[regno]);
766
}
767
768
static inline void qib_write_kreg(const struct qib_devdata *dd,
769
const u32 regno, u64 value)
770
{
771
if (dd->kregbase && (dd->flags & QIB_PRESENT))
772
writeq(value, &dd->kregbase[regno]);
773
}
774
775
/*
776
* not many sanity checks for the port-specific kernel register routines,
777
* since they are only used when it's known to be safe.
778
*/
779
static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
780
const u16 regno)
781
{
782
if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
783
return 0ULL;
784
return readq(&ppd->cpspec->kpregbase[regno]);
785
}
786
787
static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
788
const u16 regno, u64 value)
789
{
790
if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
791
(ppd->dd->flags & QIB_PRESENT))
792
writeq(value, &ppd->cpspec->kpregbase[regno]);
793
}
794
795
/**
796
* qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
797
* @dd: the qlogic_ib device
798
* @regno: the register number to write
799
* @ctxt: the context containing the register
800
* @value: the value to write
801
*/
802
static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
803
const u16 regno, unsigned ctxt,
804
u64 value)
805
{
806
qib_write_kreg(dd, regno + ctxt, value);
807
}
808
809
static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
810
{
811
if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
812
return 0;
813
return readq(&dd->cspec->cregbase[regno]);
814
815
816
}
817
818
static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
819
{
820
if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
821
return 0;
822
return readl(&dd->cspec->cregbase[regno]);
823
824
825
}
826
827
static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
828
u16 regno, u64 value)
829
{
830
if (ppd->cpspec && ppd->cpspec->cpregbase &&
831
(ppd->dd->flags & QIB_PRESENT))
832
writeq(value, &ppd->cpspec->cpregbase[regno]);
833
}
834
835
static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
836
u16 regno)
837
{
838
if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
839
!(ppd->dd->flags & QIB_PRESENT))
840
return 0;
841
return readq(&ppd->cpspec->cpregbase[regno]);
842
}
843
844
static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
845
u16 regno)
846
{
847
if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
848
!(ppd->dd->flags & QIB_PRESENT))
849
return 0;
850
return readl(&ppd->cpspec->cpregbase[regno]);
851
}
852
853
/* bits in Control register */
854
#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
855
#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
856
857
/* bits in general interrupt regs */
858
#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
859
#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
860
#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
861
#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
862
#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
863
#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
864
#define QIB_I_C_ERROR INT_MASK(Err)
865
866
#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
867
#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
868
#define QIB_I_GPIO INT_MASK(AssertGPIO)
869
#define QIB_I_P_SDMAINT(pidx) \
870
(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
871
INT_MASK_P(SDmaProgress, pidx) | \
872
INT_MASK_PM(SDmaCleanupDone, pidx))
873
874
/* Interrupt bits that are "per port" */
875
#define QIB_I_P_BITSEXTANT(pidx) \
876
(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
877
INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
878
INT_MASK_P(SDmaProgress, pidx) | \
879
INT_MASK_PM(SDmaCleanupDone, pidx))
880
881
/* Interrupt bits that are common to a device */
882
/* currently unused: QIB_I_SPIOSENT */
883
#define QIB_I_C_BITSEXTANT \
884
(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
885
QIB_I_SPIOSENT | \
886
QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
887
888
#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
889
QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
890
891
/*
892
* Error bits that are "per port".
893
*/
894
#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
895
#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
896
#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
897
#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
898
#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
899
#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
900
#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
901
#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
902
#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
903
#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
904
#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
905
#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
906
#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
907
#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
908
#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
909
#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
910
#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
911
#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
912
#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
913
#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
914
#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
915
#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
916
#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
917
#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
918
#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
919
#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
920
#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
921
#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
922
923
#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
924
#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
925
#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
926
#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
927
#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
928
#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
929
#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
930
#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
931
#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
932
#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
933
#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
934
935
/* Error bits that are common to a device */
936
#define QIB_E_RESET ERR_MASK(ResetNegated)
937
#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
938
#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
939
940
941
/*
942
* Per chip (rather than per-port) errors. Most either do
943
* nothing but trigger a print (because they self-recover, or
944
* always occur in tandem with other errors that handle the
945
* issue), or indicate errors with no recovery,
946
* but we want to know that they happened.
947
*/
948
#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
949
#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
950
#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
951
#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
952
#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
953
#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
954
#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
955
#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
956
957
/* SDMA chip errors (not per port)
958
* QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
959
* the SDMAHALT error immediately, so we just print the dup error via the
960
* E_AUTO mechanism. This is true of most of the per-port fatal errors
961
* as well, but since this is port-independent, by definition, it's
962
* handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
963
* packet send errors, and so are handled in the same manner as other
964
* per-packet errors.
965
*/
966
#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
967
#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
968
#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
969
970
/*
971
* Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
972
* it is used to print "common" packet errors.
973
*/
974
#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
975
QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
976
QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
977
QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
978
QIB_E_P_REBP)
979
980
/* Error bits that are packet-related (Receive, per-port) */
981
#define QIB_E_P_RPKTERRS (\
982
QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
983
QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
984
QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
985
QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
986
QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
987
QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
988
989
/*
990
* Error bits that are Send-related (per port)
991
* (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
992
* All of these potentially need to have a buffer disarmed
993
*/
994
#define QIB_E_P_SPKTERRS (\
995
QIB_E_P_SUNEXP_PKTNUM |\
996
QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
997
QIB_E_P_SMAXPKTLEN |\
998
QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
999
QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1000
QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1001
1002
#define QIB_E_SPKTERRS ( \
1003
QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1004
ERR_MASK_N(SendUnsupportedVLErr) | \
1005
QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1006
1007
#define QIB_E_P_SDMAERRS ( \
1008
QIB_E_P_SDMAHALT | \
1009
QIB_E_P_SDMADESCADDRMISALIGN | \
1010
QIB_E_P_SDMAUNEXPDATA | \
1011
QIB_E_P_SDMAMISSINGDW | \
1012
QIB_E_P_SDMADWEN | \
1013
QIB_E_P_SDMARPYTAG | \
1014
QIB_E_P_SDMA1STDESC | \
1015
QIB_E_P_SDMABASE | \
1016
QIB_E_P_SDMATAILOUTOFBOUND | \
1017
QIB_E_P_SDMAOUTOFBOUND | \
1018
QIB_E_P_SDMAGENMISMATCH)
1019
1020
/*
1021
* This sets some bits more than once, but makes it more obvious which
1022
* bits are not handled under other categories, and the repeat definition
1023
* is not a problem.
1024
*/
1025
#define QIB_E_P_BITSEXTANT ( \
1026
QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1027
QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1028
QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1029
QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1030
)
1031
1032
/*
1033
* These are errors that can occur when the link
1034
* changes state while a packet is being sent or received. This doesn't
1035
* cover things like EBP or VCRC that can be the result of the sender
1036
* having the link change state, so we receive a "known bad" packet.
1037
* All of these are "per port", so renamed:
1038
*/
1039
#define QIB_E_P_LINK_PKTERRS (\
1040
QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1041
QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1042
QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1043
QIB_E_P_RUNEXPCHAR)
1044
1045
/*
1046
* This sets some bits more than once, but makes it more obvious which
1047
* bits are not handled under other categories (such as QIB_E_SPKTERRS),
1048
* and the repeat definition is not a problem.
1049
*/
1050
#define QIB_E_C_BITSEXTANT (\
1051
QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1052
QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1053
QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1054
1055
/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1056
#define E_SPKT_ERRS_IGNORE 0
1057
1058
#define QIB_EXTS_MEMBIST_DISABLED \
1059
SYM_MASK(EXTStatus, MemBISTDisabled)
1060
#define QIB_EXTS_MEMBIST_ENDTEST \
1061
SYM_MASK(EXTStatus, MemBISTEndTest)
1062
1063
#define QIB_E_SPIOARMLAUNCH \
1064
ERR_MASK(SendArmLaunchErr)
1065
1066
#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1067
#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1068
1069
/*
1070
* IBTA_1_2 is set when multiple speeds are enabled (normal),
1071
* and also if forced QDR (only QDR enabled). It's enabled for the
1072
* forced QDR case so that scrambling will be enabled by the TS3
1073
* exchange, when supported by both sides of the link.
1074
*/
1075
#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1076
#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1077
#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1078
#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1079
#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1080
#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1081
SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1082
#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1083
1084
#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1085
#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1086
1087
#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1088
#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1089
#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1090
1091
#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1092
#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1093
#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1094
SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1095
#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1096
SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1097
#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1098
1099
#define IBA7322_REDIRECT_VEC_PER_REG 12
1100
1101
#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1102
#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1103
#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1104
#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1105
#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1106
1107
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1108
1109
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1110
.msg = #fldname }
1111
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1112
fldname##Mask##_##port), .msg = #fldname }
1113
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1114
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1115
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1116
HWE_AUTO(PCIESerdesPClkNotDetect),
1117
HWE_AUTO(PowerOnBISTFailed),
1118
HWE_AUTO(TempsenseTholdReached),
1119
HWE_AUTO(MemoryErr),
1120
HWE_AUTO(PCIeBusParityErr),
1121
HWE_AUTO(PcieCplTimeout),
1122
HWE_AUTO(PciePoisonedTLP),
1123
HWE_AUTO_P(SDmaMemReadErr, 1),
1124
HWE_AUTO_P(SDmaMemReadErr, 0),
1125
HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1126
HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1127
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1128
HWE_AUTO(statusValidNoEop),
1129
HWE_AUTO(LATriggered),
1130
{ .mask = 0 }
1131
};
1132
1133
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1134
.msg = #fldname }
1135
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1136
.msg = #fldname }
1137
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1138
E_AUTO(ResetNegated),
1139
E_AUTO(HardwareErr),
1140
E_AUTO(InvalidAddrErr),
1141
E_AUTO(SDmaVL15Err),
1142
E_AUTO(SBufVL15MisUseErr),
1143
E_AUTO(InvalidEEPCmd),
1144
E_AUTO(RcvContextShareErr),
1145
E_AUTO(SendVLMismatchErr),
1146
E_AUTO(SendArmLaunchErr),
1147
E_AUTO(SendSpecialTriggerErr),
1148
E_AUTO(SDmaWrongPortErr),
1149
E_AUTO(SDmaBufMaskDuplicateErr),
1150
E_AUTO(RcvHdrFullErr),
1151
E_AUTO(RcvEgrFullErr),
1152
{ .mask = 0 }
1153
};
1154
1155
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1156
E_P_AUTO(IBStatusChanged),
1157
E_P_AUTO(SHeadersErr),
1158
E_P_AUTO(VL15BufMisuseErr),
1159
/*
1160
* SDmaHaltErr is not really an error, so give it a clearer message:
1161
*/
1162
{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
1163
E_P_AUTO(SDmaDescAddrMisalignErr),
1164
E_P_AUTO(SDmaUnexpDataErr),
1165
E_P_AUTO(SDmaMissingDwErr),
1166
E_P_AUTO(SDmaDwEnErr),
1167
E_P_AUTO(SDmaRpyTagErr),
1168
E_P_AUTO(SDma1stDescErr),
1169
E_P_AUTO(SDmaBaseErr),
1170
E_P_AUTO(SDmaTailOutOfBoundErr),
1171
E_P_AUTO(SDmaOutOfBoundErr),
1172
E_P_AUTO(SDmaGenMismatchErr),
1173
E_P_AUTO(SendBufMisuseErr),
1174
E_P_AUTO(SendUnsupportedVLErr),
1175
E_P_AUTO(SendUnexpectedPktNumErr),
1176
E_P_AUTO(SendDroppedDataPktErr),
1177
E_P_AUTO(SendDroppedSmpPktErr),
1178
E_P_AUTO(SendPktLenErr),
1179
E_P_AUTO(SendUnderRunErr),
1180
E_P_AUTO(SendMaxPktLenErr),
1181
E_P_AUTO(SendMinPktLenErr),
1182
E_P_AUTO(RcvIBLostLinkErr),
1183
E_P_AUTO(RcvHdrErr),
1184
E_P_AUTO(RcvHdrLenErr),
1185
E_P_AUTO(RcvBadTidErr),
1186
E_P_AUTO(RcvBadVersionErr),
1187
E_P_AUTO(RcvIBFlowErr),
1188
E_P_AUTO(RcvEBPErr),
1189
E_P_AUTO(RcvUnsupportedVLErr),
1190
E_P_AUTO(RcvUnexpectedCharErr),
1191
E_P_AUTO(RcvShortPktLenErr),
1192
E_P_AUTO(RcvLongPktLenErr),
1193
E_P_AUTO(RcvMaxPktLenErr),
1194
E_P_AUTO(RcvMinPktLenErr),
1195
E_P_AUTO(RcvICRCErr),
1196
E_P_AUTO(RcvVCRCErr),
1197
E_P_AUTO(RcvFormatErr),
1198
{ .mask = 0 }
1199
};
1200
1201
/*
1202
* Below generates "auto-message" for interrupts not specific to any port or
1203
* context
1204
*/
1205
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1206
.msg = #fldname }
1207
/* Below generates "auto-message" for interrupts specific to a port */
1208
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1209
SYM_LSB(IntMask, fldname##Mask##_0), \
1210
SYM_LSB(IntMask, fldname##Mask##_1)), \
1211
.msg = #fldname "_P" }
1212
/* For some reason, the SerDesTrimDone bits are reversed */
1213
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1214
SYM_LSB(IntMask, fldname##Mask##_1), \
1215
SYM_LSB(IntMask, fldname##Mask##_0)), \
1216
.msg = #fldname "_P" }
1217
/*
1218
* Below generates "auto-message" for interrupts specific to a context,
1219
* with ctxt-number appended
1220
*/
1221
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1222
SYM_LSB(IntMask, fldname##0IntMask), \
1223
SYM_LSB(IntMask, fldname##17IntMask)), \
1224
.msg = #fldname "_C"}
1225
1226
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1227
INTR_AUTO_P(SDmaInt),
1228
INTR_AUTO_P(SDmaProgressInt),
1229
INTR_AUTO_P(SDmaIdleInt),
1230
INTR_AUTO_P(SDmaCleanupDone),
1231
INTR_AUTO_C(RcvUrg),
1232
INTR_AUTO_P(ErrInt),
1233
INTR_AUTO(ErrInt), /* non-port-specific errs */
1234
INTR_AUTO(AssertGPIOInt),
1235
INTR_AUTO_P(SendDoneInt),
1236
INTR_AUTO(SendBufAvailInt),
1237
INTR_AUTO_C(RcvAvail),
1238
{ .mask = 0 }
1239
};
1240
1241
#define TXSYMPTOM_AUTO_P(fldname) \
1242
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
1243
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1244
TXSYMPTOM_AUTO_P(NonKeyPacket),
1245
TXSYMPTOM_AUTO_P(GRHFail),
1246
TXSYMPTOM_AUTO_P(PkeyFail),
1247
TXSYMPTOM_AUTO_P(QPFail),
1248
TXSYMPTOM_AUTO_P(SLIDFail),
1249
TXSYMPTOM_AUTO_P(RawIPV6),
1250
TXSYMPTOM_AUTO_P(PacketTooSmall),
1251
{ .mask = 0 }
1252
};
1253
1254
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1255
1256
/*
1257
* Called when we might have an error that is specific to a particular
1258
* PIO buffer, and may need to cancel that buffer, so it can be re-used,
1259
* because we don't need to force the update of pioavail
1260
*/
1261
static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1262
{
1263
struct qib_devdata *dd = ppd->dd;
1264
u32 i;
1265
int any;
1266
u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1267
u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1268
unsigned long sbuf[4];
1269
1270
/*
1271
* It's possible that sendbuffererror could have bits set; might
1272
* have already done this as a result of hardware error handling.
1273
*/
1274
any = 0;
1275
for (i = 0; i < regcnt; ++i) {
1276
sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1277
if (sbuf[i]) {
1278
any = 1;
1279
qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1280
}
1281
}
1282
1283
if (any)
1284
qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1285
}
1286
1287
/* No txe_recover yet, if ever */
1288
1289
/* No decode__errors yet */
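/*
 * Worked example (illustrative values): with a table entry
 * { .mask = 0x18, .msg = "Foo" } and errs == 0x10, err_decode() below
 * emits "Foo_1": the message name plus the index of the set bit within
 * the multi-bit mask.
 */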
1290
static void err_decode(char *msg, size_t len, u64 errs,
1291
const struct qib_hwerror_msgs *msp)
1292
{
1293
u64 these, lmask;
1294
int took, multi, n = 0;
1295
1296
while (msp && msp->mask) {
1297
multi = (msp->mask & (msp->mask - 1));
1298
while (errs & msp->mask) {
1299
these = (errs & msp->mask);
1300
lmask = (these & (these - 1)) ^ these;
1301
if (len) {
1302
if (n++) {
1303
/* separate the strings */
1304
*msg++ = ',';
1305
len--;
1306
}
1307
took = scnprintf(msg, len, "%s", msp->msg);
1308
len -= took;
1309
msg += took;
1310
}
1311
errs &= ~lmask;
1312
if (len && multi) {
1313
/* More than one bit in this mask */
1314
int idx = -1;
1315
1316
while (lmask & msp->mask) {
1317
++idx;
1318
lmask >>= 1;
1319
}
1320
took = scnprintf(msg, len, "_%d", idx);
1321
len -= took;
1322
msg += took;
1323
}
1324
}
1325
++msp;
1326
}
1327
/* If some bits are left, show in hex. */
1328
if (len && errs)
1329
snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1330
(unsigned long long) errs);
1331
}
1332
1333
/* only called if r1 set */
1334
static void flush_fifo(struct qib_pportdata *ppd)
1335
{
1336
struct qib_devdata *dd = ppd->dd;
1337
u32 __iomem *piobuf;
1338
u32 bufn;
1339
u32 *hdr;
1340
u64 pbc;
1341
const unsigned hdrwords = 7;
1342
static struct qib_ib_header ibhdr = {
1343
.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1344
.lrh[1] = IB_LID_PERMISSIVE,
1345
.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1346
.lrh[3] = IB_LID_PERMISSIVE,
1347
.u.oth.bth[0] = cpu_to_be32(
1348
(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1349
.u.oth.bth[1] = cpu_to_be32(0),
1350
.u.oth.bth[2] = cpu_to_be32(0),
1351
.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1352
.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1353
};
1354
1355
/*
1356
* Send a dummy VL15 packet to flush the launch FIFO.
1357
* This will not actually be sent since the TxeBypassIbc bit is set.
1358
*/
1359
pbc = PBC_7322_VL15_SEND |
1360
(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1361
(hdrwords + SIZE_OF_CRC);
1362
piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1363
if (!piobuf)
1364
return;
1365
writeq(pbc, piobuf);
1366
hdr = (u32 *) &ibhdr;
1367
if (dd->flags & QIB_PIO_FLUSH_WC) {
1368
qib_flush_wc();
1369
qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1370
qib_flush_wc();
1371
__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1372
qib_flush_wc();
1373
} else
1374
qib_pio_copy(piobuf + 2, hdr, hdrwords);
1375
qib_sendbuf_done(dd, bufn);
1376
}
1377
1378
/*
1379
* This is called with interrupts disabled and sdma_lock held.
1380
*/
1381
static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1382
{
1383
struct qib_devdata *dd = ppd->dd;
1384
u64 set_sendctrl = 0;
1385
u64 clr_sendctrl = 0;
1386
1387
if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1388
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1389
else
1390
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1391
1392
if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1393
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1394
else
1395
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1396
1397
if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1398
set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1399
else
1400
clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1401
1402
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1403
set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1404
SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1405
SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1406
else
1407
clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1408
SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1409
SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1410
1411
spin_lock(&dd->sendctrl_lock);
1412
1413
/* If we are draining everything, block sends first */
1414
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1415
ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1416
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1417
qib_write_kreg(dd, kr_scratch, 0);
1418
}
1419
1420
ppd->p_sendctrl |= set_sendctrl;
1421
ppd->p_sendctrl &= ~clr_sendctrl;
1422
1423
if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1424
qib_write_kreg_port(ppd, krp_sendctrl,
1425
ppd->p_sendctrl |
1426
SYM_MASK(SendCtrl_0, SDmaCleanup));
1427
else
1428
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1429
qib_write_kreg(dd, kr_scratch, 0);
1430
1431
if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1432
ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1433
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1434
qib_write_kreg(dd, kr_scratch, 0);
1435
}
1436
1437
spin_unlock(&dd->sendctrl_lock);
1438
1439
if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1440
flush_fifo(ppd);
1441
}
1442
1443
static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1444
{
1445
__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1446
}
1447
1448
static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1449
{
1450
/*
1451
* Set SendDmaLenGen and clear and set
1452
* the MSB of the generation count to enable generation checking
1453
* and load the internal generation counter.
1454
*/
1455
qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1456
qib_write_kreg_port(ppd, krp_senddmalengen,
1457
ppd->sdma_descq_cnt |
1458
(1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1459
}
1460
1461
/*
1462
* Must be called with sdma_lock held, or before init finished.
1463
*/
1464
static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1465
{
1466
/* Commit writes to memory and advance the tail on the chip */
1467
wmb();
1468
ppd->sdma_descq_tail = tail;
1469
qib_write_kreg_port(ppd, krp_senddmatail, tail);
1470
}
1471
1472
/*
1473
* This is called with interrupts disabled and sdma_lock held.
1474
*/
1475
static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1476
{
1477
/*
1478
* Drain all FIFOs.
1479
* The hardware doesn't require this but we do it so that verbs
1480
* and user applications don't wait for link active to send stale
1481
* data.
1482
*/
1483
sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1484
1485
qib_sdma_7322_setlengen(ppd);
1486
qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1487
ppd->sdma_head_dma[0] = 0;
1488
qib_7322_sdma_sendctrl(ppd,
1489
ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1490
}
1491
1492
#define DISABLES_SDMA ( \
1493
QIB_E_P_SDMAHALT | \
1494
QIB_E_P_SDMADESCADDRMISALIGN | \
1495
QIB_E_P_SDMAMISSINGDW | \
1496
QIB_E_P_SDMADWEN | \
1497
QIB_E_P_SDMARPYTAG | \
1498
QIB_E_P_SDMA1STDESC | \
1499
QIB_E_P_SDMABASE | \
1500
QIB_E_P_SDMATAILOUTOFBOUND | \
1501
QIB_E_P_SDMAOUTOFBOUND | \
1502
QIB_E_P_SDMAGENMISMATCH)
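/*
 * As the names suggest, the bits grouped above are the per-port SDMA
 * error conditions that leave the send DMA engine halted; port SDMA
 * errors are routed through sdma_7322_p_errors() below.
 */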
1503
1504
static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1505
{
1506
unsigned long flags;
1507
struct qib_devdata *dd = ppd->dd;
1508
1509
errs &= QIB_E_P_SDMAERRS;
1510
1511
if (errs & QIB_E_P_SDMAUNEXPDATA)
1512
qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1513
ppd->port);
1514
1515
spin_lock_irqsave(&ppd->sdma_lock, flags);
1516
1517
switch (ppd->sdma_state.current_state) {
1518
case qib_sdma_state_s00_hw_down:
1519
break;
1520
1521
case qib_sdma_state_s10_hw_start_up_wait:
1522
if (errs & QIB_E_P_SDMAHALT)
1523
__qib_sdma_process_event(ppd,
1524
qib_sdma_event_e20_hw_started);
1525
break;
1526
1527
case qib_sdma_state_s20_idle:
1528
break;
1529
1530
case qib_sdma_state_s30_sw_clean_up_wait:
1531
break;
1532
1533
case qib_sdma_state_s40_hw_clean_up_wait:
1534
if (errs & QIB_E_P_SDMAHALT)
1535
__qib_sdma_process_event(ppd,
1536
qib_sdma_event_e50_hw_cleaned);
1537
break;
1538
1539
case qib_sdma_state_s50_hw_halt_wait:
1540
if (errs & QIB_E_P_SDMAHALT)
1541
__qib_sdma_process_event(ppd,
1542
qib_sdma_event_e60_hw_halted);
1543
break;
1544
1545
case qib_sdma_state_s99_running:
1546
__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1547
__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1548
break;
1549
}
1550
1551
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1552
}
1553
1554
/*
1555
* handle per-device errors (not per-port errors)
1556
*/
1557
static noinline void handle_7322_errors(struct qib_devdata *dd)
1558
{
1559
char *msg;
1560
u64 iserr = 0;
1561
u64 errs;
1562
u64 mask;
1563
int log_idx;
1564
1565
qib_stats.sps_errints++;
1566
errs = qib_read_kreg64(dd, kr_errstatus);
1567
if (!errs) {
1568
qib_devinfo(dd->pcidev, "device error interrupt, "
1569
"but no error bits set!\n");
1570
goto done;
1571
}
1572
1573
/* don't report errors that are masked */
1574
errs &= dd->cspec->errormask;
1575
msg = dd->cspec->emsgbuf;
1576
1577
/* do these first, they are most important */
1578
if (errs & QIB_E_HARDWARE) {
1579
*msg = '\0';
1580
qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1581
} else
1582
for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1583
if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1584
qib_inc_eeprom_err(dd, log_idx, 1);
1585
1586
if (errs & QIB_E_SPKTERRS) {
1587
qib_disarm_7322_senderrbufs(dd->pport);
1588
qib_stats.sps_txerrs++;
1589
} else if (errs & QIB_E_INVALIDADDR)
1590
qib_stats.sps_txerrs++;
1591
else if (errs & QIB_E_ARMLAUNCH) {
1592
qib_stats.sps_txerrs++;
1593
qib_disarm_7322_senderrbufs(dd->pport);
1594
}
1595
qib_write_kreg(dd, kr_errclear, errs);
1596
1597
/*
1598
* The ones we mask off are handled specially below
1599
* or above. Also mask SDMADISABLED by default as it
1600
* is too chatty.
1601
*/
1602
mask = QIB_E_HARDWARE;
1603
*msg = '\0';
1604
1605
err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1606
qib_7322error_msgs);
1607
1608
/*
1609
* Getting reset is a tragedy for all ports. Mark the device
1610
* _and_ the ports as "offline" in way meaningful to each.
1611
*/
1612
if (errs & QIB_E_RESET) {
1613
int pidx;
1614
1615
qib_dev_err(dd, "Got reset, requires re-init "
1616
"(unload and reload driver)\n");
1617
dd->flags &= ~QIB_INITTED; /* needs re-init */
1618
/* mark as having had error */
1619
*dd->devstatusp |= QIB_STATUS_HWERROR;
1620
for (pidx = 0; pidx < dd->num_pports; ++pidx)
1621
if (dd->pport[pidx].link_speed_supported)
1622
*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1623
}
1624
1625
if (*msg && iserr)
1626
qib_dev_err(dd, "%s error\n", msg);
1627
1628
/*
1629
* If there were hdrq or egrfull errors, wake up any processes
1630
* waiting in poll. We used to try to check which contexts had
1631
* the overflow, but given the cost of that and the chip reads
1632
* to support it, it's better to just wake everybody up if we
1633
* get an overflow; waiters can poll again if it's not them.
1634
*/
1635
if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1636
qib_handle_urcv(dd, ~0U);
1637
if (errs & ERR_MASK(RcvEgrFullErr))
1638
qib_stats.sps_buffull++;
1639
else
1640
qib_stats.sps_hdrfull++;
1641
}
1642
1643
done:
1644
return;
1645
}
1646
1647
static void reenable_chase(unsigned long opaque)
1648
{
1649
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1650
1651
ppd->cpspec->chase_timer.expires = 0;
1652
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1653
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1654
}
1655
1656
static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1657
{
1658
ppd->cpspec->chase_end = 0;
1659
1660
if (!qib_chase)
1661
return;
1662
1663
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1664
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1665
ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1666
add_timer(&ppd->cpspec->chase_timer);
1667
}
1668
1669
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1670
{
1671
u8 ibclt;
1672
u64 tnow;
1673
1674
ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1675
1676
/*
1677
* Detect and handle the state chase issue, where we can
1678
* get stuck if we are unlucky on timing on both sides of
1679
* the link. If we are, we disable, set a timer, and
1680
* then re-enable.
1681
*/
1682
switch (ibclt) {
1683
case IB_7322_LT_STATE_CFGRCVFCFG:
1684
case IB_7322_LT_STATE_CFGWAITRMT:
1685
case IB_7322_LT_STATE_TXREVLANES:
1686
case IB_7322_LT_STATE_CFGENH:
1687
tnow = get_jiffies_64();
1688
if (ppd->cpspec->chase_end &&
1689
time_after64(tnow, ppd->cpspec->chase_end))
1690
disable_chase(ppd, tnow, ibclt);
1691
else if (!ppd->cpspec->chase_end)
1692
ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1693
break;
1694
default:
1695
ppd->cpspec->chase_end = 0;
1696
break;
1697
}
1698
1699
if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1700
ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1701
ibclt == IB_7322_LT_STATE_LINKUP) &&
1702
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1703
force_h1(ppd);
1704
ppd->cpspec->qdr_reforce = 1;
1705
if (!ppd->dd->cspec->r1)
1706
serdes_7322_los_enable(ppd, 0);
1707
} else if (ppd->cpspec->qdr_reforce &&
1708
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1709
(ibclt == IB_7322_LT_STATE_CFGENH ||
1710
ibclt == IB_7322_LT_STATE_CFGIDLE ||
1711
ibclt == IB_7322_LT_STATE_LINKUP))
1712
force_h1(ppd);
1713
1714
if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1715
ppd->link_speed_enabled == QIB_IB_QDR &&
1716
(ibclt == IB_7322_LT_STATE_CFGTEST ||
1717
ibclt == IB_7322_LT_STATE_CFGENH ||
1718
(ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1719
ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1720
adj_tx_serdes(ppd);
1721
1722
if (ibclt != IB_7322_LT_STATE_LINKUP) {
1723
u8 ltstate = qib_7322_phys_portstate(ibcst);
1724
u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1725
LinkTrainingState);
1726
if (!ppd->dd->cspec->r1 &&
1727
pibclt == IB_7322_LT_STATE_LINKUP &&
1728
ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1729
ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1730
ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1731
ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1732
/* If the link went down (but not into recovery),
* turn LOS back on */
1734
serdes_7322_los_enable(ppd, 1);
1735
if (!ppd->cpspec->qdr_dfe_on &&
1736
ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1737
ppd->cpspec->qdr_dfe_on = 1;
1738
ppd->cpspec->qdr_dfe_time = 0;
1739
/* On link down, reenable QDR adaptation */
1740
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1741
ppd->dd->cspec->r1 ?
1742
QDR_STATIC_ADAPT_DOWN_R1 :
1743
QDR_STATIC_ADAPT_DOWN);
1744
printk(KERN_INFO QIB_DRV_NAME
1745
" IB%u:%u re-enabled QDR adaptation "
1746
"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
1747
}
1748
}
1749
}
1750
1751
static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1752
1753
/*
1754
* This is per-pport error handling.
1755
* It will likely get its own MSIx interrupt (one for each port,
1756
* although just a single handler).
1757
*/
1758
static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1759
{
1760
char *msg;
1761
u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1762
struct qib_devdata *dd = ppd->dd;
1763
1764
/* do this as soon as possible */
1765
fmask = qib_read_kreg64(dd, kr_act_fmask);
1766
if (!fmask)
1767
check_7322_rxe_status(ppd);
1768
1769
errs = qib_read_kreg_port(ppd, krp_errstatus);
1770
if (!errs)
1771
qib_devinfo(dd->pcidev,
1772
"Port%d error interrupt, but no error bits set!\n",
1773
ppd->port);
1774
if (!fmask)
1775
errs &= ~QIB_E_P_IBSTATUSCHANGED;
1776
if (!errs)
1777
goto done;
1778
1779
msg = ppd->cpspec->epmsgbuf;
1780
*msg = '\0';
1781
1782
if (errs & ~QIB_E_P_BITSEXTANT) {
1783
err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1784
errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1785
if (!*msg)
1786
snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1787
"no others");
1788
qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1789
" errors 0x%016Lx set (and %s)\n",
1790
(errs & ~QIB_E_P_BITSEXTANT), msg);
1791
*msg = '\0';
1792
}
1793
1794
if (errs & QIB_E_P_SHDR) {
1795
u64 symptom;
1796
1797
/* determine cause, then write to clear */
1798
symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1799
qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1800
err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1801
hdrchk_msgs);
1802
*msg = '\0';
1803
/* senderrbuf cleared in SPKTERRS below */
1804
}
1805
1806
if (errs & QIB_E_P_SPKTERRS) {
1807
if ((errs & QIB_E_P_LINK_PKTERRS) &&
1808
!(ppd->lflags & QIBL_LINKACTIVE)) {
1809
/*
1810
* This can happen when trying to bring the link
1811
* up, but the IB link changes state at the "wrong"
1812
* time. The IB logic then complains that the packet
1813
* isn't valid. We don't want to confuse people, so
1814
* we just don't print them, except at debug
1815
*/
1816
err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1817
(errs & QIB_E_P_LINK_PKTERRS),
1818
qib_7322p_error_msgs);
1819
*msg = '\0';
1820
ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1821
}
1822
qib_disarm_7322_senderrbufs(ppd);
1823
} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1824
!(ppd->lflags & QIBL_LINKACTIVE)) {
1825
/*
1826
* This can happen when SMA is trying to bring the link
1827
* up, but the IB link changes state at the "wrong" time.
1828
* The IB logic then complains that the packet isn't
1829
* valid. We don't want to confuse people, so we just
1830
* don't print them, except at debug
1831
*/
1832
err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1833
qib_7322p_error_msgs);
1834
ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1835
*msg = '\0';
1836
}
1837
1838
qib_write_kreg_port(ppd, krp_errclear, errs);
1839
1840
errs &= ~ignore_this_time;
1841
if (!errs)
1842
goto done;
1843
1844
if (errs & QIB_E_P_RPKTERRS)
1845
qib_stats.sps_rcverrs++;
1846
if (errs & QIB_E_P_SPKTERRS)
1847
qib_stats.sps_txerrs++;
1848
1849
iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1850
1851
if (errs & QIB_E_P_SDMAERRS)
1852
sdma_7322_p_errors(ppd, errs);
1853
1854
if (errs & QIB_E_P_IBSTATUSCHANGED) {
1855
u64 ibcs;
1856
u8 ltstate;
1857
1858
ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1859
ltstate = qib_7322_phys_portstate(ibcs);
1860
1861
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1862
handle_serdes_issues(ppd, ibcs);
1863
if (!(ppd->cpspec->ibcctrl_a &
1864
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1865
/*
1866
* We got our interrupt, so init code should be
1867
* happy and not try alternatives. Now squelch
1868
* other "chatter" from link-negotiation (pre Init)
1869
*/
1870
ppd->cpspec->ibcctrl_a |=
1871
SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1872
qib_write_kreg_port(ppd, krp_ibcctrl_a,
1873
ppd->cpspec->ibcctrl_a);
1874
}
1875
1876
/* Update our picture of width and speed from chip */
1877
ppd->link_width_active =
1878
(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1879
IB_WIDTH_4X : IB_WIDTH_1X;
1880
ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1881
LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1882
SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1883
QIB_IB_DDR : QIB_IB_SDR;
1884
1885
if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1886
IB_PHYSPORTSTATE_DISABLED)
1887
qib_set_ib_7322_lstate(ppd, 0,
1888
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1889
else
1890
/*
1891
* Since going into a recovery state causes the link
1892
* state to go down and since recovery is transitory,
1893
* it is better if we "miss" ever seeing the link
1894
* training state go into recovery (i.e., ignore this
1895
* transition for link state special handling purposes)
1896
* without updating lastibcstat.
1897
*/
1898
if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1899
ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1900
ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1901
ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1902
qib_handle_e_ibstatuschanged(ppd, ibcs);
1903
}
1904
if (*msg && iserr)
1905
qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1906
1907
if (ppd->state_wanted & ppd->lflags)
1908
wake_up_interruptible(&ppd->state_wait);
1909
done:
1910
return;
1911
}
1912
1913
/* enable/disable chip from delivering interrupts */
1914
static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1915
{
1916
if (enable) {
1917
if (dd->flags & QIB_BADINTR)
1918
return;
1919
qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1920
/* cause any pending enabled interrupts to be re-delivered */
1921
qib_write_kreg(dd, kr_intclear, 0ULL);
1922
if (dd->cspec->num_msix_entries) {
1923
/* and same for MSIx */
1924
u64 val = qib_read_kreg64(dd, kr_intgranted);
1925
if (val)
1926
qib_write_kreg(dd, kr_intgranted, val);
1927
}
1928
} else
1929
qib_write_kreg(dd, kr_intmask, 0ULL);
1930
}
1931
1932
/*
1933
* Try to cleanup as much as possible for anything that might have gone
1934
* wrong while in freeze mode, such as pio buffers being written by user
1935
* processes (causing armlaunch), send errors due to going into freeze mode,
1936
* etc., and try to avoid causing extra interrupts while doing so.
1937
* Forcibly update the in-memory pioavail register copies after cleanup
1938
* because the chip won't do it while in freeze mode (the register values
1939
* themselves are kept correct).
1940
* Make sure that we don't lose any important interrupts by using the chip
1941
* feature that says that writing 0 to a bit in *clear that is set in
1942
* *status will cause an interrupt to be generated again (if allowed by
1943
* the *mask value).
1944
* This is in chip-specific code because of all of the register accesses,
1945
* even though the details are similar on most chips.
1946
*/
1947
static void qib_7322_clear_freeze(struct qib_devdata *dd)
1948
{
1949
int pidx;
1950
1951
/* disable error interrupts, to avoid confusion */
1952
qib_write_kreg(dd, kr_errmask, 0ULL);
1953
1954
for (pidx = 0; pidx < dd->num_pports; ++pidx)
1955
if (dd->pport[pidx].link_speed_supported)
1956
qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1957
0ULL);
1958
1959
/* also disable interrupts; errormask is sometimes overwritten */
1960
qib_7322_set_intr_state(dd, 0);
1961
1962
/* clear the freeze, and be sure chip saw it */
1963
qib_write_kreg(dd, kr_control, dd->control);
1964
qib_read_kreg32(dd, kr_scratch);
1965
1966
/*
1967
* Force new interrupt if any hwerr, error or interrupt bits are
1968
* still set, and clear "safe" send packet errors related to freeze
1969
* and cancelling sends. Re-enable error interrupts before possible
1970
* force of re-interrupt on pending interrupts.
1971
*/
1972
qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1973
qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1974
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1975
/* We need to purge per-port errs and reset mask, too */
1976
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1977
if (!dd->pport[pidx].link_speed_supported)
1978
continue;
1979
qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
1980
qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
1981
}
1982
qib_7322_set_intr_state(dd, 1);
1983
}
1984
1985
/* no error handling to speak of */
1986
/**
1987
* qib_7322_handle_hwerrors - display hardware errors.
1988
* @dd: the qlogic_ib device
1989
* @msg: the output buffer
1990
* @msgl: the size of the output buffer
1991
*
1992
* Most hardware errors are catastrophic, but for right now,
* we'll print them and continue. We reuse the same message buffer as
* qib_handle_errors() to avoid excessive stack usage.
1996
*/
1997
static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
1998
size_t msgl)
1999
{
2000
u64 hwerrs;
2001
u32 ctrl;
2002
int isfatal = 0;
2003
2004
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2005
if (!hwerrs)
2006
goto bail;
2007
if (hwerrs == ~0ULL) {
2008
qib_dev_err(dd, "Read of hardware error status failed "
2009
"(all bits set); ignoring\n");
2010
goto bail;
2011
}
2012
qib_stats.sps_hwerrs++;
2013
2014
/* Always clear the error status register, except BIST fail */
2015
qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2016
~HWE_MASK(PowerOnBISTFailed));
2017
2018
hwerrs &= dd->cspec->hwerrmask;
2019
2020
/* no EEPROM logging, yet */
2021
2022
if (hwerrs)
2023
qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
2024
"(cleared)\n", (unsigned long long) hwerrs);
2025
2026
ctrl = qib_read_kreg32(dd, kr_control);
2027
if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2028
/*
2029
* No recovery yet...
2030
*/
2031
if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2032
dd->cspec->stay_in_freeze) {
2033
/*
2034
* If any are set that we aren't ignoring, only make the
* complaint once, in case it's stuck or recurring,
* and we get here multiple times.
* Force link down, so the switch knows, and
2038
* LEDs are turned off.
2039
*/
2040
if (dd->flags & QIB_INITTED)
2041
isfatal = 1;
2042
} else
2043
qib_7322_clear_freeze(dd);
2044
}
2045
2046
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2047
isfatal = 1;
2048
strlcpy(msg, "[Memory BIST test failed, "
2049
"InfiniPath hardware unusable]", msgl);
2050
/* ignore from now on, so disable until driver reloaded */
2051
dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2052
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2053
}
2054
2055
err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2056
2057
/* Ignore esoteric PLL failures et al. */
2058
2059
qib_dev_err(dd, "%s hardware error\n", msg);
2060
2061
if (isfatal && !dd->diag_client) {
2062
qib_dev_err(dd, "Fatal Hardware Error, no longer"
2063
" usable, SN %.16s\n", dd->serial);
2064
/*
2065
* for /sys status file and user programs to print; if no
2066
* trailing brace is copied, we'll know it was truncated.
2067
*/
2068
if (dd->freezemsg)
2069
snprintf(dd->freezemsg, dd->freezelen,
2070
"{%s}", msg);
2071
qib_disable_after_error(dd);
2072
}
2073
bail:;
2074
}
2075
2076
/**
2077
* qib_7322_init_hwerrors - enable hardware errors
2078
* @dd: the qlogic_ib device
2079
*
2080
* now that we have finished initializing everything that might reasonably
2081
* cause a hardware error, and cleared those error bits as they occur,
2082
* we can enable hardware errors in the mask (potentially enabling
2083
* freeze mode), and enable hardware errors as errors (along with
2084
* everything else) in errormask
2085
*/
2086
static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2087
{
2088
int pidx;
2089
u64 extsval;
2090
2091
extsval = qib_read_kreg64(dd, kr_extstatus);
2092
if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2093
QIB_EXTS_MEMBIST_ENDTEST)))
2094
qib_dev_err(dd, "MemBIST did not complete!\n");
2095
2096
/* never clear BIST failure, so reported on each driver load */
2097
qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2098
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2099
2100
/* clear all */
2101
qib_write_kreg(dd, kr_errclear, ~0ULL);
2102
/* enable errors that are masked, at least this first time. */
2103
qib_write_kreg(dd, kr_errmask, ~0ULL);
2104
dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2105
for (pidx = 0; pidx < dd->num_pports; ++pidx)
2106
if (dd->pport[pidx].link_speed_supported)
2107
qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2108
~0ULL);
2109
}
2110
2111
/*
2112
* Disable and enable the armlaunch error. Used for PIO bandwidth testing
2113
* on chips that are count-based, rather than trigger-based. There is no
2114
* reference counting, but that's also fine, given the intended use.
2115
* Only chip-specific because it's all register accesses
2116
*/
2117
static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2118
{
2119
if (enable) {
2120
qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2121
dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2122
} else
2123
dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2124
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2125
}
2126
2127
/*
2128
* Formerly took parameter <which> in pre-shifted,
2129
* pre-merged form with LinkCmd and LinkInitCmd
2130
* together, and assuming the zero was NOP.
2131
*/
2132
static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2133
u16 linitcmd)
2134
{
2135
u64 mod_wd;
2136
struct qib_devdata *dd = ppd->dd;
2137
unsigned long flags;
2138
2139
if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2140
/*
2141
* If we are told to disable, note that so link-recovery
2142
* code does not attempt to bring us back up.
2143
* Also reset everything that we can, so we start
2144
* completely clean when re-enabled (before we
2145
* actually issue the disable to the IBC)
2146
*/
2147
qib_7322_mini_pcs_reset(ppd);
2148
spin_lock_irqsave(&ppd->lflags_lock, flags);
2149
ppd->lflags |= QIBL_IB_LINK_DISABLED;
2150
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2151
} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2152
/*
2153
* Any other linkinitcmd will lead to LINKDOWN and then
2154
* to INIT (if all is well), so clear flag to let
2155
* link-recovery code attempt to bring us back up.
2156
*/
2157
spin_lock_irqsave(&ppd->lflags_lock, flags);
2158
ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2159
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2160
/*
2161
* Clear status change interrupt reduction so the
2162
* new state is seen.
2163
*/
2164
ppd->cpspec->ibcctrl_a &=
2165
~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2166
}
2167
2168
mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2169
(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2170
2171
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2172
mod_wd);
2173
/* write to chip to prevent back-to-back writes of ibc reg */
2174
qib_write_kreg(dd, kr_scratch, 0);
2175
2176
}
2177
2178
/*
2179
* The total RCV buffer memory is 64KB, used for both ports, and is
2180
* in units of 64 bytes (same as IB flow control credit unit).
2181
* The consumedVL unit in the same registers are in 32 byte units!
2182
* So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2183
* and we can therefore allocate just 9 IB credits for 2 VL15 packets
2184
* in krp_rxcreditvl15, rather than 10.
2185
*/
2186
#define RCV_BUF_UNITSZ 64
2187
#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2188
2189
static void set_vls(struct qib_pportdata *ppd)
2190
{
2191
int i, numvls, totcred, cred_vl, vl0extra;
2192
struct qib_devdata *dd = ppd->dd;
2193
u64 val;
2194
2195
numvls = qib_num_vls(ppd->vls_operational);
2196
2197
/*
2198
* Set up per-VL credits. Below is kluge based on these assumptions:
2199
* 1) port is disabled at the time early_init is called.
2200
* 2) give VL15 17 credits, for two max-plausible packets.
2201
* 3) Give VL0-N the rest, with any rounding excess used for VL0
2202
*/
2203
/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2204
totcred = NUM_RCV_BUF_UNITS(dd);
2205
cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
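/*
 * Worked out: 2 * 288 = 576 bytes, which rounds up to 9 of the 64-byte
 * buffer units, matching the "9 IB credits for 2 VL15 packets" noted in
 * the comment above NUM_RCV_BUF_UNITS().
 */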
2206
totcred -= cred_vl;
2207
qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2208
cred_vl = totcred / numvls;
2209
vl0extra = totcred - cred_vl * numvls;
2210
qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2211
for (i = 1; i < numvls; i++)
2212
qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2213
for (; i < 8; i++) /* no buffer space for other VLs */
2214
qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2215
2216
/* Notify IBC that credits need to be recalculated */
2217
val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2218
val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2219
qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2220
qib_write_kreg(dd, kr_scratch, 0ULL);
2221
val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2222
qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2223
2224
for (i = 0; i < numvls; i++)
2225
val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2226
val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2227
2228
/* Change the number of operational VLs */
2229
ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2230
~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2231
((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2232
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2233
qib_write_kreg(dd, kr_scratch, 0ULL);
2234
}
2235
2236
/*
2237
* The code that deals with actual SerDes is in serdes_7322_init().
2238
* Compared to the code for iba7220, it is minimal.
2239
*/
2240
static int serdes_7322_init(struct qib_pportdata *ppd);
2241
2242
/**
2243
* qib_7322_bringup_serdes - bring up the serdes
2244
* @ppd: physical port on the qlogic_ib device
2245
*/
2246
static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2247
{
2248
struct qib_devdata *dd = ppd->dd;
2249
u64 val, guid, ibc;
2250
unsigned long flags;
2251
int ret = 0;
2252
2253
/*
2254
* SerDes model not in Pd, but still need to
2255
* set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2256
* eventually.
2257
*/
2258
/* Put IBC in reset, sends disabled (should be in reset already) */
2259
ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2260
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2261
qib_write_kreg(dd, kr_scratch, 0ULL);
2262
2263
if (qib_compat_ddr_negotiate) {
2264
ppd->cpspec->ibdeltainprog = 1;
2265
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2266
crp_ibsymbolerr);
2267
ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2268
crp_iblinkerrrecov);
2269
}
2270
2271
/* flowcontrolwatermark is in units of KBytes */
2272
ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2273
/*
2274
* Flow control is sent this often, even if no changes in
2275
* buffer space occur. Units are 128ns for this chip.
2276
* Set to 3usec.
2277
*/
2278
ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2279
/* max error tolerance */
2280
ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2281
/* IB credit flow control. */
2282
ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2283
/*
2284
* set initial max size pkt IBC will send, including ICRC; it's the
2285
* PIO buffer size in dwords, less 1; also see qib_set_mtu()
2286
*/
2287
ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2288
SYM_LSB(IBCCtrlA_0, MaxPktLen);
2289
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2290
2291
/* initially come up waiting for TS1, without sending anything. */
2292
val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2293
QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2294
2295
/*
2296
* Reset the PCS interface to the serdes (and also ibc, which is still
2297
* in reset from above). Writes new value of ibcctrl_a as last step.
2298
*/
2299
qib_7322_mini_pcs_reset(ppd);
2300
qib_write_kreg(dd, kr_scratch, 0ULL);
2301
2302
if (!ppd->cpspec->ibcctrl_b) {
2303
unsigned lse = ppd->link_speed_enabled;
2304
2305
/*
2306
* Not on re-init after reset, establish shadow
2307
* and force initial config.
2308
*/
2309
ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2310
krp_ibcctrl_b);
2311
ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2312
IBA7322_IBC_SPEED_DDR |
2313
IBA7322_IBC_SPEED_SDR |
2314
IBA7322_IBC_WIDTH_AUTONEG |
2315
SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2316
if (lse & (lse - 1)) /* Multiple speeds enabled */
2317
ppd->cpspec->ibcctrl_b |=
2318
(lse << IBA7322_IBC_SPEED_LSB) |
2319
IBA7322_IBC_IBTA_1_2_MASK |
2320
IBA7322_IBC_MAX_SPEED_MASK;
2321
else
2322
ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2323
IBA7322_IBC_SPEED_QDR |
2324
IBA7322_IBC_IBTA_1_2_MASK :
2325
(lse == QIB_IB_DDR) ?
2326
IBA7322_IBC_SPEED_DDR :
2327
IBA7322_IBC_SPEED_SDR;
2328
if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2329
(IB_WIDTH_1X | IB_WIDTH_4X))
2330
ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2331
else
2332
ppd->cpspec->ibcctrl_b |=
2333
ppd->link_width_enabled == IB_WIDTH_4X ?
2334
IBA7322_IBC_WIDTH_4X_ONLY :
2335
IBA7322_IBC_WIDTH_1X_ONLY;
2336
2337
/* always enable these on driver reload, not sticky */
2338
ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2339
IBA7322_IBC_HRTBT_MASK);
2340
}
2341
qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2342
2343
/* setup so we have more time at CFGTEST to change H1 */
2344
val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2345
val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2346
val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2347
qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2348
2349
serdes_7322_init(ppd);
2350
2351
guid = be64_to_cpu(ppd->guid);
2352
if (!guid) {
2353
if (dd->base_guid)
2354
guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2355
ppd->guid = cpu_to_be64(guid);
2356
}
2357
2358
qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2359
/* write to chip to prevent back-to-back writes of ibc reg */
2360
qib_write_kreg(dd, kr_scratch, 0);
2361
2362
/* Enable port */
2363
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2364
set_vls(ppd);
2365
2366
/* be paranoid against later code motion, etc. */
2367
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2368
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2369
qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2370
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2371
2372
/* Hold the link state machine for mezz boards */
2373
if (IS_QMH(dd) || IS_QME(dd))
2374
qib_set_ib_7322_lstate(ppd, 0,
2375
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2376
2377
/* Also enable IBSTATUSCHG interrupt. */
2378
val = qib_read_kreg_port(ppd, krp_errmask);
2379
qib_write_kreg_port(ppd, krp_errmask,
2380
val | ERR_MASK_N(IBStatusChanged));
2381
2382
/* Always zero until we start messing with SerDes for real */
2383
return ret;
2384
}
2385
2386
/**
2387
* qib_7322_mini_quiet_serdes - set serdes to txidle
* @ppd: physical port on the qlogic_ib device
* Called when driver is being unloaded
2390
*/
2391
static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2392
{
2393
u64 val;
2394
unsigned long flags;
2395
2396
qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2397
2398
spin_lock_irqsave(&ppd->lflags_lock, flags);
2399
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2400
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2401
wake_up(&ppd->cpspec->autoneg_wait);
2402
cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2403
if (ppd->dd->cspec->r1)
2404
cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2405
2406
ppd->cpspec->chase_end = 0;
2407
if (ppd->cpspec->chase_timer.data) /* if initted */
2408
del_timer_sync(&ppd->cpspec->chase_timer);
2409
2410
/*
2411
* Despite the name, actually disables IBC as well. Do it when
2412
* we are as sure as possible that no more packets can be
2413
* received, following the down and the PCS reset.
2414
* The actual disabling happens in qib_7322_mini_pcs_reset(),
2415
* along with the PCS being reset.
2416
*/
2417
ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2418
qib_7322_mini_pcs_reset(ppd);
2419
2420
/*
2421
* Update the adjusted counters so the adjustment persists
2422
* across driver reload.
2423
*/
2424
if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2425
ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2426
struct qib_devdata *dd = ppd->dd;
2427
u64 diagc;
2428
2429
/* enable counter writes */
2430
diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2431
qib_write_kreg(dd, kr_hwdiagctrl,
2432
diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2433
2434
if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2435
val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2436
if (ppd->cpspec->ibdeltainprog)
2437
val -= val - ppd->cpspec->ibsymsnap;
2438
val -= ppd->cpspec->ibsymdelta;
2439
write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2440
}
2441
if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2442
val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2443
if (ppd->cpspec->ibdeltainprog)
2444
val -= val - ppd->cpspec->iblnkerrsnap;
2445
val -= ppd->cpspec->iblnkerrdelta;
2446
write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2447
}
2448
if (ppd->cpspec->iblnkdowndelta) {
2449
val = read_7322_creg32_port(ppd, crp_iblinkdown);
2450
val += ppd->cpspec->iblnkdowndelta;
2451
write_7322_creg_port(ppd, crp_iblinkdown, val);
2452
}
2453
/*
2454
* No need to save ibmalfdelta since IB perfcounters
2455
* are cleared on driver reload.
2456
*/
2457
2458
/* and disable counter writes */
2459
qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2460
}
2461
}
2462
2463
/**
2464
* qib_setup_7322_setextled - set the state of the two external LEDs
2465
* @ppd: physical port on the qlogic_ib device
2466
* @on: whether the link is up or not
2467
*
2468
* The exact combo of LEDs if on is true is determined by looking
2469
* at the ibcstatus.
2470
*
2471
* These LEDs indicate the physical and logical state of IB link.
2472
* For this chip (at least with recommended board pinouts), LED1
2473
* is Yellow (logical state) and LED2 is Green (physical state).
2474
*
2475
* Note: We try to match the Mellanox HCA LED behavior as best
2476
* we can. Green indicates physical link state is OK (something is
2477
* plugged in, and we can train).
2478
* Amber indicates the link is logically up (ACTIVE).
2479
* Mellanox further blinks the amber LED to indicate data packet
2480
* activity, but we have no hardware support for that, so it would
2481
* require waking up every 10-20 msecs and checking the counters
2482
* on the chip, and then turning the LED off if appropriate. That's
2483
* visible overhead, so not something we will do.
2484
*/
2485
static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2486
{
2487
struct qib_devdata *dd = ppd->dd;
2488
u64 extctl, ledblink = 0, val;
2489
unsigned long flags;
2490
int yel, grn;
2491
2492
/*
2493
* The diags use the LED to indicate diag info, so we leave
2494
* the external LED alone when the diags are running.
2495
*/
2496
if (dd->diag_client)
2497
return;
2498
2499
/* Allow override of LED display, e.g. for locating the system in a rack */
2500
if (ppd->led_override) {
2501
grn = (ppd->led_override & QIB_LED_PHYS);
2502
yel = (ppd->led_override & QIB_LED_LOG);
2503
} else if (on) {
2504
val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2505
grn = qib_7322_phys_portstate(val) ==
2506
IB_PHYSPORTSTATE_LINKUP;
2507
yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2508
} else {
2509
grn = 0;
2510
yel = 0;
2511
}
2512
2513
spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2514
extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2515
~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2516
if (grn) {
2517
extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2518
/*
2519
* Counts are in chip clock (4ns) periods.
2520
* This is 1/16 sec (66.6ms) on,
2521
* 3/16 sec (187.5 ms) off, with packets rcvd.
2522
*/
2523
ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2524
((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
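/*
 * Worked out: 66600 * 1000 / 4 = 16,650,000 chip-clock counts for the
 * ~66.6 ms on time, and 187500 * 1000 / 4 = 46,875,000 counts for the
 * 187.5 ms off time, given the 4 ns clock period noted above.
 */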
2525
}
2526
if (yel)
2527
extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2528
dd->cspec->extctrl = extctl;
2529
qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2530
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2531
2532
if (ledblink) /* blink the LED on packet receive */
2533
qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2534
}
2535
2536
/*
2537
* Disable MSIx interrupt if enabled, call generic MSIx code
2538
* to cleanup, and clear pending MSIx interrupts.
2539
* Used for fallback to INTx, after reset, and when MSIx setup fails.
2540
*/
2541
static void qib_7322_nomsix(struct qib_devdata *dd)
2542
{
2543
u64 intgranted;
2544
int n;
2545
2546
dd->cspec->main_int_mask = ~0ULL;
2547
n = dd->cspec->num_msix_entries;
2548
if (n) {
2549
int i;
2550
2551
dd->cspec->num_msix_entries = 0;
2552
for (i = 0; i < n; i++)
2553
free_irq(dd->cspec->msix_entries[i].vector,
2554
dd->cspec->msix_arg[i]);
2555
qib_nomsix(dd);
2556
}
2557
/* make sure no MSIx interrupts are left pending */
2558
intgranted = qib_read_kreg64(dd, kr_intgranted);
2559
if (intgranted)
2560
qib_write_kreg(dd, kr_intgranted, intgranted);
2561
}
2562
2563
static void qib_7322_free_irq(struct qib_devdata *dd)
2564
{
2565
if (dd->cspec->irq) {
2566
free_irq(dd->cspec->irq, dd);
2567
dd->cspec->irq = 0;
2568
}
2569
qib_7322_nomsix(dd);
2570
}
2571
2572
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2573
{
2574
int i;
2575
2576
qib_7322_free_irq(dd);
2577
kfree(dd->cspec->cntrs);
2578
kfree(dd->cspec->sendchkenable);
2579
kfree(dd->cspec->sendgrhchk);
2580
kfree(dd->cspec->sendibchk);
2581
kfree(dd->cspec->msix_entries);
2582
kfree(dd->cspec->msix_arg);
2583
for (i = 0; i < dd->num_pports; i++) {
2584
unsigned long flags;
2585
u32 mask = QSFP_GPIO_MOD_PRS_N |
2586
(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2587
2588
kfree(dd->pport[i].cpspec->portcntrs);
2589
if (dd->flags & QIB_HAS_QSFP) {
2590
spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2591
dd->cspec->gpio_mask &= ~mask;
2592
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2593
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2594
qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2595
}
2596
if (dd->pport[i].ibport_data.smi_ah)
2597
ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2598
}
2599
}
2600
2601
/* handle SDMA interrupts */
2602
static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2603
{
2604
struct qib_pportdata *ppd0 = &dd->pport[0];
2605
struct qib_pportdata *ppd1 = &dd->pport[1];
2606
u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2607
INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2608
u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2609
INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2610
2611
if (intr0)
2612
qib_sdma_intr(ppd0);
2613
if (intr1)
2614
qib_sdma_intr(ppd1);
2615
2616
if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2617
qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2618
if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2619
qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2620
}
2621
2622
/*
2623
* Set or clear the Send buffer available interrupt enable bit.
2624
*/
2625
static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2626
{
2627
unsigned long flags;
2628
2629
spin_lock_irqsave(&dd->sendctrl_lock, flags);
2630
if (needint)
2631
dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2632
else
2633
dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2634
qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2635
qib_write_kreg(dd, kr_scratch, 0ULL);
2636
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2637
}
2638
2639
/*
2640
* Somehow got an interrupt with reserved bits set in interrupt status.
2641
* Print a message so we know it happened, then clear them.
2642
* Keep the mainline interrupt handler cache-friendly.
2643
*/
2644
static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2645
{
2646
u64 kills;

kills = istat & ~QIB_I_BITSEXTANT;
qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
(unsigned long long) kills);
2652
qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2653
}
2654
2655
/* keep mainline interrupt handler cache-friendly */
2656
static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2657
{
2658
u32 gpiostatus;
2659
int handled = 0;
2660
int pidx;
2661
2662
/*
2663
* Boards for this chip currently don't use GPIO interrupts,
2664
* so clear by writing GPIOstatus to GPIOclear, and complain
2665
* to developer. To avoid endless repeats, clear
2666
* the bits in the mask, since there is some kind of
2667
* programming error or chip problem.
2668
*/
2669
gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2670
/*
2671
* In theory, writing GPIOstatus to GPIOclear could
2672
* have a bad side-effect on some diagnostic that wanted
2673
* to poll for a status-change, but the various shadows
2674
* make that problematic at best. Diags will just suppress
2675
* all GPIO interrupts during such tests.
2676
*/
2677
qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2678
/*
2679
* Check for QSFP MOD_PRS changes
2680
* only works for single port if IB1 != pidx1
2681
*/
2682
for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2683
++pidx) {
2684
struct qib_pportdata *ppd;
2685
struct qib_qsfp_data *qd;
2686
u32 mask;
2687
if (!dd->pport[pidx].link_speed_supported)
2688
continue;
2689
mask = QSFP_GPIO_MOD_PRS_N;
2690
ppd = dd->pport + pidx;
2691
mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2692
if (gpiostatus & dd->cspec->gpio_mask & mask) {
2693
u64 pins;
2694
qd = &ppd->cpspec->qsfp_data;
2695
gpiostatus &= ~mask;
2696
pins = qib_read_kreg64(dd, kr_extstatus);
2697
pins >>= SYM_LSB(EXTStatus, GPIOIn);
2698
if (!(pins & mask)) {
2699
++handled;
2700
qd->t_insert = get_jiffies_64();
2701
queue_work(ib_wq, &qd->work);
2702
}
2703
}
2704
}
2705
2706
if (gpiostatus && !handled) {
2707
const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2708
u32 gpio_irq = mask & gpiostatus;
2709
2710
/*
2711
* Clear any troublemakers, and update chip from shadow
2712
*/
2713
dd->cspec->gpio_mask &= ~gpio_irq;
2714
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2715
}
2716
}
2717
2718
/*
2719
* Handle errors and unusual events first, separate function
2720
* to improve cache hits for fast path interrupt handling.
2721
*/
2722
static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2723
{
2724
if (istat & ~QIB_I_BITSEXTANT)
2725
unknown_7322_ibits(dd, istat);
2726
if (istat & QIB_I_GPIO)
2727
unknown_7322_gpio_intr(dd);
2728
if (istat & QIB_I_C_ERROR)
2729
handle_7322_errors(dd);
2730
if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2731
handle_7322_p_errors(dd->rcd[0]->ppd);
2732
if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2733
handle_7322_p_errors(dd->rcd[1]->ppd);
2734
}
2735
2736
/*
2737
* Dynamically adjust the rcv int timeout for a context based on incoming
2738
* packet rate.
2739
*/
2740
static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2741
{
2742
struct qib_devdata *dd = rcd->dd;
2743
u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2744
2745
/*
2746
* Dynamically adjust idle timeout on chip
2747
* based on number of packets processed.
2748
*/
2749
if (npkts < rcv_int_count && timeout > 2)
2750
timeout >>= 1;
2751
else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2752
timeout = min(timeout << 1, rcv_int_timeout);
2753
else
2754
return;
2755
2756
dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2757
qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2758
}
2759
2760
/*
2761
* This is the main interrupt handler.
2762
* It will normally only be used for low frequency interrupts but may
2763
* have to handle all interrupts if INTx is enabled or fewer than normal
2764
* MSIx interrupts were allocated.
2765
* This routine should ignore the interrupt bits for any of the
2766
* dedicated MSIx handlers.
2767
*/
2768
static irqreturn_t qib_7322intr(int irq, void *data)
2769
{
2770
struct qib_devdata *dd = data;
2771
irqreturn_t ret;
2772
u64 istat;
2773
u64 ctxtrbits;
2774
u64 rmask;
2775
unsigned i;
2776
u32 npkts;
2777
2778
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2779
/*
2780
* This return value is not great, but we do not want the
2781
* interrupt core code to remove our interrupt handler
2782
* because we don't appear to be handling an interrupt
2783
* during a chip reset.
2784
*/
2785
ret = IRQ_HANDLED;
2786
goto bail;
2787
}
2788
2789
istat = qib_read_kreg64(dd, kr_intstatus);
2790
2791
if (unlikely(istat == ~0ULL)) {
2792
qib_bad_intrstatus(dd);
2793
qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2794
/* don't know if it was our interrupt or not */
2795
ret = IRQ_NONE;
2796
goto bail;
2797
}
2798
2799
istat &= dd->cspec->main_int_mask;
2800
if (unlikely(!istat)) {
2801
/* already handled, or shared and not us */
2802
ret = IRQ_NONE;
2803
goto bail;
2804
}
2805
2806
qib_stats.sps_ints++;
2807
if (dd->int_counter != (u32) -1)
2808
dd->int_counter++;
2809
2810
/* handle "errors" of various kinds first, device ahead of port */
2811
if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2812
QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2813
INT_MASK_P(Err, 1))))
2814
unlikely_7322_intr(dd, istat);
2815
2816
/*
2817
* Clear the interrupt bits we found set, relatively early, so we
2818
* "know" know the chip will have seen this by the time we process
2819
* the queue, and will re-interrupt if necessary. The processor
2820
* itself won't take the interrupt again until we return.
2821
*/
2822
qib_write_kreg(dd, kr_intclear, istat);
2823
2824
/*
2825
* Handle kernel receive queues before checking for pio buffers
2826
* available since receives can overflow; piobuf waiters can afford
2827
* a few extra cycles, since they were waiting anyway.
2828
*/
2829
ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2830
if (ctxtrbits) {
2831
rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2832
(1ULL << QIB_I_RCVURG_LSB);
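/*
 * Each context owns one RcvAvail and one RcvUrg bit; shifting this
 * combined mask left by one per iteration walks both bits to the
 * next context's position.
 */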
2833
for (i = 0; i < dd->first_user_ctxt; i++) {
2834
if (ctxtrbits & rmask) {
2835
ctxtrbits &= ~rmask;
2836
if (dd->rcd[i]) {
2837
qib_kreceive(dd->rcd[i], NULL, &npkts);
2838
}
2839
}
2840
rmask <<= 1;
2841
}
2842
if (ctxtrbits) {
2843
ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2844
(ctxtrbits >> QIB_I_RCVURG_LSB);
2845
qib_handle_urcv(dd, ctxtrbits);
2846
}
2847
}
2848
2849
if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2850
sdma_7322_intr(dd, istat);
2851
2852
if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2853
qib_ib_piobufavail(dd);
2854
2855
ret = IRQ_HANDLED;
2856
bail:
2857
return ret;
2858
}
2859
2860
/*
2861
* Dedicated receive packet available interrupt handler.
2862
*/
2863
static irqreturn_t qib_7322pintr(int irq, void *data)
2864
{
2865
struct qib_ctxtdata *rcd = data;
2866
struct qib_devdata *dd = rcd->dd;
2867
u32 npkts;
2868
2869
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2870
/*
2871
* This return value is not great, but we do not want the
2872
* interrupt core code to remove our interrupt handler
2873
* because we don't appear to be handling an interrupt
2874
* during a chip reset.
2875
*/
2876
return IRQ_HANDLED;
2877
2878
qib_stats.sps_ints++;
2879
if (dd->int_counter != (u32) -1)
2880
dd->int_counter++;
2881
2882
/* Clear the interrupt bit we expect to be set. */
2883
qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2884
(1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2885
2886
qib_kreceive(rcd, NULL, &npkts);
2887
2888
return IRQ_HANDLED;
2889
}
2890
2891
/*
2892
* Dedicated Send buffer available interrupt handler.
2893
*/
2894
static irqreturn_t qib_7322bufavail(int irq, void *data)
2895
{
2896
struct qib_devdata *dd = data;
2897
2898
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2899
/*
2900
* This return value is not great, but we do not want the
2901
* interrupt core code to remove our interrupt handler
2902
* because we don't appear to be handling an interrupt
2903
* during a chip reset.
2904
*/
2905
return IRQ_HANDLED;
2906
2907
qib_stats.sps_ints++;
2908
if (dd->int_counter != (u32) -1)
2909
dd->int_counter++;
2910
2911
/* Clear the interrupt bit we expect to be set. */
2912
qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2913
2914
/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2915
if (dd->flags & QIB_INITTED)
2916
qib_ib_piobufavail(dd);
2917
else
2918
qib_wantpiobuf_7322_intr(dd, 0);
2919
2920
return IRQ_HANDLED;
2921
}
2922
2923
/*
2924
* Dedicated Send DMA interrupt handler.
2925
*/
2926
static irqreturn_t sdma_intr(int irq, void *data)
2927
{
2928
struct qib_pportdata *ppd = data;
2929
struct qib_devdata *dd = ppd->dd;
2930
2931
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2932
/*
2933
* This return value is not great, but we do not want the
2934
* interrupt core code to remove our interrupt handler
2935
* because we don't appear to be handling an interrupt
2936
* during a chip reset.
2937
*/
2938
return IRQ_HANDLED;
2939
2940
qib_stats.sps_ints++;
2941
if (dd->int_counter != (u32) -1)
2942
dd->int_counter++;
2943
2944
/* Clear the interrupt bit we expect to be set. */
2945
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2946
INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2947
qib_sdma_intr(ppd);
2948
2949
return IRQ_HANDLED;
2950
}
2951
2952
/*
2953
* Dedicated Send DMA idle interrupt handler.
2954
*/
2955
static irqreturn_t sdma_idle_intr(int irq, void *data)
2956
{
2957
struct qib_pportdata *ppd = data;
2958
struct qib_devdata *dd = ppd->dd;
2959
2960
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2961
/*
2962
* This return value is not great, but we do not want the
2963
* interrupt core code to remove our interrupt handler
2964
* because we don't appear to be handling an interrupt
2965
* during a chip reset.
2966
*/
2967
return IRQ_HANDLED;
2968
2969
qib_stats.sps_ints++;
2970
if (dd->int_counter != (u32) -1)
2971
dd->int_counter++;
2972
2973
/* Clear the interrupt bit we expect to be set. */
2974
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2975
INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
2976
qib_sdma_intr(ppd);
2977
2978
return IRQ_HANDLED;
2979
}
2980
2981
/*
2982
* Dedicated Send DMA progress interrupt handler.
2983
*/
2984
static irqreturn_t sdma_progress_intr(int irq, void *data)
2985
{
2986
struct qib_pportdata *ppd = data;
2987
struct qib_devdata *dd = ppd->dd;
2988
2989
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2990
/*
2991
* This return value is not great, but we do not want the
2992
* interrupt core code to remove our interrupt handler
2993
* because we don't appear to be handling an interrupt
2994
* during a chip reset.
2995
*/
2996
return IRQ_HANDLED;
2997
2998
qib_stats.sps_ints++;
2999
if (dd->int_counter != (u32) -1)
3000
dd->int_counter++;
3001
3002
/* Clear the interrupt bit we expect to be set. */
3003
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3004
INT_MASK_P(SDmaProgress, 1) :
3005
INT_MASK_P(SDmaProgress, 0));
3006
qib_sdma_intr(ppd);
3007
3008
return IRQ_HANDLED;
3009
}
3010
3011
/*
3012
* Dedicated Send DMA cleanup interrupt handler.
3013
*/
3014
static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3015
{
3016
struct qib_pportdata *ppd = data;
3017
struct qib_devdata *dd = ppd->dd;
3018
3019
if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3020
/*
3021
* This return value is not great, but we do not want the
3022
* interrupt core code to remove our interrupt handler
3023
* because we don't appear to be handling an interrupt
3024
* during a chip reset.
3025
*/
3026
return IRQ_HANDLED;
3027
3028
qib_stats.sps_ints++;
3029
if (dd->int_counter != (u32) -1)
3030
dd->int_counter++;
3031
3032
/* Clear the interrupt bit we expect to be set. */
3033
qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3034
INT_MASK_PM(SDmaCleanupDone, 1) :
3035
INT_MASK_PM(SDmaCleanupDone, 0));
3036
qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3037
3038
return IRQ_HANDLED;
3039
}
3040
3041
/*
3042
* Set up our chip-specific interrupt handler.
3043
* The interrupt type has already been setup, so
3044
* we just need to do the registration and error checking.
3045
* If we are using MSIx interrupts, we may fall back to
3046
* INTx later, if the interrupt handler doesn't get called
3047
* within 1/2 second (see verify_interrupt()).
3048
*/
3049
static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3050
{
3051
int ret, i, msixnum;
3052
u64 redirect[6];
3053
u64 mask;
3054
3055
if (!dd->num_pports)
3056
return;
3057
3058
if (clearpend) {
3059
/*
3060
* if not switching interrupt types, be sure interrupts are
3061
* disabled, and then clear anything pending at this point,
3062
* because we are starting clean.
3063
*/
3064
qib_7322_set_intr_state(dd, 0);
3065
3066
/* clear the reset error, init error/hwerror mask */
3067
qib_7322_init_hwerrors(dd);
3068
3069
/* clear any interrupt bits that might be set */
3070
qib_write_kreg(dd, kr_intclear, ~0ULL);
3071
3072
/* make sure no pending MSIx intr, and clear diag reg */
3073
qib_write_kreg(dd, kr_intgranted, ~0ULL);
3074
qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3075
}
3076
3077
if (!dd->cspec->num_msix_entries) {
3078
/* Try to get INTx interrupt */
3079
try_intx:
3080
if (!dd->pcidev->irq) {
3081
qib_dev_err(dd, "irq is 0, BIOS error? "
3082
"Interrupts won't work\n");
3083
goto bail;
3084
}
3085
ret = request_irq(dd->pcidev->irq, qib_7322intr,
3086
IRQF_SHARED, QIB_DRV_NAME, dd);
3087
if (ret) {
3088
qib_dev_err(dd, "Couldn't setup INTx "
3089
"interrupt (irq=%d): %d\n",
3090
dd->pcidev->irq, ret);
3091
goto bail;
3092
}
3093
dd->cspec->irq = dd->pcidev->irq;
3094
dd->cspec->main_int_mask = ~0ULL;
3095
goto bail;
3096
}
3097
3098
/* Try to get MSIx interrupts */
3099
memset(redirect, 0, sizeof redirect);
3100
mask = ~0ULL;
3101
msixnum = 0;
3102
for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3103
irq_handler_t handler;
3104
const char *name;
3105
void *arg;
3106
u64 val;
3107
int lsb, reg, sh;
3108
3109
if (i < ARRAY_SIZE(irq_table)) {
3110
if (irq_table[i].port) {
3111
/* skip if for a non-configured port */
3112
if (irq_table[i].port > dd->num_pports)
3113
continue;
3114
arg = dd->pport + irq_table[i].port - 1;
3115
} else
3116
arg = dd;
3117
lsb = irq_table[i].lsb;
3118
handler = irq_table[i].handler;
3119
name = irq_table[i].name;
3120
} else {
3121
unsigned ctxt;
3122
3123
ctxt = i - ARRAY_SIZE(irq_table);
3124
/* per krcvq context receive interrupt */
3125
arg = dd->rcd[ctxt];
3126
if (!arg)
3127
continue;
3128
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3129
handler = qib_7322pintr;
3130
name = QIB_DRV_NAME " (kctx)";
3131
}
3132
ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3133
handler, 0, name, arg);
3134
if (ret) {
3135
/*
3136
* Shouldn't happen since the enable said we could
3137
* have as many as we are trying to setup here.
3138
*/
3139
qib_dev_err(dd, "Couldn't setup MSIx "
3140
"interrupt (vec=%d, irq=%d): %d\n", msixnum,
3141
dd->cspec->msix_entries[msixnum].vector,
3142
ret);
3143
qib_7322_nomsix(dd);
3144
goto try_intx;
3145
}
3146
dd->cspec->msix_arg[msixnum] = arg;
3147
if (lsb >= 0) {
3148
reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3149
sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3150
SYM_LSB(IntRedirect0, vec1);
3151
mask &= ~(1ULL << lsb);
3152
redirect[reg] |= ((u64) msixnum) << sh;
3153
}
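/*
 * Illustrative note (not from the original source): each IntRedirect
 * register holds IBA7322_REDIRECT_VEC_PER_REG fields, so an interrupt
 * whose status bit is "lsb" lands in redirect[lsb / PER_REG], shifted by
 * (lsb % PER_REG) field widths. For example, assuming PER_REG were 12,
 * lsb 17 would select redirect[1], field 5, and that field would be
 * programmed with this vector's msixnum.
 */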
3154
val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3155
(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3156
msixnum++;
3157
}
3158
/* Initialize the vector mapping */
3159
for (i = 0; i < ARRAY_SIZE(redirect); i++)
3160
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3161
dd->cspec->main_int_mask = mask;
3162
bail:;
3163
}
3164
3165
/**
3166
* qib_7322_boardname - fill in the board name and note features
3167
* @dd: the qlogic_ib device
3168
*
3169
* info will be based on the board revision register
3170
*/
3171
static unsigned qib_7322_boardname(struct qib_devdata *dd)
3172
{
3173
/* Will need enumeration of board-types here */
3174
char *n;
3175
u32 boardid, namelen;
3176
unsigned features = DUAL_PORT_CAP;
3177
3178
boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3179
3180
switch (boardid) {
3181
case 0:
3182
n = "InfiniPath_QLE7342_Emulation";
3183
break;
3184
case 1:
3185
n = "InfiniPath_QLE7340";
3186
dd->flags |= QIB_HAS_QSFP;
3187
features = PORT_SPD_CAP;
3188
break;
3189
case 2:
3190
n = "InfiniPath_QLE7342";
3191
dd->flags |= QIB_HAS_QSFP;
3192
break;
3193
case 3:
3194
n = "InfiniPath_QMI7342";
3195
break;
3196
case 4:
3197
n = "InfiniPath_Unsupported7342";
3198
qib_dev_err(dd, "Unsupported version of QMH7342\n");
3199
features = 0;
3200
break;
3201
case BOARD_QMH7342:
3202
n = "InfiniPath_QMH7342";
3203
features = 0x24;
3204
break;
3205
case BOARD_QME7342:
3206
n = "InfiniPath_QME7342";
3207
break;
3208
case 8:
3209
n = "InfiniPath_QME7362";
3210
dd->flags |= QIB_HAS_QSFP;
3211
break;
3212
case 15:
3213
n = "InfiniPath_QLE7342_TEST";
3214
dd->flags |= QIB_HAS_QSFP;
3215
break;
3216
default:
3217
n = "InfiniPath_QLE73xy_UNKNOWN";
3218
qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3219
break;
3220
}
3221
dd->board_atten = 1; /* index into txdds_Xdr */
3222
3223
namelen = strlen(n) + 1;
3224
dd->boardname = kmalloc(namelen, GFP_KERNEL);
3225
if (!dd->boardname)
3226
qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3227
else
3228
snprintf(dd->boardname, namelen, "%s", n);
3229
3230
snprintf(dd->boardversion, sizeof(dd->boardversion),
3231
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3232
QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3233
(unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3234
dd->majrev, dd->minrev,
3235
(unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3236
3237
if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3238
qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3239
" by module parameter\n", dd->unit);
3240
features &= PORT_SPD_CAP;
3241
}
3242
3243
return features;
3244
}
3245
3246
/*
3247
* This routine sleeps, so it can only be called from user context, not
3248
* from interrupt context.
3249
*/
3250
static int qib_do_7322_reset(struct qib_devdata *dd)
3251
{
3252
u64 val;
3253
u64 *msix_vecsave;
3254
int i, msix_entries, ret = 1;
3255
u16 cmdval;
3256
u8 int_line, clinesz;
3257
unsigned long flags;
3258
3259
/* Use dev_err so it shows up in logs, etc. */
3260
qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3261
3262
qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3263
3264
msix_entries = dd->cspec->num_msix_entries;
3265
3266
/* no interrupts till re-initted */
3267
qib_7322_set_intr_state(dd, 0);
3268
3269
if (msix_entries) {
3270
qib_7322_nomsix(dd);
3271
/* can be up to 512 bytes, too big for stack */
3272
msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3273
sizeof(u64), GFP_KERNEL);
3274
if (!msix_vecsave)
3275
qib_dev_err(dd, "No mem to save MSIx data\n");
3276
} else
3277
msix_vecsave = NULL;
3278
3279
/*
3280
* Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3281
* info that is set up by the BIOS, so we have to save and restore
3282
* it ourselves. There is some risk something could change it,
3283
* after we save it, but since we have disabled the MSIx, it
3284
* shouldn't be touched...
3285
*/
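/*
 * Illustrative sizing (assumption, not from the original comment): each
 * MSIx table entry is saved as two u64 values (address and data), so
 * with, say, 32 vectors the scratch buffer is 2 * 32 * sizeof(u64) =
 * 512 bytes, matching the "too big for stack" note above.
 */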
3286
for (i = 0; i < msix_entries; i++) {
3287
u64 vecaddr, vecdata;
3288
vecaddr = qib_read_kreg64(dd, 2 * i +
3289
(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3290
vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3291
(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3292
if (msix_vecsave) {
3293
msix_vecsave[2 * i] = vecaddr;
3294
/* save it without the masked bit set */
3295
msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3296
}
3297
}
3298
3299
dd->pport->cpspec->ibdeltainprog = 0;
3300
dd->pport->cpspec->ibsymdelta = 0;
3301
dd->pport->cpspec->iblnkerrdelta = 0;
3302
dd->pport->cpspec->ibmalfdelta = 0;
3303
dd->int_counter = 0; /* so we check interrupts work again */
3304
3305
/*
3306
* Keep chip from being accessed until we are ready. Use
3307
* writeq() directly, to allow the write even though QIB_PRESENT
3308
* isn't set.
3309
*/
3310
dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3311
dd->flags |= QIB_DOING_RESET;
3312
val = dd->control | QLOGIC_IB_C_RESET;
3313
writeq(val, &dd->kregbase[kr_control]);
3314
3315
for (i = 1; i <= 5; i++) {
3316
/*
3317
* Allow MBIST, etc. to complete; longer on each retry.
3318
* We sometimes get machine checks from bus timeout if no
3319
* response, so for now, make it *really* long.
3320
*/
3321
msleep(1000 + (1 + i) * 3000);
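/*
 * Worked example of the delay above (illustrative): the first retry
 * (i == 1) sleeps 1000 + 2 * 3000 = 7000 ms, and the last retry
 * (i == 5) sleeps 1000 + 6 * 3000 = 19000 ms.
 */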
3322
3323
qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3324
3325
/*
3326
* Use readq directly, so we don't need to mark it as PRESENT
3327
* until we get a successful indication that all is well.
3328
*/
3329
val = readq(&dd->kregbase[kr_revision]);
3330
if (val == dd->revision)
3331
break;
3332
if (i == 5) {
3333
qib_dev_err(dd, "Failed to initialize after reset, "
3334
"unusable\n");
3335
ret = 0;
3336
goto bail;
3337
}
3338
}
3339
3340
dd->flags |= QIB_PRESENT; /* it's back */
3341
3342
if (msix_entries) {
3343
/* restore the MSIx vector address and data if saved above */
3344
for (i = 0; i < msix_entries; i++) {
3345
dd->cspec->msix_entries[i].entry = i;
3346
if (!msix_vecsave || !msix_vecsave[2 * i])
3347
continue;
3348
qib_write_kreg(dd, 2 * i +
3349
(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3350
msix_vecsave[2 * i]);
3351
qib_write_kreg(dd, 1 + 2 * i +
3352
(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3353
msix_vecsave[1 + 2 * i]);
3354
}
3355
}
3356
3357
/* initialize the remaining registers. */
3358
for (i = 0; i < dd->num_pports; ++i)
3359
write_7322_init_portregs(&dd->pport[i]);
3360
write_7322_initregs(dd);
3361
3362
if (qib_pcie_params(dd, dd->lbus_width,
3363
&dd->cspec->num_msix_entries,
3364
dd->cspec->msix_entries))
3365
qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3366
"continuing anyway\n");
3367
3368
qib_setup_7322_interrupt(dd, 1);
3369
3370
for (i = 0; i < dd->num_pports; ++i) {
3371
struct qib_pportdata *ppd = &dd->pport[i];
3372
3373
spin_lock_irqsave(&ppd->lflags_lock, flags);
3374
ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3375
ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3376
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3377
}
3378
3379
bail:
3380
dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3381
kfree(msix_vecsave);
3382
return ret;
3383
}
3384
3385
/**
3386
* qib_7322_put_tid - write a TID to the chip
3387
* @dd: the qlogic_ib device
3388
* @tidptr: pointer to the expected TID (in chip) to update
3389
* @type: 0 for eager, 1 for expected
3390
* @pa: physical address of in memory buffer; tidinvalid if freeing
3391
*/
3392
static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3393
u32 type, unsigned long pa)
3394
{
3395
if (!(dd->flags & QIB_PRESENT))
3396
return;
3397
if (pa != dd->tidinvalid) {
3398
u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3399
3400
/* paranoia checks */
3401
if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3402
qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3403
pa);
3404
return;
3405
}
3406
if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3407
qib_dev_err(dd, "Physical page address 0x%lx "
3408
"larger than supported\n", pa);
3409
return;
3410
}
3411
3412
if (type == RCVHQ_RCV_TYPE_EAGER)
3413
chippa |= dd->tidtemplate;
3414
else /* for now, always full 4KB page */
3415
chippa |= IBA7322_TID_SZ_4K;
3416
pa = chippa;
3417
}
3418
writeq(pa, tidptr);
3419
mmiowb();
3420
}
3421
3422
/**
3423
* qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3424
* @dd: the qlogic_ib device
3425
* @rcd: the ctxt data
3426
*
3427
* clear all TID entries for a ctxt, expected and eager.
3428
* Used from qib_close().
3429
*/
3430
static void qib_7322_clear_tids(struct qib_devdata *dd,
3431
struct qib_ctxtdata *rcd)
3432
{
3433
u64 __iomem *tidbase;
3434
unsigned long tidinv;
3435
u32 ctxt;
3436
int i;
3437
3438
if (!dd->kregbase || !rcd)
3439
return;
3440
3441
ctxt = rcd->ctxt;
3442
3443
tidinv = dd->tidinvalid;
3444
tidbase = (u64 __iomem *)
3445
((char __iomem *) dd->kregbase +
3446
dd->rcvtidbase +
3447
ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3448
3449
for (i = 0; i < dd->rcvtidcnt; i++)
3450
qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3451
tidinv);
3452
3453
tidbase = (u64 __iomem *)
3454
((char __iomem *) dd->kregbase +
3455
dd->rcvegrbase +
3456
rcd->rcvegr_tid_base * sizeof(*tidbase));
3457
3458
for (i = 0; i < rcd->rcvegrcnt; i++)
3459
qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3460
tidinv);
3461
}
3462
3463
/**
3464
* qib_7322_tidtemplate - setup constants for TID updates
3465
* @dd: the qlogic_ib device
3466
*
3467
* We set up stuff that we use a lot, to avoid calculating it each time
3468
*/
3469
static void qib_7322_tidtemplate(struct qib_devdata *dd)
3470
{
3471
/*
3472
* For now, we always allocate 4KB buffers (at init) so we can
3473
* receive max size packets. We may want a module parameter to
3474
* specify 2KB or 4KB and/or make it per port instead of per device
3475
* for those who want to reduce memory footprint. Note that the
3476
* rcvhdrentsize size must be large enough to hold the largest
3477
* IB header (currently 96 bytes) that we expect to handle (plus of
3478
* course the 2 dwords of RHF).
3479
*/
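/*
 * Illustrative arithmetic for the sizing note above: 96 bytes of IB
 * header plus 2 dwords (8 bytes) of RHF is 104 bytes, i.e. 26 dwords,
 * which rcvhdrentsize must be able to hold.
 */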
3480
if (dd->rcvegrbufsize == 2048)
3481
dd->tidtemplate = IBA7322_TID_SZ_2K;
3482
else if (dd->rcvegrbufsize == 4096)
3483
dd->tidtemplate = IBA7322_TID_SZ_4K;
3484
dd->tidinvalid = 0;
3485
}
3486
3487
/**
3488
* qib_init_7322_get_base_info - set chip-specific flags for user code
3489
* @rcd: the qlogic_ib ctxt
3490
* @kbase: qib_base_info pointer
3491
*
3492
* We set the PCIE flag because the lower bandwidth on PCIe vs
3493
* HyperTransport can affect some user packet algorithms.
3494
*/
3495
3496
static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3497
struct qib_base_info *kinfo)
3498
{
3499
kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3500
QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3501
QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3502
if (rcd->dd->cspec->r1)
3503
kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3504
if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3505
kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3506
3507
return 0;
3508
}
3509
3510
static struct qib_message_header *
3511
qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3512
{
3513
u32 offset = qib_hdrget_offset(rhf_addr);
3514
3515
return (struct qib_message_header *)
3516
(rhf_addr - dd->rhf_offset + offset);
3517
}
3518
3519
/*
3520
* Configure number of contexts.
3521
*/
3522
static void qib_7322_config_ctxts(struct qib_devdata *dd)
3523
{
3524
unsigned long flags;
3525
u32 nchipctxts;
3526
3527
nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3528
dd->cspec->numctxts = nchipctxts;
3529
if (qib_n_krcv_queues > 1 && dd->num_pports) {
3530
dd->first_user_ctxt = NUM_IB_PORTS +
3531
(qib_n_krcv_queues - 1) * dd->num_pports;
3532
if (dd->first_user_ctxt > nchipctxts)
3533
dd->first_user_ctxt = nchipctxts;
3534
dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3535
} else {
3536
dd->first_user_ctxt = NUM_IB_PORTS;
3537
dd->n_krcv_queues = 1;
3538
}
3539
3540
if (!qib_cfgctxts) {
3541
int nctxts = dd->first_user_ctxt + num_online_cpus();
3542
3543
if (nctxts <= 6)
3544
dd->ctxtcnt = 6;
3545
else if (nctxts <= 10)
3546
dd->ctxtcnt = 10;
3547
else if (nctxts <= nchipctxts)
3548
dd->ctxtcnt = nchipctxts;
3549
} else if (qib_cfgctxts < dd->num_pports)
3550
dd->ctxtcnt = dd->num_pports;
3551
else if (qib_cfgctxts <= nchipctxts)
3552
dd->ctxtcnt = qib_cfgctxts;
3553
if (!dd->ctxtcnt) /* none of the above, set to max */
3554
dd->ctxtcnt = nchipctxts;
3555
3556
/*
3557
* Chip can be configured for 6, 10, or 18 ctxts, and choice
3558
* affects number of eager TIDs per ctxt (1K, 2K, 4K).
3559
* Lock to be paranoid about later motion, etc.
3560
*/
3561
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3562
if (dd->ctxtcnt > 10)
3563
dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3564
else if (dd->ctxtcnt > 6)
3565
dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3566
/* else configure for default 6 receive ctxts */
3567
3568
/* The XRC opcode is 5. */
3569
dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3570
3571
/*
3572
* RcvCtrl *must* be written here so that the
3573
* chip understands how to change rcvegrcnt below.
3574
*/
3575
qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3576
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3577
3578
/* kr_rcvegrcnt changes based on the number of contexts enabled */
3579
dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3580
if (qib_rcvhdrcnt)
3581
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3582
else
3583
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3584
dd->num_pports > 1 ? 1024U : 2048U);
3585
}
3586
3587
static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3588
{
3589
3590
int lsb, ret = 0;
3591
u64 maskr; /* right-justified mask */
3592
3593
switch (which) {
3594
3595
case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3596
ret = ppd->link_width_enabled;
3597
goto done;
3598
3599
case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3600
ret = ppd->link_width_active;
3601
goto done;
3602
3603
case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3604
ret = ppd->link_speed_enabled;
3605
goto done;
3606
3607
case QIB_IB_CFG_SPD: /* Get current Link spd */
3608
ret = ppd->link_speed_active;
3609
goto done;
3610
3611
case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3612
lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3613
maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3614
break;
3615
3616
case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3617
lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3618
maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3619
break;
3620
3621
case QIB_IB_CFG_LINKLATENCY:
3622
ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3623
SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3624
goto done;
3625
3626
case QIB_IB_CFG_OP_VLS:
3627
ret = ppd->vls_operational;
3628
goto done;
3629
3630
case QIB_IB_CFG_VL_HIGH_CAP:
3631
ret = 16;
3632
goto done;
3633
3634
case QIB_IB_CFG_VL_LOW_CAP:
3635
ret = 16;
3636
goto done;
3637
3638
case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3639
ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3640
OverrunThreshold);
3641
goto done;
3642
3643
case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3644
ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3645
PhyerrThreshold);
3646
goto done;
3647
3648
case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3649
/* will only take effect when the link state changes */
3650
ret = (ppd->cpspec->ibcctrl_a &
3651
SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3652
IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3653
goto done;
3654
3655
case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3656
lsb = IBA7322_IBC_HRTBT_LSB;
3657
maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3658
break;
3659
3660
case QIB_IB_CFG_PMA_TICKS:
3661
/*
3662
* 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
3663
* Since the clock is always 250MHz, the value is 3, 1 or 0.
3664
*/
3665
if (ppd->link_speed_active == QIB_IB_QDR)
3666
ret = 3;
3667
else if (ppd->link_speed_active == QIB_IB_DDR)
3668
ret = 1;
3669
else
3670
ret = 0;
3671
goto done;
3672
3673
default:
3674
ret = -EINVAL;
3675
goto done;
3676
}
3677
ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3678
done:
3679
return ret;
3680
}
3681
3682
/*
3683
* Below again cribbed liberally from older version. Do not lean
3684
* heavily on it.
3685
*/
3686
#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3687
#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3688
| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3689
3690
static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3691
{
3692
struct qib_devdata *dd = ppd->dd;
3693
u64 maskr; /* right-justified mask */
3694
int lsb, ret = 0;
3695
u16 lcmd, licmd;
3696
unsigned long flags;
3697
3698
switch (which) {
3699
case QIB_IB_CFG_LIDLMC:
3700
/*
3701
* Set LID and LMC. Combined to avoid possible hazard
3702
* caller puts LMC in 16MSbits, DLID in 16LSbits of val
3703
*/
3704
lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3705
maskr = IBA7322_IBC_DLIDLMC_MASK;
3706
/*
3707
* For header-checking, the SLID in the packet will
3708
* be masked with SendIBSLMCMask, and compared
3709
* with SendIBSLIDAssignMask. Make sure we do not
3710
* set any bits not covered by the mask, or we get
3711
* false-positives.
3712
*/
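/*
 * Illustrative example (the exact caller convention is an assumption):
 * if the caller passes val = 0xfffc0024 (LID 0x24 in the low 16 bits and
 * an LMC-2 mask of 0xfffc in the high 16 bits), then krp_sendslid is
 * written with 0x24 & 0xfffc = 0x24 and krp_sendslidmask with 0xfffc,
 * each further ANDed with the chip's assign/LMC mask constants below.
 */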
3713
qib_write_kreg_port(ppd, krp_sendslid,
3714
val & (val >> 16) & SendIBSLIDAssignMask);
3715
qib_write_kreg_port(ppd, krp_sendslidmask,
3716
(val >> 16) & SendIBSLMCMask);
3717
break;
3718
3719
case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3720
ppd->link_width_enabled = val;
3721
/* convert IB value to chip register value */
3722
if (val == IB_WIDTH_1X)
3723
val = 0;
3724
else if (val == IB_WIDTH_4X)
3725
val = 1;
3726
else
3727
val = 3;
3728
maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3729
lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3730
break;
3731
3732
case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3733
/*
3734
* As with width, only write the actual register if the
3735
* link is currently down, otherwise takes effect on next
3736
* link change. Since setting is being explicitly requested
3737
* (via MAD or sysfs), clear autoneg failure status if speed
3738
* autoneg is enabled.
3739
*/
3740
ppd->link_speed_enabled = val;
3741
val <<= IBA7322_IBC_SPEED_LSB;
3742
maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3743
IBA7322_IBC_MAX_SPEED_MASK;
3744
if (val & (val - 1)) {
3745
/* Multiple speeds enabled */
3746
val |= IBA7322_IBC_IBTA_1_2_MASK |
3747
IBA7322_IBC_MAX_SPEED_MASK;
3748
spin_lock_irqsave(&ppd->lflags_lock, flags);
3749
ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3750
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3751
} else if (val & IBA7322_IBC_SPEED_QDR)
3752
val |= IBA7322_IBC_IBTA_1_2_MASK;
3753
/* IBTA 1.2 mode + min/max + speed bits are contiguous */
3754
lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3755
break;
3756
3757
case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3758
lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3759
maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3760
break;
3761
3762
case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3763
lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3764
maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3765
break;
3766
3767
case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3768
maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3769
OverrunThreshold);
3770
if (maskr != val) {
3771
ppd->cpspec->ibcctrl_a &=
3772
~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3773
ppd->cpspec->ibcctrl_a |= (u64) val <<
3774
SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3775
qib_write_kreg_port(ppd, krp_ibcctrl_a,
3776
ppd->cpspec->ibcctrl_a);
3777
qib_write_kreg(dd, kr_scratch, 0ULL);
3778
}
3779
goto bail;
3780
3781
case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3782
maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3783
PhyerrThreshold);
3784
if (maskr != val) {
3785
ppd->cpspec->ibcctrl_a &=
3786
~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3787
ppd->cpspec->ibcctrl_a |= (u64) val <<
3788
SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3789
qib_write_kreg_port(ppd, krp_ibcctrl_a,
3790
ppd->cpspec->ibcctrl_a);
3791
qib_write_kreg(dd, kr_scratch, 0ULL);
3792
}
3793
goto bail;
3794
3795
case QIB_IB_CFG_PKEYS: /* update pkeys */
3796
maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3797
((u64) ppd->pkeys[2] << 32) |
3798
((u64) ppd->pkeys[3] << 48);
3799
qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3800
goto bail;
3801
3802
case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3803
/* will only take effect when the link state changes */
3804
if (val == IB_LINKINITCMD_POLL)
3805
ppd->cpspec->ibcctrl_a &=
3806
~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3807
else /* SLEEP */
3808
ppd->cpspec->ibcctrl_a |=
3809
SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3810
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3811
qib_write_kreg(dd, kr_scratch, 0ULL);
3812
goto bail;
3813
3814
case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3815
/*
3816
* Update our housekeeping variables, and set IBC max
3817
* size, same as init code; max IBC is max we allow in
3818
* buffer, less the qword pbc, plus 1 for ICRC, in dwords
3819
* Set even if it's unchanged, print debug message only
3820
* on changes.
3821
*/
3822
val = (ppd->ibmaxlen >> 2) + 1;
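/*
 * Illustrative arithmetic (the value is an assumption): an ibmaxlen of
 * 4224 bytes gives 4224 / 4 + 1 = 1057 dwords for the IBC MaxPktLen
 * field written below.
 */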
3823
ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3824
ppd->cpspec->ibcctrl_a |= (u64)val <<
3825
SYM_LSB(IBCCtrlA_0, MaxPktLen);
3826
qib_write_kreg_port(ppd, krp_ibcctrl_a,
3827
ppd->cpspec->ibcctrl_a);
3828
qib_write_kreg(dd, kr_scratch, 0ULL);
3829
goto bail;
3830
3831
case QIB_IB_CFG_LSTATE: /* set the IB link state */
3832
switch (val & 0xffff0000) {
3833
case IB_LINKCMD_DOWN:
3834
lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3835
ppd->cpspec->ibmalfusesnap = 1;
3836
ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3837
crp_errlink);
3838
if (!ppd->cpspec->ibdeltainprog &&
3839
qib_compat_ddr_negotiate) {
3840
ppd->cpspec->ibdeltainprog = 1;
3841
ppd->cpspec->ibsymsnap =
3842
read_7322_creg32_port(ppd,
3843
crp_ibsymbolerr);
3844
ppd->cpspec->iblnkerrsnap =
3845
read_7322_creg32_port(ppd,
3846
crp_iblinkerrrecov);
3847
}
3848
break;
3849
3850
case IB_LINKCMD_ARMED:
3851
lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
3852
if (ppd->cpspec->ibmalfusesnap) {
3853
ppd->cpspec->ibmalfusesnap = 0;
3854
ppd->cpspec->ibmalfdelta +=
3855
read_7322_creg32_port(ppd,
3856
crp_errlink) -
3857
ppd->cpspec->ibmalfsnap;
3858
}
3859
break;
3860
3861
case IB_LINKCMD_ACTIVE:
3862
lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
3863
break;
3864
3865
default:
3866
ret = -EINVAL;
3867
qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
3868
goto bail;
3869
}
3870
switch (val & 0xffff) {
3871
case IB_LINKINITCMD_NOP:
3872
licmd = 0;
3873
break;
3874
3875
case IB_LINKINITCMD_POLL:
3876
licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
3877
break;
3878
3879
case IB_LINKINITCMD_SLEEP:
3880
licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
3881
break;
3882
3883
case IB_LINKINITCMD_DISABLE:
3884
licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
3885
ppd->cpspec->chase_end = 0;
3886
/*
3887
* stop state chase counter and timer, if running.
3888
* wait for pending timer, but don't clear .data (ppd)!
3889
*/
3890
if (ppd->cpspec->chase_timer.expires) {
3891
del_timer_sync(&ppd->cpspec->chase_timer);
3892
ppd->cpspec->chase_timer.expires = 0;
3893
}
3894
break;
3895
3896
default:
3897
ret = -EINVAL;
3898
qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
3899
val & 0xffff);
3900
goto bail;
3901
}
3902
qib_set_ib_7322_lstate(ppd, lcmd, licmd);
3903
goto bail;
3904
3905
case QIB_IB_CFG_OP_VLS:
3906
if (ppd->vls_operational != val) {
3907
ppd->vls_operational = val;
3908
set_vls(ppd);
3909
}
3910
goto bail;
3911
3912
case QIB_IB_CFG_VL_HIGH_LIMIT:
3913
qib_write_kreg_port(ppd, krp_highprio_limit, val);
3914
goto bail;
3915
3916
case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
3917
if (val > 3) {
3918
ret = -EINVAL;
3919
goto bail;
3920
}
3921
lsb = IBA7322_IBC_HRTBT_LSB;
3922
maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3923
break;
3924
3925
case QIB_IB_CFG_PORT:
3926
/* val is the port number of the switch we are connected to. */
3927
if (ppd->dd->cspec->r1) {
3928
cancel_delayed_work(&ppd->cpspec->ipg_work);
3929
ppd->cpspec->ipg_tries = 0;
3930
}
3931
goto bail;
3932
3933
default:
3934
ret = -EINVAL;
3935
goto bail;
3936
}
3937
ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
3938
ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
3939
qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
3940
qib_write_kreg(dd, kr_scratch, 0);
3941
bail:
3942
return ret;
3943
}
3944
3945
static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
3946
{
3947
int ret = 0;
3948
u64 val, ctrlb;
3949
3950
/* only IBC loopback, may add serdes and xgxs loopbacks later */
3951
if (!strncmp(what, "ibc", 3)) {
3952
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
3953
Loopback);
3954
val = 0; /* disable heart beat, so link will come up */
3955
qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
3956
ppd->dd->unit, ppd->port);
3957
} else if (!strncmp(what, "off", 3)) {
3958
ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
3959
Loopback);
3960
/* enable heart beat again */
3961
val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
3962
qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
3963
"(normal)\n", ppd->dd->unit, ppd->port);
3964
} else
3965
ret = -EINVAL;
3966
if (!ret) {
3967
qib_write_kreg_port(ppd, krp_ibcctrl_a,
3968
ppd->cpspec->ibcctrl_a);
3969
ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
3970
<< IBA7322_IBC_HRTBT_LSB);
3971
ppd->cpspec->ibcctrl_b = ctrlb | val;
3972
qib_write_kreg_port(ppd, krp_ibcctrl_b,
3973
ppd->cpspec->ibcctrl_b);
3974
qib_write_kreg(ppd->dd, kr_scratch, 0);
3975
}
3976
return ret;
3977
}
3978
3979
static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3980
struct ib_vl_weight_elem *vl)
3981
{
3982
unsigned i;
3983
3984
for (i = 0; i < 16; i++, regno++, vl++) {
3985
u32 val = qib_read_kreg_port(ppd, regno);
3986
3987
vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
3988
SYM_RMASK(LowPriority0_0, VirtualLane);
3989
vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
3990
SYM_RMASK(LowPriority0_0, Weight);
3991
}
3992
}
3993
3994
static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
3995
struct ib_vl_weight_elem *vl)
3996
{
3997
unsigned i;
3998
3999
for (i = 0; i < 16; i++, regno++, vl++) {
4000
u64 val;
4001
4002
val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4003
SYM_LSB(LowPriority0_0, VirtualLane)) |
4004
((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4005
SYM_LSB(LowPriority0_0, Weight));
4006
qib_write_kreg_port(ppd, regno, val);
4007
}
4008
if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4009
struct qib_devdata *dd = ppd->dd;
4010
unsigned long flags;
4011
4012
spin_lock_irqsave(&dd->sendctrl_lock, flags);
4013
ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4014
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4015
qib_write_kreg(dd, kr_scratch, 0);
4016
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4017
}
4018
}
4019
4020
static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4021
{
4022
switch (which) {
4023
case QIB_IB_TBL_VL_HIGH_ARB:
4024
get_vl_weights(ppd, krp_highprio_0, t);
4025
break;
4026
4027
case QIB_IB_TBL_VL_LOW_ARB:
4028
get_vl_weights(ppd, krp_lowprio_0, t);
4029
break;
4030
4031
default:
4032
return -EINVAL;
4033
}
4034
return 0;
4035
}
4036
4037
static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4038
{
4039
switch (which) {
4040
case QIB_IB_TBL_VL_HIGH_ARB:
4041
set_vl_weights(ppd, krp_highprio_0, t);
4042
break;
4043
4044
case QIB_IB_TBL_VL_LOW_ARB:
4045
set_vl_weights(ppd, krp_lowprio_0, t);
4046
break;
4047
4048
default:
4049
return -EINVAL;
4050
}
4051
return 0;
4052
}
4053
4054
static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4055
u32 updegr, u32 egrhd, u32 npkts)
4056
{
4057
/*
4058
* Need to write timeout register before updating rcvhdrhead to ensure
4059
* that the timer is enabled on reception of a packet.
4060
*/
4061
if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4062
adjust_rcv_timeout(rcd, npkts);
4063
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4064
qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4065
if (updegr)
4066
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4067
}
4068
4069
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4070
{
4071
u32 head, tail;
4072
4073
head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4074
if (rcd->rcvhdrtail_kvaddr)
4075
tail = qib_get_rcvhdrtail(rcd);
4076
else
4077
tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4078
return head == tail;
4079
}
4080
4081
#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4082
QIB_RCVCTRL_CTXT_DIS | \
4083
QIB_RCVCTRL_TIDFLOW_ENB | \
4084
QIB_RCVCTRL_TIDFLOW_DIS | \
4085
QIB_RCVCTRL_TAILUPD_ENB | \
4086
QIB_RCVCTRL_TAILUPD_DIS | \
4087
QIB_RCVCTRL_INTRAVAIL_ENB | \
4088
QIB_RCVCTRL_INTRAVAIL_DIS | \
4089
QIB_RCVCTRL_BP_ENB | \
4090
QIB_RCVCTRL_BP_DIS)
4091
4092
#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4093
QIB_RCVCTRL_CTXT_DIS | \
4094
QIB_RCVCTRL_PKEY_DIS | \
4095
QIB_RCVCTRL_PKEY_ENB)
4096
4097
/*
4098
* Modify the RCVCTRL register in chip-specific way. This
4099
* is a function because bit positions and (future) register
4100
* location is chip-specific, but the needed operations are
4101
* generic. <op> is a bit-mask because we often want to
4102
* do multiple modifications.
4103
*/
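/*
 * Usage sketch (illustrative only, not a call made from this spot):
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_CTXT_ENB |
 *			 QIB_RCVCTRL_TAILUPD_ENB, rcd->ctxt);
 *
 * enables one kernel context and turns tail updates on in a single pass
 * through the register shadowing and write logic below.
 */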
4104
static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4105
int ctxt)
4106
{
4107
struct qib_devdata *dd = ppd->dd;
4108
struct qib_ctxtdata *rcd;
4109
u64 mask, val;
4110
unsigned long flags;
4111
4112
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4113
4114
if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4115
dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4116
if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4117
dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4118
if (op & QIB_RCVCTRL_TAILUPD_ENB)
4119
dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4120
if (op & QIB_RCVCTRL_TAILUPD_DIS)
4121
dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4122
if (op & QIB_RCVCTRL_PKEY_ENB)
4123
ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4124
if (op & QIB_RCVCTRL_PKEY_DIS)
4125
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4126
if (ctxt < 0) {
4127
mask = (1ULL << dd->ctxtcnt) - 1;
4128
rcd = NULL;
4129
} else {
4130
mask = (1ULL << ctxt);
4131
rcd = dd->rcd[ctxt];
4132
}
4133
if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4134
ppd->p_rcvctrl |=
4135
(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4136
if (!(dd->flags & QIB_NODMA_RTAIL)) {
4137
op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4138
dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4139
}
4140
/* Write these registers before the context is enabled. */
4141
qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4142
rcd->rcvhdrqtailaddr_phys);
4143
qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4144
rcd->rcvhdrq_phys);
4145
rcd->seq_cnt = 1;
4146
}
4147
if (op & QIB_RCVCTRL_CTXT_DIS)
4148
ppd->p_rcvctrl &=
4149
~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4150
if (op & QIB_RCVCTRL_BP_ENB)
4151
dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4152
if (op & QIB_RCVCTRL_BP_DIS)
4153
dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4154
if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4155
dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4156
if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4157
dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4158
/*
4159
* Decide which registers to write depending on the ops enabled.
4160
* Special case is "flush" (no bits set at all)
4161
* which needs to write both.
4162
*/
4163
if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4164
qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4165
if (op == 0 || (op & RCVCTRL_PORT_MODS))
4166
qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4167
if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4168
/*
4169
* Init the context registers also; if we were
4170
* disabled, tail and head should both be zero
4171
* already from the enable, but since we don't
4172
* know, we have to do it explicitly.
4173
*/
4174
val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4175
qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4176
4177
/* be sure enabling write seen; hd/tl should be 0 */
4178
(void) qib_read_kreg32(dd, kr_scratch);
4179
val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4180
dd->rcd[ctxt]->head = val;
4181
/* If kctxt, interrupt on next receive. */
4182
if (ctxt < dd->first_user_ctxt)
4183
val |= dd->rhdrhead_intr_off;
4184
qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4185
} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4186
dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4187
/* arm rcv interrupt */
4188
val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4189
qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4190
}
4191
if (op & QIB_RCVCTRL_CTXT_DIS) {
4192
unsigned f;
4193
4194
/* Now that the context is disabled, clear these registers. */
4195
if (ctxt >= 0) {
4196
qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4197
qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4198
for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4199
qib_write_ureg(dd, ur_rcvflowtable + f,
4200
TIDFLOW_ERRBITS, ctxt);
4201
} else {
4202
unsigned i;
4203
4204
for (i = 0; i < dd->cfgctxts; i++) {
4205
qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4206
i, 0);
4207
qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4208
for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4209
qib_write_ureg(dd, ur_rcvflowtable + f,
4210
TIDFLOW_ERRBITS, i);
4211
}
4212
}
4213
}
4214
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4215
}
4216
4217
/*
4218
* Modify the SENDCTRL register in chip-specific way. This
4219
* is a function where there are multiple such registers with
4220
* slightly different layouts.
4221
* The chip doesn't allow back-to-back sendctrl writes, so write
4222
* the scratch register after writing sendctrl.
4223
*
4224
* Which register is written depends on the operation.
4225
* Most operate on the common register, while
4226
* SEND_ENB and SEND_DIS operate on the per-port ones.
4227
* SEND_ENB is included in common because it can change SPCL_TRIG
4228
*/
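/*
 * The "no back-to-back sendctrl writes" rule above is why every sendctrl
 * write in this function is followed by a scratch write, e.g.
 * (illustrative of the pattern used below):
 *
 *	qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
 *	qib_write_kreg(dd, kr_scratch, 0);
 */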
4229
#define SENDCTRL_COMMON_MODS (\
4230
QIB_SENDCTRL_CLEAR | \
4231
QIB_SENDCTRL_AVAIL_DIS | \
4232
QIB_SENDCTRL_AVAIL_ENB | \
4233
QIB_SENDCTRL_AVAIL_BLIP | \
4234
QIB_SENDCTRL_DISARM | \
4235
QIB_SENDCTRL_DISARM_ALL | \
4236
QIB_SENDCTRL_SEND_ENB)
4237
4238
#define SENDCTRL_PORT_MODS (\
4239
QIB_SENDCTRL_CLEAR | \
4240
QIB_SENDCTRL_SEND_ENB | \
4241
QIB_SENDCTRL_SEND_DIS | \
4242
QIB_SENDCTRL_FLUSH)
4243
4244
static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4245
{
4246
struct qib_devdata *dd = ppd->dd;
4247
u64 tmp_dd_sendctrl;
4248
unsigned long flags;
4249
4250
spin_lock_irqsave(&dd->sendctrl_lock, flags);
4251
4252
/* First the dd ones that are "sticky", saved in shadow */
4253
if (op & QIB_SENDCTRL_CLEAR)
4254
dd->sendctrl = 0;
4255
if (op & QIB_SENDCTRL_AVAIL_DIS)
4256
dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4257
else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4258
dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4259
if (dd->flags & QIB_USE_SPCL_TRIG)
4260
dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4261
}
4262
4263
/* Then the ppd ones that are "sticky", saved in shadow */
4264
if (op & QIB_SENDCTRL_SEND_DIS)
4265
ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4266
else if (op & QIB_SENDCTRL_SEND_ENB)
4267
ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4268
4269
if (op & QIB_SENDCTRL_DISARM_ALL) {
4270
u32 i, last;
4271
4272
tmp_dd_sendctrl = dd->sendctrl;
4273
last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4274
/*
4275
* Disarm any buffers that are not yet launched,
4276
* disabling updates until done.
4277
*/
4278
tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4279
for (i = 0; i < last; i++) {
4280
qib_write_kreg(dd, kr_sendctrl,
4281
tmp_dd_sendctrl |
4282
SYM_MASK(SendCtrl, Disarm) | i);
4283
qib_write_kreg(dd, kr_scratch, 0);
4284
}
4285
}
4286
4287
if (op & QIB_SENDCTRL_FLUSH) {
4288
u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4289
4290
/*
4291
* Now drain all the fifos. The Abort bit should never be
4292
* needed, so for now, at least, we don't use it.
4293
*/
4294
tmp_ppd_sendctrl |=
4295
SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4296
SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4297
SYM_MASK(SendCtrl_0, TxeBypassIbc);
4298
qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4299
qib_write_kreg(dd, kr_scratch, 0);
4300
}
4301
4302
tmp_dd_sendctrl = dd->sendctrl;
4303
4304
if (op & QIB_SENDCTRL_DISARM)
4305
tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4306
((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4307
SYM_LSB(SendCtrl, DisarmSendBuf));
4308
if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4309
(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4310
tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4311
4312
if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4313
qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4314
qib_write_kreg(dd, kr_scratch, 0);
4315
}
4316
4317
if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4318
qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4319
qib_write_kreg(dd, kr_scratch, 0);
4320
}
4321
4322
if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4323
qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4324
qib_write_kreg(dd, kr_scratch, 0);
4325
}
4326
4327
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4328
4329
if (op & QIB_SENDCTRL_FLUSH) {
4330
u32 v;
4331
/*
4332
* ensure writes have hit chip, then do a few
4333
* more reads, to allow DMA of pioavail registers
4334
* to occur, so in-memory copy is in sync with
4335
* the chip. Not always safe to sleep.
4336
*/
4337
v = qib_read_kreg32(dd, kr_scratch);
4338
qib_write_kreg(dd, kr_scratch, v);
4339
v = qib_read_kreg32(dd, kr_scratch);
4340
qib_write_kreg(dd, kr_scratch, v);
4341
qib_read_kreg32(dd, kr_scratch);
4342
}
4343
}
4344
4345
#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4346
#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4347
#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4348
4349
/**
4350
* qib_portcntr_7322 - read a per-port chip counter
4351
* @ppd: the qlogic_ib pport
4352
* @reg: the counter to read (not a chip offset)
4353
*/
4354
static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4355
{
4356
struct qib_devdata *dd = ppd->dd;
4357
u64 ret = 0ULL;
4358
u16 creg;
4359
/* 0xffff for unimplemented or synthesized counters */
4360
static const u32 xlator[] = {
4361
[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4362
[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4363
[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4364
[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4365
[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4366
[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4367
[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4368
[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4369
[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4370
[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4371
[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4372
[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4373
[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4374
[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4375
[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4376
[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4377
[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4378
[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4379
[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4380
[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4381
[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4382
[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4383
[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4384
[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4385
[QIBPORTCNTR_ERRLINK] = crp_errlink,
4386
[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4387
[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4388
[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4389
[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4390
[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4391
/*
4392
* the next 3 aren't really counters, but were implemented
4393
* as counters in older chips, so still get accessed as
4394
* though they were counters from this code.
4395
*/
4396
[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4397
[QIBPORTCNTR_PSSTART] = krp_psstart,
4398
[QIBPORTCNTR_PSSTAT] = krp_psstat,
4399
/* pseudo-counter, summed for all ports */
4400
[QIBPORTCNTR_KHDROVFL] = 0xffff,
4401
};
4402
4403
if (reg >= ARRAY_SIZE(xlator)) {
4404
qib_devinfo(ppd->dd->pcidev,
4405
"Unimplemented portcounter %u\n", reg);
4406
goto done;
4407
}
4408
creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4409
4410
/* handle non-counters and special cases first */
4411
if (reg == QIBPORTCNTR_KHDROVFL) {
4412
int i;
4413
4414
/* sum over all kernel contexts (skip if mini_init) */
4415
for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4416
struct qib_ctxtdata *rcd = dd->rcd[i];
4417
4418
if (!rcd || rcd->ppd != ppd)
4419
continue;
4420
ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4421
}
4422
goto done;
4423
} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4424
/*
4425
* Used as part of the synthesis of port_rcv_errors
4426
* in the verbs code for IBTA counters. Not needed for 7322,
4427
* because all the errors are already counted by other cntrs.
4428
*/
4429
goto done;
4430
} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4431
reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4432
/* were counters in older chips, now per-port kernel regs */
4433
ret = qib_read_kreg_port(ppd, creg);
4434
goto done;
4435
}
4436
4437
/*
4438
* Only fast increment counters are 64 bits; use 32 bit reads to
4439
* avoid two independent reads when on Opteron.
4440
*/
4441
if (xlator[reg] & _PORT_64BIT_FLAG)
4442
ret = read_7322_creg_port(ppd, creg);
4443
else
4444
ret = read_7322_creg32_port(ppd, creg);
4445
if (creg == crp_ibsymbolerr) {
4446
if (ppd->cpspec->ibdeltainprog)
4447
ret -= ret - ppd->cpspec->ibsymsnap;
4448
ret -= ppd->cpspec->ibsymdelta;
4449
} else if (creg == crp_iblinkerrrecov) {
4450
if (ppd->cpspec->ibdeltainprog)
4451
ret -= ret - ppd->cpspec->iblnkerrsnap;
4452
ret -= ppd->cpspec->iblnkerrdelta;
4453
} else if (creg == crp_errlink)
4454
ret -= ppd->cpspec->ibmalfdelta;
4455
else if (creg == crp_iblinkdown)
4456
ret += ppd->cpspec->iblnkdowndelta;
4457
done:
4458
return ret;
4459
}
4460
4461
/*
4462
* Device counter names (not port-specific), one line per stat,
4463
* single string. Used by utilities like ipathstats to print the stats
4464
* in a way which works for different versions of drivers, without changing
4465
* the utility. Names need to be 12 chars or less (w/o newline), for proper
4466
* display by utility.
4467
* Non-error counters are first.
4468
* Start of "error" conters is indicated by a leading "E " on the first
4469
* "error" counter, and doesn't count in label length.
4470
* The EgrOvfl list needs to be last so we truncate them at the configured
4471
* context count for the device.
4472
* cntr7322indices contains the corresponding register indices.
4473
*/
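/*
 * Illustrative example of the truncation note above: on a device
 * configured for 6 contexts, init_7322_cntrnames() stops counting names
 * after "Ctxt5EgrOvfl", so the remaining per-context EgrOvfl lines are
 * never reported for that device.
 */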
4474
static const char cntr7322names[] =
4475
"Interrupts\n"
4476
"HostBusStall\n"
4477
"E RxTIDFull\n"
4478
"RxTIDInvalid\n"
4479
"RxTIDFloDrop\n" /* 7322 only */
4480
"Ctxt0EgrOvfl\n"
4481
"Ctxt1EgrOvfl\n"
4482
"Ctxt2EgrOvfl\n"
4483
"Ctxt3EgrOvfl\n"
4484
"Ctxt4EgrOvfl\n"
4485
"Ctxt5EgrOvfl\n"
4486
"Ctxt6EgrOvfl\n"
4487
"Ctxt7EgrOvfl\n"
4488
"Ctxt8EgrOvfl\n"
4489
"Ctxt9EgrOvfl\n"
4490
"Ctx10EgrOvfl\n"
4491
"Ctx11EgrOvfl\n"
4492
"Ctx12EgrOvfl\n"
4493
"Ctx13EgrOvfl\n"
4494
"Ctx14EgrOvfl\n"
4495
"Ctx15EgrOvfl\n"
4496
"Ctx16EgrOvfl\n"
4497
"Ctx17EgrOvfl\n"
4498
;
4499
4500
static const u32 cntr7322indices[] = {
4501
cr_lbint | _PORT_64BIT_FLAG,
4502
cr_lbstall | _PORT_64BIT_FLAG,
4503
cr_tidfull,
4504
cr_tidinvalid,
4505
cr_rxtidflowdrop,
4506
cr_base_egrovfl + 0,
4507
cr_base_egrovfl + 1,
4508
cr_base_egrovfl + 2,
4509
cr_base_egrovfl + 3,
4510
cr_base_egrovfl + 4,
4511
cr_base_egrovfl + 5,
4512
cr_base_egrovfl + 6,
4513
cr_base_egrovfl + 7,
4514
cr_base_egrovfl + 8,
4515
cr_base_egrovfl + 9,
4516
cr_base_egrovfl + 10,
4517
cr_base_egrovfl + 11,
4518
cr_base_egrovfl + 12,
4519
cr_base_egrovfl + 13,
4520
cr_base_egrovfl + 14,
4521
cr_base_egrovfl + 15,
4522
cr_base_egrovfl + 16,
4523
cr_base_egrovfl + 17,
4524
};
4525
4526
/*
4527
* same as cntr7322names and cntr7322indices, but for port-specific counters.
4528
* portcntr7322indices is somewhat complicated by some registers needing
4529
* adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4530
*/
4531
static const char portcntr7322names[] =
4532
"TxPkt\n"
4533
"TxFlowPkt\n"
4534
"TxWords\n"
4535
"RxPkt\n"
4536
"RxFlowPkt\n"
4537
"RxWords\n"
4538
"TxFlowStall\n"
4539
"TxDmaDesc\n" /* 7220 and 7322-only */
4540
"E RxDlidFltr\n" /* 7220 and 7322-only */
4541
"IBStatusChng\n"
4542
"IBLinkDown\n"
4543
"IBLnkRecov\n"
4544
"IBRxLinkErr\n"
4545
"IBSymbolErr\n"
4546
"RxLLIErr\n"
4547
"RxBadFormat\n"
4548
"RxBadLen\n"
4549
"RxBufOvrfl\n"
4550
"RxEBP\n"
4551
"RxFlowCtlErr\n"
4552
"RxICRCerr\n"
4553
"RxLPCRCerr\n"
4554
"RxVCRCerr\n"
4555
"RxInvalLen\n"
4556
"RxInvalPKey\n"
4557
"RxPktDropped\n"
4558
"TxBadLength\n"
4559
"TxDropped\n"
4560
"TxInvalLen\n"
4561
"TxUnderrun\n"
4562
"TxUnsupVL\n"
4563
"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4564
"RxVL15Drop\n"
4565
"RxVlErr\n"
4566
"XcessBufOvfl\n"
4567
"RxQPBadCtxt\n" /* 7322-only from here down */
4568
"TXBadHeader\n"
4569
;
4570
4571
static const u32 portcntr7322indices[] = {
4572
QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4573
crp_pktsendflow,
4574
QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4575
QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4576
crp_pktrcvflowctrl,
4577
QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4578
QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4579
crp_txsdmadesc | _PORT_64BIT_FLAG,
4580
crp_rxdlidfltr,
4581
crp_ibstatuschange,
4582
QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4583
QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4584
QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4585
QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4586
QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4587
QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4588
QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4589
QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4590
QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4591
crp_rcvflowctrlviol,
4592
QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4593
QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4594
QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4595
QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4596
QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4597
QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4598
crp_txminmaxlenerr,
4599
crp_txdroppedpkt,
4600
crp_txlenerr,
4601
crp_txunderrun,
4602
crp_txunsupvl,
4603
QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4604
QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4605
QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4606
QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4607
crp_rxqpinvalidctxt,
4608
crp_txhdrerr,
4609
};
4610
4611
/* do all the setup to make the counter reads efficient later */
4612
static void init_7322_cntrnames(struct qib_devdata *dd)
4613
{
4614
int i, j = 0;
4615
char *s;
4616
4617
for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4618
i++) {
4619
/* we always have at least one counter before the egrovfl */
4620
if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4621
j = 1;
4622
s = strchr(s + 1, '\n');
4623
if (s && j)
4624
j++;
4625
}
4626
dd->cspec->ncntrs = i;
4627
if (!s)
4628
/* full list; size is without terminating null */
4629
dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4630
else
4631
dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4632
dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4633
* sizeof(u64), GFP_KERNEL);
4634
if (!dd->cspec->cntrs)
4635
qib_dev_err(dd, "Failed allocation for counters\n");
4636
4637
for (i = 0, s = (char *)portcntr7322names; s; i++)
4638
s = strchr(s + 1, '\n');
4639
dd->cspec->nportcntrs = i - 1;
4640
dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4641
for (i = 0; i < dd->num_pports; ++i) {
4642
dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4643
* sizeof(u64), GFP_KERNEL);
4644
if (!dd->pport[i].cpspec->portcntrs)
4645
qib_dev_err(dd, "Failed allocation for"
4646
" portcounters\n");
4647
}
4648
}
4649
4650
static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4651
u64 **cntrp)
4652
{
4653
u32 ret;
4654
4655
if (namep) {
4656
ret = dd->cspec->cntrnamelen;
4657
if (pos >= ret)
4658
ret = 0; /* final read after getting everything */
4659
else
4660
*namep = (char *) cntr7322names;
4661
} else {
4662
u64 *cntr = dd->cspec->cntrs;
4663
int i;
4664
4665
ret = dd->cspec->ncntrs * sizeof(u64);
4666
if (!cntr || pos >= ret) {
4667
/* everything read, or couldn't get memory */
4668
ret = 0;
4669
goto done;
4670
}
4671
*cntrp = cntr;
4672
for (i = 0; i < dd->cspec->ncntrs; i++)
4673
if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4674
*cntr++ = read_7322_creg(dd,
4675
cntr7322indices[i] &
4676
_PORT_CNTR_IDXMASK);
4677
else
4678
*cntr++ = read_7322_creg32(dd,
4679
cntr7322indices[i]);
4680
}
4681
done:
4682
return ret;
4683
}
4684
4685
static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4686
char **namep, u64 **cntrp)
4687
{
4688
u32 ret;
4689
4690
if (namep) {
4691
ret = dd->cspec->portcntrnamelen;
4692
if (pos >= ret)
4693
ret = 0; /* final read after getting everything */
4694
else
4695
*namep = (char *)portcntr7322names;
4696
} else {
4697
struct qib_pportdata *ppd = &dd->pport[port];
4698
u64 *cntr = ppd->cpspec->portcntrs;
4699
int i;
4700
4701
ret = dd->cspec->nportcntrs * sizeof(u64);
4702
if (!cntr || pos >= ret) {
4703
/* everything read, or couldn't get memory */
4704
ret = 0;
4705
goto done;
4706
}
4707
*cntrp = cntr;
4708
for (i = 0; i < dd->cspec->nportcntrs; i++) {
4709
if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4710
*cntr++ = qib_portcntr_7322(ppd,
4711
portcntr7322indices[i] &
4712
_PORT_CNTR_IDXMASK);
4713
else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4714
*cntr++ = read_7322_creg_port(ppd,
4715
portcntr7322indices[i] &
4716
_PORT_CNTR_IDXMASK);
4717
else
4718
*cntr++ = read_7322_creg32_port(ppd,
4719
portcntr7322indices[i]);
4720
}
4721
}
4722
done:
4723
return ret;
4724
}
4725
4726
/**
4727
* qib_get_7322_faststats - get word counters from chip before they overflow
4728
* @opaque - contains a pointer to the qlogic_ib device qib_devdata
4729
*
4730
* VESTIGIAL IBA7322 has no "small fast counters", so the only
4731
* real purpose of this function is to maintain the notion of
4732
* "active time", which in turn is only logged into the eeprom,
4733
* which we don't have yet for 7322-based boards.
4734
*
4735
* called from add_timer
4736
*/
4737
static void qib_get_7322_faststats(unsigned long opaque)
4738
{
4739
struct qib_devdata *dd = (struct qib_devdata *) opaque;
4740
struct qib_pportdata *ppd;
4741
unsigned long flags;
4742
u64 traffic_wds;
4743
int pidx;
4744
4745
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4746
ppd = dd->pport + pidx;
4747
4748
/*
4749
* If the port isn't enabled, or isn't operational, or
4750
* diags is running (can cause memory diags to fail)
4751
* skip this port this time.
4752
*/
4753
if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4754
|| dd->diag_client)
4755
continue;
4756
4757
/*
4758
* Maintain an activity timer, based on traffic
4759
* exceeding a threshold, so we need to check the word-counts
4760
* even if they are 64-bit.
4761
*/
4762
traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4763
qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4764
spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4765
traffic_wds -= ppd->dd->traffic_wds;
4766
ppd->dd->traffic_wds += traffic_wds;
4767
if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4768
atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4769
spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4770
if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4771
QIB_IB_QDR) &&
4772
(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4773
QIBL_LINKACTIVE)) &&
4774
ppd->cpspec->qdr_dfe_time &&
4775
time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4776
ppd->cpspec->qdr_dfe_on = 0;
4777
4778
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4779
ppd->dd->cspec->r1 ?
4780
QDR_STATIC_ADAPT_INIT_R1 :
4781
QDR_STATIC_ADAPT_INIT);
4782
force_h1(ppd);
4783
}
4784
}
4785
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4786
}
4787
4788
/*
4789
* If we were using MSIx, try to fallback to INTx.
4790
*/
4791
static int qib_7322_intr_fallback(struct qib_devdata *dd)
4792
{
4793
if (!dd->cspec->num_msix_entries)
4794
return 0; /* already using INTx */
4795
4796
qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4797
" trying INTx interrupts\n");
4798
qib_7322_nomsix(dd);
4799
qib_enable_intx(dd->pcidev);
4800
qib_setup_7322_interrupt(dd, 0);
4801
return 1;
4802
}
4803
4804
/*
4805
* Reset the XGXS (between serdes and IBC). Slightly less intrusive
4806
* than resetting the IBC or external link state, and useful in some
4807
* cases to cause some retraining. To do this right, we reset IBC
4808
* as well, then return to previous state (which may be still in reset)
4809
* NOTE: some callers of this "know" this writes the current value
4810
* of cpspec->ibcctrl_a as part of its operation, so if that changes,
4811
* check all callers.
4812
*/
4813
static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4814
{
4815
u64 val;
4816
struct qib_devdata *dd = ppd->dd;
4817
const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4818
SYM_MASK(IBPCSConfig_0, xcv_treset) |
4819
SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4820
4821
val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4822
qib_write_kreg(dd, kr_hwerrmask,
4823
dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
4824
qib_write_kreg_port(ppd, krp_ibcctrl_a,
4825
ppd->cpspec->ibcctrl_a &
4826
~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4827
4828
qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4829
qib_read_kreg32(dd, kr_scratch);
4830
qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4831
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4832
qib_write_kreg(dd, kr_scratch, 0ULL);
4833
qib_write_kreg(dd, kr_hwerrclear,
4834
SYM_MASK(HwErrClear, statusValidNoEopClear));
4835
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
4836
}
4837
4838
/*
4839
* This code for non-IBTA-compliant IB speed negotiation is only known to
4840
* work for the SDR to DDR transition, and only between an HCA and a switch
4841
* with recent firmware. It is based on observed heuristics, rather than
4842
* actual knowledge of the non-compliant speed negotiation.
4843
* It has a number of hard-coded fields, since the hope is to rewrite this
4844
* when a spec is available on how the negotiation is intended to work.
4845
*/
4846
static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
4847
u32 dcnt, u32 *data)
4848
{
4849
int i;
4850
u64 pbc;
4851
u32 __iomem *piobuf;
4852
u32 pnum, control, len;
4853
struct qib_devdata *dd = ppd->dd;
4854
4855
i = 0;
4856
len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
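/*
 * Illustrative arithmetic: with the 0x40-dword MAD payloads used by
 * qib_autoneg_7322_send() below, dcnt is 64, so len is 7 + 64 + 1 = 72
 * dwords (288 bytes) including the ICRC dword.
 */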
4857
control = qib_7322_setpbc_control(ppd, len, 0, 15);
4858
pbc = ((u64) control << 32) | len;
4859
while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4860
if (i++ > 15)
4861
return;
4862
udelay(2);
4863
}
4864
/* disable header check on this packet, since it can't be valid */
4865
dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
4866
writeq(pbc, piobuf);
4867
qib_flush_wc();
4868
qib_pio_copy(piobuf + 2, hdr, 7);
4869
qib_pio_copy(piobuf + 9, data, dcnt);
4870
if (dd->flags & QIB_USE_SPCL_TRIG) {
4871
u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
4872
4873
qib_flush_wc();
4874
__raw_writel(0xaebecede, piobuf + spcl_off);
4875
}
4876
qib_flush_wc();
4877
qib_sendbuf_done(dd, pnum);
4878
/* and re-enable hdr check */
4879
dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
4880
}
4881
4882
/*
4883
* _start packet gets sent twice at start, _done gets sent twice at end
4884
*/
4885
static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
4886
{
4887
struct qib_devdata *dd = ppd->dd;
4888
static u32 swapped;
4889
u32 dw, i, hcnt, dcnt, *data;
4890
static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4891
static u32 madpayload_start[0x40] = {
4892
0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4893
0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4894
0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
4895
};
4896
static u32 madpayload_done[0x40] = {
4897
0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4898
0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4899
0x40000001, 0x1388, 0x15e, /* rest 0's */
4900
};
4901
4902
dcnt = ARRAY_SIZE(madpayload_start);
4903
hcnt = ARRAY_SIZE(hdr);
4904
if (!swapped) {
4905
/* for maintainability, do it at runtime */
4906
for (i = 0; i < hcnt; i++) {
4907
dw = (__force u32) cpu_to_be32(hdr[i]);
4908
hdr[i] = dw;
4909
}
4910
for (i = 0; i < dcnt; i++) {
4911
dw = (__force u32) cpu_to_be32(madpayload_start[i]);
4912
madpayload_start[i] = dw;
4913
dw = (__force u32) cpu_to_be32(madpayload_done[i]);
4914
madpayload_done[i] = dw;
4915
}
4916
swapped = 1;
4917
}
4918
4919
data = which ? madpayload_done : madpayload_start;
4920
4921
autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4922
qib_read_kreg64(dd, kr_scratch);
4923
udelay(2);
4924
autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4925
qib_read_kreg64(dd, kr_scratch);
4926
udelay(2);
4927
}
4928
4929
/*
4930
* Do the absolute minimum to cause an IB speed change, and make it
4931
* ready, but don't actually trigger the change. The caller will
4932
* do that when ready (if link is in Polling training state, it will
4933
* happen immediately, otherwise when link next goes down)
4934
*
4935
* This routine should only be used as part of the DDR autonegotiation
4936
* code for devices that are not compliant with IB 1.2 (or code that
4937
* fixes things up for same).
4938
*
4939
* When the link has gone down and autoneg is enabled, or autoneg has
4940
* failed and we give up until next time, we set both speeds, and
4941
* then we want IBTA enabled as well as "use max enabled speed".
4942
*/
4943
static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
4944
{
4945
u64 newctrlb;
4946
newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
4947
IBA7322_IBC_IBTA_1_2_MASK |
4948
IBA7322_IBC_MAX_SPEED_MASK);
4949
4950
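/* speed is a bitmask of enabled speeds; more than one bit set means
 * multiple speeds are enabled */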
if (speed & (speed - 1)) /* multiple speeds */
4951
newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
4952
IBA7322_IBC_IBTA_1_2_MASK |
4953
IBA7322_IBC_MAX_SPEED_MASK;
4954
else
4955
newctrlb |= speed == QIB_IB_QDR ?
4956
IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
4957
((speed == QIB_IB_DDR ?
4958
IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
4959
4960
if (newctrlb == ppd->cpspec->ibcctrl_b)
4961
return;
4962
4963
ppd->cpspec->ibcctrl_b = newctrlb;
4964
qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4965
qib_write_kreg(ppd->dd, kr_scratch, 0);
4966
}
4967
4968
/*
4969
* This routine is only used when we are not talking to another
4970
* IB 1.2-compliant device that we think can do DDR.
4971
* (This includes all existing switch chips as of Oct 2007.)
4972
* 1.2-compliant devices go directly to DDR prior to reaching INIT
4973
*/
4974
static void try_7322_autoneg(struct qib_pportdata *ppd)
4975
{
4976
unsigned long flags;
4977
4978
spin_lock_irqsave(&ppd->lflags_lock, flags);
4979
ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
4980
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4981
qib_autoneg_7322_send(ppd, 0);
4982
set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
4983
qib_7322_mini_pcs_reset(ppd);
4984
/* 2 msec is minimum length of a poll cycle */
4985
queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
4986
msecs_to_jiffies(2));
4987
}
4988
4989
/*
4990
* Handle the empirically determined mechanism for auto-negotiation
4991
* of DDR speed with switches.
4992
*/
4993
static void autoneg_7322_work(struct work_struct *work)
4994
{
4995
struct qib_pportdata *ppd;
4996
struct qib_devdata *dd;
4997
u64 startms;
4998
u32 i;
4999
unsigned long flags;
5000
5001
ppd = container_of(work, struct qib_chippport_specific,
5002
autoneg_work.work)->ppd;
5003
dd = ppd->dd;
5004
5005
startms = jiffies_to_msecs(jiffies);
5006
5007
/*
5008
* Busy wait for this first part, it should be at most a
5009
* few hundred usec, since we scheduled ourselves for 2msec.
5010
*/
5011
for (i = 0; i < 25; i++) {
5012
if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5013
== IB_7322_LT_STATE_POLLQUIET) {
5014
qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5015
break;
5016
}
5017
udelay(100);
5018
}
5019
5020
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5021
goto done; /* we got there early or told to stop */
5022
5023
/* we expect this to timeout */
5024
if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5025
!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5026
msecs_to_jiffies(90)))
5027
goto done;
5028
qib_7322_mini_pcs_reset(ppd);
5029
5030
/* we expect this to timeout */
5031
if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5032
!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5033
msecs_to_jiffies(1700)))
5034
goto done;
5035
qib_7322_mini_pcs_reset(ppd);
5036
5037
set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5038
5039
/*
5040
* Wait up to 250 msec for link to train and get to INIT at DDR;
5041
* this should terminate early.
5042
*/
5043
wait_event_timeout(ppd->cpspec->autoneg_wait,
5044
!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5045
msecs_to_jiffies(250));
5046
done:
5047
if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5048
spin_lock_irqsave(&ppd->lflags_lock, flags);
5049
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5050
if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5051
ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5052
ppd->cpspec->autoneg_tries = 0;
5053
}
5054
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5055
set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5056
}
5057
}
5058
5059
/*
5060
* This routine is used to request that the IPG be set in the QLogic switch.
5061
* Only called if r1.
5062
*/
5063
static void try_7322_ipg(struct qib_pportdata *ppd)
5064
{
5065
struct qib_ibport *ibp = &ppd->ibport_data;
5066
struct ib_mad_send_buf *send_buf;
5067
struct ib_mad_agent *agent;
5068
struct ib_smp *smp;
5069
unsigned delay;
5070
int ret;
5071
5072
agent = ibp->send_agent;
5073
if (!agent)
5074
goto retry;
5075
5076
send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5077
IB_MGMT_MAD_DATA, GFP_ATOMIC);
5078
if (IS_ERR(send_buf))
5079
goto retry;
5080
5081
if (!ibp->smi_ah) {
5082
struct ib_ah_attr attr;
5083
struct ib_ah *ah;
5084
5085
memset(&attr, 0, sizeof attr);
5086
attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5087
attr.port_num = ppd->port;
5088
ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5089
if (IS_ERR(ah))
5090
ret = -EINVAL;
5091
else {
5092
send_buf->ah = ah;
5093
ibp->smi_ah = to_iah(ah);
5094
ret = 0;
5095
}
5096
} else {
5097
send_buf->ah = &ibp->smi_ah->ibah;
5098
ret = 0;
5099
}
5100
5101
smp = send_buf->mad;
5102
smp->base_version = IB_MGMT_BASE_VERSION;
5103
smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5104
smp->class_version = 1;
5105
smp->method = IB_MGMT_METHOD_SEND;
5106
smp->hop_cnt = 1;
5107
smp->attr_id = QIB_VENDOR_IPG;
5108
smp->attr_mod = 0;
5109
5110
if (!ret)
5111
ret = ib_post_send_mad(send_buf, NULL);
5112
if (ret)
5113
ib_free_send_mad(send_buf);
5114
retry:
5115
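/* retry interval doubles with each attempt */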
delay = 2 << ppd->cpspec->ipg_tries;
5116
queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5117
msecs_to_jiffies(delay));
5118
}
5119
5120
/*
5121
* Timeout handler for setting IPG.
5122
* Only called if r1.
5123
*/
5124
static void ipg_7322_work(struct work_struct *work)
5125
{
5126
struct qib_pportdata *ppd;
5127
5128
ppd = container_of(work, struct qib_chippport_specific,
5129
ipg_work.work)->ppd;
5130
if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5131
&& ++ppd->cpspec->ipg_tries <= 10)
5132
try_7322_ipg(ppd);
5133
}
5134
5135
static u32 qib_7322_iblink_state(u64 ibcs)
5136
{
5137
u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5138
5139
switch (state) {
5140
case IB_7322_L_STATE_INIT:
5141
state = IB_PORT_INIT;
5142
break;
5143
case IB_7322_L_STATE_ARM:
5144
state = IB_PORT_ARMED;
5145
break;
5146
case IB_7322_L_STATE_ACTIVE:
5147
/* fall through */
5148
case IB_7322_L_STATE_ACT_DEFER:
5149
state = IB_PORT_ACTIVE;
5150
break;
5151
default: /* fall through */
5152
case IB_7322_L_STATE_DOWN:
5153
state = IB_PORT_DOWN;
5154
break;
5155
}
5156
return state;
5157
}
5158
5159
/* returns the IBTA physical port state, rather than the IBC link training state */
5160
static u8 qib_7322_phys_portstate(u64 ibcs)
5161
{
5162
u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5163
return qib_7322_physportstate[state];
5164
}
5165
5166
static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5167
{
5168
int ret = 0, symadj = 0;
5169
unsigned long flags;
5170
int mult;
5171
5172
spin_lock_irqsave(&ppd->lflags_lock, flags);
5173
ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5174
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5175
5176
/* Update our picture of width and speed from chip */
5177
if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5178
ppd->link_speed_active = QIB_IB_QDR;
5179
mult = 4;
5180
} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5181
ppd->link_speed_active = QIB_IB_DDR;
5182
mult = 2;
5183
} else {
5184
ppd->link_speed_active = QIB_IB_SDR;
5185
mult = 1;
5186
}
5187
if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5188
ppd->link_width_active = IB_WIDTH_4X;
5189
mult *= 4;
5190
} else
5191
ppd->link_width_active = IB_WIDTH_1X;
5192
ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5193
5194
if (!ibup) {
5195
u64 clr;
5196
5197
/* Link went down. */
5198
/* do IPG MAD again after linkdown, even if last time failed */
5199
ppd->cpspec->ipg_tries = 0;
5200
clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5201
(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5202
SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5203
if (clr)
5204
qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5205
if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5206
QIBL_IB_AUTONEG_INPROG)))
5207
set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5208
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5209
/* unlock the Tx settings, speed may change */
5210
qib_write_kreg_port(ppd, krp_tx_deemph_override,
5211
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5212
reset_tx_deemphasis_override));
5213
qib_cancel_sends(ppd);
5214
/* on link down, ensure sane pcs state */
5215
qib_7322_mini_pcs_reset(ppd);
5216
spin_lock_irqsave(&ppd->sdma_lock, flags);
5217
if (__qib_sdma_running(ppd))
5218
__qib_sdma_process_event(ppd,
5219
qib_sdma_event_e70_go_idle);
5220
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5221
}
5222
clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5223
if (clr == ppd->cpspec->iblnkdownsnap)
5224
ppd->cpspec->iblnkdowndelta++;
5225
} else {
5226
if (qib_compat_ddr_negotiate &&
5227
!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5228
QIBL_IB_AUTONEG_INPROG)) &&
5229
ppd->link_speed_active == QIB_IB_SDR &&
5230
(ppd->link_speed_enabled & QIB_IB_DDR)
5231
&& ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5232
/* we are SDR, and auto-negotiation enabled */
5233
++ppd->cpspec->autoneg_tries;
5234
if (!ppd->cpspec->ibdeltainprog) {
5235
ppd->cpspec->ibdeltainprog = 1;
5236
ppd->cpspec->ibsymdelta +=
5237
read_7322_creg32_port(ppd,
5238
crp_ibsymbolerr) -
5239
ppd->cpspec->ibsymsnap;
5240
ppd->cpspec->iblnkerrdelta +=
5241
read_7322_creg32_port(ppd,
5242
crp_iblinkerrrecov) -
5243
ppd->cpspec->iblnkerrsnap;
5244
}
5245
try_7322_autoneg(ppd);
5246
ret = 1; /* no other IB status change processing */
5247
} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5248
ppd->link_speed_active == QIB_IB_SDR) {
5249
qib_autoneg_7322_send(ppd, 1);
5250
set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5251
qib_7322_mini_pcs_reset(ppd);
5252
udelay(2);
5253
ret = 1; /* no other IB status change processing */
5254
} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5255
(ppd->link_speed_active & QIB_IB_DDR)) {
5256
spin_lock_irqsave(&ppd->lflags_lock, flags);
5257
ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5258
QIBL_IB_AUTONEG_FAILED);
5259
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5260
ppd->cpspec->autoneg_tries = 0;
5261
/* re-enable SDR, for next link down */
5262
set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5263
wake_up(&ppd->cpspec->autoneg_wait);
5264
symadj = 1;
5265
} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5266
/*
5267
* Clear autoneg failure flag, and do setup
5268
* so we'll try next time link goes down and
5269
* back to INIT (possibly connected to a
5270
* different device).
5271
*/
5272
spin_lock_irqsave(&ppd->lflags_lock, flags);
5273
ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5274
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5275
ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5276
symadj = 1;
5277
}
5278
if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5279
symadj = 1;
5280
if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5281
try_7322_ipg(ppd);
5282
if (!ppd->cpspec->recovery_init)
5283
setup_7322_link_recovery(ppd, 0);
5284
ppd->cpspec->qdr_dfe_time = jiffies +
5285
msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5286
}
5287
ppd->cpspec->ibmalfusesnap = 0;
5288
ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5289
crp_errlink);
5290
}
5291
if (symadj) {
5292
ppd->cpspec->iblnkdownsnap =
5293
read_7322_creg32_port(ppd, crp_iblinkdown);
5294
if (ppd->cpspec->ibdeltainprog) {
5295
ppd->cpspec->ibdeltainprog = 0;
5296
ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5297
crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5298
ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5299
crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5300
}
5301
} else if (!ibup && qib_compat_ddr_negotiate &&
5302
!ppd->cpspec->ibdeltainprog &&
5303
!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5304
ppd->cpspec->ibdeltainprog = 1;
5305
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5306
crp_ibsymbolerr);
5307
ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5308
crp_iblinkerrrecov);
5309
}
5310
5311
if (!ret)
5312
qib_setup_7322_setextled(ppd, ibup);
5313
return ret;
5314
}
5315
5316
/*
5317
* Does read/modify/write to appropriate registers to
5318
* set output and direction bits selected by mask.
5319
* These are in their canonical positions (e.g. lsb of
5320
* dir will end up in D48 of extctrl on existing chips).
5321
* Returns the contents of the GP Inputs.
5322
*/
5323
static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5324
{
5325
u64 read_val, new_out;
5326
unsigned long flags;
5327
5328
if (mask) {
5329
/* some bits being written, lock access to GPIO */
5330
dir &= mask;
5331
out &= mask;
5332
spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5333
dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5334
dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5335
new_out = (dd->cspec->gpio_out & ~mask) | out;
5336
5337
qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5338
qib_write_kreg(dd, kr_gpio_out, new_out);
5339
dd->cspec->gpio_out = new_out;
5340
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5341
}
5342
/*
5343
* It is unlikely that a read at this time would get valid
5344
* data on a pin whose direction line was set in the same
5345
* call to this function. We include the read here because
5346
* that allows us to potentially combine a change on one pin with
5347
* a read on another, and because the old code did something like
5348
* this.
5349
*/
5350
read_val = qib_read_kreg64(dd, kr_extstatus);
5351
return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5352
}
5353
5354
/* Enable writes to config EEPROM, if possible. Returns previous state */
5355
static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5356
{
5357
int prev_wen;
5358
u32 mask;
5359
5360
mask = 1 << QIB_EEPROM_WEN_NUM;
5361
prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5362
gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5363
5364
return prev_wen & 1;
5365
}
5366
5367
/*
5368
* Read fundamental info we need to use the chip. These are
5369
* the registers that describe chip capabilities, and are
5370
* saved in shadow registers.
5371
*/
5372
static void get_7322_chip_params(struct qib_devdata *dd)
5373
{
5374
u64 val;
5375
u32 piobufs;
5376
int mtu;
5377
5378
dd->palign = qib_read_kreg32(dd, kr_pagealign);
5379
5380
dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5381
5382
dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5383
dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5384
dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5385
dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5386
dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5387
5388
val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5389
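/* low 32 bits are the 2K buffer count, high 32 bits the 4K buffer count */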
dd->piobcnt2k = val & ~0U;
5390
dd->piobcnt4k = val >> 32;
5391
val = qib_read_kreg64(dd, kr_sendpiosize);
5392
dd->piosize2k = val & ~0U;
5393
dd->piosize4k = val >> 32;
5394
5395
mtu = ib_mtu_enum_to_int(qib_ibmtu);
5396
if (mtu == -1)
5397
mtu = QIB_DEFAULT_MTU;
5398
dd->pport[0].ibmtu = (u32)mtu;
5399
dd->pport[1].ibmtu = (u32)mtu;
5400
5401
/* these may be adjusted in init_chip_wc_pat() */
5402
dd->pio2kbase = (u32 __iomem *)
5403
((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5404
dd->pio4kbase = (u32 __iomem *)
5405
((char __iomem *) dd->kregbase +
5406
(dd->piobufbase >> 32));
5407
/*
5408
* 4K buffers take 2 pages; we use roundup just to be
5409
* paranoid; we calculate it once here, rather than on
5410
* every buf allocation
5411
*/
5412
dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5413
5414
piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5415
5416
dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5417
(sizeof(u64) * BITS_PER_BYTE / 2);
5418
}
5419
5420
/*
5421
* The chip base addresses in cspec and cpspec have to be set
5422
* after possible init_chip_wc_pat(), rather than in
5423
* get_7322_chip_params(), so split out as separate function
5424
*/
5425
static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5426
{
5427
u32 cregbase;
5428
cregbase = qib_read_kreg32(dd, kr_counterregbase);
5429
5430
dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5431
(char __iomem *)dd->kregbase);
5432
5433
dd->egrtidbase = (u64 __iomem *)
5434
((char __iomem *) dd->kregbase + dd->rcvegrbase);
5435
5436
/* port registers are defined as relative to base of chip */
5437
dd->pport[0].cpspec->kpregbase =
5438
(u64 __iomem *)((char __iomem *)dd->kregbase);
5439
dd->pport[1].cpspec->kpregbase =
5440
(u64 __iomem *)(dd->palign +
5441
(char __iomem *)dd->kregbase);
5442
dd->pport[0].cpspec->cpregbase =
5443
(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5444
kr_counterregbase) + (char __iomem *)dd->kregbase);
5445
dd->pport[1].cpspec->cpregbase =
5446
(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5447
kr_counterregbase) + (char __iomem *)dd->kregbase);
5448
}
5449
5450
/*
5451
* This is a fairly special-purpose observer, so we only support
5452
* the port-specific parts of SendCtrl
5453
*/
5454
5455
#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5456
SYM_MASK(SendCtrl_0, SDmaEnable) | \
5457
SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5458
SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5459
SYM_MASK(SendCtrl_0, SDmaHalt) | \
5460
SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5461
SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5462
5463
static int sendctrl_hook(struct qib_devdata *dd,
5464
const struct diag_observer *op, u32 offs,
5465
u64 *data, u64 mask, int only_32)
5466
{
5467
unsigned long flags;
5468
unsigned idx;
5469
unsigned pidx;
5470
struct qib_pportdata *ppd = NULL;
5471
u64 local_data, all_bits;
5472
5473
/*
5474
* The fixed correspondence between Physical ports and pports is
5475
* severed. We need to hunt for the ppd that corresponds
5476
* to the offset we got. And we have to do that without admitting
5477
* we know the stride, apparently.
5478
*/
5479
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5480
u64 __iomem *psptr;
5481
u32 psoffs;
5482
5483
ppd = dd->pport + pidx;
5484
if (!ppd->cpspec->kpregbase)
5485
continue;
5486
5487
psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5488
psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5489
if (psoffs == offs)
5490
break;
5491
}
5492
5493
/* If pport is not being managed by driver, just avoid shadows. */
5494
if (pidx >= dd->num_pports)
5495
ppd = NULL;
5496
5497
/* In any case, "idx" is flat index in kreg space */
5498
idx = offs / sizeof(u64);
5499
5500
all_bits = ~0ULL;
5501
if (only_32)
5502
all_bits >>= 32;
5503
5504
spin_lock_irqsave(&dd->sendctrl_lock, flags);
5505
if (!ppd || (mask & all_bits) != all_bits) {
5506
/*
5507
* At least some mask bits are zero, so we need
5508
* to read. The judgement call is whether from
5509
* reg or shadow. First-cut: read reg, and complain
5510
* if any bits which should be shadowed are different
5511
* from their shadowed value.
5512
*/
5513
if (only_32)
5514
local_data = (u64)qib_read_kreg32(dd, idx);
5515
else
5516
local_data = qib_read_kreg64(dd, idx);
5517
*data = (local_data & ~mask) | (*data & mask);
5518
}
5519
if (mask) {
5520
/*
5521
* At least some mask bits are one, so we need
5522
* to write, but only shadow some bits.
5523
*/
5524
u64 sval, tval; /* Shadowed, transient */
5525
5526
/*
5527
* New shadow val is bits we don't want to touch,
5528
* ORed with bits we do, that are intended for shadow.
5529
*/
5530
if (ppd) {
5531
sval = ppd->p_sendctrl & ~mask;
5532
sval |= *data & SENDCTRL_SHADOWED & mask;
5533
ppd->p_sendctrl = sval;
5534
} else
5535
sval = *data & SENDCTRL_SHADOWED & mask;
5536
tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5537
qib_write_kreg(dd, idx, tval);
5538
qib_write_kreg(dd, kr_scratch, 0Ull);
5539
}
5540
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5541
return only_32 ? 4 : 8;
5542
}
5543
5544
static const struct diag_observer sendctrl_0_observer = {
5545
sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5546
KREG_IDX(SendCtrl_0) * sizeof(u64)
5547
};
5548
5549
static const struct diag_observer sendctrl_1_observer = {
5550
sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5551
KREG_IDX(SendCtrl_1) * sizeof(u64)
5552
};
5553
5554
static ushort sdma_fetch_prio = 8;
5555
module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5556
MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5557
5558
/* Besides logging QSFP events, we set appropriate TxDDS values */
5559
static void init_txdds_table(struct qib_pportdata *ppd, int override);
5560
5561
static void qsfp_7322_event(struct work_struct *work)
5562
{
5563
struct qib_qsfp_data *qd;
5564
struct qib_pportdata *ppd;
5565
u64 pwrup;
5566
int ret;
5567
u32 le2;
5568
5569
qd = container_of(work, struct qib_qsfp_data, work);
5570
ppd = qd->ppd;
5571
pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
5572
5573
/*
5574
* Some QSFPs not only do not respond until the full power-up
5575
* time, but may behave badly if we try. So hold off responding
5576
* to insertion.
5577
*/
5578
while (1) {
5579
u64 now = get_jiffies_64();
5580
if (time_after64(now, pwrup))
5581
break;
5582
msleep(20);
5583
}
5584
ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5585
/*
5586
* Need to change LE2 back to defaults if we couldn't
5587
* read the cable type (to handle cable swaps), so do this
5588
* even on failure to read cable information. We don't
5589
* get here for QME, so IS_QME check not needed here.
5590
*/
5591
if (!ret && !ppd->dd->cspec->r1) {
5592
if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5593
le2 = LE2_QME;
5594
else if (qd->cache.atten[1] >= qib_long_atten &&
5595
QSFP_IS_CU(qd->cache.tech))
5596
le2 = LE2_5m;
5597
else
5598
le2 = LE2_DEFAULT;
5599
} else
5600
le2 = LE2_DEFAULT;
5601
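/* program the LE2 value into bits 9:7 of IB SerDes register 13 on all channels */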
ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5602
init_txdds_table(ppd, 0);
5603
}
5604
5605
/*
5606
* There is little we can do but complain to the user if QSFP
5607
* initialization fails.
5608
*/
5609
static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5610
{
5611
unsigned long flags;
5612
struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5613
struct qib_devdata *dd = ppd->dd;
5614
u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5615
5616
mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5617
qd->ppd = ppd;
5618
qib_qsfp_init(qd, qsfp_7322_event);
5619
spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5620
dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5621
dd->cspec->gpio_mask |= mod_prs_bit;
5622
qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5623
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5624
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5625
}
5626
5627
/*
5628
* called at device initialization time, and also if the txselect
5629
* module parameter is changed. This is used for cables that don't
5630
* have valid QSFP EEPROMs (not present, or attenuation is zero).
5631
* We initialize to the default, then if there is a specific
5632
* unit,port match, we use that (and set it immediately, for the
5633
* current speed, if the link is at INIT or better).
5634
* String format is "default# unit#,port#=# ... u,p=#", separators must
5635
* be a SPACE character. A newline terminates. The u,p=# tuples may
5636
* optionally have "u,p=#,#", where the final # is the H1 value.
5637
* The last specific match is used (actually, all are used, but last
5638
* one is the one that winds up set); if none at all, fall back on default.
5639
*/
5640
static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5641
{
5642
char *nxt, *str;
5643
u32 pidx, unit, port, deflt, h1;
5644
unsigned long val;
5645
int any = 0, seth1;
5646
int txdds_size;
5647
5648
str = txselect_list;
5649
5650
/* default number is validated in setup_txselect() */
5651
deflt = simple_strtoul(str, &nxt, 0);
5652
for (pidx = 0; pidx < dd->num_pports; ++pidx)
5653
dd->pport[pidx].cpspec->no_eep = deflt;
5654
5655
txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5656
if (IS_QME(dd) || IS_QMH(dd))
5657
txdds_size += TXDDS_MFG_SZ;
5658
5659
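/* parse successive space-separated "unit,port=val[,h1]" entries */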
while (*nxt && nxt[1]) {
5660
str = ++nxt;
5661
unit = simple_strtoul(str, &nxt, 0);
5662
if (nxt == str || !*nxt || *nxt != ',') {
5663
while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5664
;
5665
continue;
5666
}
5667
str = ++nxt;
5668
port = simple_strtoul(str, &nxt, 0);
5669
if (nxt == str || *nxt != '=') {
5670
while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5671
;
5672
continue;
5673
}
5674
str = ++nxt;
5675
val = simple_strtoul(str, &nxt, 0);
5676
if (nxt == str) {
5677
while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5678
;
5679
continue;
5680
}
5681
if (val >= txdds_size)
5682
continue;
5683
seth1 = 0;
5684
h1 = 0; /* gcc thinks it might be used uninitialized */
5685
if (*nxt == ',' && nxt[1]) {
5686
str = ++nxt;
5687
h1 = (u32)simple_strtoul(str, &nxt, 0);
5688
if (nxt == str)
5689
while (*nxt && *nxt++ != ' ') /* skip */
5690
;
5691
else
5692
seth1 = 1;
5693
}
5694
for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5695
++pidx) {
5696
struct qib_pportdata *ppd = &dd->pport[pidx];
5697
5698
if (ppd->port != port || !ppd->link_speed_supported)
5699
continue;
5700
ppd->cpspec->no_eep = val;
5701
if (seth1)
5702
ppd->cpspec->h1_val = h1;
5703
/* now change the IBC and serdes, overriding generic */
5704
init_txdds_table(ppd, 1);
5705
/* Re-enable the physical state machine on mezz boards
5706
* now that the correct settings have been set. */
5707
if (IS_QMH(dd) || IS_QME(dd))
5708
qib_set_ib_7322_lstate(ppd, 0,
5709
QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5710
any++;
5711
}
5712
if (*nxt == '\n')
5713
break; /* done */
5714
}
5715
if (change && !any) {
5716
/* no specific setting, use the default.
5717
* Change the IBC and serdes, but since it's
5718
* general, don't override specific settings.
5719
*/
5720
for (pidx = 0; pidx < dd->num_pports; ++pidx)
5721
if (dd->pport[pidx].link_speed_supported)
5722
init_txdds_table(&dd->pport[pidx], 0);
5723
}
5724
}
5725
5726
/* handle the txselect parameter changing */
5727
static int setup_txselect(const char *str, struct kernel_param *kp)
5728
{
5729
struct qib_devdata *dd;
5730
unsigned long val;
5731
char *n;
5732
if (strlen(str) >= MAX_ATTEN_LEN) {
5733
printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
5734
"too long\n");
5735
return -ENOSPC;
5736
}
5737
val = simple_strtoul(str, &n, 0);
5738
if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5739
TXDDS_MFG_SZ)) {
5740
printk(KERN_INFO QIB_DRV_NAME
5741
"txselect_values must start with a number < %d\n",
5742
TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5743
return -EINVAL;
5744
}
5745
strcpy(txselect_list, str);
5746
5747
list_for_each_entry(dd, &qib_dev_list, list)
5748
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5749
set_no_qsfp_atten(dd, 1);
5750
return 0;
5751
}
5752
5753
/*
5754
* Write the final few registers that depend on some of the
5755
* init setup. Done late in init, just before bringing up
5756
* the serdes.
5757
*/
5758
static int qib_late_7322_initreg(struct qib_devdata *dd)
5759
{
5760
int ret = 0, n;
5761
u64 val;
5762
5763
qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5764
qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5765
qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5766
qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5767
val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5768
if (val != dd->pioavailregs_phys) {
5769
qib_dev_err(dd, "Catastrophic software error, "
5770
"SendPIOAvailAddr written as %lx, "
5771
"read back as %llx\n",
5772
(unsigned long) dd->pioavailregs_phys,
5773
(unsigned long long) val);
5774
ret = -EINVAL;
5775
}
5776
5777
n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5778
qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5779
/* driver sends get pkey, lid, etc. checking also, to catch bugs */
5780
qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5781
5782
qib_register_observer(dd, &sendctrl_0_observer);
5783
qib_register_observer(dd, &sendctrl_1_observer);
5784
5785
dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5786
qib_write_kreg(dd, kr_control, dd->control);
5787
/*
5788
* Set SendDmaFetchPriority and init Tx params, including
5789
* QSFP handler on boards that have QSFP.
5790
* First set our default attenuation entry for cables that
5791
* don't have valid attenuation.
5792
*/
5793
set_no_qsfp_atten(dd, 0);
5794
for (n = 0; n < dd->num_pports; ++n) {
5795
struct qib_pportdata *ppd = dd->pport + n;
5796
5797
qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5798
sdma_fetch_prio & 0xf);
5799
/* Initialize qsfp if present on board. */
5800
if (dd->flags & QIB_HAS_QSFP)
5801
qib_init_7322_qsfp(ppd);
5802
}
5803
dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5804
qib_write_kreg(dd, kr_control, dd->control);
5805
5806
return ret;
5807
}
5808
5809
/* per IB port errors. */
5810
#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5811
MASK_ACROSS(8, 15))
5812
#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5813
#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5814
MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5815
MASK_ACROSS(0, 11))
5816
5817
/*
5818
* Write the initialization per-port registers that need to be done at
5819
* driver load and after reset completes (i.e., that aren't done as part
5820
* of other init procedures called from qib_init.c).
5821
* Some of these should be redundant on reset, but play safe.
5822
*/
5823
static void write_7322_init_portregs(struct qib_pportdata *ppd)
5824
{
5825
u64 val;
5826
int i;
5827
5828
if (!ppd->link_speed_supported) {
5829
/* no buffer credits for this port */
5830
for (i = 1; i < 8; i++)
5831
qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5832
qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5833
qib_write_kreg(ppd->dd, kr_scratch, 0);
5834
return;
5835
}
5836
5837
/*
5838
* Set the number of supported virtual lanes in IBC,
5839
* for flow control packet handling on unsupported VLs
5840
*/
5841
val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5842
val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5843
val |= (u64)(ppd->vls_supported - 1) <<
5844
SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5845
qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5846
5847
qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5848
5849
/* enable tx header checking */
5850
qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5851
IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5852
IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5853
5854
qib_write_kreg_port(ppd, krp_ncmodectrl,
5855
SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5856
5857
/*
5858
* Unconditionally clear the bufmask bits. If SDMA is
5859
* enabled, we'll set them appropriately later.
5860
*/
5861
qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5862
qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5863
qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5864
if (ppd->dd->cspec->r1)
5865
ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5866
}
5867
5868
/*
5869
* Write the initialization per-device registers that need to be done at
5870
* driver load and after reset completes (i.e., that aren't done as part
5871
* of other init procedures called from qib_init.c). Also write per-port
5872
* registers that are affected by overall device config, such as QP mapping.
5873
* Some of these should be redundant on reset, but play safe.
5874
*/
5875
static void write_7322_initregs(struct qib_devdata *dd)
5876
{
5877
struct qib_pportdata *ppd;
5878
int i, pidx;
5879
u64 val;
5880
5881
/* Set Multicast QPs received by port 2 to map to context one. */
5882
qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5883
5884
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5885
unsigned n, regno;
5886
unsigned long flags;
5887
5888
if (dd->n_krcv_queues < 2 ||
5889
!dd->pport[pidx].link_speed_supported)
5890
continue;
5891
5892
ppd = &dd->pport[pidx];
5893
5894
/* be paranoid against later code motion, etc. */
5895
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
5896
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
5897
spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
5898
5899
/* Initialize QP to context mapping */
5900
regno = krp_rcvqpmaptable;
5901
val = 0;
5902
if (dd->num_pports > 1)
5903
n = dd->first_user_ctxt / dd->num_pports;
5904
else
5905
n = dd->first_user_ctxt - 1;
5906
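/* each QP-map register packs six 5-bit context entries */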
for (i = 0; i < 32; ) {
5907
unsigned ctxt;
5908
5909
if (dd->num_pports > 1)
5910
ctxt = (i % n) * dd->num_pports + pidx;
5911
else if (i % n)
5912
ctxt = (i % n) + 1;
5913
else
5914
ctxt = ppd->hw_pidx;
5915
val |= ctxt << (5 * (i % 6));
5916
i++;
5917
if (i % 6 == 0) {
5918
qib_write_kreg_port(ppd, regno, val);
5919
val = 0;
5920
regno++;
5921
}
5922
}
5923
qib_write_kreg_port(ppd, regno, val);
5924
}
5925
5926
/*
5927
* Set up interrupt mitigation for kernel contexts, but
5928
* not user contexts (user contexts use interrupts when
5929
* stalled waiting for any packet, so want those interrupts
5930
* right away).
5931
*/
5932
for (i = 0; i < dd->first_user_ctxt; i++) {
5933
dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
5934
qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
5935
}
5936
5937
/*
5938
* Initialize as (disabled) rcvflow tables. Application code
5939
* will setup each flow as it uses the flow.
5940
* Doesn't clear any of the error bits that might be set.
5941
*/
5942
val = TIDFLOW_ERRBITS; /* these are W1C */
5943
for (i = 0; i < dd->cfgctxts; i++) {
5944
int flow;
5945
for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
5946
qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
5947
}
5948
5949
/*
5950
* dual cards init to dual port recovery, single port cards to
5951
* the one port. Dual port cards may later adjust to 1 port,
5952
* and then back to dual port if both ports are connected
5953
* */
5954
if (dd->num_pports)
5955
setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
5956
}
5957
5958
static int qib_init_7322_variables(struct qib_devdata *dd)
5959
{
5960
struct qib_pportdata *ppd;
5961
unsigned features, pidx, sbufcnt;
5962
int ret, mtu;
5963
u32 sbufs, updthresh;
5964
5965
/* pport structs are contiguous, allocated after devdata */
5966
ppd = (struct qib_pportdata *)(dd + 1);
5967
dd->pport = ppd;
5968
ppd[0].dd = dd;
5969
ppd[1].dd = dd;
5970
5971
dd->cspec = (struct qib_chip_specific *)(ppd + 2);
5972
5973
ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
5974
ppd[1].cpspec = &ppd[0].cpspec[1];
5975
ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
5976
ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
5977
5978
spin_lock_init(&dd->cspec->rcvmod_lock);
5979
spin_lock_init(&dd->cspec->gpio_lock);
5980
5981
/* we haven't yet set QIB_PRESENT, so use read directly */
5982
dd->revision = readq(&dd->kregbase[kr_revision]);
5983
5984
if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
5985
qib_dev_err(dd, "Revision register read failure, "
5986
"giving up initialization\n");
5987
ret = -ENODEV;
5988
goto bail;
5989
}
5990
dd->flags |= QIB_PRESENT; /* now register routines work */
5991
5992
dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
5993
dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
5994
dd->cspec->r1 = dd->minrev == 1;
5995
5996
get_7322_chip_params(dd);
5997
features = qib_7322_boardname(dd);
5998
5999
/* now that piobcnt2k and 4k set, we can allocate these */
6000
sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6001
NUM_VL15_BUFS + BITS_PER_LONG - 1;
6002
sbufcnt /= BITS_PER_LONG;
6003
dd->cspec->sendchkenable = kmalloc(sbufcnt *
6004
sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6005
dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6006
sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6007
dd->cspec->sendibchk = kmalloc(sbufcnt *
6008
sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6009
if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6010
!dd->cspec->sendibchk) {
6011
qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6012
ret = -ENOMEM;
6013
goto bail;
6014
}
6015
6016
ppd = dd->pport;
6017
6018
/*
6019
* GPIO bits for TWSI data and clock,
6020
* used for serial EEPROM.
6021
*/
6022
dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6023
dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6024
dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6025
6026
dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6027
QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6028
QIB_HAS_THRESH_UPDATE |
6029
(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6030
dd->flags |= qib_special_trigger ?
6031
QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6032
6033
/*
6034
* Setup initial values. These may change when PAT is enabled, but
6035
* we need these to do initial chip register accesses.
6036
*/
6037
qib_7322_set_baseaddrs(dd);
6038
6039
mtu = ib_mtu_enum_to_int(qib_ibmtu);
6040
if (mtu == -1)
6041
mtu = QIB_DEFAULT_MTU;
6042
6043
dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6044
/* all hwerrors become interrupts, unless special purposed */
6045
dd->cspec->hwerrmask = ~0ULL;
6046
/* link_recovery setup causes these errors, so ignore them,
6047
* other than clearing them when they occur */
6048
dd->cspec->hwerrmask &=
6049
~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6050
SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6051
HWE_MASK(LATriggered));
6052
6053
for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6054
struct qib_chippport_specific *cp = ppd->cpspec;
6055
ppd->link_speed_supported = features & PORT_SPD_CAP;
6056
features >>= PORT_SPD_CAP_SHIFT;
6057
if (!ppd->link_speed_supported) {
6058
/* single port mode (7340, or configured) */
6059
dd->skip_kctxt_mask |= 1 << pidx;
6060
if (pidx == 0) {
6061
/* Make sure port is disabled. */
6062
qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6063
qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6064
ppd[0] = ppd[1];
6065
dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6066
IBSerdesPClkNotDetectMask_0)
6067
| SYM_MASK(HwErrMask,
6068
SDmaMemReadErrMask_0));
6069
dd->cspec->int_enable_mask &= ~(
6070
SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6071
SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6072
SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6073
SYM_MASK(IntMask, SDmaIntMask_0) |
6074
SYM_MASK(IntMask, ErrIntMask_0) |
6075
SYM_MASK(IntMask, SendDoneIntMask_0));
6076
} else {
6077
/* Make sure port is disabled. */
6078
qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6079
qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6080
dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6081
IBSerdesPClkNotDetectMask_1)
6082
| SYM_MASK(HwErrMask,
6083
SDmaMemReadErrMask_1));
6084
dd->cspec->int_enable_mask &= ~(
6085
SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6086
SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6087
SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6088
SYM_MASK(IntMask, SDmaIntMask_1) |
6089
SYM_MASK(IntMask, ErrIntMask_1) |
6090
SYM_MASK(IntMask, SendDoneIntMask_1));
6091
}
6092
continue;
6093
}
6094
6095
dd->num_pports++;
6096
qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6097
6098
ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6099
ppd->link_width_enabled = IB_WIDTH_4X;
6100
ppd->link_speed_enabled = ppd->link_speed_supported;
6101
/*
6102
* Set the initial values to a reasonable default; they will be set
6103
* for real when link is up.
6104
*/
6105
ppd->link_width_active = IB_WIDTH_4X;
6106
ppd->link_speed_active = QIB_IB_SDR;
6107
ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6108
switch (qib_num_cfg_vls) {
6109
case 1:
6110
ppd->vls_supported = IB_VL_VL0;
6111
break;
6112
case 2:
6113
ppd->vls_supported = IB_VL_VL0_1;
6114
break;
6115
default:
6116
qib_devinfo(dd->pcidev,
6117
"Invalid num_vls %u, using 4 VLs\n",
6118
qib_num_cfg_vls);
6119
qib_num_cfg_vls = 4;
6120
/* fall through */
6121
case 4:
6122
ppd->vls_supported = IB_VL_VL0_3;
6123
break;
6124
case 8:
6125
if (mtu <= 2048)
6126
ppd->vls_supported = IB_VL_VL0_7;
6127
else {
6128
qib_devinfo(dd->pcidev,
6129
"Invalid num_vls %u for MTU %d "
6130
", using 4 VLs\n",
6131
qib_num_cfg_vls, mtu);
6132
ppd->vls_supported = IB_VL_VL0_3;
6133
qib_num_cfg_vls = 4;
6134
}
6135
break;
6136
}
6137
ppd->vls_operational = ppd->vls_supported;
6138
6139
init_waitqueue_head(&cp->autoneg_wait);
6140
INIT_DELAYED_WORK(&cp->autoneg_work,
6141
autoneg_7322_work);
6142
if (ppd->dd->cspec->r1)
6143
INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6144
6145
/*
6146
* For Mez and similar cards, no qsfp info, so do
6147
* the "cable info" setup here. Can be overridden
6148
* in adapter-specific routines.
6149
*/
6150
if (!(dd->flags & QIB_HAS_QSFP)) {
6151
if (!IS_QMH(dd) && !IS_QME(dd))
6152
qib_devinfo(dd->pcidev, "IB%u:%u: "
6153
"Unknown mezzanine card type\n",
6154
dd->unit, ppd->port);
6155
cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6156
/*
6157
* Choose center value as default tx serdes setting
6158
* until changed through module parameter.
6159
*/
6160
ppd->cpspec->no_eep = IS_QMH(dd) ?
6161
TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6162
} else
6163
cp->h1_val = H1_FORCE_VAL;
6164
6165
/* Avoid writes to chip for mini_init */
6166
if (!qib_mini_init)
6167
write_7322_init_portregs(ppd);
6168
6169
init_timer(&cp->chase_timer);
6170
cp->chase_timer.function = reenable_chase;
6171
cp->chase_timer.data = (unsigned long)ppd;
6172
6173
ppd++;
6174
}
6175
6176
dd->rcvhdrentsize = qib_rcvhdrentsize ?
6177
qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6178
dd->rcvhdrsize = qib_rcvhdrsize ?
6179
qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6180
dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6181
6182
/* we always allocate at least 2048 bytes for eager buffers */
6183
dd->rcvegrbufsize = max(mtu, 2048);
6184
6185
qib_7322_tidtemplate(dd);
6186
6187
/*
6188
* We can request a receive interrupt for 1 or
6189
* more packets from current offset.
6190
*/
6191
dd->rhdrhead_intr_off =
6192
(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6193
6194
/* setup the stats timer; the add_timer is done at end of init */
6195
init_timer(&dd->stats_timer);
6196
dd->stats_timer.function = qib_get_7322_faststats;
6197
dd->stats_timer.data = (unsigned long) dd;
6198
6199
dd->ureg_align = 0x10000; /* 64KB alignment */
6200
6201
dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6202
6203
qib_7322_config_ctxts(dd);
6204
qib_set_ctxtcnt(dd);
6205
6206
if (qib_wc_pat) {
6207
resource_size_t vl15off;
6208
/*
6209
* We do not set WC on the VL15 buffers to avoid
6210
* a rare problem with unaligned writes from
6211
* interrupt-flushed store buffers, so we need
6212
* to map those separately here. We can't solve
6213
* this for the rarely used mtrr case.
6214
*/
6215
ret = init_chip_wc_pat(dd, 0);
6216
if (ret)
6217
goto bail;
6218
6219
/* vl15 buffers start just after the 4k buffers */
6220
vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6221
dd->piobcnt4k * dd->align4k;
6222
dd->piovl15base = ioremap_nocache(vl15off,
6223
NUM_VL15_BUFS * dd->align4k);
6224
if (!dd->piovl15base)
6225
goto bail;
6226
}
6227
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6228
6229
ret = 0;
6230
if (qib_mini_init)
6231
goto bail;
6232
if (!dd->num_pports) {
6233
qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6234
goto bail; /* no error, so can still figure out why err */
6235
}
6236
6237
write_7322_initregs(dd);
6238
ret = qib_create_ctxts(dd);
6239
init_7322_cntrnames(dd);
6240
6241
updthresh = 8U; /* update threshold */
6242
6243
/* use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
6244
* reserve the update threshold amount for other kernel use, such
6245
* as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6246
* unless we aren't enabling SDMA, in which case we want to use
6247
* all the 4k bufs for the kernel.
6248
* if this was less than the update threshold, we could wait
6249
* a long time for an update. Coded this way because we
6250
* sometimes change the update threshold for various reasons,
6251
* and we want this to remain robust.
6252
*/
6253
if (dd->flags & QIB_HAS_SEND_DMA) {
6254
dd->cspec->sdmabufcnt = dd->piobcnt4k;
6255
sbufs = updthresh > 3 ? updthresh : 3;
6256
} else {
6257
dd->cspec->sdmabufcnt = 0;
6258
sbufs = dd->piobcnt4k;
6259
}
6260
dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6261
dd->cspec->sdmabufcnt;
6262
dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6263
dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6264
dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6265
dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6266
6267
/*
6268
* If we have 16 user contexts, we will have 7 sbufs
6269
* per context, so reduce the update threshold to match. We
6270
* want to update before we actually run out, at low pbufs/ctxt
6271
* so give ourselves some margin.
6272
*/
6273
if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6274
updthresh = dd->pbufsctxt - 2;
6275
dd->cspec->updthresh_dflt = updthresh;
6276
dd->cspec->updthresh = updthresh;
6277
6278
/* before full enable, no interrupts, no locking needed */
6279
dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6280
<< SYM_LSB(SendCtrl, AvailUpdThld)) |
6281
SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6282
6283
dd->psxmitwait_supported = 1;
6284
dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6285
bail:
6286
if (!dd->ctxtcnt)
6287
dd->ctxtcnt = 1; /* for other initialization code */
6288
6289
return ret;
6290
}
6291
6292
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6293
u32 *pbufnum)
6294
{
6295
u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6296
struct qib_devdata *dd = ppd->dd;
6297
6298
/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6299
if (pbc & PBC_7322_VL15_SEND) {
6300
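/* the dedicated per-port VL15 buffer immediately follows the 2K and 4K buffers */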
first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6301
last = first;
6302
} else {
6303
if ((plen + 1) > dd->piosize2kmax_dwords)
6304
first = dd->piobcnt2k;
6305
else
6306
first = 0;
6307
last = dd->cspec->lastbuf_for_pio;
6308
}
6309
return qib_getsendbuf_range(dd, pbufnum, first, last);
6310
}
6311
6312
static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6313
u32 start)
6314
{
6315
qib_write_kreg_port(ppd, krp_psinterval, intv);
6316
qib_write_kreg_port(ppd, krp_psstart, start);
6317
}
6318
6319
/*
6320
* Must be called with sdma_lock held, or before init finished.
6321
*/
6322
static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6323
{
6324
qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6325
}
6326
6327
static struct sdma_set_state_action sdma_7322_action_table[] = {
6328
[qib_sdma_state_s00_hw_down] = {
6329
.go_s99_running_tofalse = 1,
6330
.op_enable = 0,
6331
.op_intenable = 0,
6332
.op_halt = 0,
6333
.op_drain = 0,
6334
},
6335
[qib_sdma_state_s10_hw_start_up_wait] = {
6336
.op_enable = 0,
6337
.op_intenable = 1,
6338
.op_halt = 1,
6339
.op_drain = 0,
6340
},
6341
[qib_sdma_state_s20_idle] = {
6342
.op_enable = 1,
6343
.op_intenable = 1,
6344
.op_halt = 1,
6345
.op_drain = 0,
6346
},
6347
[qib_sdma_state_s30_sw_clean_up_wait] = {
6348
.op_enable = 0,
6349
.op_intenable = 1,
6350
.op_halt = 1,
6351
.op_drain = 0,
6352
},
6353
[qib_sdma_state_s40_hw_clean_up_wait] = {
6354
.op_enable = 1,
6355
.op_intenable = 1,
6356
.op_halt = 1,
6357
.op_drain = 0,
6358
},
6359
[qib_sdma_state_s50_hw_halt_wait] = {
6360
.op_enable = 1,
6361
.op_intenable = 1,
6362
.op_halt = 1,
6363
.op_drain = 1,
6364
},
6365
[qib_sdma_state_s99_running] = {
6366
.op_enable = 1,
6367
.op_intenable = 1,
6368
.op_halt = 0,
6369
.op_drain = 0,
6370
.go_s99_running_totrue = 1,
6371
},
6372
};
6373
6374
static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6375
{
6376
ppd->sdma_state.set_state_action = sdma_7322_action_table;
6377
}
6378
6379
static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6380
{
6381
struct qib_devdata *dd = ppd->dd;
6382
unsigned lastbuf, erstbuf;
6383
u64 senddmabufmask[3] = { 0 };
6384
int n, ret = 0;
6385
6386
qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6387
qib_sdma_7322_setlengen(ppd);
6388
qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6389
qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6390
qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6391
qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6392
6393
if (dd->num_pports)
6394
n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6395
else
6396
n = dd->cspec->sdmabufcnt; /* failsafe for init */
6397
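/* SDMA buffers are taken from the top of the PIO buffer space; port 1
 * of a dual-port card takes the lower chunk, port 2 (or a single
 * port) the upper one */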
erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6398
((dd->num_pports == 1 || ppd->port == 2) ? n :
6399
dd->cspec->sdmabufcnt);
6400
lastbuf = erstbuf + n;
6401
6402
ppd->sdma_state.first_sendbuf = erstbuf;
6403
ppd->sdma_state.last_sendbuf = lastbuf;
6404
for (; erstbuf < lastbuf; ++erstbuf) {
6405
unsigned word = erstbuf / BITS_PER_LONG;
6406
unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6407
6408
BUG_ON(word >= 3);
6409
senddmabufmask[word] |= 1ULL << bit;
6410
}
6411
qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6412
qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6413
qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6414
return ret;
6415
}
6416
6417
/* sdma_lock must be held */
6418
static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6419
{
6420
struct qib_devdata *dd = ppd->dd;
6421
int sane;
6422
int use_dmahead;
6423
u16 swhead;
6424
u16 swtail;
6425
u16 cnt;
6426
u16 hwhead;
6427
6428
use_dmahead = __qib_sdma_running(ppd) &&
6429
(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6430
retry:
6431
hwhead = use_dmahead ?
6432
(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6433
(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6434
6435
swhead = ppd->sdma_descq_head;
6436
swtail = ppd->sdma_descq_tail;
6437
cnt = ppd->sdma_descq_cnt;
6438
6439
if (swhead < swtail)
6440
/* not wrapped */
6441
sane = (hwhead >= swhead) & (hwhead <= swtail);
6442
else if (swhead > swtail)
6443
/* wrapped around */
6444
sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6445
(hwhead <= swtail);
6446
else
6447
/* empty */
6448
sane = (hwhead == swhead);
6449
6450
if (unlikely(!sane)) {
6451
if (use_dmahead) {
6452
/* try one more time, directly from the register */
6453
use_dmahead = 0;
6454
goto retry;
6455
}
6456
/* proceed as if no progress */
6457
hwhead = swhead;
6458
}
6459
6460
return hwhead;
6461
}
6462
6463
static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6464
{
6465
u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6466
6467
return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6468
(hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6469
!(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6470
!(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6471
}
6472
6473
/*
6474
* Compute the amount of delay before sending the next packet if the
6475
* port's send rate differs from the static rate set for the QP.
6476
* The delay affects the next packet and the amount of the delay is
6477
* based on the length of this packet.
6478
*/
6479
static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6480
u8 srate, u8 vl)
6481
{
6482
u8 snd_mult = ppd->delay_mult;
6483
u8 rcv_mult = ib_rate_to_delay[srate];
6484
u32 ret;
6485
6486
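/* apply a length-proportional delay only when the QP's static rate
 * (srate) is slower than the port's active rate */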
ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6487
6488
/* Indicate VL15, else set the VL in the control word */
6489
if (vl == 15)
6490
ret |= PBC_7322_VL15_SEND_CTRL;
6491
else
6492
ret |= vl << PBC_VL_NUM_LSB;
6493
ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6494
6495
return ret;
6496
}
6497
6498
/*
6499
* Enable the per-port VL15 send buffers for use.
6500
* They follow the rest of the buffers, without a config parameter.
6501
* This was in initregs, but that is done before the shadow
6502
* is set up, and this has to be done after the shadow is
6503
* set up.
6504
*/
6505
static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6506
{
6507
unsigned vl15bufs;
6508
6509
vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6510
qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6511
TXCHK_CHG_TYPE_KERN, NULL);
6512
}
6513
6514
static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6515
{
6516
if (rcd->ctxt < NUM_IB_PORTS) {
6517
if (rcd->dd->num_pports > 1) {
6518
rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6519
rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6520
} else {
6521
rcd->rcvegrcnt = KCTXT0_EGRCNT;
6522
rcd->rcvegr_tid_base = 0;
6523
}
6524
} else {
6525
rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6526
rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6527
(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6528
}
6529
}
6530
6531
#define QTXSLEEPS 5000
6532
static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6533
u32 len, u32 which, struct qib_ctxtdata *rcd)
6534
{
6535
int i;
6536
const int last = start + len - 1;
6537
const int lastr = last / BITS_PER_LONG;
6538
u32 sleeps = 0;
6539
int wait = rcd != NULL;
6540
unsigned long flags;
6541
6542
while (wait) {
6543
unsigned long shadow;
6544
int cstart, previ = -1;
6545
6546
/*
6547
* when flipping from kernel to user, we can't change
6548
* the checking type if the buffer is allocated to the
6549
* driver. It's OK the other direction, because it's
6550
* from close, and we have just disarmed all the
6551
* buffers. All the kernel to kernel changes are also
6552
* OK.
6553
*/
6554
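/* the pioavail shadow keeps two bits per buffer; stop at the first
 * buffer in the range whose BUSY bit is still set */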
for (cstart = start; cstart <= last; cstart++) {
6555
i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6556
/ BITS_PER_LONG;
6557
if (i != previ) {
6558
shadow = (unsigned long)
6559
le64_to_cpu(dd->pioavailregs_dma[i]);
6560
previ = i;
6561
}
6562
if (test_bit(((2 * cstart) +
6563
QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6564
% BITS_PER_LONG, &shadow))
6565
break;
6566
}
6567
6568
if (cstart > last)
6569
break;
6570
6571
if (sleeps == QTXSLEEPS)
6572
break;
6573
/* make sure we see an updated copy next time around */
6574
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6575
sleeps++;
6576
msleep(20);
6577
}
6578
6579
switch (which) {
6580
case TXCHK_CHG_TYPE_DIS1:
6581
/*
6582
* disable checking on a range; used by diags; just
6583
* one buffer, but still written generically
6584
*/
6585
for (i = start; i <= last; i++)
6586
clear_bit(i, dd->cspec->sendchkenable);
6587
break;
6588
6589
case TXCHK_CHG_TYPE_ENAB1:
6590
/*
6591
* (re)enable checking on a range; used by diags; just
6592
* one buffer, but still written generically; read
6593
* scratch to be sure buffer actually triggered, not
6594
* just flushed from processor.
6595
*/
6596
qib_read_kreg32(dd, kr_scratch);
6597
for (i = start; i <= last; i++)
6598
set_bit(i, dd->cspec->sendchkenable);
6599
break;
6600
6601
case TXCHK_CHG_TYPE_KERN:
6602
/* usable by kernel */
6603
for (i = start; i <= last; i++) {
6604
set_bit(i, dd->cspec->sendibchk);
6605
clear_bit(i, dd->cspec->sendgrhchk);
6606
}
6607
spin_lock_irqsave(&dd->uctxt_lock, flags);
6608
/* see if we need to raise avail update threshold */
6609
for (i = dd->first_user_ctxt;
6610
dd->cspec->updthresh != dd->cspec->updthresh_dflt
6611
&& i < dd->cfgctxts; i++)
6612
if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6613
((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6614
< dd->cspec->updthresh_dflt)
6615
break;
6616
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6617
if (i == dd->cfgctxts) {
6618
spin_lock_irqsave(&dd->sendctrl_lock, flags);
6619
dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6620
dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6621
dd->sendctrl |= (dd->cspec->updthresh &
6622
SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6623
SYM_LSB(SendCtrl, AvailUpdThld);
6624
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6625
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6626
}
6627
break;
6628
6629
case TXCHK_CHG_TYPE_USER:
6630
/* for user process */
6631
for (i = start; i <= last; i++) {
6632
clear_bit(i, dd->cspec->sendibchk);
6633
set_bit(i, dd->cspec->sendgrhchk);
6634
}
6635
spin_lock_irqsave(&dd->sendctrl_lock, flags);
6636
if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6637
/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6638
dd->cspec->updthresh = (rcd->piocnt /
6639
rcd->subctxt_cnt) - 1;
6640
dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6641
dd->sendctrl |= (dd->cspec->updthresh &
6642
SYM_RMASK(SendCtrl, AvailUpdThld))
6643
<< SYM_LSB(SendCtrl, AvailUpdThld);
6644
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6645
sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6646
} else
6647
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6648
break;
6649
6650
default:
6651
break;
6652
}
6653
6654
for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6655
qib_write_kreg(dd, kr_sendcheckmask + i,
6656
dd->cspec->sendchkenable[i]);
6657
6658
for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6659
qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6660
dd->cspec->sendgrhchk[i]);
6661
qib_write_kreg(dd, kr_sendibpktmask + i,
6662
dd->cspec->sendibchk[i]);
6663
}
6664
6665
/*
6666
* Be sure whatever we did was seen by the chip and acted upon,
6667
* before we return. Mostly important for which >= 2.
6668
*/
6669
qib_read_kreg32(dd, kr_scratch);
6670
}
6671
6672
6673
/* useful for trigger analyzers, etc. */
static void writescratch(struct qib_devdata *dd, u32 val)
{
        qib_write_kreg(dd, kr_scratch, val);
}

/* Dummy for now, use chip regs soon */
static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
{
        return -ENXIO;
}

/**
 * qib_init_iba7322_funcs - set up the chip-specific function pointers
 * @pdev: the pci_dev for the qlogic_ib device
 * @ent: pci_device_id struct for this dev
 *
 * Also allocates, inits, and returns the devdata struct for this
 * device instance
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6697
const struct pci_device_id *ent)
6698
{
6699
struct qib_devdata *dd;
6700
int ret, i;
6701
u32 tabsize, actual_cnt = 0;
6702
6703
dd = qib_alloc_devdata(pdev,
6704
NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6705
sizeof(struct qib_chip_specific) +
6706
NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6707
if (IS_ERR(dd))
6708
goto bail;
6709
6710
dd->f_bringup_serdes = qib_7322_bringup_serdes;
6711
dd->f_cleanup = qib_setup_7322_cleanup;
6712
dd->f_clear_tids = qib_7322_clear_tids;
6713
dd->f_free_irq = qib_7322_free_irq;
6714
dd->f_get_base_info = qib_7322_get_base_info;
6715
dd->f_get_msgheader = qib_7322_get_msgheader;
6716
dd->f_getsendbuf = qib_7322_getsendbuf;
6717
dd->f_gpio_mod = gpio_7322_mod;
6718
dd->f_eeprom_wen = qib_7322_eeprom_wen;
6719
dd->f_hdrqempty = qib_7322_hdrqempty;
6720
dd->f_ib_updown = qib_7322_ib_updown;
6721
dd->f_init_ctxt = qib_7322_init_ctxt;
6722
dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
6723
dd->f_intr_fallback = qib_7322_intr_fallback;
6724
dd->f_late_initreg = qib_late_7322_initreg;
6725
dd->f_setpbc_control = qib_7322_setpbc_control;
6726
dd->f_portcntr = qib_portcntr_7322;
6727
dd->f_put_tid = qib_7322_put_tid;
6728
dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
6729
dd->f_rcvctrl = rcvctrl_7322_mod;
6730
dd->f_read_cntrs = qib_read_7322cntrs;
6731
dd->f_read_portcntrs = qib_read_7322portcntrs;
6732
dd->f_reset = qib_do_7322_reset;
6733
dd->f_init_sdma_regs = init_sdma_7322_regs;
6734
dd->f_sdma_busy = qib_sdma_7322_busy;
6735
dd->f_sdma_gethead = qib_sdma_7322_gethead;
6736
dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
6737
dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6738
dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
6739
dd->f_sendctrl = sendctrl_7322_mod;
6740
dd->f_set_armlaunch = qib_set_7322_armlaunch;
6741
dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
6742
dd->f_iblink_state = qib_7322_iblink_state;
6743
dd->f_ibphys_portstate = qib_7322_phys_portstate;
6744
dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
6745
dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
6746
dd->f_set_ib_loopback = qib_7322_set_loopback;
6747
dd->f_get_ib_table = qib_7322_get_ib_table;
6748
dd->f_set_ib_table = qib_7322_set_ib_table;
6749
dd->f_set_intr_state = qib_7322_set_intr_state;
6750
dd->f_setextled = qib_setup_7322_setextled;
6751
dd->f_txchk_change = qib_7322_txchk_change;
6752
dd->f_update_usrhead = qib_update_7322_usrhead;
6753
dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
6754
dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
6755
dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
6756
dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
6757
dd->f_sdma_init_early = qib_7322_sdma_init_early;
6758
dd->f_writescratch = writescratch;
6759
dd->f_tempsense_rd = qib_7322_tempsense_rd;
6760
/*
6761
* Do remaining PCIe setup and save PCIe values in dd.
6762
* Any error printing is already done by the init code.
6763
* On return, we have the chip mapped, but chip registers
6764
* are not set up until start of qib_init_7322_variables.
6765
*/
6766
ret = qib_pcie_ddinit(dd, pdev, ent);
6767
if (ret < 0)
6768
goto bail_free;
6769
6770
/* initialize chip-specific variables */
6771
ret = qib_init_7322_variables(dd);
6772
if (ret)
6773
goto bail_cleanup;
6774
6775
if (qib_mini_init || !dd->num_pports)
6776
goto bail;
6777
6778
/*
6779
* Determine number of vectors we want; depends on port count
6780
* and number of configured kernel receive queues actually used.
6781
* Should also depend on whether sdma is enabled or not, but
6782
* that's such a rare testing case it's not worth worrying about.
6783
*/
6784
tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6785
for (i = 0; i < tabsize; i++)
6786
if ((i < ARRAY_SIZE(irq_table) &&
6787
irq_table[i].port <= dd->num_pports) ||
6788
(i >= ARRAY_SIZE(irq_table) &&
6789
dd->rcd[i - ARRAY_SIZE(irq_table)]))
6790
actual_cnt++;
6791
tabsize = actual_cnt;
6792
dd->cspec->msix_entries = kmalloc(tabsize *
6793
sizeof(struct msix_entry), GFP_KERNEL);
6794
dd->cspec->msix_arg = kmalloc(tabsize *
6795
sizeof(void *), GFP_KERNEL);
6796
if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6797
qib_dev_err(dd, "No memory for MSIx table\n");
6798
tabsize = 0;
6799
}
6800
for (i = 0; i < tabsize; i++)
6801
dd->cspec->msix_entries[i].entry = i;
6802
6803
if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6804
qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6805
"continuing anyway\n");
6806
/* may be less than we wanted, if not enough available */
6807
dd->cspec->num_msix_entries = tabsize;
6808
6809
/* setup interrupt handler */
6810
qib_setup_7322_interrupt(dd, 1);
6811
6812
/* clear diagctrl register, in case diags were running and crashed */
6813
qib_write_kreg(dd, kr_hwdiagctrl, 0);
6814
6815
goto bail;
6816
6817
bail_cleanup:
6818
qib_pcie_ddcleanup(dd);
6819
bail_free:
6820
qib_free_devdata(dd);
6821
dd = ERR_PTR(ret);
6822
bail:
6823
return dd;
6824
}
6825
6826
/*
 * Set the table entry at the specified index from the specified table.
 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 * The index ('ridx' below) selects the correct table via its upper bits,
 * while its 4 LSBs select the corresponding entry (one of TXDDS_TABLE_SZ)
 * within that table.
 */
#define DDS_ENT_AMP_LSB 14
#define DDS_ENT_MAIN_LSB 9
#define DDS_ENT_POST_LSB 5
#define DDS_ENT_PRE_XTRA_LSB 3
#define DDS_ENT_PRE_LSB 0

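/*
 * Worked example (illustrative, not part of the driver): with
 * TXDDS_TABLE_SZ entries per table (16 in the tables below), the "2 dB"
 * QDR entry is written at ridx = 2 * TXDDS_TABLE_SZ + 1, and its
 * { amp 0, pre 1, main 0, post 7 } values pack as
 * (0 << 14) | (0 << 9) | (7 << 5) | (1 << 0) = 0xe1 in set_txdds() below.
 */
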
/*
6840
* Set one entry in the TxDDS table for spec'd port
6841
* ridx picks one of the entries, while tp points
6842
* to the appropriate table entry.
6843
*/
6844
static void set_txdds(struct qib_pportdata *ppd, int ridx,
6845
const struct txdds_ent *tp)
6846
{
6847
struct qib_devdata *dd = ppd->dd;
6848
u32 pack_ent;
6849
int regidx;
6850
6851
/* Get correct offset in chip-space, and in source table */
6852
regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6853
/*
6854
* We do not use qib_write_kreg_port() because it was intended
6855
* only for registers in the lower "port specific" pages.
6856
* So do index calculation by hand.
6857
*/
6858
if (ppd->hw_pidx)
6859
regidx += (dd->palign / sizeof(u64));
6860
6861
pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6862
pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6863
pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6864
pack_ent |= tp->post << DDS_ENT_POST_LSB;
6865
qib_write_kreg(dd, regidx, pack_ent);
6866
/* Prevent back-to-back writes by hitting scratch */
6867
qib_write_kreg(ppd->dd, kr_scratch, 0);
6868
}
6869
6870
static const struct vendor_txdds_ent vendor_txdds[] = {
6871
{ /* Amphenol 1m 30awg NoEq */
6872
{ 0x41, 0x50, 0x48 }, "584470002 ",
6873
{ 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
6874
},
6875
{ /* Amphenol 3m 28awg NoEq */
6876
{ 0x41, 0x50, 0x48 }, "584470004 ",
6877
{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
6878
},
6879
{ /* Finisar 3m OM2 Optical */
6880
{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6881
{ 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
6882
},
6883
{ /* Finisar 30m OM2 Optical */
6884
{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6885
{ 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
6886
},
6887
{ /* Finisar Default OM2 Optical */
6888
{ 0x00, 0x90, 0x65 }, NULL,
6889
{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
6890
},
6891
{ /* Gore 1m 30awg NoEq */
6892
{ 0x00, 0x21, 0x77 }, "QSN3300-1 ",
6893
{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
6894
},
6895
{ /* Gore 2m 30awg NoEq */
6896
{ 0x00, 0x21, 0x77 }, "QSN3300-2 ",
6897
{ 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
6898
},
6899
{ /* Gore 1m 28awg NoEq */
6900
{ 0x00, 0x21, 0x77 }, "QSN3800-1 ",
6901
{ 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
6902
},
6903
{ /* Gore 3m 28awg NoEq */
6904
{ 0x00, 0x21, 0x77 }, "QSN3800-3 ",
6905
{ 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
6906
},
6907
{ /* Gore 5m 24awg Eq */
6908
{ 0x00, 0x21, 0x77 }, "QSN7000-5 ",
6909
{ 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
6910
},
6911
{ /* Gore 7m 24awg Eq */
6912
{ 0x00, 0x21, 0x77 }, "QSN7000-7 ",
6913
{ 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
6914
},
6915
{ /* Gore 5m 26awg Eq */
6916
{ 0x00, 0x21, 0x77 }, "QSN7600-5 ",
6917
{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
6918
},
6919
{ /* Gore 7m 26awg Eq */
6920
{ 0x00, 0x21, 0x77 }, "QSN7600-7 ",
6921
{ 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
6922
},
6923
{ /* Intersil 12m 24awg Active */
6924
{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
6925
{ 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
6926
},
6927
{ /* Intersil 10m 28awg Active */
6928
{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
6929
{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
6930
},
6931
{ /* Intersil 7m 30awg Active */
6932
{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
6933
{ 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
6934
},
6935
{ /* Intersil 5m 32awg Active */
6936
{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
6937
{ 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
6938
},
6939
{ /* Intersil Default Active */
6940
{ 0x00, 0x30, 0xB4 }, NULL,
6941
{ 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
6942
},
6943
{ /* Luxtera 20m Active Optical */
6944
{ 0x00, 0x25, 0x63 }, NULL,
6945
{ 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
6946
},
6947
{ /* Molex 1M Cu loopback */
6948
{ 0x00, 0x09, 0x3A }, "74763-0025 ",
6949
{ 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
6950
},
6951
{ /* Molex 2m 28awg NoEq */
6952
{ 0x00, 0x09, 0x3A }, "74757-2201 ",
6953
{ 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
6954
},
6955
};
6956
6957
static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
6958
/* amp, pre, main, post */
6959
{ 2, 2, 15, 6 }, /* Loopback */
6960
{ 0, 0, 0, 1 }, /* 2 dB */
6961
{ 0, 0, 0, 2 }, /* 3 dB */
6962
{ 0, 0, 0, 3 }, /* 4 dB */
6963
{ 0, 0, 0, 4 }, /* 5 dB */
6964
{ 0, 0, 0, 5 }, /* 6 dB */
6965
{ 0, 0, 0, 6 }, /* 7 dB */
6966
{ 0, 0, 0, 7 }, /* 8 dB */
6967
{ 0, 0, 0, 8 }, /* 9 dB */
6968
{ 0, 0, 0, 9 }, /* 10 dB */
6969
{ 0, 0, 0, 10 }, /* 11 dB */
6970
{ 0, 0, 0, 11 }, /* 12 dB */
6971
{ 0, 0, 0, 12 }, /* 13 dB */
6972
{ 0, 0, 0, 13 }, /* 14 dB */
6973
{ 0, 0, 0, 14 }, /* 15 dB */
6974
{ 0, 0, 0, 15 }, /* 16 dB */
6975
};
6976
6977
static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
6978
/* amp, pre, main, post */
6979
{ 2, 2, 15, 6 }, /* Loopback */
6980
{ 0, 0, 0, 8 }, /* 2 dB */
6981
{ 0, 0, 0, 8 }, /* 3 dB */
6982
{ 0, 0, 0, 9 }, /* 4 dB */
6983
{ 0, 0, 0, 9 }, /* 5 dB */
6984
{ 0, 0, 0, 10 }, /* 6 dB */
6985
{ 0, 0, 0, 10 }, /* 7 dB */
6986
{ 0, 0, 0, 11 }, /* 8 dB */
6987
{ 0, 0, 0, 11 }, /* 9 dB */
6988
{ 0, 0, 0, 12 }, /* 10 dB */
6989
{ 0, 0, 0, 12 }, /* 11 dB */
6990
{ 0, 0, 0, 13 }, /* 12 dB */
6991
{ 0, 0, 0, 13 }, /* 13 dB */
6992
{ 0, 0, 0, 14 }, /* 14 dB */
6993
{ 0, 0, 0, 14 }, /* 15 dB */
6994
{ 0, 0, 0, 15 }, /* 16 dB */
6995
};
6996
6997
static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
6998
/* amp, pre, main, post */
6999
{ 2, 2, 15, 6 }, /* Loopback */
7000
{ 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
7001
{ 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
7002
{ 0, 1, 0, 11 }, /* 4 dB */
7003
{ 0, 1, 0, 13 }, /* 5 dB */
7004
{ 0, 1, 0, 15 }, /* 6 dB */
7005
{ 0, 1, 3, 15 }, /* 7 dB */
7006
{ 0, 1, 7, 15 }, /* 8 dB */
7007
{ 0, 1, 7, 15 }, /* 9 dB */
7008
{ 0, 1, 8, 15 }, /* 10 dB */
7009
{ 0, 1, 9, 15 }, /* 11 dB */
7010
{ 0, 1, 10, 15 }, /* 12 dB */
7011
{ 0, 2, 6, 15 }, /* 13 dB */
7012
{ 0, 2, 7, 15 }, /* 14 dB */
7013
{ 0, 2, 8, 15 }, /* 15 dB */
7014
{ 0, 2, 9, 15 }, /* 16 dB */
7015
};
7016
7017
/*
7018
* extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7019
* These are mostly used for mez cards going through connectors
7020
* and backplane traces, but can be used to add other "unusual"
7021
* table values as well.
7022
*/
7023
static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7024
/* amp, pre, main, post */
7025
{ 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7026
{ 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7027
{ 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7028
{ 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7029
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7030
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7031
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7032
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7033
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7034
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7035
{ 0, 0, 0, 11 }, /* QME7342 backplane settings */
7036
{ 0, 0, 0, 3 }, /* QMH7342 backplane settings */
7037
{ 0, 0, 0, 4 }, /* QMH7342 backplane settings */
7038
};
7039
7040
static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7041
/* amp, pre, main, post */
7042
{ 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7043
{ 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7044
{ 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7045
{ 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7046
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7047
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7048
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7049
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7050
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7051
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7052
{ 0, 0, 0, 13 }, /* QME7342 backplane settings */
7053
{ 0, 0, 0, 9 }, /* QMH7342 backplane settings */
7054
{ 0, 0, 0, 10 }, /* QMH7342 backplane settings */
7055
};
7056
7057
static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7058
/* amp, pre, main, post */
7059
{ 0, 1, 0, 4 }, /* QMH7342 backplane settings */
7060
{ 0, 1, 0, 5 }, /* QMH7342 backplane settings */
7061
{ 0, 1, 0, 6 }, /* QMH7342 backplane settings */
7062
{ 0, 1, 0, 8 }, /* QMH7342 backplane settings */
7063
{ 0, 1, 12, 10 }, /* QME7342 backplane setting */
7064
{ 0, 1, 12, 11 }, /* QME7342 backplane setting */
7065
{ 0, 1, 12, 12 }, /* QME7342 backplane setting */
7066
{ 0, 1, 12, 14 }, /* QME7342 backplane setting */
7067
{ 0, 1, 12, 6 }, /* QME7342 backplane setting */
7068
{ 0, 1, 12, 7 }, /* QME7342 backplane setting */
7069
{ 0, 1, 12, 8 }, /* QME7342 backplane setting */
7070
{ 0, 1, 0, 10 }, /* QMH7342 backplane settings */
7071
{ 0, 1, 0, 12 }, /* QMH7342 backplane settings */
7072
};
7073
7074
static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7075
/* amp, pre, main, post */
7076
{ 0, 0, 0, 0 }, /* QME7342 mfg settings */
7077
{ 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7078
};
7079
7080
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
                                               unsigned atten)
{
        /*
         * The attenuation table starts at 2dB for entry 1,
         * with entry 0 being the loopback entry.
         */
        if (atten <= 2)
                atten = 1;
        else if (atten > TXDDS_TABLE_SZ)
                atten = TXDDS_TABLE_SZ - 1;
        else
                atten--;
        return txdds + atten;
}

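/*
 * Worked example (illustrative, not part of the driver): a cable reporting
 * 5 dB of attenuation maps to entry 4, the "5 dB" row of the tables above;
 * 2 dB or less clamps to entry 1, and anything above TXDDS_TABLE_SZ clamps
 * to the last entry.
 */
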
/*
7097
* if override is set, the module parameter txselect has a value
7098
* for this specific port, so use it, rather than our normal mechanism.
7099
*/
7100
static void find_best_ent(struct qib_pportdata *ppd,
7101
const struct txdds_ent **sdr_dds,
7102
const struct txdds_ent **ddr_dds,
7103
const struct txdds_ent **qdr_dds, int override)
7104
{
7105
struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7106
int idx;
7107
7108
/* Search table of known cables */
7109
for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7110
const struct vendor_txdds_ent *v = vendor_txdds + idx;
7111
7112
if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7113
(!v->partnum ||
7114
!memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7115
*sdr_dds = &v->sdr;
7116
*ddr_dds = &v->ddr;
7117
*qdr_dds = &v->qdr;
7118
return;
7119
}
7120
}
7121
7122
/* Lookup serdes setting by cable type and attenuation */
7123
if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7124
*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7125
*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7126
*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7127
return;
7128
}
7129
7130
if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7131
qd->atten[1])) {
7132
*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7133
*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7134
*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7135
return;
7136
} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7137
/*
 * If we have no (or incomplete) data from the cable
 * EEPROM, or no QSFP, or override is set, use the
 * module parameter value to index into the attenuation
 * table.
 */
idx = ppd->cpspec->no_eep;
7144
*sdr_dds = &txdds_sdr[idx];
7145
*ddr_dds = &txdds_ddr[idx];
7146
*qdr_dds = &txdds_qdr[idx];
7147
} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7148
/* similar to above, but index into the "extra" table. */
7149
idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7150
*sdr_dds = &txdds_extra_sdr[idx];
7151
*ddr_dds = &txdds_extra_ddr[idx];
7152
*qdr_dds = &txdds_extra_qdr[idx];
7153
} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7154
ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7155
TXDDS_MFG_SZ)) {
7156
idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7157
printk(KERN_INFO QIB_DRV_NAME
7158
" IB%u:%u use idx %u into txdds_mfg\n",
7159
ppd->dd->unit, ppd->port, idx);
7160
*sdr_dds = &txdds_extra_mfg[idx];
7161
*ddr_dds = &txdds_extra_mfg[idx];
7162
*qdr_dds = &txdds_extra_mfg[idx];
7163
} else {
7164
/* this shouldn't happen, it's range checked */
7165
*sdr_dds = txdds_sdr + qib_long_atten;
7166
*ddr_dds = txdds_ddr + qib_long_atten;
7167
*qdr_dds = txdds_qdr + qib_long_atten;
7168
}
7169
}
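/*
 * Summary of the selection order above (added for readability): a known
 * cable from vendor_txdds[] wins first; otherwise active cables use the
 * board attenuation defaults, cables reporting attenuation use
 * get_atten_table(), and finally the txselect module parameter value
 * (cpspec->no_eep) indexes the base, "extra", or mfg tables.
 */
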
static void init_txdds_table(struct qib_pportdata *ppd, int override)
{
        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
        struct txdds_ent *dds;
        int idx;
        int single_ent = 0;

        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);

        /* for mez cards or override, use the selected value for all entries */
        if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
                single_ent = 1;

        /* Fill in the first entry with the best entry found. */
        set_txdds(ppd, 0, sdr_dds);
        set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
        set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
        if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
                           QIBL_LINKACTIVE)) {
                dds = (struct txdds_ent *)(ppd->link_speed_active ==
                                           QIB_IB_QDR ? qdr_dds :
                                           (ppd->link_speed_active ==
                                            QIB_IB_DDR ? ddr_dds : sdr_dds));
                write_tx_serdes_param(ppd, dds);
        }

        /* Fill in the remaining entries with the default table values. */
        for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
                set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
                set_txdds(ppd, idx + TXDDS_TABLE_SZ,
                          single_ent ? ddr_dds : txdds_ddr + idx);
                set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
                          single_ent ? qdr_dds : txdds_qdr + idx);
        }
}

#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7208
#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7209
#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7210
#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7211
#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7212
#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7213
#define AHB_TRANS_TRIES 10
7214
7215
/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls pass "chan + (chan >> 1)"
 * for the channel argument.
 */
static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7221
u32 data, u32 mask)
7222
{
7223
u32 rd_data, wr_data, sz_mask;
7224
u64 trans, acc, prev_acc;
7225
u32 ret = 0xBAD0BAD;
7226
int tries;
7227
7228
prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7229
/* From this point on, make sure we return access */
7230
acc = (quad << 1) | 1;
7231
qib_write_kreg(dd, KR_AHB_ACC, acc);
7232
7233
for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7234
trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7235
if (trans & AHB_TRANS_RDY)
7236
break;
7237
}
7238
if (tries >= AHB_TRANS_TRIES) {
7239
qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7240
goto bail;
7241
}
7242
7243
/* If mask is not all 1s, we need to read, but different SerDes
7244
* entities have different sizes
7245
*/
7246
sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7247
wr_data = data & mask & sz_mask;
7248
if ((~mask & sz_mask) != 0) {
7249
trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7250
qib_write_kreg(dd, KR_AHB_TRANS, trans);
7251
7252
for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7253
trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7254
if (trans & AHB_TRANS_RDY)
7255
break;
7256
}
7257
if (tries >= AHB_TRANS_TRIES) {
7258
qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7259
AHB_TRANS_TRIES);
7260
goto bail;
7261
}
7262
/* Re-read in case host split reads and read data first */
7263
trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7264
rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7265
wr_data |= (rd_data & ~mask & sz_mask);
7266
}
7267
7268
/* If mask is not zero, we need to write. */
7269
if (mask & sz_mask) {
7270
trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7271
trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7272
trans |= AHB_WR;
7273
qib_write_kreg(dd, KR_AHB_TRANS, trans);
7274
7275
for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7276
trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7277
if (trans & AHB_TRANS_RDY)
7278
break;
7279
}
7280
if (tries >= AHB_TRANS_TRIES) {
7281
qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7282
AHB_TRANS_TRIES);
7283
goto bail;
7284
}
7285
}
7286
ret = wr_data;
7287
bail:
7288
qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7289
return ret;
7290
}
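/*
 * Illustrative usage sketch (not part of the driver): a mask of 0 makes
 * ahb_mod() a pure read that returns the current register value, e.g.
 *
 *      curval = ahb_mod(dd, IBSD(ppd->hw_pidx), chan + (chan >> 1), 25, 0, 0);
 *
 * while a non-zero mask does a read-modify-write, e.g. setting the LoS
 * filter "on" threshold count in subsystem register 5, bits [14:11]:
 *
 *      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
 */
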
static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
                             unsigned mask)
{
        struct qib_devdata *dd = ppd->dd;
        int chan;
        u32 rbc;

        for (chan = 0; chan < SERDES_CHANS; ++chan) {
                ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
                        data, mask);
                rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
                              addr, 0, 0);
        }
}

static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7308
{
7309
u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7310
u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7311
7312
if (enable && !state) {
7313
printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
7314
ppd->dd->unit, ppd->port);
7315
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7316
} else if (!enable && state) {
7317
printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
7318
ppd->dd->unit, ppd->port);
7319
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7320
}
7321
qib_write_kreg_port(ppd, krp_serdesctrl, data);
7322
}
7323
7324
static int serdes_7322_init(struct qib_pportdata *ppd)
7325
{
7326
int ret = 0;
7327
if (ppd->dd->cspec->r1)
7328
ret = serdes_7322_init_old(ppd);
7329
else
7330
ret = serdes_7322_init_new(ppd);
7331
return ret;
7332
}
7333
7334
static int serdes_7322_init_old(struct qib_pportdata *ppd)
7335
{
7336
u32 le_val;
7337
7338
/*
7339
* Initialize the Tx DDS tables. Also done every QSFP event,
7340
* for adapters with QSFP
7341
*/
7342
init_txdds_table(ppd, 0);
7343
7344
/* ensure no tx overrides from earlier driver loads */
7345
qib_write_kreg_port(ppd, krp_tx_deemph_override,
7346
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7347
reset_tx_deemphasis_override));
7348
7349
/* Patch some SerDes defaults to "Better for IB" */
7350
/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7351
ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7352
7353
/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7354
ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7355
/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7356
ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7357
7358
/* May be overridden in qsfp_7322_event */
7359
le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7360
ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7361
7362
/* enable LE1 adaptation for all but QME, which is disabled */
7363
le_val = IS_QME(ppd->dd) ? 0 : 1;
7364
ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7365
7366
/* Clear cmode-override, may be set from older driver */
7367
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7368
7369
/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7370
ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7371
7372
/* setup LoS params; these are subsystem, so chan == 5 */
7373
/* LoS filter threshold_count on, ch 0-3, set to 8 */
7374
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7375
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7376
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7377
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7378
7379
/* LoS filter threshold_count off, ch 0-3, set to 4 */
7380
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7381
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7382
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7383
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7384
7385
/* LoS filter select enabled */
7386
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7387
7388
/* LoS target data: SDR=4, DDR=2, QDR=1 */
7389
ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7390
ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7391
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7392
7393
serdes_7322_los_enable(ppd, 1);
7394
7395
/* rxbistena; set to 0 to avoid effects of it switching later */
7396
ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7397
7398
/* Configure 4 DFE taps, and only they adapt */
7399
ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7400
7401
/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7402
le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7403
ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7404
7405
/*
7406
* Set receive adaptation mode. SDR and DDR adaptation are
7407
* always on, and QDR is initially enabled; later disabled.
7408
*/
7409
qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7410
qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7411
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7412
ppd->dd->cspec->r1 ?
7413
QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7414
ppd->cpspec->qdr_dfe_on = 1;
7415
7416
/* FLoop LOS gate: PPM filter enabled */
7417
ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7418
7419
/* rx offset center enabled */
7420
ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7421
7422
if (!ppd->dd->cspec->r1) {
7423
ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7424
ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7425
}
7426
7427
/* Set the frequency loop bandwidth to 15 */
7428
ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7429
7430
return 0;
7431
}
7432
7433
static int serdes_7322_init_new(struct qib_pportdata *ppd)
7434
{
7435
u64 tstart;
7436
u32 le_val, rxcaldone;
7437
int chan, chan_done = (1 << SERDES_CHANS) - 1;
7438
7439
/*
7440
* Initialize the Tx DDS tables. Also done every QSFP event,
7441
* for adapters with QSFP
7442
*/
7443
init_txdds_table(ppd, 0);
7444
7445
/* Clear cmode-override, may be set from older driver */
7446
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7447
7448
/* ensure no tx overrides from earlier driver loads */
7449
qib_write_kreg_port(ppd, krp_tx_deemph_override,
7450
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7451
reset_tx_deemphasis_override));
7452
7453
/* START OF LSI SUGGESTED SERDES BRINGUP */
7454
/* Reset - Calibration Setup */
7455
/* Stop DFE adaptation */
ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7457
/* Disable LE1 */
7458
ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7459
/* Disable autoadapt for LE1 */
7460
ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7461
/* Disable LE2 */
7462
ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7463
/* Disable VGA */
7464
ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7465
/* Disable AFE Offset Cancel */
7466
ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7467
/* Disable Timing Loop */
7468
ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7469
/* Disable Frequency Loop */
7470
ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7471
/* Disable Baseline Wander Correction */
7472
ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7473
/* Disable RX Calibration */
7474
ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7475
/* Disable RX Offset Calibration */
7476
ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7477
/* Select BB CDR */
7478
ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7479
/* CDR Step Size */
7480
ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7481
/* Enable phase Calibration */
7482
ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7483
/* DFE Bandwidth [2:14-12] */
7484
ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7485
/* DFE Config (4 taps only) */
7486
ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7487
/* Gain Loop Bandwidth */
7488
if (!ppd->dd->cspec->r1) {
7489
ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7490
ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7491
} else {
7492
ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7493
}
7494
/* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7495
/* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7496
/* Data Rate Select [5:7-6] (leave as default) */
7497
/* RX Parallel Word Width [3:10-8] (leave as default) */
7498
7499
/* RX RESET */
/* Single- or Multi-channel reset */
7501
/* RX Analog reset */
7502
/* RX Digital reset */
7503
ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7504
msleep(20);
7505
/* RX Analog reset */
7506
ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7507
msleep(20);
7508
/* RX Digital reset */
7509
ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7510
msleep(20);
7511
7512
/* setup LoS params; these are subsystem, so chan == 5 */
7513
/* LoS filter threshold_count on, ch 0-3, set to 8 */
7514
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7515
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7516
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7517
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7518
7519
/* LoS filter threshold_count off, ch 0-3, set to 4 */
7520
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7521
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7522
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7523
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7524
7525
/* LoS filter select enabled */
7526
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7527
7528
/* LoS target data: SDR=4, DDR=2, QDR=1 */
7529
ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7530
ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7531
ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7532
7533
/* Turn on LOS on initial SERDES init */
7534
serdes_7322_los_enable(ppd, 1);
7535
/* FLoop LOS gate: PPM filter enabled */
7536
ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7537
7538
/* RX LATCH CALIBRATION */
7539
/* Enable Eyefinder Phase Calibration latch */
7540
ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7541
/* Enable RX Offset Calibration latch */
7542
ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7543
msleep(20);
7544
/* Start Calibration */
7545
ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7546
tstart = get_jiffies_64();
7547
while (chan_done &&
7548
!time_after64(get_jiffies_64(),
7549
tstart + msecs_to_jiffies(500))) {
7550
msleep(20);
7551
for (chan = 0; chan < SERDES_CHANS; ++chan) {
7552
rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7553
(chan + (chan >> 1)),
7554
25, 0, 0);
7555
if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7556
(~chan_done & (1 << chan)) == 0)
7557
chan_done &= ~(1 << chan);
7558
}
7559
}
7560
if (chan_done) {
7561
printk(KERN_INFO QIB_DRV_NAME
7562
" Serdes %d calibration not done after .5 sec: 0x%x\n",
7563
IBSD(ppd->hw_pidx), chan_done);
7564
} else {
7565
for (chan = 0; chan < SERDES_CHANS; ++chan) {
7566
rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7567
(chan + (chan >> 1)),
7568
25, 0, 0);
7569
if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7570
printk(KERN_INFO QIB_DRV_NAME
7571
" Serdes %d chan %d calibration "
7572
"failed\n", IBSD(ppd->hw_pidx), chan);
7573
}
7574
}
7575
7576
/* Turn off Calibration */
7577
ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7578
msleep(20);
7579
7580
/* BRING RX UP */
7581
/* Set LE2 value (May be overridden in qsfp_7322_event) */
7582
le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7583
ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7584
/* Set LE2 Loop bandwidth */
7585
ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7586
/* Enable LE2 */
7587
ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7588
msleep(20);
7589
/* Enable H0 only */
7590
ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7591
/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7592
le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7593
ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7594
/* Enable VGA */
7595
ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7596
msleep(20);
7597
/* Set Frequency Loop Bandwidth */
7598
ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
7599
/* Enable Frequency Loop */
7600
ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7601
/* Set Timing Loop Bandwidth */
7602
ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7603
/* Enable Timing Loop */
7604
ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7605
msleep(50);
7606
/* Enable DFE
7607
* Set receive adaptation mode. SDR and DDR adaptation are
7608
* always on, and QDR is initially enabled; later disabled.
7609
*/
7610
qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7611
qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7612
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7613
ppd->dd->cspec->r1 ?
7614
QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7615
ppd->cpspec->qdr_dfe_on = 1;
7616
/* Disable LE1 */
7617
ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7618
/* Disable auto adapt for LE1 */
7619
ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7620
msleep(20);
7621
/* Enable AFE Offset Cancel */
7622
ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7623
/* Enable Baseline Wander Correction */
7624
ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7625
/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7626
ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7627
/* VGA output common mode */
7628
ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7629
7630
return 0;
7631
}
7632
7633
/* start adjust QMH serdes parameters */
7634
7635
static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7636
{
7637
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7638
9, code << 9, 0x3f << 9);
7639
}
7640
7641
static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7642
int enable, u32 tapenable)
7643
{
7644
if (enable)
7645
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7646
1, 3 << 10, 0x1f << 10);
7647
else
7648
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7649
1, 0, 0x1f << 10);
7650
}
7651
7652
/* Set clock to 1, 0, 1, 0 */
7653
static void clock_man(struct qib_pportdata *ppd, int chan)
7654
{
7655
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7656
4, 0x4000, 0x4000);
7657
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7658
4, 0, 0x4000);
7659
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7660
4, 0x4000, 0x4000);
7661
ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7662
4, 0, 0x4000);
7663
}
7664
7665
/*
7666
* write the current Tx serdes pre,post,main,amp settings into the serdes.
7667
* The caller must pass the settings appropriate for the current speed,
7668
* or not care if they are correct for the current speed.
7669
*/
7670
static void write_tx_serdes_param(struct qib_pportdata *ppd,
7671
struct txdds_ent *txdds)
7672
{
7673
u64 deemph;
7674
7675
deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7676
/* field names for amp, main, post, pre, respectively */
7677
deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7678
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7679
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7680
SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7681
7682
deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7683
tx_override_deemphasis_select);
7684
deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7685
txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7686
txampcntl_d2a);
7687
deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7688
txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7689
txc0_ena);
7690
deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7691
txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7692
txcp1_ena);
7693
deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7694
txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7695
txcn1_ena);
7696
qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7697
}
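/*
 * Illustrative sketch (not part of the driver): write_tx_serdes_param()
 * maps a txdds_ent { amp, pre, main, post } onto the deemphasis override
 * fields txampcntl_d2a, txcn1_ena, txc0_ena and txcp1_ena, respectively.
 * For example, the loopback entry { 2, 2, 15, 6 } programs amp=2, pre=2,
 * main=15, post=6, with tx_override_deemphasis_select set.
 */
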
/*
 * Set the parameters for mez cards on link bounce, so they are
 * always exactly what was requested.  Similar logic to init_txdds_table()
 * but does just the serdes.
 */
static void adj_tx_serdes(struct qib_pportdata *ppd)
{
        const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
        struct txdds_ent *dds;

        find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
        dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
                qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
                ddr_dds : sdr_dds));
        write_tx_serdes_param(ppd, dds);
}

/* set QDR forced value for H1, if needed */
7717
static void force_h1(struct qib_pportdata *ppd)
7718
{
7719
int chan;
7720
7721
ppd->cpspec->qdr_reforce = 0;
7722
if (!ppd->dd->cspec->r1)
7723
return;
7724
7725
for (chan = 0; chan < SERDES_CHANS; chan++) {
7726
set_man_mode_h1(ppd, chan, 1, 0);
7727
set_man_code(ppd, chan, ppd->cpspec->h1_val);
7728
clock_man(ppd, chan);
7729
set_man_mode_h1(ppd, chan, 0, 0);
7730
}
7731
}
7732
7733
#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7734
#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7735
7736
#define R_OPCODE_LSB 3
7737
#define R_OP_NOP 0
7738
#define R_OP_SHIFT 2
7739
#define R_OP_UPDATE 3
7740
#define R_TDI_LSB 2
7741
#define R_TDO_LSB 1
7742
#define R_RDY 1
7743
7744
static int qib_r_grab(struct qib_devdata *dd)
7745
{
7746
u64 val;
7747
val = SJA_EN;
7748
qib_write_kreg(dd, kr_r_access, val);
7749
qib_read_kreg32(dd, kr_scratch);
7750
return 0;
7751
}
7752
7753
/* qib_r_wait_for_rdy() not only waits for the ready bit, it
7754
* returns the current state of R_TDO
7755
*/
7756
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7757
{
7758
u64 val;
7759
int timeout;
7760
for (timeout = 0; timeout < 100 ; ++timeout) {
7761
val = qib_read_kreg32(dd, kr_r_access);
7762
if (val & R_RDY)
7763
return (val >> R_TDO_LSB) & 1;
7764
}
7765
return -1;
7766
}
7767
7768
static int qib_r_shift(struct qib_devdata *dd, int bisten,
7769
int len, u8 *inp, u8 *outp)
7770
{
7771
u64 valbase, val;
7772
int ret, pos;
7773
7774
valbase = SJA_EN | (bisten << BISTEN_LSB) |
7775
(R_OP_SHIFT << R_OPCODE_LSB);
7776
ret = qib_r_wait_for_rdy(dd);
7777
if (ret < 0)
7778
goto bail;
7779
for (pos = 0; pos < len; ++pos) {
7780
val = valbase;
7781
if (outp) {
7782
outp[pos >> 3] &= ~(1 << (pos & 7));
7783
outp[pos >> 3] |= (ret << (pos & 7));
7784
}
7785
if (inp) {
7786
int tdi = inp[pos >> 3] >> (pos & 7);
7787
val |= ((tdi & 1) << R_TDI_LSB);
7788
}
7789
qib_write_kreg(dd, kr_r_access, val);
7790
qib_read_kreg32(dd, kr_scratch);
7791
ret = qib_r_wait_for_rdy(dd);
7792
if (ret < 0)
7793
break;
7794
}
7795
/* Restore to NOP between operations. */
7796
val = SJA_EN | (bisten << BISTEN_LSB);
7797
qib_write_kreg(dd, kr_r_access, val);
7798
qib_read_kreg32(dd, kr_scratch);
7799
ret = qib_r_wait_for_rdy(dd);
7800
7801
if (ret >= 0)
7802
ret = pos;
7803
bail:
7804
return ret;
7805
}
7806
7807
static int qib_r_update(struct qib_devdata *dd, int bisten)
7808
{
7809
u64 val;
7810
int ret;
7811
7812
val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7813
ret = qib_r_wait_for_rdy(dd);
7814
if (ret >= 0) {
7815
qib_write_kreg(dd, kr_r_access, val);
7816
qib_read_kreg32(dd, kr_scratch);
7817
}
7818
return ret;
7819
}
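/*
 * Illustrative sketch (not part of the driver): the JTAG-style helpers above
 * are used as grab -> shift -> update sequences; for instance the link
 * recovery setup below effectively does
 *
 *      qib_r_grab(dd);
 *      qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL);
 *      qib_r_update(dd, BISTEN_ETM);
 *
 * (with error checking) before loading the port-select and AT patterns.
 */
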
#define BISTEN_PORT_SEL 15
7822
#define LEN_PORT_SEL 625
7823
#define BISTEN_AT 17
7824
#define LEN_AT 156
7825
#define BISTEN_ETM 16
7826
#define LEN_ETM 632
7827
7828
#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
7829
7830
/* these are common for all IB port use cases. */
7831
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7832
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7833
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7834
};
7835
static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7836
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7837
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7838
0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7839
0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7840
0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7841
0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7842
0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7843
0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7844
};
7845
static u8 at[BIT2BYTE(LEN_AT)] = {
7846
0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7847
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7848
};
7849
7850
/* used for IB1 or IB2, only one in use */
7851
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7852
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7853
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7854
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7855
0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7856
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7857
0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7858
0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7859
0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7860
};
7861
7862
/* used when both IB1 and IB2 are in use */
7863
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7864
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7865
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7866
0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7867
0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7868
0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7869
0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7870
0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7871
0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7872
};
7873
7874
/* used when only IB1 is in use */
7875
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
7876
0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7877
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7878
0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7879
0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7880
0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7881
0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7882
0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7883
0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7884
};
7885
7886
/* used when only IB2 is in use */
7887
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
7888
0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
7889
0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
7890
0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7891
0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7892
0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
7893
0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7894
0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7895
0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
7896
};
7897
7898
/* used when both IB1 and IB2 are in use */
7899
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
7900
0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7901
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7902
0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7903
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7904
0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7905
0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
7906
0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7907
0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7908
};
7909
7910
/*
7911
* Do setup to properly handle IB link recovery; if port is zero, we
7912
* are initializing to cover both ports; otherwise we are initializing
7913
* to cover a single port card, or the port has reached INIT and we may
7914
* need to switch coverage types.
7915
*/
7916
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
7917
{
7918
u8 *portsel, *etm;
7919
struct qib_devdata *dd = ppd->dd;
7920
7921
if (!ppd->dd->cspec->r1)
7922
return;
7923
if (!both) {
7924
dd->cspec->recovery_ports_initted++;
7925
ppd->cpspec->recovery_init = 1;
7926
}
7927
if (!both && dd->cspec->recovery_ports_initted == 1) {
7928
portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
7929
etm = atetm_1port;
7930
} else {
7931
portsel = portsel_2port;
7932
etm = atetm_2port;
7933
}
7934
7935
if (qib_r_grab(dd) < 0 ||
7936
qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
7937
qib_r_update(dd, BISTEN_ETM) < 0 ||
7938
qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
7939
qib_r_update(dd, BISTEN_AT) < 0 ||
7940
qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
7941
portsel, NULL) < 0 ||
7942
qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
7943
qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
7944
qib_r_update(dd, BISTEN_AT) < 0 ||
7945
qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
7946
qib_r_update(dd, BISTEN_ETM) < 0)
7947
qib_dev_err(dd, "Failed IB link recovery setup\n");
7948
}
7949
7950
static void check_7322_rxe_status(struct qib_pportdata *ppd)
7951
{
7952
struct qib_devdata *dd = ppd->dd;
7953
u64 fmask;
7954
7955
if (dd->cspec->recovery_ports_initted != 1)
7956
return; /* rest doesn't apply to dualport */
7957
qib_write_kreg(dd, kr_control, dd->control |
7958
SYM_MASK(Control, FreezeMode));
7959
(void)qib_read_kreg64(dd, kr_scratch);
7960
udelay(3); /* ibcreset asserted 400ns, be sure that's over */
7961
fmask = qib_read_kreg64(dd, kr_act_fmask);
7962
if (!fmask) {
7963
/*
7964
* require a powercycle before we'll work again, and make
7965
* sure we get no more interrupts, and don't turn off
7966
* freeze.
7967
*/
7968
ppd->dd->cspec->stay_in_freeze = 1;
7969
qib_7322_set_intr_state(ppd->dd, 0);
7970
qib_write_kreg(dd, kr_fmask, 0ULL);
7971
qib_dev_err(dd, "HCA unusable until powercycled\n");
7972
return; /* eventually reset */
7973
}
7974
7975
qib_write_kreg(ppd->dd, kr_hwerrclear,
7976
SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
7977
7978
/* don't do the full clear_freeze(), not needed for this */
7979
qib_write_kreg(dd, kr_control, dd->control);
7980
qib_read_kreg32(dd, kr_scratch);
7981
/* take IBC out of reset */
7982
if (ppd->link_speed_supported) {
7983
ppd->cpspec->ibcctrl_a &=
7984
~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
7985
qib_write_kreg_port(ppd, krp_ibcctrl_a,
7986
ppd->cpspec->ibcctrl_a);
7987
qib_read_kreg32(dd, kr_scratch);
7988
if (ppd->lflags & QIBL_IB_LINK_DISABLED)
7989
qib_set_ib_7322_lstate(ppd, 0,
7990
QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
7991
}
7992
}
7993
7994