GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/qib/qib_uc.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
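/* e.g. OP(SEND_ONLY) token-pastes to IB_OPCODE_UC_SEND_ONLY */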

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int ret = 0;
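
	/*
	 * Send-queue ring indices: s_head is where new work requests
	 * are posted, s_cur is the next WQE to packetize, and s_last
	 * is the oldest WQE not yet completed; each wraps at s_size.
	 */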

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_qib_state_ops[qp->state] &
		    QIB_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto bail;
		/*
		 * Start a new request.
		 */
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
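	/*
	 * The UC opcode for this packet (qp->s_state) goes in bits
	 * 31:24 of BTH word 0; PSNs are 24 bits, hence QIB_PSN_MASK.
	 */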
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_next_psn++ & QIB_PSN_MASK);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
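
/*
 * Note: UC is an unacknowledged transport.  There are no ACKs, retries
 * or RDMA reads; on a PSN mismatch the receiver below simply resyncs
 * its expected PSN and silently drops the interrupted message.
 *
 * For illustration only (hypothetical ULP usage, not part of this
 * driver): a consumer reaches qib_make_uc_req() by posting a work
 * request on a UC QP through the verbs layer, e.g.:
 *
 *	struct ib_send_wr wr = { .opcode = IB_WR_RDMA_WRITE };
 *	struct ib_send_wr *bad_wr;
 *
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	wr.wr.rdma.remote_addr = remote_va;
 *	wr.wr.rdma.rkey = rkey;
 *	ib_post_send(ibqp, &wr, &bad_wr);
 *
 * The verbs layer queues the WQE; this file later packetizes it.
 */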

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	unsigned long flags;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;       /* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
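	/*
	 * bth[0] is passed whole to qib_ruc_check_hdr() (it needs more
	 * than the opcode); the opcode itself, bits 31:24, is extracted
	 * only once the header has been validated.
	 */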
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		goto sunlock;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;
	memset(&wc, 0, sizeof wc);

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			while (qp->r_sge.num_sge) {
				atomic_dec(&qp->r_sge.sge.mr->refcount);
				if (--qp->r_sge.num_sge)
					qp->r_sge.sge = *qp->r_sge.sg_list++;
			}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
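
	/*
	 * A valid packet in the RTR state is the first sign of life
	 * from the remote side, so raise IB_EVENT_COMM_EST exactly once.
	 */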
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length: a MIDDLE packet must carry
		 * exactly one PMTU and must not overrun the posted
		 * receive length.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
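		/*
		 * The final argument to qib_copy_sge() says whether to
		 * release MR references as SGEs complete; send data is
		 * copied without releasing (0) so the SGE list can be
		 * rewound after a sequence error.
		 */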
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
last_imm:
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length: a MIDDLE packet must carry
		 * exactly one PMTU and must not overrun the RETH length.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			while (qp->s_rdma_read_sge.num_sge) {
				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
					   refcount);
				if (--qp->s_rdma_read_sge.num_sge)
					qp->s_rdma_read_sge.sge =
						*qp->s_rdma_read_sge.sg_list++;
			}
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
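	/* Advance the expected PSN and remember the opcode for the
	 * sequence checks on the next packet.
	 */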
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	return;

op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}