GitHub Repository: torvalds/linux
Path: blob/master/include/rdma/rdmavt_qp.h
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
#include <linux/vmalloc.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX 0x80
#define RVT_KDETH_QP_SUFFIX 0xffff
#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
				RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
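/*
 * Illustrative sketch (hypothetical helper, not part of rdmavt): a driver
 * could classify a 24-bit destination QPN against the KDETH range by
 * comparing the QPN[23:16] prefix bits:
 *
 *	static inline bool example_is_kdeth_qpn(u32 qpn)
 *	{
 *		return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
 *	}
 *
 * For 24-bit QPNs this is equivalent to checking
 * RVT_KDETH_QP_BASE <= qpn && qpn <= RVT_KDETH_QP_MAX.
 */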

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX 0x81
#define RVT_AIP_QP_SUFFIX 0xffff
#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
			      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs signal completion only on request
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
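/*
 * Illustrative sketch (hypothetical fragment, context elided): a send
 * engine would typically consult these masks under s_lock before doing
 * any work:
 *
 *	lockdep_assert_held(&qp->s_lock);
 *	if (qp->s_flags & RVT_S_ANY_WAIT_IO)
 *		return false;	- no packet of any type can be sent
 *	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
 *		return false;	- new send work requests cannot progress
 */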

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK 0x01
#define RVT_POST_RECV_OK 0x02
#define RVT_PROCESS_RECV_OK 0x04
#define RVT_PROCESS_SEND_OK 0x08
#define RVT_PROCESS_NEXT_SEND_OK 0x10
#define RVT_FLUSH_SEND 0x20
#define RVT_FLUSH_RECV 0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr. I.e. the attr
 * MUST come after wr. The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};
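/*
 * Illustrative sketch (hypothetical check, not present in rdmavt): since
 * rvt_post_one_wr() sizes and copies the embedded ib_ud_wr assuming it
 * sits at offset zero, the layout requirement above could be enforced at
 * compile time with:
 *
 *	static_assert(offsetof(struct rvt_ud_wr, wr) == 0,
 *		      "wr must be the first member of struct rvt_ud_wr");
 */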

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr; /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn; /* first packet sequence number */
	u32 lpsn; /* last packet sequence number */
	u32 ssn; /* send sequence number */
	u32 length; /* total length of data in sg_list */
	void *priv; /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect producer of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect consumer of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @wq: array of receive work request queue entries
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for kernel
 * mode users.
 */
struct rvt_krwq {
	spinlock_t p_lock; /* protect producer */
	u32 head; /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail; /* receives pull requests from here. */
	u32 count; /* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size; /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: data structure for the request queue entry
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return - total number of entries in the Receive Queue
 */

static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}
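/*
 * Worked example: with rq->size == 256, head == 10 and tail == 250, the
 * u32 subtraction wraps, the (s32) cast sees a negative value, and the
 * helper returns 10 - 250 + 256 == 16 posted entries. A hypothetical
 * caller (locking elided) might use it as:
 *
 *	u32 posted = rvt_get_rq_count(rq, READ_ONCE(rq->kwq->head),
 *				      READ_ONCE(rq->kwq->tail));
 */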

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define RC_QP_SCALING_INTERVAL 5

#define RVT_OPERATION_PRIV 0x00000001
#define RVT_OPERATION_ATOMIC 0x00000002
#define RVT_OPERATION_ATOMIC_SGE 0x00000004
#define RVT_OPERATION_LOCAL 0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table driven post send so that
 * each driver can support a potentially different
 * set of operations.
 *
 **/

struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next; /* linked list for QPN hash table */
	struct rvt_swqe *s_wq; /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies; /* computed from timeout */

	int srate_mbps; /* s_srate (below) converted to Mbit/s */
	pid_t pid; /* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey; /* QKEY for this QP (for UD or RD) */
	u32 s_size; /* send work queue size */

	u16 pmtu; /* decoded from path_mtu */
	u8 log_pmtu; /* shift for pmtu */
	u8 state; /* QP state */
	u8 allowed_ops; /* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout; /* Alternate path timeout for this QP */
	u8 timeout; /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index; /* PKEY index to use */
	u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt; /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
	u8 s_max_sge; /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
	u32 r_psn; /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id; /* ID for current receive WQE */
	u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
	u32 r_len; /* total length of r_sge */
	u32 r_rcv_len; /* receive data len processed */
	u32 r_msn; /* message sequence number */

	u8 r_state; /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue; /* index into s_ack_queue[] */
	u8 r_adefered; /* deferred ack count */

	struct list_head rspwait; /* link for waiting to respond */

	struct rvt_sge_state r_sge; /* current receive data */
	struct rvt_rq r_rq; /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head; /* new entries added here */
	u32 s_next_psn; /* PSN for next request */
	u32 s_avail; /* number of entries avail */
	u32 s_ssn; /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge; /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len; /* total length of s_sge */
	u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
	u32 s_last_psn; /* last response PSN processed */
	u32 s_sending_psn; /* lowest PSN that is being sent */
	u32 s_sending_hpsn; /* highest PSN that is being sent */
	u32 s_psn; /* current packet sequence number */
	u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
	u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
	u32 s_tail; /* next entry to process */
	u32 s_cur; /* current work queue entry */
	u32 s_acked; /* last un-ACK'ed entry */
	u32 s_last; /* last completed entry */
	u32 s_lsn; /* limit sequence number (credit) */
	u32 s_ahgpsn; /* set to the psn in the copy of the header */
	u16 s_cur_size; /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords; /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state; /* opcode of last packet sent */
	u8 s_ack_state; /* opcode of packet to ACK */
	u8 s_nak_state; /* non-zero if NAK is pending */
	u8 r_nak_state; /* non-zero if NAK is pending */
	u8 s_retry; /* requester retry counter */
	u8 s_rnr_retry; /* requester RNR retry counter */
	u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue; /* index into s_ack_queue[] */
	u8 s_acked_ack_queue; /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge *r_sg_list /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK IB_QPN_MASK

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags; /* flags for QP0/1 allocated for each port */
	u32 last; /* last QP number allocated */
	u32 nmaps; /* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}
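/*
 * Illustrative sketch (hypothetical loop, locking elided): walking the
 * in-flight send WQEs must go through this helper rather than pointer
 * arithmetic on qp->s_wq, because each entry is s_max_sge SGEs wide:
 *
 *	u32 i = qp->s_acked;
 *
 *	while (i != qp->s_head) {
 *		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
 *
 *		(inspect wqe here)
 *		if (++i == qp->s_size)
 *			i = 0;
 *	}
 */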

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
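/*
 * Usage note (illustrative, hypothetical caller): every rvt_get_qp()
 * must be balanced by an rvt_put_qp(); the final put wakes sleepers on
 * qp->wait, which is how destroy paths wait for a QP to quiesce:
 *
 *	rvt_get_qp(qp);		- hold across asynchronous use
 *	...
 *	rvt_put_qp(qp);		- last put wakes up &qp->wait
 */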

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
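/*
 * Worked example: the << 8 shifts the 24-bit difference into the top of
 * the int so its sign reflects modular order. For a == 0x000001 and
 * b == 0xffffff, (int)a - (int)b == -16777214 (0xff000002); shifting
 * left by 8 drops the high byte and yields 0x00000200 > 0, correctly
 * treating a as "after" b across the 24-bit wrap.
 */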

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}
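/*
 * Worked example: with pmtu == 4096 (log_pmtu == 12), a 10000 byte
 * request needs (10000 + 4095) >> 12 == 3 packets, whereas rvt_div_mtu()
 * below returns 10000 >> 12 == 2, the number of full MTU payloads.
 */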

/**
 * rvt_div_mtu - divide by the mtu
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
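/*
 * Worked example: IBTA encodes the timeout as 4.096 usec * 2^timeout.
 * For timeout == 14 this helper computes
 * usecs_to_jiffies(1U << 14) * 4096 / 1000, i.e. roughly 67 ms worth of
 * jiffies (4.096 usec * 16384), with rounding determined by HZ.
 */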

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
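/*
 * Illustrative sketch (hypothetical caller): the lookup must run inside
 * an RCU read-side critical section, and the QP must be referenced
 * before the lock is dropped if it will be used afterwards:
 *
 *	rcu_read_lock();
 *	qp = rvt_lookup_qpn(rdi, rvp, qpn);
 *	if (qp)
 *		rvt_get_qp(qp);
 *	rcu_read_unlock();
 */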

/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to the completion queue
 * by the receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper for the rvt_cq_enter function call by the
 * receive queue. If rvt_cq_enter() returns false, it means the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to the completion queue
 * by the send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper for the rvt_cq_enter function call by the
 * send queue. If rvt_cq_enter() returns false, it means the cq is
 * full and the qp is put into the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion queue if the qp indicates
 * it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}
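/*
 * Illustrative sketch (hypothetical driver completion path): once the
 * last response for a WQE has been processed, a requester could retire
 * it with:
 *
 *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 *
 *	qp->s_acked = rvt_qp_complete_swqe(qp, wqe, IB_WC_SEND,
 *					   IB_WC_SUCCESS);
 *
 * The RVT_S_SIGNAL_REQ_WR / IB_SEND_SIGNALED logic above decides whether
 * an ib_wc entry is actually queued.
 */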

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif /* DEF_RDMAVT_INCQP_H */