GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/amso1100/c2_cm.c
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *   - Redistributions of source code must retain the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer.
 *
 *   - Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials
 *     provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>

#include "c2.h"
#include "c2_wr.h"
#include "c2_vq.h"
#include <rdma/iw_cm.h>

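/*
 * Active-side connect: look up the QP for the caller's QPN, bind it to
 * the iWARP CM id, program the RDMA read limits, then post a
 * CCWR_QP_CONNECT work request (with any private data) on the verbs
 * request queue. The adapter sends no synchronous reply; on failure the
 * CM id reference is dropped and the QP is detached.
 */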
int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct c2_dev *c2dev = to_c2dev(cm_id->device);
        struct ib_qp *ibqp;
        struct c2_qp *qp;
        struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
        struct c2_vq_req *vq_req;
        int err;

        ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
        if (!ibqp)
                return -EINVAL;
        qp = to_c2qp(ibqp);

        /* Associate QP <--> CM_ID */
        cm_id->provider_data = qp;
        cm_id->add_ref(cm_id);
        qp->cm_id = cm_id;

        /*
         * Only support up to the max private_data length.
         */
        if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
                err = -EINVAL;
                goto bail0;
        }
        /*
         * Set the RDMA read limits.
         */
        err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
        if (err)
                goto bail0;

        /*
         * Create and send a WR_QP_CONNECT...
         */
        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail0;
        }

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail1;
        }

        c2_wr_set_id(wr, CCWR_QP_CONNECT);
        wr->hdr.context = 0;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->qp_handle = qp->adapter_handle;

        wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
        wr->remote_port = cm_id->remote_addr.sin_port;

        /*
         * Move any private data from the caller's buf into
         * the WR.
         */
        if (iw_param->private_data) {
                wr->private_data_length =
                        cpu_to_be32(iw_param->private_data_len);
                memcpy(&wr->private_data[0], iw_param->private_data,
                       iw_param->private_data_len);
        } else
                wr->private_data_length = 0;

        /*
         * Send WR to adapter. NOTE: There is no synchronous reply from
         * the adapter.
         */
        err = vq_send_wr(c2dev, (union c2wr *) wr);
        vq_req_free(c2dev, vq_req);

bail1:
        kfree(wr);
bail0:
        if (err) {
                /*
                 * If we fail, release reference on QP and
                 * disassociate QP from CM_ID
                 */
                cm_id->provider_data = NULL;
                qp->cm_id = NULL;
                cm_id->rem_ref(cm_id);
        }
        return err;
}

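/*
 * Create a listening endpoint on the adapter: build a
 * CCWR_EP_LISTEN_CREATE work request from the CM id's local address,
 * port and backlog, send it on the verbs request queue, wait for the
 * reply, and stash the returned endpoint handle in
 * cm_id->provider_data for the later destroy.
 */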
int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
{
        struct c2_dev *c2dev;
        struct c2wr_ep_listen_create_req wr;
        struct c2wr_ep_listen_create_rep *reply;
        struct c2_vq_req *vq_req;
        int err;

        c2dev = to_c2dev(cm_id->device);
        if (c2dev == NULL)
                return -EINVAL;

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR
         */
        c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
        wr.hdr.context = (u64) (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
        wr.local_port = cm_id->local_addr.sin_port;
        wr.backlog = cpu_to_be32(backlog);
        wr.user_context = (u64) (unsigned long) cm_id;

        /*
         * Reference the request struct. Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply
         */
        reply =
            (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        if ((err = c2_errno(reply)) != 0)
                goto bail1;

        /*
         * Keep the adapter handle. Used in subsequent destroy.
         */
        cm_id->provider_data = (void *) (unsigned long) reply->ep_handle;

        /*
         * Free vq stuff
         */
        vq_repbuf_free(c2dev, reply);
        vq_req_free(c2dev, vq_req);

        return 0;

bail1:
        vq_repbuf_free(c2dev, reply);
bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}

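/*
 * Tear down a listening endpoint: send a CCWR_EP_LISTEN_DESTROY work
 * request for the endpoint handle saved in cm_id->provider_data and
 * wait for the adapter's reply.
 */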
int c2_llp_service_destroy(struct iw_cm_id *cm_id)
{
        struct c2_dev *c2dev;
        struct c2wr_ep_listen_destroy_req wr;
        struct c2wr_ep_listen_destroy_rep *reply;
        struct c2_vq_req *vq_req;
        int err;

        c2dev = to_c2dev(cm_id->device);
        if (c2dev == NULL)
                return -EINVAL;

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR
         */
        c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

        /*
         * Reference the request struct. Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply
         */
        reply =
            (struct c2wr_ep_listen_destroy_rep *) (unsigned long) vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }
        if ((err = c2_errno(reply)) != 0)
                goto bail1;

bail1:
        vq_repbuf_free(c2dev, reply);
bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}

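/*
 * Passive-side accept: set the RDMA read limits on the QP, post a
 * CCWR_CR_ACCEPT work request carrying any private data, and wait for
 * the adapter's reply. On success the QP is moved to RTS and bound to
 * the CM id; on failure the reference is released and the QP detached.
 */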
int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
        struct c2_dev *c2dev = to_c2dev(cm_id->device);
        struct c2_qp *qp;
        struct ib_qp *ibqp;
        struct c2wr_cr_accept_req *wr; /* variable length WR */
        struct c2_vq_req *vq_req;
        struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
        int err;

        ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
        if (!ibqp)
                return -EINVAL;
        qp = to_c2qp(ibqp);

        /* Set the RDMA read limits */
        err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
        if (err)
                goto bail0;

        /* Allocate verbs request. */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail0;
        }
        vq_req->qp = qp;
        vq_req->cm_id = cm_id;
        vq_req->event = IW_CM_EVENT_ESTABLISHED;

        wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
        if (!wr) {
                err = -ENOMEM;
                goto bail1;
        }

        /* Build the WR */
        c2_wr_set_id(wr, CCWR_CR_ACCEPT);
        wr->hdr.context = (unsigned long) vq_req;
        wr->rnic_handle = c2dev->adapter_handle;
        wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
        wr->qp_handle = qp->adapter_handle;

        /* Replace the cr_handle with the QP after accept */
        cm_id->provider_data = qp;
        cm_id->add_ref(cm_id);
        qp->cm_id = cm_id;

        /* Validate private_data length */
        if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
                err = -EINVAL;
                goto bail1;
        }

        if (iw_param->private_data) {
                wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
                memcpy(&wr->private_data[0],
                       iw_param->private_data, iw_param->private_data_len);
        } else
                wr->private_data_length = 0;

        /* Reference the request struct. Dereferenced in the int handler. */
        vq_req_get(c2dev, vq_req);

        /* Send WR to adapter */
        err = vq_send_wr(c2dev, (union c2wr *) wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        /* Wait for reply from adapter */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        /* Check that reply is present */
        reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail1;
        }

        err = c2_errno(reply);
        vq_repbuf_free(c2dev, reply);

        if (!err)
                c2_set_qp_state(qp, C2_QP_STATE_RTS);
bail1:
        kfree(wr);
        vq_req_free(c2dev, vq_req);
bail0:
        if (err) {
                /*
                 * If we fail, release reference on QP and
                 * disassociate QP from CM_ID
                 */
                cm_id->provider_data = NULL;
                qp->cm_id = NULL;
                cm_id->rem_ref(cm_id);
        }
        return err;
}

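/*
 * Reject a pending connection request: post a CCWR_CR_REJECT work
 * request for the endpoint handle in cm_id->provider_data and wait for
 * the adapter's reply. The private data arguments are accepted but not
 * forwarded to the adapter.
 */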
int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
        struct c2_dev *c2dev;
        struct c2wr_cr_reject_req wr;
        struct c2_vq_req *vq_req;
        struct c2wr_cr_reject_rep *reply;
        int err;

        c2dev = to_c2dev(cm_id->device);

        /*
         * Allocate verbs request.
         */
        vq_req = vq_req_alloc(c2dev);
        if (!vq_req)
                return -ENOMEM;

        /*
         * Build the WR
         */
        c2_wr_set_id(&wr, CCWR_CR_REJECT);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;

        /*
         * Reference the request struct. Dereferenced in the int handler.
         */
        vq_req_get(c2dev, vq_req);

        /*
         * Send WR to adapter
         */
        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail0;
        }

        /*
         * Wait for reply from adapter
         */
        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail0;

        /*
         * Process reply
         */
        reply = (struct c2wr_cr_reject_rep *) (unsigned long)
                vq_req->reply_msg;
        if (!reply) {
                err = -ENOMEM;
                goto bail0;
        }
        err = c2_errno(reply);
        /*
         * Free vq stuff
         */
        vq_repbuf_free(c2dev, reply);

bail0:
        vq_req_free(c2dev, vq_req);
        return err;
}