/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

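/*
 * Locking and lifetime overview (as implemented below): the global 'mut'
 * protects ctx_idr and multicast_idr, while each ucma_file's 'mut' protects
 * its ctx_list and event_list.  A ucma_context is pinned by an atomic
 * reference count; ucma_destroy_id() drops the initial reference and then
 * waits on 'comp' until every lookup taken via ucma_get_ctx() is released.
 */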
static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

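/*
 * Allocate a context for a new rdma_cm_id and publish it in ctx_idr so that
 * userspace can refer to it by integer id.  The idr_pre_get()/idr_get_new()
 * loop retries while the IDR needs more memory.
 */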
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

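/*
 * rdma_cm callback: translate the kernel event into a rdma_ucm_event_resp,
 * queue it on the owning file's event_list, and wake any poller.  Connect
 * requests consume one slot of the listening context's backlog; a nonzero
 * return value asks the rdma_cm core to tear down the new id.
 */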
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

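/*
 * GET_EVENT: block (unless O_NONBLOCK is set) until an event is queued,
 * then copy it to userspace.  A connect request gets a freshly allocated
 * context here so the new id can be reported to the listener before it is
 * accepted.
 */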
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	default:
		return -EINVAL;
	}
}

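/*
 * CREATE_ID: allocate a context, create the underlying rdma_cm_id with a QP
 * type derived from the requested port space, and return the new integer id
 * to userspace.
 */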
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

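/*
 * Tear down a context: destroy the rdma_cm_id first so no further events
 * can be generated, then release multicast joins and any events still
 * queued for this context.  Returns the number of events already reported
 * to userspace, which the destroy response carries back.
 */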
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

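/*
 * DESTROY_ID: remove the context from the idr so no new lookups can find
 * it, drop the initial reference, and wait for any concurrent callers of
 * ucma_get_ctx() to finish before freeing the context.
 */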
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

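/*
 * QUERY_ROUTE: report the bound source/destination addresses and, once a
 * device has been selected, the resolved route data.  The format of that
 * data depends on the transport and link layer (IB, IBoE/Ethernet, or
 * iWARP), handled by the copy helpers above.
 */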
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
						 ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

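/*
 * LISTEN: the requested backlog is clamped to the max_backlog sysctl
 * (net.rdma_ucm.max_backlog).  The remaining count is decremented by the
 * event handler for each pending connect request and restored when that
 * request is reported to userspace.
 */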
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

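/*
 * SET_OPTION: copy the option value from userspace into a kernel buffer and
 * dispatch on (level, optname).  Only RDMA_OPTION_ID (TOS, REUSEADDR) and
 * RDMA_OPTION_IB (explicit path records) are handled here.
 */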
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

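/*
 * JOIN_MCAST: allocate a ucma_multicast tracking structure, join the group
 * via rdma_join_multicast(), and return the multicast id to userspace.  The
 * error paths undo the join, any queued events, and the idr entry.
 */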
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in pointer order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

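/*
 * MIGRATE_ID: move a context (and its queued events) from the file that
 * currently owns it to the file issuing this command, letting userspace
 * switch an id to a different event channel.  Both files are locked in
 * pointer order so event ordering is preserved during the move.
 */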
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

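/*
 * Every operation on /dev/infiniband/rdma_cm is a write(): a struct
 * rdma_ucm_cmd_hdr (cmd, in, out) followed by the command-specific payload.
 * hdr.cmd indexes ucma_cmd_table above.  A rough userspace sketch, normally
 * hidden behind librdmacm ('resp' and 'my_cookie' are placeholders; exact
 * struct layouts are defined in rdma/rdma_user_cm.h):
 *
 *	struct rdma_ucm_cmd_hdr hdr;
 *	struct rdma_ucm_create_id cmd;
 *	char buf[sizeof(hdr) + sizeof(cmd)];
 *
 *	hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	hdr.in  = sizeof(cmd);
 *	hdr.out = sizeof(struct rdma_ucm_create_id_resp);
 *	cmd.uid = my_cookie;		 // echoed back in events
 *	cmd.ps  = RDMA_PS_TCP;
 *	cmd.response = (uintptr_t) &resp;
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));
 *	write(fd, buf, sizeof(buf));	 // fd from open("/dev/infiniband/rdma_cm", O_RDWR)
 */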
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

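/*
 * Release: destroy every context still owned by this file.  file->mut is
 * dropped around ucma_free_ctx(), which re-acquires it while cleaning up
 * queued events.
 */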
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

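/*
 * Module init: register the misc device (/dev/infiniband/rdma_cm), expose
 * the ABI version as a sysfs attribute, and hook up the net.rdma_ucm sysctl
 * table.  Each step is unwound in reverse order on failure.
 */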
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);