Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/core/uverbs_cmd.c
37212 views
1
/*
2
* Copyright (c) 2005 Topspin Communications. All rights reserved.
3
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
5
* Copyright (c) 2006 Mellanox Technologies. All rights reserved.
6
*
7
* This software is available to you under a choice of one of two
8
* licenses. You may choose to be licensed under the terms of the GNU
9
* General Public License (GPL) Version 2, available from the file
10
* COPYING in the main directory of this source tree, or the
11
* OpenIB.org BSD license below:
12
*
13
* Redistribution and use in source and binary forms, with or
14
* without modification, are permitted provided that the following
15
* conditions are met:
16
*
17
* - Redistributions of source code must retain the above
18
* copyright notice, this list of conditions and the following
19
* disclaimer.
20
*
21
* - Redistributions in binary form must reproduce the above
22
* copyright notice, this list of conditions and the following
23
* disclaimer in the documentation and/or other materials
24
* provided with the distribution.
25
*
26
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33
* SOFTWARE.
34
*/
35
36
#include <linux/file.h>
37
#include <linux/fs.h>
38
#include <linux/slab.h>
39
40
#include <asm/uaccess.h>
41
42
#include "uverbs.h"
43
44
/*
 * One lockdep class key per uobject type: every ib_uobject embeds the
 * same rwsem, so without distinct keys lockdep would treat a nested
 * acquisition of, say, a PD's rwsem while holding a QP's rwsem as a
 * self-deadlock.  init_uobj() assigns the per-type key.
 */
static struct lock_class_key pd_lock_key;
static struct lock_class_key mr_lock_key;
static struct lock_class_key cq_lock_key;
static struct lock_class_key qp_lock_key;
static struct lock_class_key ah_lock_key;
static struct lock_class_key srq_lock_key;
50
51
/*
 * Fill in a struct ib_udata describing the user-space command buffers:
 * ibuf/ilen is the input area following the fixed command struct,
 * obuf/olen the response area following the fixed response struct.
 * Multi-statement macro wrapped in do { } while (0) so it can be used
 * safely as a single statement.
 */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)
58
59
/*
60
* The ib_uobject locking scheme is as follows:
61
*
62
* - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
63
* needs to be held during all idr operations. When an object is
64
* looked up, a reference must be taken on the object's kref before
65
* dropping this lock.
66
*
67
* - Each object also has an rwsem. This rwsem must be held for
68
* reading while an operation that uses the object is performed.
69
* For example, while registering an MR, the associated PD's
70
* uobject.mutex must be held for reading. The rwsem must be held
71
* for writing while initializing or destroying an object.
72
*
73
* - In addition, each object has a "live" flag. If this flag is not
74
* set, then lookups of the object will fail even if it is found in
75
* the idr. This handles a reader that blocks and does not acquire
76
* the rwsem until after the object is destroyed. The destroy
77
* operation will set the live flag to 0 and then drop the rwsem;
78
* this will allow the reader to acquire the rwsem, see that the
79
* live flag is 0, and then drop the rwsem and its reference to
80
* the object.  The underlying storage will not be freed until the last
81
* reference to the object is dropped.
82
*/
83
84
/*
 * Initialize a freshly allocated ib_uobject: record the user's opaque
 * handle and owning ucontext, start the refcount at 1, and set up the
 * per-object rwsem with the type-specific lockdep class.  The object
 * starts with live = 0, so idr lookups will refuse it until the
 * creating command finishes and flips live to 1.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct lock_class_key *key)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class(&uobj->mutex, key);
	uobj->live        = 0;
}
94
95
/* kref release callback: frees the uobject once the last reference drops. */
static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}
99
100
/* Drop one reference to a uobject; frees it via release_uobj() at zero. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
104
105
/*
 * Release a uobject taken for reading: drop the read lock first, then
 * the reference (the rwsem lives inside the object, so the unlock must
 * precede the put that may free it).
 */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
110
111
/*
 * Release a uobject taken for writing: unlock before dropping the
 * reference, since put_uobj() may free the storage holding the rwsem.
 */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
116
117
/*
 * Insert a uobject into the given idr, storing the allocated id in
 * uobj->id.  Uses the classic idr_pre_get()/idr_get_new() retry loop
 * of this kernel era: preallocation happens with GFP_KERNEL outside
 * the spinlock, and -EAGAIN from idr_get_new() means the preallocated
 * node was consumed by a racing insert, so we preallocate again.
 *
 * Returns 0 on success or a negative errno.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

retry:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&ib_uverbs_idr_lock);
	ret = idr_get_new(idr, uobj, &uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);

	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
134
135
/* Remove a uobject's id from the idr under the global uverbs idr lock. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
141
142
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
143
struct ib_ucontext *context)
144
{
145
struct ib_uobject *uobj;
146
147
spin_lock(&ib_uverbs_idr_lock);
148
uobj = idr_find(idr, id);
149
if (uobj) {
150
if (uobj->context == context)
151
kref_get(&uobj->ref);
152
else
153
uobj = NULL;
154
}
155
spin_unlock(&ib_uverbs_idr_lock);
156
157
return uobj;
158
}
159
160
/*
 * Look up a uobject and lock it for reading.  @nested selects
 * down_read_nested() with SINGLE_DEPTH_NESTING so a caller that
 * already holds one object's rwsem (e.g. create_qp reading both CQs)
 * does not trip lockdep.  If the object was destroyed between lookup
 * and lock acquisition (live == 0), the lock and reference are dropped
 * and NULL is returned.
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
180
181
/*
 * Look up a uobject and lock it for writing (used on destroy paths).
 * As with idr_read_uobj(), a concurrent destroyer may have cleared the
 * live flag before we got the rwsem; in that case back out and return
 * NULL so the caller reports -EINVAL rather than double-destroying.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
198
199
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
200
int nested)
201
{
202
struct ib_uobject *uobj;
203
204
uobj = idr_read_uobj(idr, id, context, nested);
205
return uobj ? uobj->object : NULL;
206
}
207
208
/* Look up a PD by handle and read-lock it; NULL on failure. */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
212
213
/* Release a PD taken with idr_read_pd(). */
static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}
217
218
/*
 * Look up a CQ by handle and read-lock it; @nested is forwarded for the
 * second-CQ case in create_qp (lockdep nesting annotation).
 */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
222
223
/* Release a CQ taken with idr_read_cq(). */
static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}
227
228
/* Look up an AH by handle and read-lock it; NULL on failure. */
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
232
233
/* Release an AH taken with idr_read_ah(). */
static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}
237
238
/* Look up a QP by handle and read-lock it; NULL on failure. */
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
242
243
/* Release a QP taken with idr_read_qp(). */
static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}
247
248
/* Look up an SRQ by handle and read-lock it; NULL on failure. */
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
252
253
/* Release an SRQ taken with idr_read_srq(). */
static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
257
258
/*
 * IB_USER_VERBS_CMD_GET_CONTEXT handler: allocate the per-process
 * ucontext, create the async event file, and hand its fd back to
 * user space.  Only one ucontext per uverbs file is permitted.
 *
 * Resources are acquired in order (ucontext, fd, event file) and the
 * goto chain unwinds them in reverse on failure; fd_install() is the
 * last step because once the fd is visible user space could close it.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* file->mutex serializes against a concurrent get_context. */
	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	/* Empty per-type object lists; populated as objects are created. */
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd();
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	/* Second arg 1 = this is the async event file, not a comp channel. */
	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	/* Publish the fd only after everything else has succeeded. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
353
354
/*
 * IB_USER_VERBS_CMD_QUERY_DEVICE handler: query the HCA's attributes
 * and marshal them field-by-field into the fixed-layout user response.
 * The response is memset to zero first so reserved/padding bytes never
 * leak kernel stack contents to user space.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
422
423
/*
 * IB_USER_VERBS_CMD_QUERY_PORT handler: query one port's attributes
 * (port number supplied by user space; validated by ib_query_port)
 * and copy them back.  Response is zeroed first to avoid leaking
 * uninitialized padding.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state	     = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
472
473
/*
 * IB_USER_VERBS_CMD_ALLOC_PD handler: allocate a protection domain and
 * a uobject wrapper for it.  Standard object-creation pattern:
 *   kmalloc uobject -> init_uobj (live = 0) -> down_write ->
 *   driver alloc -> idr insert -> copy handle to user ->
 *   link into ucontext list -> live = 1 -> up_write.
 * The uobject stays write-locked and non-live for the whole sequence,
 * so a concurrent dealloc on the half-built handle cannot succeed.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Object fully constructed: make it visible to lookups. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	/* Drops the write lock and the initial kref, freeing uobj. */
	put_uobj_write(uobj);
	return ret;
}
546
547
/*
 * IB_USER_VERBS_CMD_DEALLOC_PD handler.  Standard destroy pattern:
 * take the uobject write-locked, destroy the underlying PD, clear the
 * live flag so racing readers bail out, then remove the idr entry and
 * list linkage and drop the final reference.  If the driver refuses
 * the dealloc (e.g. PD still in use), the object stays live.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
581
582
/*
 * IB_USER_VERBS_CMD_REG_MR handler: register a user memory region on a
 * PD.  Validates that the virtual address and the requested HCA VA
 * share the same offset within a page, and that local write permission
 * accompanies any remote write/atomic permission (IB spec requirement).
 * Holds the PD read-locked across the driver call so the PD cannot be
 * deallocated mid-registration.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* start and hca_va must have the same page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	/* The MR pins its PD for its lifetime. */
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
682
683
/*
 * IB_USER_VERBS_CMD_DEREG_MR handler: deregister an MR, following the
 * same write-lock / clear-live / idr-remove destroy pattern as
 * ib_uverbs_dealloc_pd().
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
720
721
/*
 * IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL handler: create a completion
 * event file and return its fd.  fd_install() is deferred until the
 * response has been copied out, so on any failure the reserved fd and
 * the file can still be released without user space ever seeing them.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd();
	if (ret < 0)
		return ret;
	resp.fd = ret;

	/* Second arg 0 = completion channel, not the async event file. */
	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
757
758
/*
 * IB_USER_VERBS_CMD_CREATE_CQ handler: create a completion queue,
 * optionally bound to a completion channel (cmd.comp_channel >= 0).
 * Follows the standard creation pattern (see ib_uverbs_alloc_pd), with
 * an ib_ucq_object wrapper that also tracks completion/async events
 * delivered for this CQ.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
	down_write(&obj->uobject.mutex);

	/* A negative comp_channel means "no completion channel". */
	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	/* Also drops the reference taken by ib_uverbs_lookup_comp_file(). */
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
858
859
/*
 * IB_USER_VERBS_CMD_RESIZE_CQ handler: resize an existing CQ via the
 * driver's resize_cq method and report the actual new capacity.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/*
	 * NOTE(review): only sizeof resp.cqe bytes are copied, i.e. just
	 * the cqe field, not the whole resp struct — presumably deliberate
	 * so the response's trailing reserved field is left untouched;
	 * confirm against the uverbs ABI before changing.
	 */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
895
896
/*
 * Marshal one kernel work completion into the userspace ABI struct and
 * copy it to @dest.  The designated initializer assigns exactly the
 * same field values as the original field-by-field code (and, as a
 * side benefit, any padding in the temporary starts zeroed rather than
 * indeterminate).  Returns 0 on success or -EFAULT.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp = {
		.wr_id		= wc->wr_id,
		.status		= wc->status,
		.opcode		= wc->opcode,
		.vendor_err	= wc->vendor_err,
		.byte_len	= wc->byte_len,
		.ex.imm_data	= (__u32 __force) wc->ex.imm_data,
		.qp_num		= wc->qp->qp_num,
		.src_qp		= wc->src_qp,
		.wc_flags	= wc->wc_flags,
		.pkey_index	= wc->pkey_index,
		.slid		= wc->slid,
		.sl		= wc->sl,
		.dlid_path_bits	= wc->dlid_path_bits,
		.port_num	= wc->port_num,
		.reserved	= 0,
	};

	return copy_to_user(dest, &tmp, sizeof tmp) ? -EFAULT : 0;
}
921
922
/*
 * IB_USER_VERBS_CMD_POLL_CQ handler: poll up to cmd.ne completions,
 * one at a time, copying each ib_uverbs_wc to user space immediately
 * after the header-sized response slot.  The header (containing the
 * final count) is written last, once polling is done.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			/* CQ drained before reaching the requested count. */
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
972
973
/*
 * IB_USER_VERBS_CMD_REQ_NOTIFY_CQ handler: arm the CQ to raise a
 * completion event, for solicited-only or for any next completion.
 * The ib_req_notify_cq() return value is ignored here; without
 * IB_CQ_REPORT_MISSED_EVENTS there is nothing actionable to report.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}
994
995
/*
 * IB_USER_VERBS_CMD_DESTROY_CQ handler: destroy a CQ and report how
 * many completion/async events were delivered on it.  Same destroy
 * pattern as dealloc_pd, plus ib_uverbs_release_ucq() to flush pending
 * events and release the completion channel.  The event counters are
 * read after release_ucq (which may account final events) but before
 * put_uobj(), while the ib_ucq_object storage is still guaranteed live.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1046
1047
/*
 * IB_USER_VERBS_CMD_CREATE_QP handler: create a queue pair on a PD
 * with the given send/receive CQs and optional SRQ.  All referenced
 * objects are taken read-locked up front and held across the driver's
 * create_qp call so none can be destroyed mid-creation; the receive CQ
 * uses the nested lockdep annotation when it differs from the send CQ,
 * since two CQ rwsems of the same class are then held at once.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
	down_write(&obj->uevent.uobject.mutex);

	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
	/* Share the send CQ lock when both handles name the same CQ. */
	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);

	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->device	  = pd->device;
	qp->pd		  = pd;
	qp->send_cq	  = attr.send_cq;
	qp->recv_cq	  = attr.recv_cq;
	qp->srq		  = attr.srq;
	qp->uobject	  = &obj->uevent.uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context	  = attr.qp_context;
	qp->qp_type	  = attr.qp_type;
	/* The QP pins its PD, both CQs and (if any) the SRQ. */
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);
	put_cq_read(scq);
	if (rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	/* Only release what was actually looked up; rcq may alias scq. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1186
1187
/*
 * QUERY_QP command: look up a QP by its user-space handle, query its
 * current attributes from the device, and marshal them back into the
 * user's response buffer.  Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Both attribute structs are large; allocate rather than use stack. */
	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* Done touching the QP; drop the read reference before marshaling. */
	put_qp_read(qp);

	if (ret)
		goto out;

	/* Zero first so reserved/padding bytes don't leak kernel memory. */
	memset(&resp, 0, sizeof resp);

	resp.qp_state            = attr->qp_state;
	resp.cur_qp_state        = attr->cur_qp_state;
	resp.path_mtu            = attr->path_mtu;
	resp.path_mig_state      = attr->path_mig_state;
	resp.qkey                = attr->qkey;
	resp.rq_psn              = attr->rq_psn;
	resp.sq_psn              = attr->sq_psn;
	resp.dest_qp_num         = attr->dest_qp_num;
	resp.qp_access_flags     = attr->qp_access_flags;
	resp.pkey_index          = attr->pkey_index;
	resp.alt_pkey_index      = attr->alt_pkey_index;
	resp.sq_draining         = attr->sq_draining;
	resp.max_rd_atomic       = attr->max_rd_atomic;
	resp.max_dest_rd_atomic  = attr->max_dest_rd_atomic;
	resp.min_rnr_timer       = attr->min_rnr_timer;
	resp.port_num            = attr->port_num;
	resp.timeout             = attr->timeout;
	resp.retry_cnt           = attr->retry_cnt;
	resp.rnr_retry           = attr->rnr_retry;
	resp.alt_port_num        = attr->alt_port_num;
	resp.alt_timeout         = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label     = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index     = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit      = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class  = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid           = attr->ah_attr.dlid;
	resp.dest.sl             = attr->ah_attr.sl;
	resp.dest.src_path_bits  = attr->ah_attr.src_path_bits;
	resp.dest.static_rate    = attr->ah_attr.static_rate;
	resp.dest.is_global      = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num       = attr->ah_attr.port_num;

	/* Alternate path address vector (for path migration). */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	/* Creation-time capabilities reported by ib_query_qp(). */
	resp.max_send_wr         = init_attr->cap.max_send_wr;
	resp.max_recv_wr         = init_attr->cap.max_recv_wr;
	resp.max_send_sge        = init_attr->cap.max_send_sge;
	resp.max_recv_sge        = init_attr->cap.max_recv_sge;
	resp.max_inline_data     = init_attr->cap.max_inline_data;
	resp.sq_sig_all          = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* kfree(NULL) is a no-op, so this is safe on the partial-alloc path. */
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
1286
1287
/*
 * MODIFY_QP command: copy the requested attribute set from user space
 * and invoke the device driver's modify_qp verb directly, passing any
 * trailing command bytes through as driver-private udata.
 *
 * NOTE(review): cmd.port_num / cmd.alt_port_num and the other attribute
 * values are forwarded to the driver without range validation here; later
 * upstream kernels added explicit port-number validation in this path.
 * Confirm the drivers in this tree tolerate out-of-range values.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Anything past the fixed command struct is driver-private input. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid                  = cmd.dest.dlid;
	attr->ah_attr.sl                    = cmd.dest.sl;
	attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate           = cmd.dest.static_rate;
	attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num              = cmd.dest.port_num;

	/* Alternate path address vector (for path migration). */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	/* Call the driver verb directly so udata can be passed through. */
	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
1373
1374
/*
 * DESTROY_QP command: tear down a QP, detach it from the per-file
 * bookkeeping, and report how many async events had been delivered for
 * it so user space can drain the remainder.
 *
 * Ordering matters: the uobject is held write-locked across the destroy
 * so concurrent lookups can't hand out a dying QP, and it is only
 * removed from the idr/list after the destroy succeeded.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	/* Refuse to destroy a QP still attached to multicast groups. */
	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush pending async events queued for this QP. */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	/* Drop the final reference; obj must not be touched after this. */
	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1428
1429
/*
 * POST_SEND command: unmarshal a user-space chain of send work requests
 * (each wqe_size bytes, followed by a flat array of SGEs) into kernel
 * ib_send_wr structures and post them to the QP's send queue.
 *
 * On driver failure, resp.bad_wr reports the 1-based index of the WR
 * that failed so user space knows how many were consumed.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The command must fully contain all WRs plus the SGE array. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	/* Each user WR must be at least the size the kernel will read. */
	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer reused to copy in one user WR at a time. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	/* UD WRs carry an AH handle and remote QPN/QKey instead of RDMA info. */
	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* SGEs are consumed sequentially; don't run past sge_count. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* One allocation holds the WR plus its aligned SGE array. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* Takes a read reference on the AH, dropped in out_put. */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fallthrough: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				/* Unknown opcodes are passed through as-is. */
				break;
			}
		}

		if (next->num_sge) {
			/* SGE array lives in the tail of the WR allocation. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp, wr, &bad_wr);
	if (ret)
		/* Count WRs up to and including the one that failed. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the WR chain, dropping any AH references taken above. */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
1584
1585
/*
 * Unmarshal a user-space chain of receive work requests into a linked
 * list of kernel ib_recv_wr structures.  The user layout is wr_count
 * WRs of wqe_size bytes each, followed by a flat array of sge_count
 * SGEs that the WRs consume in order.
 *
 * Shared by POST_RECV and POST_SRQ_RECV.  Returns the head of the list
 * or an ERR_PTR; the caller owns the list and must kfree each node.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	/* The input must fully contain all WRs plus the SGE array. */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	/* Each user WR must be at least the size the kernel will read. */
	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused to copy in one user WR at a time. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		/* SGEs are consumed sequentially; don't run past sge_count. */
		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* One allocation holds the WR plus its aligned SGE array. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			/* SGE array lives in the tail of the WR allocation. */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Free whatever part of the chain was built before the failure. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
1669
1670
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
1671
const char __user *buf, int in_len,
1672
int out_len)
1673
{
1674
struct ib_uverbs_post_recv cmd;
1675
struct ib_uverbs_post_recv_resp resp;
1676
struct ib_recv_wr *wr, *next, *bad_wr;
1677
struct ib_qp *qp;
1678
ssize_t ret = -EINVAL;
1679
1680
if (copy_from_user(&cmd, buf, sizeof cmd))
1681
return -EFAULT;
1682
1683
wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1684
in_len - sizeof cmd, cmd.wr_count,
1685
cmd.sge_count, cmd.wqe_size);
1686
if (IS_ERR(wr))
1687
return PTR_ERR(wr);
1688
1689
qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1690
if (!qp)
1691
goto out;
1692
1693
resp.bad_wr = 0;
1694
ret = qp->device->post_recv(qp, wr, &bad_wr);
1695
1696
put_qp_read(qp);
1697
1698
if (ret)
1699
for (next = wr; next; next = next->next) {
1700
++resp.bad_wr;
1701
if (next == bad_wr)
1702
break;
1703
}
1704
1705
if (copy_to_user((void __user *) (unsigned long) cmd.response,
1706
&resp, sizeof resp))
1707
ret = -EFAULT;
1708
1709
out:
1710
while (wr) {
1711
next = wr->next;
1712
kfree(wr);
1713
wr = next;
1714
}
1715
1716
return ret ? ret : in_len;
1717
}
1718
1719
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
1720
const char __user *buf, int in_len,
1721
int out_len)
1722
{
1723
struct ib_uverbs_post_srq_recv cmd;
1724
struct ib_uverbs_post_srq_recv_resp resp;
1725
struct ib_recv_wr *wr, *next, *bad_wr;
1726
struct ib_srq *srq;
1727
ssize_t ret = -EINVAL;
1728
1729
if (copy_from_user(&cmd, buf, sizeof cmd))
1730
return -EFAULT;
1731
1732
wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1733
in_len - sizeof cmd, cmd.wr_count,
1734
cmd.sge_count, cmd.wqe_size);
1735
if (IS_ERR(wr))
1736
return PTR_ERR(wr);
1737
1738
srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1739
if (!srq)
1740
goto out;
1741
1742
resp.bad_wr = 0;
1743
ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
1744
1745
put_srq_read(srq);
1746
1747
if (ret)
1748
for (next = wr; next; next = next->next) {
1749
++resp.bad_wr;
1750
if (next == bad_wr)
1751
break;
1752
}
1753
1754
if (copy_to_user((void __user *) (unsigned long) cmd.response,
1755
&resp, sizeof resp))
1756
ret = -EFAULT;
1757
1758
out:
1759
while (wr) {
1760
next = wr->next;
1761
kfree(wr);
1762
wr = next;
1763
}
1764
1765
return ret ? ret : in_len;
1766
}
1767
1768
/*
 * CREATE_AH command: create an address handle on the given PD from the
 * user-supplied address vector and return its handle to user space.
 *
 * The new uobject is held write-locked until fully published; the
 * error ladder unwinds in the exact reverse order of acquisition.
 *
 * NOTE(review): cmd.attr.port_num is passed to ib_create_ah without
 * range validation here; later upstream kernels added explicit port
 * checks in this path — confirm drivers tolerate bad values.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct ib_ah_attr               attr;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* Lock the uobject until it is fully initialized and published. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	/* Translate the user address vector into kernel ib_ah_attr form. */
	attr.dlid              = cmd.attr.dlid;
	attr.sl                = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num          = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	/* Publish the AH on the per-context list. */
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
1857
1858
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
1859
const char __user *buf, int in_len, int out_len)
1860
{
1861
struct ib_uverbs_destroy_ah cmd;
1862
struct ib_ah *ah;
1863
struct ib_uobject *uobj;
1864
int ret;
1865
1866
if (copy_from_user(&cmd, buf, sizeof cmd))
1867
return -EFAULT;
1868
1869
uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
1870
if (!uobj)
1871
return -EINVAL;
1872
ah = uobj->object;
1873
1874
ret = ib_destroy_ah(ah);
1875
if (!ret)
1876
uobj->live = 0;
1877
1878
put_uobj_write(uobj);
1879
1880
if (ret)
1881
return ret;
1882
1883
idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1884
1885
mutex_lock(&file->mutex);
1886
list_del(&uobj->list);
1887
mutex_unlock(&file->mutex);
1888
1889
put_uobj(uobj);
1890
1891
return in_len;
1892
}
1893
1894
/*
 * ATTACH_MCAST command: attach a QP to a multicast group identified by
 * (gid, mlid).  Each successful attach is recorded on the QP's
 * mcast_list so DESTROY_QP can refuse while attachments remain, and so
 * a duplicate attach of the same group is a silent no-op.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Already attached to this group?  Then succeed without re-attaching. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	/* Record the attachment only if the core attach succeeded. */
	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1940
1941
/*
 * DETACH_MCAST command: detach a QP from a multicast group and drop
 * the matching bookkeeping entry from the QP's mcast_list.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	/* Detach in the core first; keep bookkeeping if that fails. */
	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Remove the matching (gid, mlid) entry recorded at attach time. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_read(qp);

	return ret ? ret : in_len;
}
1977
1978
/*
 * CREATE_SRQ command: create a shared receive queue on the given PD and
 * return its handle and actual capabilities to user space.  Trailing
 * command/response bytes are passed through to the driver as udata.
 *
 * The new uobject is held write-locked until fully published; the
 * error ladder unwinds in the exact reverse order of acquisition.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	struct ib_uevent_object         *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_srq_init_attr          attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Bytes beyond the fixed structs are driver-private in/out data. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Lock the uobject until it is fully initialized and published. */
	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
	down_write(&obj->uobject.mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.attr.max_wr    = cmd.max_wr;
	attr.attr.max_sge   = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	obj->events_reported = 0;
	INIT_LIST_HEAD(&obj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the core fields the driver verb does not set itself. */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->uobject       = &obj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
	if (ret)
		goto err_destroy;

	/* Zero first so reserved/padding bytes don't leak kernel memory. */
	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uobject.id;
	/* Report actual (possibly rounded-up) capabilities from the driver. */
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	/* Publish the SRQ on the per-context list. */
	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
2078
2079
/*
 * MODIFY_SRQ command: change an SRQ's max_wr and/or srq_limit (per
 * cmd.attr_mask), passing any trailing command bytes to the driver as
 * udata.  Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Anything past the fixed command struct is driver-private input. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	/* Only the fields selected by cmd.attr_mask are meaningful. */
	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}
2108
2109
/*
 * QUERY_SRQ command: query an SRQ's current attributes from the device
 * and marshal them back to user space.
 */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                  *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	/* attr is filled in by the query; no need to initialize it here. */
	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	/* Zero first so reserved/padding bytes don't leak kernel memory. */
	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2148
2149
/*
 * DESTROY_SRQ command: destroy an SRQ, detach it from the per-file
 * bookkeeping, and report how many async events had been delivered for
 * it so user space can drain the remainder.
 *
 * Ordering matters: the uobject is held write-locked across the destroy
 * and only removed from the idr/list after the destroy succeeded.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject                *uobj;
	struct ib_srq                    *srq;
	struct ib_uevent_object          *obj;
	int                               ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush pending async events queued for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	/* Drop the final reference; obj must not be touched after this. */
	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
2197
2198