GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/hw/cxgb3/iwch_provider.c
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"

static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

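/*
 * Editorial note (comment added for this edited copy, not in the original
 * source): iwch_create_cq rounds the CQ depth up to a power of two (with 16
 * extra "fluff" entries on T3A parts to absorb error CQEs) and, for
 * userspace callers, hands back an mmap key in the create response so that
 * libcxgb3 can map the CQ ring directly; see iwch_mmap() further below.
 */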
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

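/*
 * Editorial note (comment added for this edited copy, not in the original
 * source): mmap requests are matched against the per-ucontext list of
 * iwch_mm_entry records queued by iwch_create_cq()/iwch_create_qp(). The
 * key (vm_pgoff << PAGE_SHIFT) selects the entry, and the recorded address
 * decides whether the T3 doorbell register or WQ/CQ DMA memory is mapped.
 */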
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

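/*
 * Editorial note (comment added for this edited copy, not in the original
 * source): user memory registration pins the region with ib_umem_get(),
 * then walks the umem chunk list and writes the page list (PBL) to adapter
 * memory in PAGE_SIZE-sized batches before programming the TPT entry via
 * iwch_register_mem().
 */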
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}

static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
		goto err3;

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

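/*
 * Editorial note (comment added for this edited copy, not in the original
 * source): QP creation rounds the RQ depth up to a power of two (minimum 16
 * on T3) and gives kernel QPs extra WQ space because fastreg work requests
 * can consume two WR slots; userspace QPs additionally receive mmap keys
 * for the WQ and doorbell pages in the create response.
 */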
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time. The CM sets them via a QP modify.
	 * Need to fix... I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{

	struct iwch_dev *dev;
	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				 m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

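/*
 * Editorial note (comment added for this edited copy, not in the original
 * source): registration fills in the ib_device callbacks and the iWARP CM
 * ops, calls ib_register_device(), and then creates the sysfs attribute
 * files; the bail1/bail2 labels unwind in reverse order on failure.
 */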
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}