GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/ulp/ipoib/ipoib_main.c
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

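/*
 * ndo_open handler: mark the interface administratively up, open and
 * bring up the underlying IB resources (returning early when
 * ipoib_pkey_dev_delay_open() defers the open until the P_Key is
 * available), propagate IFF_UP to any child interfaces, and start the
 * TX queue.
 */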
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		goto err_disable;

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev, 1);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev, 0);
	ipoib_ib_dev_stop(dev, 0);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static u32 ipoib_fix_features(struct net_device *dev, u32 features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

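/*
 * MTU changes depend on the mode: in connected mode the MTU may go up to
 * ipoib_cm_max_mtu() (with a warning if it exceeds the multicast MTU),
 * while in datagram mode it is capped at IPOIB_UD_MTU(max_ib_mtu) and the
 * effective MTU is the smaller of the multicast and administered values.
 */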
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

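/*
 * Path cache: paths live in a per-device rb-tree (priv->path_tree) keyed
 * by destination GID in memcmp() order, plus a linear list used for
 * iteration and flushing.  __path_find() and __path_add() are called with
 * priv->lock held; path_free() drops any queued skbs and the AH and
 * neighbour references attached to the path.
 */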
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

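/*
 * Completion callback for the SA path record query started by
 * path_rec_start().  On success it builds an address handle from the
 * returned path, swaps it into the ipoib_path, points any waiting
 * neighbours at the new AH (creating connected-mode TX contexts where
 * enabled), and re-submits every skb that was queued while the lookup
 * was outstanding.
 */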
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (ah) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid          = priv->local_gid;
	path->pathrec.pkey          = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}

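/*
 * First transmit to a neighbour that has no ipoib_neigh attached yet:
 * allocate one, look up (or create) the path for the destination GID
 * found at offset 4 of the neighbour hardware address, and either send
 * right away if an address handle already exists or queue the skb and
 * kick off a path record query.
 */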
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
	if (!neigh) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

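/*
 * ndo_start_xmit: with a neighbour attached, use the connected-mode TX if
 * it is up, else the cached UD address handle, else queue the skb
 * (bounded by IPOIB_MAX_PATH_REC_QUEUE) while path resolution completes;
 * if the neighbour's cached GID or device no longer matches, tear it down
 * and redo the lookup.  Without a neighbour, recover the pseudoheader
 * pushed by ipoib_hard_header() and hand multicasts to ipoib_mcast_send()
 * and unicast ARP/RARP replies to unicast_arp_send().
 */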
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);

		if (unlikely((memcmp(&neigh->dgid.raw,
				     skb_dst(skb)->neighbour->ha + 4,
				     sizeof(union ib_gid))) ||
			     (neigh->dev != dev))) {
			spin_lock_irqsave(&priv->lock, flags);
			/*
			 * It's safe to call ipoib_put_ah() inside
			 * priv->lock here, because we know that
			 * path->ah will always hold one more reference,
			 * so ipoib_put_ah() will never do more than
			 * decrement the ref count.
			 */
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			list_del(&neigh->list);
			ipoib_neigh_free(dev, neigh);
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_path_lookup(skb, dev);
			return NETDEV_TX_OK;
		}

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				return NETDEV_TX_OK;
			}
		} else if (neigh->ah) {
			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return NETDEV_TX_OK;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock_irqsave(&priv->lock, flags);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
					   skb_dst(skb) ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   phdr->hwaddr + 4);
				dev_kfree_skb_any(skb);
				++dev->stats.tx_dropped;
				return NETDEV_TX_OK;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

	return NETDEV_TX_OK;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

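/*
 * header_ops->create: push the 4-byte IPoIB encapsulation header.  When
 * the skb has no neighbour to carry the destination, the full 20-byte
 * hardware address is pushed in front of the packet as a "pseudoheader"
 * (ipoib_setup() reserves room for it in hard_header_len) so that
 * ipoib_start_xmit() can still figure out where to send the packet.
 */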
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	neigh = *to_ipoib_neigh(n);
	if (neigh)
		priv = netdev_priv(neigh->dev);
	else
		return;
	ipoib_dbg(priv,
		  "neigh_cleanup for %06x %pI6\n",
		  IPOIB_QPN(n->ha),
		  n->ha + 4);

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh->ah)
		ah = neigh->ah;
	list_del(&neigh->list);
	ipoib_neigh_free(n->dev, neigh);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

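/*
 * ipoib_neigh lifecycle: ipoib_neigh_alloc() hangs the driver-private
 * state (AH, CM state, pending-skb queue) off the core struct neighbour;
 * ipoib_neigh_free() drops any queued skbs and the CM TX context;
 * ipoib_neigh_cleanup() above is wired up as the neigh_parms cleanup hook
 * by ipoib_neigh_setup_dev().
 */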
struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
				      struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	neigh->dev = dev;
	memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct sk_buff *skb;
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

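/*
 * Per-device init: allocate the RX ring with kzalloc() and the TX ring
 * with vzalloc(), both sized by the module parameters, then set up the IB
 * side via ipoib_ib_dev_init(); errors unwind in reverse.
 * ipoib_dev_cleanup() below releases child interfaces and both rings.
 */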
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static const struct header_ops ipoib_header_ops = {
	.create = ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops = {
	.ndo_open               = ipoib_open,
	.ndo_stop               = ipoib_stop,
	.ndo_change_mtu         = ipoib_change_mtu,
	.ndo_fix_features       = ipoib_fix_features,
	.ndo_start_xmit         = ipoib_start_xmit,
	.ndo_tx_timeout         = ipoib_timeout,
	.ndo_set_multicast_list = ipoib_set_mcast_list,
	.ndo_neigh_setup        = ipoib_neigh_setup_dev,
};

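/*
 * alloc_netdev() setup callback: install the netdev, header and ethtool
 * ops plus the NAPI context, configure the link as InfiniBand
 * (ARPHRD_INFINIBAND, 20-byte addresses, hard_header_len with room for
 * the pseudoheader, the IPv4 broadcast MGID from ipv4_bcast_addr), and
 * initialise the driver's lock, mutex, lists and work items.
 */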
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->netdev_ops = &ipoib_netdev_ops;
	dev->header_ops = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo = HZ;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = ipoib_sendq_size * 2;
	dev->features = (NETIF_F_VLAN_CHALLENGED |
			 NETIF_F_HIGHDMA);
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}

static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
			   "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);

int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	struct ib_device_attr *device_attr;
	int result = -ENOMEM;

	device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
	if (!device_attr) {
		printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
		       hca->name, sizeof *device_attr);
		return result;
	}

	result = ib_query_device(hca, device_attr);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
		       hca->name, result);
		kfree(device_attr);
		return result;
	}
	priv->hca_caps = device_attr->device_cap_flags;

	kfree(device_attr);

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_SG |
			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}

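/*
 * Create and register one "ib%d" net_device for a single HCA port: query
 * the port MTU, P_Key 0 and GID 0, set the full-membership bit in the
 * P_Key, build the hardware address from the port GID, initialise the
 * rings, register an IB event handler and the netdev, and finally create
 * the sysfs attributes.  Each failure label unwinds the steps before it.
 */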
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	if (!ib_query_port(hca, port, &attr))
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	if (ipoib_set_dev_features(priv, hca))
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND)
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		flush_workqueue(ipoib_workqueue);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

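/*
 * Module init: clamp the send/receive queue sizes to powers of two within
 * the supported range, register debugfs, create the driver's own
 * single-threaded workqueue (so it can be flushed during device removal
 * without the rtnl deadlock described in the comment below), and register
 * the SA client and the IB client that drives ipoib_add_one/remove_one.
 */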
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);