GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/infiniband/core/sa_query.c
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
};

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};
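
/*
 * Usage sketch (illustrative, not part of the upstream file): the
 * generic ib_pack()/ib_unpack() helpers from <rdma/ib_pack.h> walk
 * tables like path_rec_table above. Each entry maps one struct member
 * to a bit range in the big-endian SA MAD payload, so marshalling a
 * record to or from wire format is a single call, roughly:
 *
 *	struct ib_sa_path_rec rec;
 *	u8 wire[IB_MGMT_SA_DATA];
 *
 *	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), &rec, wire);
 *	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), wire, &rec);
 */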

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
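
/*
 * Usage sketch (illustrative): a kernel consumer brackets all of its
 * SA queries with a registered client so that module unload can wait
 * for outstanding callbacks. The names my_sa_client, my_init and
 * my_exit below are invented for the example:
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	static int __init my_init(void)
 *	{
 *		ib_sa_register_client(&my_sa_client);
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		// Blocks until every query issued with my_sa_client
 *		// has completed or been canceled.
 *		ib_sa_unregister_client(&my_sa_client);
 *	}
 */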

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int force_grh;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;

	if (rec->hop_limit > 1 || force_grh) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
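
/*
 * Usage sketch (illustrative): once a path record has been resolved,
 * ib_init_ah_from_path() turns it into address handle attributes that
 * can be handed straight to ib_create_ah(). The pd and rec variables
 * are stand-ins for the example:
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_path(device, port_num, &rec, &ah_attr)) {
 *		ah = ib_create_ah(pd, &ah_attr);
 *		if (IS_ERR(ah))
 *			return PTR_ERR(ah);
 *	}
 */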

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;

retry:
	if (!idr_pre_get(&query_idr, gfp_mask))
		return -ENOMEM;
	spin_lock_irqsave(&idr_lock, flags);
	ret = idr_get_new(&query_idr, query, &id);
	spin_unlock_irqrestore(&idr_lock, flags);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
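
/*
 * Usage sketch (illustrative): resolving a path between two GIDs.
 * The callback is invoked from the MAD completion context and should
 * not block. my_client, my_sgid and my_dgid are stand-ins:
 *
 *	static void path_done(int status, struct ib_sa_path_rec *resp,
 *			      void *context)
 *	{
 *		if (!status)
 *			pr_info("path resolved, dlid 0x%x\n",
 *				be16_to_cpu(resp->dlid));
 *	}
 *
 *	struct ib_sa_path_rec rec = {
 *		.sgid      = my_sgid,
 *		.dgid      = my_dgid,
 *		.numb_path = 1,
 *	};
 *	struct ib_sa_query *sa_query;
 *	int id;
 *
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *				IB_SA_PATH_REC_NUMB_PATH,
 *				1000, GFP_KERNEL, path_done, NULL, &sa_query);
 *	if (id < 0)
 *		return id;
 *	// ...later, ib_sa_cancel_query(id, sa_query) can abort it.
 */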

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method      = method;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
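
/*
 * Note (illustrative): the @method argument selects the SA operation.
 * IB_MGMT_METHOD_SET registers a service record, IB_SA_METHOD_DELETE
 * unregisters it, and IB_MGMT_METHOD_GET looks one up; my_client,
 * rec, service_done and the component mask are stand-ins here:
 *
 *	ib_sa_service_rec_query(&my_client, device, port_num,
 *				IB_MGMT_METHOD_GET, &rec,
 *				IB_SA_SERVICE_REC_SERVICE_ID,
 *				1000, GFP_KERNEL, service_done, NULL,
 *				&sa_query);
 */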

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method      = method;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i)
		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
			update_sm_ah(&sa_dev->port[i].update_task);

	return;

err:
	while (--i >= 0)
		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
			ib_unregister_mad_agent(sa_dev->port[i].agent);

	kfree(sa_dev);

	return;
}

static void ib_sa_remove_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	return 0;
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);