Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/dcb/dcbnl.c
15109 views
1
/*
2
* Copyright (c) 2008-2011, Intel Corporation.
3
*
4
* This program is free software; you can redistribute it and/or modify it
5
* under the terms and conditions of the GNU General Public License,
6
* version 2, as published by the Free Software Foundation.
7
*
8
* This program is distributed in the hope it will be useful, but WITHOUT
9
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11
* more details.
12
*
13
* You should have received a copy of the GNU General Public License along with
14
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15
* Place - Suite 330, Boston, MA 02111-1307 USA.
16
*
17
* Author: Lucy Liu <[email protected]>
18
*/
19
20
#include <linux/netdevice.h>
21
#include <linux/netlink.h>
22
#include <linux/slab.h>
23
#include <net/netlink.h>
24
#include <net/rtnetlink.h>
25
#include <linux/dcbnl.h>
26
#include <net/dcbevent.h>
27
#include <linux/rtnetlink.h>
28
#include <net/sock.h>
29
30
/**
31
* Data Center Bridging (DCB) is a collection of Ethernet enhancements
32
* intended to allow network traffic with differing requirements
33
* (highly reliable, no drops vs. best effort vs. low latency) to operate
34
* and co-exist on Ethernet. Current DCB features are:
35
*
36
* Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
37
* framework for assigning bandwidth guarantees to traffic classes.
38
*
39
* Priority-based Flow Control (PFC) - provides a flow control mechanism which
40
* can work independently for each 802.1p priority.
41
*
42
* Congestion Notification - provides a mechanism for end-to-end congestion
43
* control for protocols which do not have built-in congestion management.
44
*
45
* More information about the emerging standards for these Ethernet features
46
* can be found at: http://www.ieee802.org/1/pages/dcbridges.html
47
*
48
* This file implements an rtnetlink interface to allow configuration of DCB
49
* features for capable devices.
50
*/
51
52
MODULE_AUTHOR("Lucy Liu, <[email protected]>");
53
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
54
MODULE_LICENSE("GPL");
55
56
/**************** DCB attribute policies *************************************/
57
58
/* DCB netlink attributes policy */
59
/* Top-level policy: validates the attributes of RTM_GETDCB/RTM_SETDCB
 * messages before dispatch to the per-command handlers. */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
74
75
/* DCB priority flow control to User Priority nested attributes */
76
/* One u8 per 802.1p user priority (0-7); _ALL is a flag requesting a
 * dump of every priority. */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
87
88
/* DCB priority grouping nested attributes */
89
/* Per-traffic-class nested parameter blocks plus per-bandwidth-group
 * u8 percentages; the _ALL entries request/carry all eight at once. */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
109
110
/* DCB traffic class nested attributes. */
111
/* Parameters of one traffic class inside a DCB_PG_ATTR_TC_* nest. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};
118
119
/* DCB capabilities nested attributes. */
120
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
131
132
/* DCB number of traffic classes nested attributes. */
133
/* Number of supported traffic classes, per feature (PG / PFC). */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
138
139
/* DCB BCN nested attributes. */
140
/* Backward Congestion Notification: per-priority rate-limiter enables
 * (u8, RP_0..RP_7) followed by u32 protocol parameters (BCNA..C). */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
167
168
/* DCB APP nested attributes. */
169
/* CEE application priority: selector type, protocol id, priority. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
174
175
/* IEEE 802.1Qaz nested attributes. */
176
/* IEEE 802.1Qaz nested attributes: ETS/PFC are fixed-size binary
 * structs (validated by length only), the app table is a nest. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
};
181
182
/* One entry of the IEEE app table: a raw struct dcb_app. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};
185
186
/* DCB feature configuration nested attributes. */
187
/* Per-feature (PG/PFC/APP) configuration flags. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
193
194
/* Presumably the backing store for dcb_getapp()/dcb_setapp() (defined
 * later in this file) -- TODO confirm against the rest of the file. */
static LIST_HEAD(dcb_app_list);
/* Serializes access to dcb_app_list. */
static DEFINE_SPINLOCK(dcb_lock);
196
197
/* standard netlink reply call */
198
/* Build and unicast a minimal DCB reply: a dcbmsg header plus a single
 * u8 attribute @attr carrying @value.  Shared by all simple handlers
 * that only report a status byte back to @pid.
 *
 * Returns 0 on success, negative errno otherwise.  The skb is freed
 * here on the construction-failure paths; rtnl_unicast() consumes it
 * in the success/send path.
 */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
                       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		return -EINVAL;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
	return ret;
}
233
234
/* RTM_GETDCB / DCB_CMD_GSTATE: report the device's DCB enable state.
 * DCB_ATTR_STATE is deliberately not required in the request (the
 * original check was commented out); only the driver op is mandatory.
 */
static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops->getstate)
		return -EINVAL;

	return dcbnl_reply(ops->getstate(netdev), RTM_GETDCB,
	                   DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
}
248
249
/* RTM_GETDCB / DCB_CMD_PFC_GCFG: dump the per-user-priority PFC enable
 * state.  The nested DCB_ATTR_PFC_CFG in the request selects which
 * priorities to report; DCB_PFC_UP_ATTR_ALL selects all eight.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		/* attribute index maps 1:1 onto user priority 0..7 */
		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
		                             &value);
		ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
315
316
/* RTM_GETDCB / DCB_CMD_GPERM_HWADDR: report the device's permanent MAC
 * address.  Returns 0 on success, -EINVAL on any failure.
 *
 * Fixes over the previous version:
 *  - perm_addr is zeroed first: the full MAX_ADDR_LEN buffer is copied
 *    into the reply, but drivers typically fill only the hardware
 *    address length, so uninitialized kernel stack bytes leaked to
 *    userspace.
 *  - the nla_put() return value is now checked instead of being
 *    silently overwritten, so a reply is never sent without the
 *    attribute it is supposed to carry.
 */
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
                                u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	u8 perm_addr[MAX_ADDR_LEN];
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GPERM_HWADDR;

	/* zero first: the whole buffer goes on the wire below */
	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
	              perm_addr);
	if (ret)
		goto nlmsg_failure;

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
356
357
/* RTM_GETDCB / DCB_CMD_GCAP: dump device DCB capabilities.  The nested
 * DCB_ATTR_CAP request selects individual capability ids, or
 * DCB_CAP_ATTR_ALL for everything.  Capabilities the driver's getcap
 * op rejects (non-zero return) are silently omitted from the reply.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
	                       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		/* getcap returning non-zero just skips this capability */
		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
422
423
/* RTM_GETDCB / DCB_CMD_GNUMTCS: report the number of traffic classes
 * supported per feature (PG, PFC).  Unlike dcbnl_getcap(), a driver
 * getnumtcs failure aborts the whole reply (goto err) and the driver's
 * status is returned to the caller.
 */
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -EINVAL;
		goto err_out;
	}

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest) {
		ret = -EINVAL;
		goto err;
	}

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				ret = -EINVAL;
				goto err;
			}
		} else {
			/* propagate the driver's error code as-is */
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}
500
501
/* RTM_SETDCB / DCB_CMD_SNUMTCS: set the number of traffic classes per
 * feature.  NOTE(review): on a driver setnumtcs failure the loop jumps
 * to operr and the netlink reply reports !!ret (1 = failure), but the
 * function's return value is then the dcbnl_reply() status, not the
 * driver error -- the failure is only visible in the reply's status
 * byte.  Preserved as-is.
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);

	if (ret) {
		ret = -EINVAL;
		goto err;
	}

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);

		/* first driver failure stops the loop; reported below */
		if (ret)
			goto operr;
	}

operr:
	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
	                  DCB_ATTR_NUMTCS, pid, seq, flags);

err:
	return ret;
}
539
540
/* RTM_GETDCB / DCB_CMD_PFC_GSTATE: report the global PFC enable state. */
static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops->getpfcstate)
		return -EINVAL;

	return dcbnl_reply(ops->getpfcstate(netdev), RTM_GETDCB,
	                   DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
	                   pid, seq, flags);
}
554
555
/* RTM_SETDCB / DCB_CMD_PFC_SSTATE: set the global PFC enable state. */
static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u8 state;

	if (!tb[DCB_ATTR_PFC_STATE] || !ops->setpfcstate)
		return -EINVAL;

	state = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
	ops->setpfcstate(netdev, state);

	/* setpfcstate returns nothing, so always report success (0) */
	return dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE,
	                   DCB_ATTR_PFC_STATE, pid, seq, flags);
}
573
574
/* RTM_GETDCB / DCB_CMD_GAPP: look up the priority for an application
 * identified by (selector idtype, protocol id) and echo the triple
 * back to the requester.  Falls back to the generic dcb_getapp() table
 * when the driver has no getapp op.
 *
 * Fix over the previous version: a failed rtnl_unicast() no longer
 * jumps to the kfree_skb() path.  rtnl_unicast()/nlmsg_unicast()
 * consumes the skb even on failure, so freeing it again here was a
 * double free.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	/* send this back */
	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
	if (!app_nest)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure -- do NOT
	 * free it again */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}
660
661
/* RTM_SETDCB / DCB_CMD_SAPP: set the priority for an application
 * (selector idtype, protocol id, priority up).  Uses the driver's
 * setapp op when present, otherwise the generic dcb_setapp() table.
 * The driver/table status is carried back in the reply's status byte.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	int err, ret = -EINVAL;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		err = dcb_setapp(netdev, &app);
	}

	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
	                  pid, seq, flags);
out:
	return ret;
}
708
709
/* Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 * @dir: 0 = Tx, 1 = Rx.
 *
 * Dumps per-traffic-class parameters (prio/pgid/bw%/up-mapping) and
 * per-bandwidth-group percentages, for the classes/groups selected by
 * the nested request (or all of them via the _ALL attributes).
 * Returns 0 on success, -EINVAL on any failure.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret = -EINVAL;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* TC_ALL's nested parameter selection applies to every
		 * class when present */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		/* emit only the parameters the request asked for */
		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
			                 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);

		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}
865
866
/* RTM_GETDCB / DCB_CMD_PGTX_GCFG: dump Tx priority-group config. */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}
871
872
/* RTM_GETDCB / DCB_CMD_PGRX_GCFG: dump Rx priority-group config. */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}
877
878
/* RTM_SETDCB / DCB_CMD_SSTATE: set the device's DCB enable state; the
 * driver's status byte is carried back to the caller in the reply. */
static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u8 state;

	if (!tb[DCB_ATTR_STATE] || !ops->setstate)
		return -EINVAL;

	state = nla_get_u8(tb[DCB_ATTR_STATE]);

	return dcbnl_reply(ops->setstate(netdev, state),
	                   RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
	                   pid, seq, flags);
}
895
896
/* RTM_SETDCB / DCB_CMD_PFC_SCFG: set per-user-priority PFC enables.
 * The driver op has no return value, so the reply always reports 0.
 */
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		goto err;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		/* the attribute's own type encodes the priority index */
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
	                  pid, seq, flags);
err:
	return ret;
}
926
927
/* RTM_SETDCB / DCB_CMD_SET_ALL: ask the driver to commit all pending
 * DCB configuration to hardware; its status goes back in the reply. */
static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!tb[DCB_ATTR_SET_ALL] || !ops->setall)
		return -EINVAL;

	return dcbnl_reply(ops->setall(netdev), RTM_SETDCB,
	                   DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL,
	                   pid, seq, flags);
}
940
941
/* Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 * @dir: 0 = Tx, 1 = Rx.
 *
 * Applies the per-traffic-class and per-bandwidth-group settings
 * present in the request; absent parameters are passed to the driver
 * as DCB_ATTR_VALUE_UNDEFINED.  The set ops have no return value, so
 * the reply always reports 0 once parsing succeeded.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			goto err;

		/* anything not supplied stays "undefined" for the driver */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
	                  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
	                  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}
1032
1033
/* RTM_SETDCB / DCB_CMD_PGTX_SCFG: apply Tx priority-group config. */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}
1038
1039
/* RTM_SETDCB / DCB_CMD_PGRX_SCFG: apply Rx priority-group config. */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}
1044
1045
/* RTM_GETDCB / DCB_CMD_BCN_GCFG: dump Backward Congestion Notification
 * state -- per-priority RP enables (u8) and protocol parameters (u32).
 * Returns 0 on success, -EINVAL on any failure.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to nlmsg_failure on overflow */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		/* the attribute enum value itself selects which config
		 * word the driver returns */
		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb, success or failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}
1127
1128
/* RTM_SETDCB / DCB_CMD_BCN_SCFG: apply BCN configuration -- per-
 * priority RP enables (u8) followed by u32 protocol parameters.
 *
 * Fix over the previous version: the nested DCB_ATTR_BCN payload was
 * being validated with dcbnl_pfc_up_nest (the PFC policy), which only
 * covers indices up to DCB_PFC_UP_ATTR_MAX -- every BCN attribute
 * above that was accepted unvalidated before nla_get_u32().  It is now
 * parsed with the matching dcbnl_bcn_nest policy.
 */
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN],
	                       dcbnl_bcn_nest);
	if (ret)
		goto err;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		/* the attribute's own type encodes the priority index */
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
		                             i, value_int);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
	                  pid, seq, flags);
err:
	return ret;
}
1168
1169
/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
 * be completed the entire msg is aborted and error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		goto err;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
	                       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		goto err;

	/* ETS first, then PFC, then the APP table; abort on first failure. */
	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			/* Skip any foreign attribute types inside the nest. */
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			/* Prefer the driver hook; fall back to the shared
			 * in-kernel app table (dcb_setapp). */
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Ack/nack the request; the reply carries the final err value. */
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
	            pid, seq, flags);
	return err;
}
1226
1227
/* Append the peer's APP table (as reported by the driver) to @skb.
 * @app_nested_type:  attribute type for the enclosing nest
 * @app_info_type:    attribute type for the dcb_peer_app_info header,
 *                    or 0 (DCB_ATTR_IEEE_APP_UNSPEC) to omit it
 * @app_entry_type:   attribute type for each dcb_app entry
 * Returns 0 on success (including when the driver reports no data),
 * -ENOMEM on allocation failure, or -EMSGSIZE if @skb runs out of room.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
                                int app_nested_type, int app_info_type,
                                int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;


	/**
	 * retrieve the peer app configuration form the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/**
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		/* NLA_PUT jumps to nla_put_failure on overflow. */
		if (app_info_type)
			NLA_PUT(skb, app_info_type, sizeof(info), &info);

		for (i = 0; i < app_count; i++)
			NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
			        &table[i]);

		nla_nest_end(skb, app);
	}
	/* Reached both on success and when the driver getters failed:
	 * per the comment above, a driver error is deliberately not
	 * propagated — the peer attributes are simply omitted. */
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1280
1281
/* Handle IEEE 802.1Qaz GET commands. */
static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
                          u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	/* NLMSG_NEW jumps to nlmsg_failure on failure; the NLA_PUT*
	 * macros below jump to nla_put_failure. */
	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	/* Local ETS/PFC state: a driver error just omits the attribute. */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
	}

	/* APP table entries for this device from the shared dcb_app_list. */
	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
			              &itr->app);
			if (err) {
				/* Drop the lock before the goto: the failure
				 * path must not run under dcb_lock. */
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}
	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
		                           DCB_ATTR_IEEE_PEER_APP,
		                           DCB_ATTR_IEEE_APP_UNSPEC,
		                           DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);
	nlmsg_end(skb, nlh);

	return rtnl_unicast(skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	/* NOTE(review): returns the historic bare -1 (== -EPERM), not a
	 * descriptive errno — confirm userspace tolerates this. */
	kfree_skb(skb);
	return -1;
}
1378
1379
/* DCBX configuration */
1380
/* Handle DCB_CMD_GDCBX: report the driver's current DCBX mode.  The mode
 * returned by ->getdcbx() travels back to userspace as the reply status. */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
                         u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops->getdcbx)
		return -EOPNOTSUPP;

	return dcbnl_reply(ops->getdcbx(netdev), RTM_GETDCB,
	                   DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
}
1393
1394
/* Handle DCB_CMD_SDCBX: program a new DCBX mode into the driver.  The
 * driver's return value is forwarded to userspace as the reply status. */
static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
                         u32 pid, u32 seq, u16 flags)
{
	u8 mode;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	mode = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, mode),
	                   RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
	                   pid, seq, flags);
}
1414
1415
/* Handle DCB_CMD_GFEATCFG: return the requested (or all, with
 * DCB_FEATCFG_ATTR_ALL) feature-config flags in a DCB_ATTR_FEATCFG nest. */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
	                       dcbnl_featcfg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -ENOBUFS;
		goto err_out;
	}

	/* NLMSG_NEW jumps to nlmsg_failure on failure.
	 * NOTE(review): on that path ret is still 0 (from the successful
	 * nla_parse_nested above), so the function frees the skb but
	 * reports success — confirm whether this is intended. */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GFEATCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
	if (!nest) {
		ret = -EMSGSIZE;
		goto nla_put_failure;
	}

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	/* Emit each requested feature flag; on ALL, emit every one. */
	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	return rtnl_unicast(dcbnl_skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(dcbnl_skb, nlh);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}
1483
1484
/* Handle DCB_CMD_SFEATCFG: program the feature-config flags carried in
 * the DCB_ATTR_FEATCFG nest into the driver, one attribute at a time.
 * Stops at the first driver error; always sends a reply carrying the
 * final status, and returns that status.
 */
static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	/* Fix: was -ENOTSUPP, a kernel-internal (NFSv3) errno that leaks
	 * to userspace; use -EOPNOTSUPP like dcbnl_getfeatcfg and every
	 * other handler in this file. */
	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
	                       dcbnl_featcfg_nest);

	if (ret)
		goto err;

	/* Apply each feature attribute present in the request. */
	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);

		if (ret)
			goto err;
	}
err:
	/* Ack/nack with the final status; the reply's own result is
	 * deliberately ignored — ret reflects the set operation. */
	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
	            pid, seq, flags);

	return ret;
}
1520
1521
/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
                         u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *cee;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	/* NLMSG_NEW jumps to nlmsg_failure on failure; the NLA_PUT*
	 * macros below jump to nla_put_failure. */
	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_CEE_GET;

	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* get peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
		                           DCB_ATTR_CEE_PEER_APP_TABLE,
		                           DCB_ATTR_CEE_PEER_APP_INFO,
		                           DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, cee);
	nlmsg_end(skb, nlh);

	return rtnl_unicast(skb, &init_net, pid);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	/* NOTE(review): bare -1 return, same pattern as dcbnl_ieee_get. */
	kfree_skb(skb);
	return -1;
}
1585
1586
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1587
{
1588
struct net *net = sock_net(skb->sk);
1589
struct net_device *netdev;
1590
struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1591
struct nlattr *tb[DCB_ATTR_MAX + 1];
1592
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1593
int ret = -EINVAL;
1594
1595
if (!net_eq(net, &init_net))
1596
return -EINVAL;
1597
1598
ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1599
dcbnl_rtnl_policy);
1600
if (ret < 0)
1601
return ret;
1602
1603
if (!tb[DCB_ATTR_IFNAME])
1604
return -EINVAL;
1605
1606
netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1607
if (!netdev)
1608
return -EINVAL;
1609
1610
if (!netdev->dcbnl_ops)
1611
goto errout;
1612
1613
switch (dcb->cmd) {
1614
case DCB_CMD_GSTATE:
1615
ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1616
nlh->nlmsg_flags);
1617
goto out;
1618
case DCB_CMD_PFC_GCFG:
1619
ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1620
nlh->nlmsg_flags);
1621
goto out;
1622
case DCB_CMD_GPERM_HWADDR:
1623
ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1624
nlh->nlmsg_flags);
1625
goto out;
1626
case DCB_CMD_PGTX_GCFG:
1627
ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1628
nlh->nlmsg_flags);
1629
goto out;
1630
case DCB_CMD_PGRX_GCFG:
1631
ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1632
nlh->nlmsg_flags);
1633
goto out;
1634
case DCB_CMD_BCN_GCFG:
1635
ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1636
nlh->nlmsg_flags);
1637
goto out;
1638
case DCB_CMD_SSTATE:
1639
ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1640
nlh->nlmsg_flags);
1641
goto out;
1642
case DCB_CMD_PFC_SCFG:
1643
ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1644
nlh->nlmsg_flags);
1645
goto out;
1646
1647
case DCB_CMD_SET_ALL:
1648
ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1649
nlh->nlmsg_flags);
1650
goto out;
1651
case DCB_CMD_PGTX_SCFG:
1652
ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1653
nlh->nlmsg_flags);
1654
goto out;
1655
case DCB_CMD_PGRX_SCFG:
1656
ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1657
nlh->nlmsg_flags);
1658
goto out;
1659
case DCB_CMD_GCAP:
1660
ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1661
nlh->nlmsg_flags);
1662
goto out;
1663
case DCB_CMD_GNUMTCS:
1664
ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1665
nlh->nlmsg_flags);
1666
goto out;
1667
case DCB_CMD_SNUMTCS:
1668
ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1669
nlh->nlmsg_flags);
1670
goto out;
1671
case DCB_CMD_PFC_GSTATE:
1672
ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1673
nlh->nlmsg_flags);
1674
goto out;
1675
case DCB_CMD_PFC_SSTATE:
1676
ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1677
nlh->nlmsg_flags);
1678
goto out;
1679
case DCB_CMD_BCN_SCFG:
1680
ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1681
nlh->nlmsg_flags);
1682
goto out;
1683
case DCB_CMD_GAPP:
1684
ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
1685
nlh->nlmsg_flags);
1686
goto out;
1687
case DCB_CMD_SAPP:
1688
ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
1689
nlh->nlmsg_flags);
1690
goto out;
1691
case DCB_CMD_IEEE_SET:
1692
ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
1693
nlh->nlmsg_flags);
1694
goto out;
1695
case DCB_CMD_IEEE_GET:
1696
ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
1697
nlh->nlmsg_flags);
1698
goto out;
1699
case DCB_CMD_GDCBX:
1700
ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1701
nlh->nlmsg_flags);
1702
goto out;
1703
case DCB_CMD_SDCBX:
1704
ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
1705
nlh->nlmsg_flags);
1706
goto out;
1707
case DCB_CMD_GFEATCFG:
1708
ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1709
nlh->nlmsg_flags);
1710
goto out;
1711
case DCB_CMD_SFEATCFG:
1712
ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
1713
nlh->nlmsg_flags);
1714
goto out;
1715
case DCB_CMD_CEE_GET:
1716
ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
1717
nlh->nlmsg_flags);
1718
goto out;
1719
default:
1720
goto errout;
1721
}
1722
errout:
1723
ret = -EINVAL;
1724
out:
1725
dev_put(netdev);
1726
return ret;
1727
}
1728
1729
/**
1730
* dcb_getapp - retrieve the DCBX application user priority
1731
*
1732
* On success returns a non-zero 802.1p user priority bitmap
1733
* otherwise returns 0 as the invalid user priority bitmap to
1734
* indicate an error.
1735
*/
1736
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1737
{
1738
struct dcb_app_type *itr;
1739
u8 prio = 0;
1740
1741
spin_lock(&dcb_lock);
1742
list_for_each_entry(itr, &dcb_app_list, list) {
1743
if (itr->app.selector == app->selector &&
1744
itr->app.protocol == app->protocol &&
1745
(strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
1746
prio = itr->app.priority;
1747
break;
1748
}
1749
}
1750
spin_unlock(&dcb_lock);
1751
1752
return prio;
1753
}
1754
EXPORT_SYMBOL(dcb_getapp);
1755
1756
/**
 * dcb_setapp - add dcb application data to app list
 * (doc fix: was mis-titled "ixgbe_dcbnl_setapp", a copy-paste from ixgbe)
 *
 * Priority 0 is the default priority this removes applications
 * from the app list if the priority is set to zero.
 *
 * Always notifies DCB_APP_EVENT listeners afterwards, even when no list
 * entry was added, replaced or removed.
 */
u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;

	/* Snapshot the request for the notifier call at the end. */
	memcpy(&event.name, dev->name, sizeof(event.name));
	memcpy(&event.app, new, sizeof(event.app));

	spin_lock(&dcb_lock);
	/* Search for existing match and replace */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
			if (new->priority)
				itr->app.priority = new->priority;
			else {
				/* Priority 0 deletes the entry. */
				list_del(&itr->list);
				kfree(itr);
			}
			goto out;
		}
	}
	/* App type does not exist add new application type */
	if (new->priority) {
		struct dcb_app_type *entry;
		/* GFP_ATOMIC: allocating under the dcb_lock spinlock. */
		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
		if (!entry) {
			spin_unlock(&dcb_lock);
			/* NOTE(review): return type is u8, so -ENOMEM is
			 * truncated to 244 here — callers comparing against
			 * negative errnos will not see a failure. */
			return -ENOMEM;
		}

		memcpy(&entry->app, new, sizeof(*new));
		strncpy(entry->name, dev->name, IFNAMSIZ);
		list_add(&entry->list, &dcb_app_list);
	}
out:
	spin_unlock(&dcb_lock);
	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return 0;
}
EXPORT_SYMBOL(dcb_setapp);
1804
1805
static void dcb_flushapp(void)
1806
{
1807
struct dcb_app_type *app;
1808
struct dcb_app_type *tmp;
1809
1810
spin_lock(&dcb_lock);
1811
list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1812
list_del(&app->list);
1813
kfree(app);
1814
}
1815
spin_unlock(&dcb_lock);
1816
}
1817
1818
/* Module init: prepare the application list, then register the DCB
 * handler for both rtnetlink message types. */
static int __init dcbnl_init(void)
{
	/* List must be usable before the handlers can be invoked. */
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);

	return 0;
}
module_init(dcbnl_init);
1828
1829
/* Module exit: unregister the rtnetlink handlers first so no new
 * requests arrive, then free all remaining application entries. */
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
1836
1837