GitHub Repository: torvalds/linux
Path: blob/master/net/core/netdev-genl.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
#include <net/page_pool/memory_provider.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

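/* Dump state kept across netlink dump callbacks.  It lives in
 * netlink_callback->ctx (see netdev_dump_ctx() below) and records how far
 * the previous pass got, so the next pass can resume from the same device,
 * queue index or NAPI instance.
 */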
struct netdev_nl_dump_ctx {
	unsigned long	ifindex;
	unsigned int	rxq_idx;
	unsigned int	txq_idx;
	unsigned int	napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
	NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

	return (struct netdev_nl_dump_ctx *)cb->ctx;
}

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
		   const struct genl_info *info)
{
	u64 xsk_features = 0;
	u64 xdp_rx_meta = 0;
	void *hdr;

	netdev_assert_locked(netdev); /* note: rtnl_lock may not be held! */

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

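	/* Expand the x-macro list of XDP Rx metadata kfuncs and advertise a
	 * feature flag for every callback this driver implements.
	 */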
#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
	if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
		xdp_rx_meta |= flag;
	XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

	if (netdev->xsk_tx_metadata_ops) {
		if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
			xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
		if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
			xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
		if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time)
			xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO;
	}

	if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
			      netdev->xdp_features, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
			      xdp_rx_meta, NETDEV_A_DEV_PAD) ||
	    nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
			      xsk_features, NETDEV_A_DEV_PAD))
		goto err_cancel_msg;

	if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
		if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
				netdev->xdp_zc_max_segs))
			goto err_cancel_msg;
	}

	genlmsg_end(rsp, hdr);

	return 0;

err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

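/* Multicast a device notification to the NETDEV_NLGRP_MGMT group; bail out
 * early, before allocating anything, when nobody is subscribed.
 */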
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;

	if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
				NETDEV_NLGRP_MGMT))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (netdev_nl_dev_fill(netdev, ntf, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
				0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *netdev;
	struct sk_buff *rsp;
	u32 ifindex;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_free_msg;
	}

	err = netdev_nl_dev_fill(netdev, rsp, info);
	netdev_unlock(netdev);

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	struct net *net = sock_net(skb->sk);
	int err;

	for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
		err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
		if (err < 0)
			return err;
	}

	return 0;
}

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
			const struct genl_info *info)
{
	unsigned long irq_suspend_timeout;
	unsigned long gro_flush_timeout;
	u32 napi_defer_hard_irqs;
	void *hdr;
	pid_t pid;

	if (!napi->dev->up)
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
		goto nla_put_failure;

	if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
		goto nla_put_failure;

	if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
		goto nla_put_failure;

	if (nla_put_uint(rsp, NETDEV_A_NAPI_THREADED,
			 napi_get_threaded(napi)))
		goto nla_put_failure;

	if (napi->thread) {
		pid = task_pid_nr(napi->thread);
		if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
			goto nla_put_failure;
	}

	napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
	if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
			napi_defer_hard_irqs))
		goto nla_put_failure;

	irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
			 irq_suspend_timeout))
		goto nla_put_failure;

	gro_flush_timeout = napi_get_gro_flush_timeout(napi);
	if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
			 gro_flush_timeout))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	struct sk_buff *rsp;
	u32 napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_fill_one(rsp, napi, info);
		netdev_unlock(napi->dev);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	if (err) {
		goto err_free_msg;
	} else if (!rsp->len) {
		err = -ENOENT;
		goto err_free_msg;
	}

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

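/* The per-device napi_list is expected to be sorted by decreasing NAPI ID
 * (see the WARN_ON_ONCE() below).  ctx->napi_id remembers the last ID that
 * was dumped, so a restarted dump skips every entry already sent.
 */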
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			const struct genl_info *info,
			struct netdev_nl_dump_ctx *ctx)
{
	struct napi_struct *napi;
	unsigned int prev_id;
	int err = 0;

	if (!netdev->up)
		return err;

	prev_id = UINT_MAX;
	list_for_each_entry(napi, &netdev->napi_list, dev_list) {
		if (!napi_id_valid(napi->napi_id))
			continue;

		/* Dump continuation below depends on the list being sorted */
		WARN_ON_ONCE(napi->napi_id >= prev_id);
		prev_id = napi->napi_id;

		if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
			continue;

		err = netdev_nl_napi_fill_one(rsp, napi, info);
		if (err)
			return err;
		ctx->napi_id = napi->napi_id;
	}
	return err;
}

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_NAPI_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock(net, ifindex);
		if (netdev) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			netdev_unlock(netdev);
		} else {
			err = -ENODEV;
		}
	} else {
		for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
			err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->napi_id = 0;
		}
	}

	return err;
}

static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
	u64 irq_suspend_timeout = 0;
	u64 gro_flush_timeout = 0;
	u8 threaded = 0;
	u32 defer = 0;

	if (info->attrs[NETDEV_A_NAPI_THREADED]) {
		int ret;

		threaded = nla_get_uint(info->attrs[NETDEV_A_NAPI_THREADED]);
		ret = napi_set_threaded(napi, threaded);
		if (ret)
			return ret;
	}

	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
		napi_set_defer_hard_irqs(napi, defer);
	}

	if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
		irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
		napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
	}

	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
	}

	return 0;
}

int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct napi_struct *napi;
	unsigned int napi_id;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
		return -EINVAL;

	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

	napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
	if (napi) {
		err = netdev_nl_napi_set_config(napi, info);
		netdev_unlock(napi->dev);
	} else {
		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
		err = -ENOENT;
	}

	return err;
}

static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi)
{
	if (napi && napi_id_valid(napi->napi_id))
		return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id);
	return 0;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
			 u32 q_idx, u32 q_type, const struct genl_info *info)
{
	struct pp_memory_provider_params *params;
	struct netdev_rx_queue *rxq;
	struct netdev_queue *txq;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		rxq = __netif_get_rx_queue(netdev, q_idx);
		if (nla_put_napi_id(rsp, rxq->napi))
			goto nla_put_failure;

		params = &rxq->mp_params;
		if (params->mp_ops &&
		    params->mp_ops->nl_fill(params->mp_priv, rsp, rxq))
			goto nla_put_failure;
#ifdef CONFIG_XDP_SOCKETS
		if (rxq->pool)
			if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
				goto nla_put_failure;
#endif

		break;
	case NETDEV_QUEUE_TYPE_TX:
		txq = netdev_get_tx_queue(netdev, q_idx);
		if (nla_put_napi_id(rsp, txq->napi))
			goto nla_put_failure;
#ifdef CONFIG_XDP_SOCKETS
		if (txq->pool)
			if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK))
				goto nla_put_failure;
#endif
		break;
	}

	genlmsg_end(rsp, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
				    u32 q_type)
{
	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		if (q_id >= netdev->real_num_rx_queues)
			return -EINVAL;
		return 0;
	case NETDEV_QUEUE_TYPE_TX:
		if (q_id >= netdev->real_num_tx_queues)
			return -EINVAL;
	}
	return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
		     u32 q_type, const struct genl_info *info)
{
	int err;

	if (!netdev->up)
		return -ENOENT;

	err = netdev_nl_queue_validate(netdev, q_idx, q_type);
	if (err)
		return err;

	return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 q_id, q_type, ifindex;
	struct net_device *netdev;
	struct sk_buff *rsp;
	int err;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
		return -EINVAL;

	q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
	q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
	ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info),
						     ifindex);
	if (netdev) {
		err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
		netdev_unlock_ops_compat(netdev);
	} else {
		err = -ENODEV;
	}

	if (err)
		goto err_free_msg;

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
	return err;
}

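/* Walk the Rx queues and then the Tx queues; the indices in the dump
 * context advance only after a queue has been written successfully, so a
 * dump that runs out of skb space resumes from the queue that failed.
 */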
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	int err = 0;

	if (!netdev->up)
		return err;

	for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
					       NETDEV_QUEUE_TYPE_RX, info);
		if (err)
			return err;
	}
	for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
		err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
					       NETDEV_QUEUE_TYPE_TX, info);
		if (err)
			return err;
	}

	return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	u32 ifindex = 0;
	int err = 0;

	if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock_ops_compat(net, ifindex);
		if (netdev) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			netdev_unlock_ops_compat(netdev);
		} else {
			err = -ENODEV;
		}
	} else {
		for_each_netdev_lock_ops_compat_scoped(net, netdev,
						       ctx->ifindex) {
			err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
			if (err < 0)
				break;
			ctx->rxq_idx = 0;
			ctx->txq_idx = 0;
		}
	}

	return err;
}

#define NETDEV_STAT_NOT_SET	(~0ULL)

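/* Both queue-stats structures consist purely of u64 counters, so the sums
 * can be accumulated field by field, eight bytes at a time.  A counter set
 * to NETDEV_STAT_NOT_SET on either side means "not reported" and is left
 * untouched.
 */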
static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
	const u64 *add = _add;
	u64 *sum = _sum;

	while (size) {
		if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
			*sum += *add;
		sum++;
		add++;
		size -= 8;
	}
}

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
	if (value == NETDEV_STAT_NOT_SET)
		return 0;
	return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
		return -EMSGSIZE;
	return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
		return -EMSGSIZE;
	return 0;
}

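/* Per-queue stats are pre-filled with NETDEV_STAT_NOT_SET (all ones); if
 * the driver callback leaves the structure untouched, the half-built
 * message for that queue is cancelled rather than sent out empty.
 */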
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
		      u32 q_type, int i, const struct genl_info *info)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
	    nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
		goto nla_put_failure;

	switch (q_type) {
	case NETDEV_QUEUE_TYPE_RX:
		memset(&rx, 0xff, sizeof(rx));
		ops->get_queue_stats_rx(netdev, i, &rx);
		if (!memchr_inv(&rx, 0xff, sizeof(rx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_rx(rsp, &rx))
			goto nla_put_failure;
		break;
	case NETDEV_QUEUE_TYPE_TX:
		memset(&tx, 0xff, sizeof(tx));
		ops->get_queue_stats_tx(netdev, i, &tx);
		if (!memchr_inv(&tx, 0xff, sizeof(tx)))
			goto nla_cancel;
		if (netdev_nl_stats_write_tx(rsp, &tx))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(rsp, hdr);
	return 0;

nla_cancel:
	genlmsg_cancel(rsp, hdr);
	return 0;
nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
			 const struct genl_info *info,
			 struct netdev_nl_dump_ctx *ctx)
{
	const struct netdev_stat_ops *ops = netdev->stat_ops;
	int i, err;

	if (!(netdev->flags & IFF_UP))
		return 0;

	i = ctx->rxq_idx;
	while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
					    i, info);
		if (err)
			return err;
		ctx->rxq_idx = ++i;
	}
	i = ctx->txq_idx;
	while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
		err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
					    i, info);
		if (err)
			return err;
		ctx->txq_idx = ++i;
	}

	ctx->rxq_idx = 0;
	ctx->txq_idx = 0;
	return 0;
}

/**
 * netdev_stat_queue_sum() - add up queue stats from range of queues
 * @netdev: net_device
 * @rx_start: index of the first Rx queue to query
 * @rx_end: index after the last Rx queue (first *not* to query)
 * @rx_sum: output Rx stats, should be already initialized
 * @tx_start: index of the first Tx queue to query
 * @tx_end: index after the last Tx queue (first *not* to query)
 * @tx_sum: output Tx stats, should be already initialized
 *
 * Add stats from [start, end) range of queue IDs to *x_sum structs.
 * The sum structs must be already initialized. Usually this
 * helper is invoked from the .get_base_stats callbacks of drivers
 * to account for stats of disabled queues. In that case the ranges
 * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
 */
void netdev_stat_queue_sum(struct net_device *netdev,
			   int rx_start, int rx_end,
			   struct netdev_queue_stats_rx *rx_sum,
			   int tx_start, int tx_end,
			   struct netdev_queue_stats_tx *tx_sum)
{
	const struct netdev_stat_ops *ops;
	struct netdev_queue_stats_rx rx;
	struct netdev_queue_stats_tx tx;
	int i;

	ops = netdev->stat_ops;

	for (i = rx_start; i < rx_end; i++) {
		memset(&rx, 0xff, sizeof(rx));
		if (ops->get_queue_stats_rx)
			ops->get_queue_stats_rx(netdev, i, &rx);
		netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
	}
	for (i = tx_start; i < tx_end; i++) {
		memset(&tx, 0xff, sizeof(tx));
		if (ops->get_queue_stats_tx)
			ops->get_queue_stats_tx(netdev, i, &tx);
		netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
	}
}
EXPORT_SYMBOL(netdev_stat_queue_sum);

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
			  const struct genl_info *info)
{
	struct netdev_queue_stats_rx rx_sum;
	struct netdev_queue_stats_tx tx_sum;
	void *hdr;

	/* Netdev can't guarantee any complete counters */
	if (!netdev->stat_ops->get_base_stats)
		return 0;

	memset(&rx_sum, 0xff, sizeof(rx_sum));
	memset(&tx_sum, 0xff, sizeof(tx_sum));

	netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);

	/* The op was there, but nothing reported, don't bother */
	if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
	    !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;
	if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
		goto nla_put_failure;

	netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
			      0, netdev->real_num_tx_queues, &tx_sum);

	if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
	    netdev_nl_stats_write_tx(rsp, &tx_sum))
		goto nla_put_failure;

	genlmsg_end(rsp, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
			      struct sk_buff *skb, const struct genl_info *info,
			      struct netdev_nl_dump_ctx *ctx)
{
	if (!netdev->stat_ops)
		return 0;

	switch (scope) {
	case 0:
		return netdev_nl_stats_by_netdev(netdev, skb, info);
	case NETDEV_QSTATS_SCOPE_QUEUE:
		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
	}

	return -EINVAL;	/* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	unsigned int ifindex;
	unsigned int scope;
	int err = 0;

	scope = 0;
	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

	ifindex = 0;
	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

	if (ifindex) {
		netdev = netdev_get_by_index_lock_ops_compat(net, ifindex);
		if (!netdev) {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			return -ENODEV;
		}
		if (netdev->stat_ops) {
			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
							    info, ctx);
		} else {
			NL_SET_BAD_ATTR(info->extack,
					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
			err = -EOPNOTSUPP;
		}
		netdev_unlock_ops_compat(netdev);
		return err;
	}

	for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) {
		err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
						    info, ctx);
		if (err < 0)
			break;
	}

	return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
	struct net_devmem_dmabuf_binding *binding;
	u32 ifindex, dmabuf_fd, rxq_idx;
	struct netdev_nl_sock *priv;
	struct net_device *netdev;
	struct sk_buff *rsp;
	struct nlattr *attr;
	int rem, err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	mutex_lock(&priv->lock);

	err = 0;
	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_unlock_sock;
	}
	if (!netif_device_present(netdev))
		err = -ENODEV;
	else if (!netdev_need_ops_lock(netdev))
		err = -EOPNOTSUPP;
	if (err) {
		NL_SET_BAD_ATTR(info->extack,
				info->attrs[NETDEV_A_DEV_IFINDEX]);
		goto err_unlock;
	}

	binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd,
					 priv, info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock;
	}

	nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
			       genlmsg_data(info->genlhdr),
			       genlmsg_len(info->genlhdr), rem) {
		err = nla_parse_nested(
			tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
			netdev_queue_id_nl_policy, info->extack);
		if (err < 0)
			goto err_unbind;

		if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
		    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
			err = -EINVAL;
			goto err_unbind;
		}

		if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
			NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
			err = -EINVAL;
			goto err_unbind;
		}

		rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

		err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
						      info->extack);
		if (err)
			goto err_unbind;
	}

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	err = genlmsg_reply(rsp, info);
	if (err)
		goto err_unbind;

	netdev_unlock(netdev);

	mutex_unlock(&priv->lock);

	return 0;

err_unbind:
	net_devmem_unbind_dmabuf(binding);
err_unlock:
	netdev_unlock(netdev);
err_unlock_sock:
	mutex_unlock(&priv->lock);
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_nl_sock *priv;
	struct net_device *netdev;
	u32 ifindex, dmabuf_fd;
	struct sk_buff *rsp;
	int err = 0;
	void *hdr;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
	    GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD))
		return -EINVAL;

	ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
	dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

	priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp)
		return -ENOMEM;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_genlmsg_free;
	}

	mutex_lock(&priv->lock);

	netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
	if (!netdev) {
		err = -ENODEV;
		goto err_unlock_sock;
	}

	if (!netif_device_present(netdev)) {
		err = -ENODEV;
		goto err_unlock_netdev;
	}

	if (!netdev->netmem_tx) {
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(info->extack,
			       "Driver does not support netmem TX");
		goto err_unlock_netdev;
	}

	binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv,
					 info->extack);
	if (IS_ERR(binding)) {
		err = PTR_ERR(binding);
		goto err_unlock_netdev;
	}

	nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
	genlmsg_end(rsp, hdr);

	netdev_unlock(netdev);
	mutex_unlock(&priv->lock);

	return genlmsg_reply(rsp, info);

err_unlock_netdev:
	netdev_unlock(netdev);
err_unlock_sock:
	mutex_unlock(&priv->lock);
err_genlmsg_free:
	nlmsg_free(rsp);
	return err;
}

void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv)
{
	INIT_LIST_HEAD(&priv->bindings);
	mutex_init(&priv->lock);
}

void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_devmem_dmabuf_binding *temp;
	netdevice_tracker dev_tracker;
	struct net_device *dev;

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(binding, temp, &priv->bindings, list) {
		mutex_lock(&binding->lock);
		dev = binding->dev;
		if (!dev) {
			mutex_unlock(&binding->lock);
			net_devmem_unbind_dmabuf(binding);
			continue;
		}
		netdev_hold(dev, &dev_tracker, GFP_KERNEL);
		mutex_unlock(&binding->lock);

		netdev_lock(dev);
		net_devmem_unbind_dmabuf(binding);
		netdev_unlock(dev);
		netdev_put(dev, &dev_tracker);
	}
	mutex_unlock(&priv->lock);
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		netdev_lock_ops_to_full(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
		netdev_unlock_full_to_ops(netdev);
		break;
	case NETDEV_UNREGISTER:
		netdev_lock(netdev);
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
		netdev_unlock(netdev);
		break;
	case NETDEV_XDP_FEAT_CHANGE:
		netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
	.notifier_call = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
	int err;

	err = register_netdevice_notifier(&netdev_genl_nb);
	if (err)
		return err;

	err = genl_register_family(&netdev_nl_family);
	if (err)
		goto err_unreg_ntf;

	return 0;

err_unreg_ntf:
	unregister_netdevice_notifier(&netdev_genl_nb);
	return err;
}

subsys_initcall(netdev_genl_init);