GitHub Repository: torvalds/linux
Path: blob/master/net/core/net-sysfs.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <[email protected]>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

/* Caller holds RTNL, netdev->lock or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
	return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}

/* There is a possible ABBA deadlock between rtnl_lock and kernfs_node->active,
 * when unregistering a net device and accessing associated sysfs files. The
 * potential deadlock is as follows:
 *
 *         CPU 0                                         CPU 1
 *
 *    rtnl_lock                                   vfs_read
 *    unregister_netdevice_many                   kernfs_seq_start
 *    device_del / kobject_put                      kernfs_get_active (kn->active++)
 *    kernfs_drain                                sysfs_kf_seq_show
 *      wait_event(                                 rtnl_lock
 *         kn->active == KN_DEACTIVATED_BIAS)         -> waits on CPU 0 to release
 *      -> waits on CPU 1 to decrease kn->active         the rtnl lock.
 *
 * The historical fix was to use rtnl_trylock with restart_syscall to bail out
 * of sysfs operations when the lock couldn't be taken. This fixed the above
 * issue as it allowed CPU 1 to bail out of the ABBA situation.
 *
 * But it came with performance issues, as syscalls were being restarted in
 * loops when there was contention on the rtnl lock, with huge slowdowns in
 * specific scenarios (e.g. lots of virtual interfaces created and userspace
 * daemons querying their attributes).
 *
 * The idea below is to bail out of the active kernfs_node protection
 * (kn->active) while trying to take the rtnl lock.
 *
 * This replaces rtnl_lock() and still has to be used with rtnl_unlock(). The
 * net device is guaranteed to be alive if this returns successfully.
 */
static int sysfs_rtnl_lock(struct kobject *kobj, struct attribute *attr,
			   struct net_device *ndev)
{
	struct kernfs_node *kn;
	int ret = 0;

	/* First, we hold a reference to the net device as the unregistration
	 * path might run in parallel. This will ensure the net device and the
	 * associated sysfs objects won't be freed while we try to take the rtnl
	 * lock.
	 */
	dev_hold(ndev);
	/* sysfs_break_active_protection was introduced to allow self-removal of
	 * devices and their associated sysfs files by bailing out of the
	 * sysfs/kernfs protection. We do this here to allow the unregistration
	 * path to complete in parallel. The following takes a reference on the
	 * kobject and the kernfs_node being accessed.
	 *
	 * This works because we hold a reference onto the net device and the
	 * unregistration path will wait for us eventually in netdev_run_todo
	 * (outside an rtnl lock section).
	 */
	kn = sysfs_break_active_protection(kobj, attr);
	/* We can now try to take the rtnl lock. This can't deadlock us as the
	 * unregistration path is able to drain sysfs files (kernfs_node) thanks
	 * to the above dance.
	 */
	if (rtnl_lock_interruptible()) {
		ret = -ERESTARTSYS;
		goto unbreak;
	}
	/* Check dismantle on the device hasn't started, otherwise deny the
	 * operation.
	 */
	if (!dev_isalive(ndev)) {
		rtnl_unlock();
		ret = -ENODEV;
		goto unbreak;
	}
	/* We are now sure the device dismantle hasn't started nor that it can
	 * start before we exit the locking section as we hold the rtnl lock.
	 * There's no need to keep unbreaking the sysfs protection nor to hold
	 * a net device reference from that point; that was only needed to take
	 * the rtnl lock.
	 */
unbreak:
	sysfs_unbreak_active_protection(kn);
	dev_put(ndev);

	return ret;
}
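
/* Editorial sketch, not part of the upstream file: the calling pattern that
 * sysfs_rtnl_lock() expects from an attribute handler, mirroring the show and
 * store callbacks further down in this file.  The handler name is hypothetical.
 *
 *	static ssize_t example_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct net_device *netdev = to_net_dev(dev);
 *		int ret;
 *
 *		ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
 *		if (ret)
 *			return ret;	// -ERESTARTSYS or -ENODEV
 *
 *		ret = sysfs_emit(buf, "%d\n", 0);	// work under RTNL
 *		rtnl_unlock();		// the caller still releases RTNL itself
 *		return ret;
 *	}
 */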

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	rcu_read_unlock();

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sysfs_emit(buf, format_string, READ_ONCE(dev->field));	\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
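
/* Illustrative expansion (editorial note, not in the upstream source):
 * NETDEVICE_SHOW_RW(mtu, fmt_dec) further down becomes, roughly,
 *
 *	static ssize_t format_mtu(const struct net_device *dev, char *buf)
 *	{
 *		return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->mtu));
 *	}
 *	static ssize_t mtu_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *	static DEVICE_ATTR_RW(mtu);	// defines dev_attr_mtu, wired to
 *					// mtu_show and mtu_store
 */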

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		goto err;

	ret = (*set)(netdev, new);
	if (ret == 0)
		ret = len;

	rtnl_unlock();
err:
	return ret;
}

/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */
static ssize_t
netdev_lock_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t len,
		  int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;

	netdev_lock(netdev);

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	netdev_unlock(netdev);

	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's (netif_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	down_read(&dev_addr_sem);

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	rcu_read_unlock();

	up_read(&dev_addr_sem);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	int ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	rcu_read_unlock();
	return ret;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the locking section in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		/* Synchronize carrier state with link watch,
		 * see also rtnl_getlink().
		 */
		linkwatch_sync_dev(netdev);

		ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}

	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sysfs_emit(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the locking section below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sysfs_emit(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	operstate = READ_ONCE(netdev->operstate);
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sysfs_emit(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_up_count) +
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	netdev_set_gro_flush_timeout(dev, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	if (val > S32_MAX)
		return -ERANGE;

	netdev_set_defer_hard_irqs(dev, (u32)val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len,
				 change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_uint);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_set_alias(netdev, buf, count);
	if (ret < 0)
		goto err;
	ret = len;
	netdev_state_change(netdev);
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sysfs_emit(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid;
	ssize_t ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_id(netdev, &ppid);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	char name[IFNAMSIZ];
	ssize_t ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = dev_get_phys_port_name(netdev, name, sizeof(name));
	if (!ret)
		ret = sysfs_emit(buf, "%s\n", name);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netdev_phys_item_id ppid = { };
	ssize_t ret;

	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = netif_get_port_parent_id(netdev, &ppid, false);
	if (!ret)
		ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id);

	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *netdev_phys_attrs[] __ro_after_init = {
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	NULL,
};

static umode_t netdev_phys_is_visible(struct kobject *kobj,
				      struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct net_device *netdev = to_net_dev(dev);

	if (attr == &dev_attr_phys_port_id.attr) {
		if (!netdev->netdev_ops->ndo_get_phys_port_id)
			return 0;
	} else if (attr == &dev_attr_phys_port_name.attr) {
		if (!netdev->netdev_ops->ndo_get_phys_port_name &&
		    !netdev->devlink_port)
			return 0;
	} else if (attr == &dev_attr_phys_switch_id.attr) {
		if (!netdev->netdev_ops->ndo_get_port_parent_id &&
		    !netdev->devlink_port)
			return 0;
	}

	return attr->mode;
}

static const struct attribute_group netdev_phys_group = {
	.attrs = netdev_phys_attrs,
	.is_visible = netdev_phys_is_visible,
};

static ssize_t threaded_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();

	if (dev_isalive(netdev))
		ret = sysfs_emit(buf, fmt_dec, READ_ONCE(netdev->threaded));

	rcu_read_unlock();

	return ret;
}

static int modify_napi_threaded(struct net_device *dev, unsigned long val)
{
	int ret;

	if (list_empty(&dev->napi_list))
		return -EOPNOTSUPP;

	if (val != 0 && val != 1)
		return -EOPNOTSUPP;

	ret = netif_set_threaded(dev, val);

	return ret;
}

static ssize_t threaded_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return netdev_lock_store(dev, attr, buf, len, modify_napi_threaded);
}
static DEVICE_ATTR_RW(threaded);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	&dev_attr_threaded.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	rcu_read_lock();
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sysfs_emit(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	rcu_read_unlock();
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
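
/* Illustrative expansion (editorial note, not in the upstream source):
 * NETSTAT_ENTRY(rx_packets) below becomes, roughly,
 *
 *	static ssize_t rx_packets_show(struct device *d,
 *				       struct device_attribute *attr, char *buf)
 *	{
 *		return netstat_show(d, attr, buf,
 *				    offsetof(struct rtnl_link_stats64, rx_packets));
 *	}
 *	static DEVICE_ATTR_RO(rx_packets);
 *
 * i.e. each statistics file reads one u64 field of rtnl_link_stats64 at a
 * fixed byte offset.
 */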

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};

static bool wireless_group_needed(struct net_device *ndev)
{
#if IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		return true;
#endif
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	if (ndev->wireless_handlers)
		return true;
#endif
	return false;
}

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = sysfs_emit(buf, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue,
					cpumask_var_t mask)
{
	static DEFINE_MUTEX(rps_map_mutex);
	struct rps_map *old_map, *map;
	int cpu, i;

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);
	return 0;
}

int rps_cpumask_housekeeping(struct cpumask *mask)
{
	if (!cpumask_empty(mask)) {
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_DOMAIN));
		cpumask_and(mask, mask, housekeeping_cpumask(HK_TYPE_WQ));
		if (cpumask_empty(mask))
			return -EINVAL;
	}
	return 0;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err)
		goto out;

	err = rps_cpumask_housekeeping(mask);
	if (err)
		goto out;

	err = netdev_rx_queue_set_rps_mask(queue, mask);

out:
	free_cpumask_var(mask);
	return err ? : len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = 1UL << flow_table->log;
	rcu_read_unlock();

	return sysfs_emit(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->log = ilog2(mask) + 1;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
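
/* Worked example (editorial note, not from the original source): writing
 * "100" to rps_flow_cnt gives count = 100, so mask starts at 99 (0b1100011)
 * and the bit-smearing loop above turns it into 127 (0b1111111).  The table
 * is then sized for mask + 1 = 128 flows and table->log = ilog2(127) + 1 = 7,
 * i.e. the count actually used is rounded up to the next power of two.
 */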

static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	netdev_put(queue->dev, &queue->dev_tracker);
}

static const void *rx_queue_namespace(const struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->namespace)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(const struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static const struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_default_mask(struct net_device *dev,
				 struct netdev_rx_queue *queue)
{
#if IS_ENABLED(CONFIG_RPS) && IS_ENABLED(CONFIG_SYSCTL)
	struct cpumask *rps_default_mask;
	int res = 0;

	mutex_lock(&rps_default_mask_mutex);

	rps_default_mask = dev_net(dev)->core.rps_default_mask;
	if (rps_default_mask && !cpumask_empty(rps_default_mask))
		res = netdev_rx_queue_set_rps_mask(queue, rps_default_mask);

	mutex_unlock(&rps_default_mask_mutex);

	return res;
#else
	return 0;
#endif
}

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* Rx queues are cleared in rx_queue_release to allow later
	 * re-registration. This is triggered when their kobj refcount is
	 * dropped.
	 *
	 * If a queue is removed while both a read (or write) operation and
	 * the re-addition of the same queue are pending (waiting on rtnl_lock),
	 * it might happen that the re-addition will execute before the read,
	 * making the initial removal never happen (the queue's kobj refcount
	 * won't drop enough because of the pending read). In such a rare case,
	 * return to allow the removal operation to complete.
	 */
	if (unlikely(kobj->state_initialized)) {
		netdev_warn_once(dev, "Cannot re-add rx queues before their removal completed");
		return -EAGAIN;
	}

	/* Kobject_put later will trigger rx_queue_release call which
	 * decreases dev refcount: Take that reference here
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	queue->groups = rx_queue_default_groups;
	error = sysfs_create_groups(kobj, queue->groups);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err_default_groups;
	}

	error = rx_queue_default_mask(dev, queue);
	if (error)
		goto err_default_groups;

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err_default_groups:
	sysfs_remove_groups(kobj, queue->groups);
err:
	kobject_put(kobj);
	return error;
}

static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_rx_queue *queue = &dev->_rx[i];
		struct kobject *kobj = &queue->kobj;

		if (!refcount_read(&dev_net(dev)->ns.count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		sysfs_remove_groups(kobj, queue->groups);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			struct netdev_queue *queue, char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 struct netdev_queue *queue, const char *buf,
			 size_t len);
};
#define to_netdev_queue_attr(_attr) \
	container_of(_attr, struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(kobj, attr, queue, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	const struct netdev_queue_attribute *attribute
		= to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(kobj, attr, queue, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t tx_timeout_show(struct kobject *kobj, struct attribute *attr,
			       struct netdev_queue *queue, char *buf)
{
	unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);

	return sysfs_emit(buf, fmt_ulong, trans_timeout);
}

static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t traffic_class_show(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct net_device *dev = queue->dev;
	int num_tc, tc, index, ret;

	if (!netif_is_multiqueue(dev))
		return -ENOENT;

	ret = sysfs_rtnl_lock(kobj, attr, queue->dev);
	if (ret)
		return ret;

	index = get_netdev_queue_index(queue);

	/* If queue belongs to subordinate dev use its TC mapping */
	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

	num_tc = dev->num_tc;
	tc = netdev_txq_to_tc(dev, index);

	rtnl_unlock();

	if (tc < 0)
		return -EINVAL;

	/* We can report the traffic class one of two ways:
	 * Subordinate device traffic classes are reported with the traffic
	 * class first, and then the subordinate class so for example TC0 on
	 * subordinate device 2 will be reported as "0-2". If the queue
	 * belongs to the root device it will be reported with just the
	 * traffic class, so just "0" for TC 0 for example.
	 */
	return num_tc < 0 ? sysfs_emit(buf, "%d%d\n", tc, num_tc) :
			    sysfs_emit(buf, "%d\n", tc);
}

#ifdef CONFIG_XPS
static ssize_t tx_maxrate_show(struct kobject *kobj, struct attribute *attr,
			       struct netdev_queue *queue, char *buf)
{
	return sysfs_emit(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t tx_maxrate_store(struct kobject *kobj, struct attribute *attr,
				struct netdev_queue *queue, const char *buf,
				size_t len)
{
	int err, index = get_netdev_queue_index(queue);
	struct net_device *dev = queue->dev;
	u32 rate = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* The check is also done later; this helps returning early without
	 * hitting the locking section below.
	 */
	if (!dev->netdev_ops->ndo_set_tx_maxrate)
		return -EOPNOTSUPP;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	err = sysfs_rtnl_lock(kobj, attr, dev);
	if (err)
		return err;

	err = -EOPNOTSUPP;
	netdev_lock_ops(dev);
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);
	netdev_unlock_ops(dev);

	if (!err) {
		queue->tx_maxrate = rate;
		rtnl_unlock();
		return len;
	}

	rtnl_unlock();
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
	= __ATTR_RW(tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
	= __ATTR_RO(tx_timeout);

static struct netdev_queue_attribute queue_traffic_class __ro_after_init
	= __ATTR_RO(traffic_class);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, const char *buf,
				 size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
	= __ATTR(hold_time, 0644,
		 bql_show_hold_time, bql_set_hold_time);

static ssize_t bql_show_stall_thrs(struct kobject *kobj, struct attribute *attr,
				   struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs));
}

static ssize_t bql_set_stall_thrs(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, const char *buf,
				  size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	value = msecs_to_jiffies(value);
	if (value && (value < 4 || value > 4 / 2 * BITS_PER_LONG))
		return -ERANGE;

	if (!dql->stall_thrs && value)
		dql->last_reap = jiffies;
	/* Force last_reap to be live */
	smp_wmb();
	dql->stall_thrs = value;

	return len;
}

static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init =
	__ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs);

static ssize_t bql_show_stall_max(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max));
}

static ssize_t bql_set_stall_max(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, const char *buf,
				 size_t len)
{
	WRITE_ONCE(queue->dql.stall_max, 0);
	return len;
}

static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init =
	__ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max);

static ssize_t bql_show_stall_cnt(struct kobject *kobj, struct attribute *attr,
				  struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%lu\n", dql->stall_cnt);
}

static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init =
	__ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL);

static ssize_t bql_show_inflight(struct kobject *kobj, struct attribute *attr,
				 struct netdev_queue *queue, char *buf)
{
	struct dql *dql = &queue->dql;

	return sysfs_emit(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
	__ATTR(inflight, 0444, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct kobject *kobj,			\
				 struct attribute *attr,		\
				 struct netdev_queue *queue, char *buf)	\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct kobject *kobj,			\
				struct attribute *attr,			\
				struct netdev_queue *queue,		\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
	= __ATTR(NAME, 0644,						\
		 bql_show_ ## NAME, bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);
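
/* Illustrative expansion (editorial note, not part of the upstream file):
 * BQL_ATTR(limit, limit) above becomes, roughly,
 *
 *	static ssize_t bql_show_limit(struct kobject *kobj,
 *				      struct attribute *attr,
 *				      struct netdev_queue *queue, char *buf)
 *	{
 *		return bql_show(buf, queue->dql.limit);
 *	}
 *
 *	static ssize_t bql_set_limit(struct kobject *kobj,
 *				     struct attribute *attr,
 *				     struct netdev_queue *queue,
 *				     const char *buf, size_t len)
 *	{
 *		return bql_set(buf, len, &queue->dql.limit);
 *	}
 *
 *	static struct netdev_queue_attribute bql_limit_attribute __ro_after_init
 *		= __ATTR(limit, 0644, bql_show_limit, bql_set_limit);
 */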
1688
1689
static struct attribute *dql_attrs[] __ro_after_init = {
1690
&bql_limit_attribute.attr,
1691
&bql_limit_max_attribute.attr,
1692
&bql_limit_min_attribute.attr,
1693
&bql_hold_time_attribute.attr,
1694
&bql_inflight_attribute.attr,
1695
&bql_stall_thrs_attribute.attr,
1696
&bql_stall_cnt_attribute.attr,
1697
&bql_stall_max_attribute.attr,
1698
NULL
1699
};
1700
1701
static const struct attribute_group dql_group = {
1702
.name = "byte_queue_limits",
1703
.attrs = dql_attrs,
1704
};
1705
#else
1706
/* Fake declaration, all the code using it should be dead */
1707
static const struct attribute_group dql_group = {};
1708
#endif /* CONFIG_BQL */
1709
1710
#ifdef CONFIG_XPS
1711
static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
1712
int tc, char *buf, enum xps_map_type type)
1713
{
1714
struct xps_dev_maps *dev_maps;
1715
unsigned long *mask;
1716
unsigned int nr_ids;
1717
int j, len;
1718
1719
rcu_read_lock();
1720
dev_maps = rcu_dereference(dev->xps_maps[type]);
1721
1722
/* Default to nr_cpu_ids/dev->num_rx_queues and do not just return 0
1723
* when dev_maps hasn't been allocated yet, to be backward compatible.
1724
*/
1725
nr_ids = dev_maps ? dev_maps->nr_ids :
1726
(type == XPS_CPUS ? nr_cpu_ids : dev->num_rx_queues);
1727
1728
mask = bitmap_zalloc(nr_ids, GFP_NOWAIT);
1729
if (!mask) {
1730
rcu_read_unlock();
1731
return -ENOMEM;
1732
}
1733
1734
if (!dev_maps || tc >= dev_maps->num_tc)
1735
goto out_no_maps;
1736
1737
for (j = 0; j < nr_ids; j++) {
1738
int i, tci = j * dev_maps->num_tc + tc;
1739
struct xps_map *map;
1740
1741
map = rcu_dereference(dev_maps->attr_map[tci]);
1742
if (!map)
1743
continue;
1744
1745
for (i = map->len; i--;) {
1746
if (map->queues[i] == index) {
1747
__set_bit(j, mask);
1748
break;
1749
}
1750
}
1751
}
1752
out_no_maps:
1753
rcu_read_unlock();
1754
1755
len = bitmap_print_to_pagebuf(false, buf, mask, nr_ids);
1756
bitmap_free(mask);
1757
1758
return len < PAGE_SIZE ? len : -EINVAL;
1759
}
1760
1761
static ssize_t xps_cpus_show(struct kobject *kobj, struct attribute *attr,
1762
struct netdev_queue *queue, char *buf)
1763
{
1764
struct net_device *dev = queue->dev;
1765
unsigned int index;
1766
int len, tc, ret;
1767
1768
if (!netif_is_multiqueue(dev))
1769
return -ENOENT;
1770
1771
index = get_netdev_queue_index(queue);
1772
1773
ret = sysfs_rtnl_lock(kobj, attr, queue->dev);
1774
if (ret)
1775
return ret;
1776
1777
/* If queue belongs to subordinate dev use its map */
1778
dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1779
1780
tc = netdev_txq_to_tc(dev, index);
1781
if (tc < 0) {
1782
rtnl_unlock();
1783
return -EINVAL;
1784
}
1785
1786
/* Increase the net device refcnt to make sure it won't be freed while
1787
* xps_queue_show is running.
1788
*/
1789
dev_hold(dev);
1790
rtnl_unlock();
1791
1792
len = xps_queue_show(dev, index, tc, buf, XPS_CPUS);
1793
1794
dev_put(dev);
1795
return len;
1796
}
1797
1798
static ssize_t xps_cpus_store(struct kobject *kobj, struct attribute *attr,
1799
struct netdev_queue *queue, const char *buf,
1800
size_t len)
1801
{
1802
struct net_device *dev = queue->dev;
1803
unsigned int index;
1804
cpumask_var_t mask;
1805
int err;
1806
1807
if (!netif_is_multiqueue(dev))
1808
return -ENOENT;
1809
1810
if (!capable(CAP_NET_ADMIN))
1811
return -EPERM;
1812
1813
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1814
return -ENOMEM;
1815
1816
index = get_netdev_queue_index(queue);
1817
1818
err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1819
if (err) {
1820
free_cpumask_var(mask);
1821
return err;
1822
}
1823
1824
err = sysfs_rtnl_lock(kobj, attr, dev);
1825
if (err) {
1826
free_cpumask_var(mask);
1827
return err;
1828
}
1829
1830
err = netif_set_xps_queue(dev, mask, index);
1831
rtnl_unlock();
1832
1833
free_cpumask_var(mask);
1834
1835
return err ? : len;
1836
}
1837
1838
static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
1839
= __ATTR_RW(xps_cpus);
1840
1841
static ssize_t xps_rxqs_show(struct kobject *kobj, struct attribute *attr,
1842
struct netdev_queue *queue, char *buf)
1843
{
1844
struct net_device *dev = queue->dev;
1845
unsigned int index;
1846
int tc, ret;
1847
1848
index = get_netdev_queue_index(queue);
1849
1850
ret = sysfs_rtnl_lock(kobj, attr, dev);
1851
if (ret)
1852
return ret;
1853
1854
tc = netdev_txq_to_tc(dev, index);
1855
1856
/* Increase the net device refcnt to make sure it won't be freed while
1857
* xps_queue_show is running.
1858
*/
1859
dev_hold(dev);
1860
rtnl_unlock();
1861
1862
ret = tc >= 0 ? xps_queue_show(dev, index, tc, buf, XPS_RXQS) : -EINVAL;
1863
dev_put(dev);
1864
return ret;
1865
}
1866
1867
static ssize_t xps_rxqs_store(struct kobject *kobj, struct attribute *attr,
1868
struct netdev_queue *queue, const char *buf,
1869
size_t len)
1870
{
1871
struct net_device *dev = queue->dev;
1872
struct net *net = dev_net(dev);
1873
unsigned long *mask;
1874
unsigned int index;
1875
int err;
1876
1877
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1878
return -EPERM;
1879
1880
mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
1881
if (!mask)
1882
return -ENOMEM;
1883
1884
index = get_netdev_queue_index(queue);
1885
1886
err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
1887
if (err) {
1888
bitmap_free(mask);
1889
return err;
1890
}
1891
1892
err = sysfs_rtnl_lock(kobj, attr, dev);
1893
if (err) {
1894
bitmap_free(mask);
1895
return err;
1896
}
1897
1898
cpus_read_lock();
1899
err = __netif_set_xps_queue(dev, mask, index, XPS_RXQS);
1900
cpus_read_unlock();
1901
1902
rtnl_unlock();
1903
1904
bitmap_free(mask);
1905
return err ? : len;
1906
}
1907
1908
static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
1909
= __ATTR_RW(xps_rxqs);
1910
#endif /* CONFIG_XPS */
1911
1912
static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
1913
&queue_trans_timeout.attr,
1914
&queue_traffic_class.attr,
1915
#ifdef CONFIG_XPS
1916
&xps_cpus_attribute.attr,
1917
&xps_rxqs_attribute.attr,
1918
&queue_tx_maxrate.attr,
1919
#endif
1920
NULL
1921
};
1922
ATTRIBUTE_GROUPS(netdev_queue_default);
1923
1924
static void netdev_queue_release(struct kobject *kobj)
1925
{
1926
struct netdev_queue *queue = to_netdev_queue(kobj);
1927
1928
memset(kobj, 0, sizeof(*kobj));
1929
netdev_put(queue->dev, &queue->dev_tracker);
1930
}
1931
1932
static const void *netdev_queue_namespace(const struct kobject *kobj)
1933
{
1934
struct netdev_queue *queue = to_netdev_queue(kobj);
1935
struct device *dev = &queue->dev->dev;
1936
const void *ns = NULL;
1937
1938
if (dev->class && dev->class->namespace)
1939
ns = dev->class->namespace(dev);
1940
1941
return ns;
1942
}
1943
1944
static void netdev_queue_get_ownership(const struct kobject *kobj,
1945
kuid_t *uid, kgid_t *gid)
1946
{
1947
const struct net *net = netdev_queue_namespace(kobj);
1948
1949
net_ns_get_ownership(net, uid, gid);
1950
}
1951
1952
static const struct kobj_type netdev_queue_ktype = {
1953
.sysfs_ops = &netdev_queue_sysfs_ops,
1954
.release = netdev_queue_release,
1955
.namespace = netdev_queue_namespace,
1956
.get_ownership = netdev_queue_get_ownership,
1957
};
1958
1959
static bool netdev_uses_bql(const struct net_device *dev)
1960
{
1961
if (dev->lltx || (dev->priv_flags & IFF_NO_QUEUE))
1962
return false;
1963
1964
return IS_ENABLED(CONFIG_BQL);
1965
}
1966
1967
static int netdev_queue_add_kobject(struct net_device *dev, int index)
1968
{
1969
struct netdev_queue *queue = dev->_tx + index;
1970
struct kobject *kobj = &queue->kobj;
1971
int error = 0;
1972
1973
/* Tx queues are cleared in netdev_queue_release to allow later
1974
* re-registration. This is triggered when their kobj refcount is
1975
* dropped.
1976
*
1977
* If a queue is removed while both a read (or write) operation and a
1978
* the re-addition of the same queue are pending (waiting on rntl_lock)
1979
* it might happen that the re-addition will execute before the read,
1980
* making the initial removal to never happen (queue's kobj refcount
1981
* won't drop enough because of the pending read). In such rare case,
1982
* return to allow the removal operation to complete.
1983
*/
1984
if (unlikely(kobj->state_initialized)) {
1985
netdev_warn_once(dev, "Cannot re-add tx queues before their removal completed");
1986
return -EAGAIN;
1987
}
1988
1989
	/* A later kobject_put() will trigger the netdev_queue_release() call,
	 * which drops the dev refcount: take that reference here.
	 */
	netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto err;

	queue->groups = netdev_queue_default_groups;
	error = sysfs_create_groups(kobj, queue->groups);
	if (error)
		goto err;

	if (netdev_uses_bql(dev)) {
		error = sysfs_create_group(kobj, &dql_group);
		if (error)
			goto err_default_groups;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	return 0;

err_default_groups:
	sysfs_remove_groups(kobj, queue->groups);
err:
	kobject_put(kobj);
	return error;
}

static int tx_queue_change_owner(struct net_device *ndev, int index,
				 kuid_t kuid, kgid_t kgid)
{
	struct netdev_queue *queue = ndev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (netdev_uses_bql(ndev))
		error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

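/* Grow or shrink the set of Tx queue kobjects from @old_num to @new_num.
 * On failure while growing, the queues added so far are rolled back and
 * the error is returned.
 */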
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	/* Tx queue kobjects are allowed to be updated when a device is being
	 * unregistered, but solely to remove queues from qdiscs. Any path
	 * adding queues should be fixed.
	 */
	WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num,
	     "New queues can't be registered after device unregistration.");

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

		if (!refcount_read(&dev_net(dev)->ns.count))
			queue->kobj.uevent_suppress = 1;

		if (netdev_uses_bql(dev))
			sysfs_remove_group(&queue->kobj, &dql_group);

		sysfs_remove_groups(&queue->kobj, queue->groups);
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

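/* Change the sysfs owner of the first @num Tx queue kobjects, stopping at
 * the first error.
 */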
static int net_tx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

	for (i = 0; i < num; i++) {
		error = tx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

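/* Create the per-device "queues" kset and register kobjects for all real
 * Rx and Tx queues. On failure, everything registered so far is torn down.
 */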
static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
	return error;
}

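/* Change the sysfs owner of the "queues" kset and of every real Rx and Tx
 * queue kobject when the device moves to a differently-owned namespace.
 */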
static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
{
	int error = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	if (ndev->queues_kset) {
		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
		if (error)
			return error;
	}
	real_rx = ndev->real_num_rx_queues;
#endif
	real_tx = ndev->real_num_tx_queues;

	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
	if (error)
		return error;

	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
	if (error)
		return error;

	return 0;
}

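/* Tear down all Rx and Tx queue kobjects and the "queues" kset, and reset
 * the real queue counts to zero under the ops lock.
 */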
static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);

	netdev_lock_ops(dev);
	dev->real_num_rx_queues = 0;
	dev->real_num_tx_queues = 0;
	netdev_unlock_ops(dev);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

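/* Callbacks used by the sysfs namespace machinery (net_ns_type_operations
 * below) to decide who may mount sysfs, which network namespace a kobject
 * or netlink socket belongs to, and how namespace references are managed.
 */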
static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		refcount_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

const struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

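/* Populate the uevent environment for a network device with its name and
 * ifindex so userspace (e.g. udev) can identify it reliably.
 */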
static int netdev_uevent(const struct device *d, struct kobj_uevent_env *env)
{
	const struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 * netdev_release -- destroy and free a dead device.
 * Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	/* no need to wait for rcu grace period:
	 * device is dead and about to be freed.
	 */
	kfree(rcu_access_pointer(dev->ifalias));
	kvfree(dev);
}

static const void *net_namespace(const struct device *d)
{
	const struct net_device *dev = to_net_dev(d);

	return dev_net(dev);
}

static void net_get_ownership(const struct device *d, kuid_t *uid, kgid_t *gid)
{
	const struct net_device *dev = to_net_dev(d);
	const struct net *net = dev_net(dev);

	net_ns_get_ownership(net, uid, gid);
}

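/* The "net" device class backing /sys/class/net: ties together release,
 * uevent, namespace and ownership handling for network devices.
 */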
static const struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
	.get_ownership = net_get_ownership,
};

#ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
	for (; dev; dev = dev->parent) {
		if (dev->of_node == data)
			return 1;
	}

	return 0;
}

/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;

	if (!refcount_read(&dev_net(ndev)->ns.count))
		dev_set_uevent_suppress(dev, 1);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &ndev->dev;
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;
	*groups++ = &netdev_phys_group;

	if (wireless_group_needed(ndev))
		*groups++ = &wireless_group;
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

/* Change owner for sysfs entries when moving network devices across network
 * namespaces owned by different user namespaces.
 */
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
			const struct net *net_new)
{
	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
	struct device *dev = &ndev->dev;
	int error;

	net_ns_get_ownership(net_old, &old_uid, &old_gid);
	net_ns_get_ownership(net_new, &new_uid, &new_gid);

	/* The network namespace was changed but the owning user namespace is
	 * identical so there's no need to change the owner of sysfs entries.
	 */
	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
		return 0;

	error = device_change_owner(dev, new_uid, new_gid);
	if (error)
		return error;

	error = queue_change_owner(ndev, new_uid, new_gid);
	if (error)
		return error;

	return 0;
}

int netdev_class_create_file_ns(const struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

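/* Register the network-namespace kobject type and the "net" device class
 * at init time.
 */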
int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}