/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>

struct net;
struct netlink_ext_ack;
struct cpumask;

/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7) /* must be ^2 and !overflow buckets */
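/*
 * Rough reading of the struct below (inferred, not taken from the tree):
 * history[] is a ring of the last FLOW_LIMIT_HISTORY bucket indices seen,
 * buckets[] (1 << log_buckets entries) counts hits per bucket within that
 * window, and keeping the window at 255 entries or fewer keeps the u8
 * counters from overflowing.
 */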
struct sd_flow_limit {
	struct rcu_head rcu;
	unsigned int count;
	u8 log_buckets;
	unsigned int history_head;
	u16 history[FLOW_LIMIT_HISTORY];
	u8 buckets[];
};

extern int netdev_flow_limit_table_len;

struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);

struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *__netdev_put_lock(struct net_device *dev, struct net *net);
struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index);

DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \
	     ifindex++)

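/*
 * Usage sketch (illustrative, not a caller from this file): walk every
 * device in a netns; each iteration presumably yields a non-NULL,
 * instance-locked device, released via the __free(netdev_unlock) cleanup:
 *
 *	unsigned long ifindex = 0;
 *
 *	for_each_netdev_lock_scoped(net, dev, ifindex) {
 *		... use dev under its instance lock ...
 *	}
 */
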
struct net_device *
netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex);
struct net_device *
netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev,
			       unsigned long *index);

DEFINE_FREE(netdev_unlock_ops_compat, struct net_device *,
	    if (_T) netdev_unlock_ops_compat(_T));

#define for_each_netdev_lock_ops_compat_scoped(net, var_name, ifindex)	\
	for (struct net_device *var_name __free(netdev_unlock_ops_compat) = NULL; \
	     (var_name = netdev_xa_find_lock_ops_compat(net, var_name,	\
							&ifindex));	\
	     ifindex++)

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						     unsigned int txq) {}
#endif

/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;

extern struct rw_semaphore dev_addr_sem;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);

/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};

int netdev_get_name(struct net *net, char *name, int ifindex);
int netif_change_name(struct net_device *dev, const char *newname);
int dev_change_name(struct net_device *dev, const char *newname);

#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
		      struct netlink_ext_ack *extack);

int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int netif_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void netdev_change_proto_down_reason_locked(struct net_device *dev,
					    unsigned long mask, u32 value);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void netif_set_group(struct net_device *dev, int new_group);
void dev_set_group(struct net_device *dev, int new_group);
int netif_change_carrier(struct net_device *dev, bool new_carrier);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);

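/*
 * dev->up is written under the instance lock.  For ops-locked devices the
 * caller is presumably already holding netdev_lock(), so the lock is only
 * taken here when netdev_need_ops_lock() says it is not required.
 */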
static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}

static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	return READ_ONCE(n->defer_hard_irqs);
}

/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	WRITE_ONCE(n->defer_hard_irqs, defer);
}

/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}

/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->gro_flush_timeout);
}

/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}

/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}

/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->irq_suspend_timeout);
}

/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}

static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
{
	if (test_bit(NAPI_STATE_THREADED, &n->state))
		return NETDEV_NAPI_THREADED_ENABLED;

	return NETDEV_NAPI_THREADED_DISABLED;
}

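/* Per-NAPI config (n->config), when present, overrides the device-wide
 * dev->threaded default.
 */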
static inline enum netdev_napi_threaded
napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
{
	if (n->config)
		return n->config->threaded;
	return dev->threaded;
}

int napi_set_threaded(struct napi_struct *n,
		      enum netdev_napi_threaded threaded);

int netif_set_threaded(struct net_device *dev,
		       enum netdev_napi_threaded threaded);

int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif

/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}

void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);

#define XMIT_RECURSION_LIMIT	8

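/*
 * The xmit recursion count bounds nested transmits (e.g. stacked devices
 * transmitting back into the stack).  On !PREEMPT_RT the transmit path runs
 * with bottom halves disabled and stays on one CPU, so a per-CPU counter in
 * softnet_data suffices; on PREEMPT_RT that path is preemptible, so the
 * count is kept per task instead.
 */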
#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif

int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
int dev_get_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg);
int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);

#endif