GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/core/flow.c
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov ([email protected])
 * Copyright (C) 2003 David S. Miller ([email protected])
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

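/*
 * A single cached flow: the lookup key (family, dir, flowi) plus the
 * resolved object and the generation id the entry was filled under.
 * The union lets the node sit either on a per-CPU hash chain or, once
 * unlinked, on the global garbage-collection list.
 */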
struct flow_cache_entry {
        union {
                struct hlist_node hlist;
                struct list_head gc_list;
        } u;
        u16 family;
        u8 dir;
        u32 genid;
        struct flowi key;
        struct flow_cache_object *object;
};

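/*
 * Per-CPU cache state: a private hash table, its entry count, the
 * per-CPU hash secret (with a flag requesting recalculation), and the
 * tasklet used to flush this CPU's table.
 */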
struct flow_cache_percpu {
        struct hlist_head *hash_table;
        int hash_count;
        u32 hash_rnd;
        int hash_rnd_recalc;
        struct tasklet_struct flush_tasklet;
};

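/* Bookkeeping for a global flush: one completion, one countdown of CPUs. */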
struct flow_flush_info {
        struct flow_cache *cache;
        atomic_t cpuleft;
        struct completion completion;
};

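/*
 * Top-level cache descriptor: hash size (as a shift), the per-CPU
 * tables, watermarks bounding per-CPU entry counts, the CPU-hotplug
 * notifier, and the timer that periodically forces a new hash secret.
 */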
struct flow_cache {
        u32 hash_shift;
        struct flow_cache_percpu __percpu *percpu;
        struct notifier_block hotcpu_notifier;
        int low_watermark;
        int high_watermark;
        struct timer_list rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)

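/*
 * Timer callback: mark every CPU's hash secret as stale so it gets
 * regenerated on that CPU's next lookup, then re-arm the timer.
 */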
static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

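/*
 * An entry is stale if the global generation counter has moved on, or
 * if the resolved object itself reports failure from ->check().
 */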
static int flow_entry_valid(struct flow_cache_entry *fle)
{
        if (atomic_read(&flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

static void flow_entry_kill(struct flow_cache_entry *fle)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

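/*
 * Deferred garbage collection: splice the global gc list onto a local
 * one under the spinlock, then free the entries from process context.
 */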
static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&flow_cache_gc_lock);
        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
                flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

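/*
 * Move a batch of unlinked entries onto the global gc list and kick
 * the workqueue to free them later.
 */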
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list)
{
        if (deleted) {
                fcp->hash_count -= deleted;
                spin_lock_bh(&flow_cache_gc_lock);
                list_splice_tail(gc_list, &flow_cache_gc_list);
                spin_unlock_bh(&flow_cache_gc_lock);
                schedule_work(&flow_cache_gc_work);
        }
}

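/*
 * Walk one CPU's hash table and unlink everything beyond 'shrink_to'
 * valid entries per chain; stale entries are always dropped. The
 * victims are queued for deferred freeing.
 */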
static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

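/* Shrink one CPU's table back toward the low watermark. */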
static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

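/*
 * Pick a fresh per-CPU hash secret; existing entries would hash to the
 * wrong buckets afterwards, so drop them all.
 */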
static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

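/*
 * Hash the flow key with jhash2 under the per-CPU secret and mask the
 * result down to the table size.
 */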
static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key)
{
        const u32 *k = (const u32 *) key;

        return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

typedef unsigned long flow_compare_t;

/* I hear what you're saying, use memcmp. But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2)
{
        const flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

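/*
 * Main lookup: with bottom halves disabled, hash the key into this
 * CPU's table. On a hit with a current generation id, return the
 * cached object (taking a reference via ->get()). On a miss, allocate
 * a new entry (shrinking first if the high watermark is exceeded) and
 * fall through to the resolver, which fills in the object.
 */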
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct hlist_node *entry;
        struct flow_cache_object *flo;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;
        /* Packet really early in init? Making flow_cache_init a
         * pre-smp initcall would solve this. --RR */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key);
        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
                if (tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (flo && !IS_ERR(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

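/*
 * Per-CPU flush body, run from the flush tasklet: drop every entry
 * that is no longer valid, then signal completion once the last CPU
 * has finished.
 */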
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *entry, *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, entry, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();
        tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

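/*
 * Flush all CPUs' caches: schedule the flush tasklet on every other
 * online CPU via smp_call_function(), run it locally as well, and wait
 * for all of them to complete. Serialized by a local mutex and
 * protected against CPU hotplug.
 */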
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&flow_flush_sem);
        info.cache = &flow_cache_global;
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        put_online_cpus();
}

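/*
 * Allocate and initialize one CPU's hash table; used both at init time
 * and from the CPU-hotplug notifier below.
 */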
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

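/*
 * CPU-hotplug notifier: allocate the table when a CPU comes up and
 * empty it when a CPU dies.
 */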
static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
                                    unsigned long action,
                                    void *hcpu)
{
        struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
        int res, cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                res = flow_cache_cpu_prepare(fc, cpu);
                if (res)
                        return notifier_from_errno(res);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __flow_cache_shrink(fc, fcp, 0);
                break;
        }
        return NOTIFY_OK;
}

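/*
 * Set up the global cache: size the table (2^10 buckets), derive the
 * watermarks, allocate per-CPU state, register the hotplug notifier
 * and arm the hash-secret rotation timer.
 */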
static int __init flow_cache_init(struct flow_cache *fc)
{
        int i;

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        for_each_online_cpu(i) {
                if (flow_cache_cpu_prepare(fc, i))
                        return -ENOMEM;
        }
        fc->hotcpu_notifier = (struct notifier_block){
                .notifier_call = flow_cache_cpu,
        };
        register_hotcpu_notifier(&fc->hotcpu_notifier);

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;
}

static int __init flow_cache_init_global(void)
{
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_PANIC, NULL);

        return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);