GitHub Repository: torvalds/linux
Path: blob/master/net/ipv4/inet_fragment.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * Authors:	Pavel Emelyanov <[email protected]>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
	int			ip_defrag_offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

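/* Illustrative use (a sketch of how an IP reassembly path typically consumes
 * this table, not code from this file): each arriving fragment contributes
 * one IPFRAG_ECN_* bit, the bits are ORed into the queue, and a final lookup
 * decides the reassembled header:
 *
 *	ecn = ip_frag_ecn_table[qp->ecn];
 *	if (ecn == 0xff)
 *		goto drop;	// inconsistent ECN marking across fragments
 *	iph->tos |= ecn;	// propagate CE if any fragment carried it
 */
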
int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

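/* Lifetime note: a protocol embeds a long-lived struct inet_frags (qsize,
 * constructor/destructor, frag_expire, rhash_params, ...) and calls
 * inet_frags_init() once at init time.  inet_frags_fini() below must only
 * return after every fqdir created from this struct is gone; the reference
 * taken in fqdir_init() and dropped in fqdir_free_fn(), together with the
 * completion, provide that guarantee.
 */
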
void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = timer_delete_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	fq->flags |= INET_FRAG_DROP;
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	inet_frag_putn(fq, count);
}

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
	struct llist_node *kill_list;
	struct fqdir *fqdir, *tmp;
	struct inet_frags *f;

	/* Atomically snapshot the list of fqdirs to free */
	kill_list = llist_del_all(&fqdir_free_list);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
		f = fqdir->f;
		if (refcount_dec_and_test(&f->refcnt))
			complete(&f->completion);

		kfree(fqdir);
	}
}

static DECLARE_DELAYED_WORK(fqdir_free_work, fqdir_free_fn);

static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	if (llist_add(&fqdir->free_list, &fqdir_free_list))
		queue_delayed_work(system_wq, &fqdir_free_work, HZ);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);

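/* Typical caller (illustrative sketch based on the IPv4 defrag pernet hooks,
 * not code from this file): each netns owns one fqdir per reassembly user,
 * created from its pernet init and torn down in two stages on exit:
 *
 *	err = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);	// pernet init
 *	...
 *	fqdir_pre_exit(net->ipv4.fqdir);	// pre_exit: stop new queues
 *	fqdir_exit(net->ipv4.fqdir);		// exit: free everything
 */
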
static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
	inet_frag_wq = create_workqueue("inet_frag_wq");
	if (!inet_frag_wq)
		panic("Could not create inet frag workq");
	return 0;
}

pure_initcall(inet_frag_wq_init);

void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

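/* Teardown ordering note: fqdir_pre_exit() (an inline in the inet_frag
 * header) is expected to run before fqdir_exit().  It zeroes high_thresh so
 * inet_frag_find() stops creating queues and marks fqdir->dead, which lets
 * inet_frag_kill() below skip the rhashtable removal and leave it to
 * rhashtable_free_and_destroy() in fqdir_work_fn().
 */
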
void inet_frag_kill(struct inet_frag_queue *fq, int *refs)
{
	if (timer_delete(&fq->timer))
		(*refs)++;

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock. Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			(*refs)++;
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

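/* Reference-count convention: inet_frag_kill() never drops references
 * itself, it only accumulates into *refs the number of references the
 * caller now owns (timer, hash table).  Callers release them in one go
 * with inet_frag_putn() once fq->lock has been dropped, as
 * inet_frags_free_cb() and inet_frag_create() do in this file.
 */
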
static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root,
				    enum skb_drop_reason reason)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb_reason(skb, reason);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	unsigned int sum, sum_truesize = 0;
	enum skb_drop_reason reason;
	struct inet_frags *f;
	struct fqdir *fqdir;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	reason = (q->flags & INET_FRAG_DROP) ?
			SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
			SKB_CONSUMED;
	WARN_ON(timer_delete(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	/* One reference for the timer, one for the hash table. */
	refcount_set(&q->refcnt, 2);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		/* We could not insert in the hash table,
		 * we need to cancel what inet_frag_alloc()
		 * anticipated.
		 */
		int refs = 1;

		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q, &refs);
		inet_frag_putn(q, refs);
		return NULL;
	}
	return q;
}

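/* Look up (or create) the queue matching @key.  Returns NULL when the
 * per-fqdir memory limit is exceeded, when fqdir_pre_exit() has zeroed
 * high_thresh because the netns is going away, or when allocation or
 * hash insertion fails.
 */
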
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev))
		fq = prev;
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

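/* Worked example of the run bookkeeping below (byte ranges, purely
 * illustrative):
 *   - [0,1200)    arrives first           -> fragrun_create(), run #1
 *   - [1200,2400) is adjacent to the tail -> fragrun_append_to_last(), run #1
 *   - [3600,4800) leaves a hole           -> fragrun_create(), run #2
 *   - [1200,2400) retransmitted           -> IPFRAG_DUP (fully inside run #1)
 *   - [2000,2600) straddles run #1's end  -> IPFRAG_OVERLAP, caller drops the
 *                                            whole queue per RFC 5722
 */
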
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
				       FRAG_CB(curr)->frag_run_len;
			if (end <= FRAG_CB(curr)->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	FRAG_CB(skb)->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

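/* Rough call sequence for a reassembler built on this API (illustrative):
 * hold fq->lock, feed each fragment through inet_frag_queue_insert(); once
 * the datagram is complete, call inet_frag_reasm_prepare() with the skb
 * that should become the head and its run parent, then pass the returned
 * reasm_data cookie to inet_frag_reasm_finish() to splice the remaining
 * fragments into head's frag_list.
 */
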
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	void (*destructor)(struct sk_buff *);
	unsigned int orig_truesize = 0;
	struct sk_buff **nextp = NULL;
	struct sock *sk = skb->sk;
	int delta;

	if (sk && is_skb_wmem(skb)) {
		/* TX: skb->sk might have been passed as argument to
		 * dst->output and must remain valid until tx completes.
		 *
		 * Move sk to reassembled skb and fix up wmem accounting.
		 */
		orig_truesize = skb->truesize;
		destructor = skb->destructor;
	}

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp) {
			head = skb;
			goto out_restore_sk;
		}
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;

		if (orig_truesize) {
			/* prevent skb_morph from releasing sk */
			skb->sk = NULL;
			skb->destructor = NULL;
		}
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_restore_sk;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_restore_sk;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

out_restore_sk:
	if (orig_truesize) {
		int ts_delta = head->truesize - orig_truesize;

		/* if this reassembled skb is fragmented later,
		 * fraglist skbs will get skb->sk assigned from head->sk,
		 * and each frag skb will be released via sock_wfree.
		 *
		 * Update sk_wmem_alloc.
		 */
		head->sk = sk;
		head->destructor = destructor;
		refcount_add(ts_delta, &sk->sk_wmem_alloc);
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
	const unsigned int head_truesize = head->truesize;
	struct sk_buff **nextp = reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
	head->tstamp_type = q->tstamp_type;

	if (sk)
		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

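/* Detach and return the first fragment of the queue, leaving the rest of
 * its run queued in its place.  Typically used by expire/timeout paths
 * (e.g. IPv4's ip_expire()) that need the head skb to build an ICMP
 * time-exceeded error before the queue is torn down.
 */
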
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);