GitHub Repository: awilliam/linux-vfio
Path: net/ipv4/ip_fragment.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller	:	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy	:	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

static inline u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
static const u8 ip4_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
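
/* Worked example (editorial, derived from the table above): three
 * fragments arrive carrying ECT(0), ECT(0) and CE, which ip4_frag_ecn()
 * maps to 0x04, 0x04 and 0x08, so qp->ecn accumulates to 0x0c and
 * ip4_frag_ecn_table[0x0c] yields INET_ECN_CE: the reassembled header
 * is marked CE. Mixing Not-ECT with any ECT fragment (e.g. qp->ecn ==
 * 0x05) hits a 0xff entry and ip_frag_reasm() drops the datagram.
 * Unlisted combinations are zero, leaving the final tos untouched.
 */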

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}
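
/* Editorial note: a queue is keyed by the classic reassembly 4-tuple
 * (id, saddr, daddr, protocol) plus the defragmentation user (e.g.
 * local delivery vs. conntrack). The hash covers only the 4-tuple, so
 * two users defragmenting the same datagram share a hash bucket but
 * never a queue, because ip4_frag_match() also compares qp->user.
 */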

/* Memory Tracking Functions. */
static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments. Evictor trashes the oldest
 * fragment queues until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb dst is stale, drop it, and perform route lookup again */
		skb_dst_drop(head);
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/*
		 * Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
		    skb_rtable(head)->rt_type != RTN_LOCAL)
			goto out_rcu_unlock;


		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one, if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
	return NULL;
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}
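
/* Editorial illustration: peer->rid is bumped once per fragment seen
 * from this source, while qp->rid remembers the count at this queue's
 * previous fragment. With the default sysctl_ipfrag_max_dist of 64,
 * if 100 unrelated fragments from the same peer arrive between two
 * fragments of this datagram, end - start == 100 > 64 and the queue
 * is judged too stale to trust, so it is reset via ip_frag_reinit().
 */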

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
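
/* Editorial note: this is invoked from ip_frag_queue() below when
 * ip_frag_too_far() trips. If the timer cannot be re-armed
 * (mod_timer() returns 0 because expiry is already in progress), the
 * caller gets -ETIMEDOUT and kills the queue; otherwise all queued
 * fragments are freed and the queue state is zeroed so reassembly can
 * start over with the new fragment.
 */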

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);
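
	/* Worked example (editorial): a raw frag_off of 0x2004 yields
	 * flags = 0x2000 (IP_MF, more fragments follow) and a field
	 * value of 4, i.e. a byte offset of 4 << 3 = 32, so this
	 * fragment covers [32, 32 + skb->len - ihl) of the original
	 * datagram.
	 */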

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}
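
	/* Editorial example: if the preceding fragment covers bytes
	 * [0, 24) and this one claims [16, 48), then i == 8, so eight
	 * bytes are pulled from this skb's head and it is treated as
	 * covering [24, 48). The loop below applies the mirror-image
	 * trim to the fragments that follow us.
	 */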

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one. Drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip4_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}
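
	/* Editorial note (intent inferred from the code): the caller of
	 * ip_defrag() still holds the skb it passed in, so the finished
	 * datagram must materialise in that exact skb. skb_morph() above
	 * therefore turns the just-received skb into the queue head in
	 * place, rather than copying data into the old head.
	 */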

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
		       "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
		       &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
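
/* Usage sketch (editorial; a hypothetical caller mirroring what
 * ip_local_deliver() does). A return of 0 means reassembly completed
 * into this very skb; any nonzero return (notably -EINPROGRESS) means
 * the skb has been consumed, queued or freed:
 *
 *	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
 *		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;
 *	}
 *	... skb now holds the complete datagram ...
 */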

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
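
/* Editorial note: these knobs appear under /proc/sys/net/ipv4/. For
 * example, from a shell (values illustrative only):
 *
 *	echo 524288 > /proc/sys/net/ipv4/ipfrag_high_thresh
 *	echo 393216 > /proc/sys/net/ipv4/ipfrag_low_thresh
 *
 * ipfrag_time and ipfrag_secret_interval are expressed in seconds and
 * converted to jiffies by proc_dointvec_jiffies; setting
 * ipfrag_max_dist to 0 disables the too-far check in ip_frag_too_far().
 */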

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer by the TTL on
	 * each fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}