Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/ieee802154/6lowpan/reassembly.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/* 6LoWPAN fragment reassembly
3
*
4
* Authors:
5
* Alexander Aring <[email protected]>
6
*
7
* Based on: net/ipv6/reassembly.c
8
*/
9
10
#define pr_fmt(fmt) "6LoWPAN: " fmt
11
12
#include <linux/net.h>
13
#include <linux/list.h>
14
#include <linux/netdevice.h>
15
#include <linux/random.h>
16
#include <linux/jhash.h>
17
#include <linux/skbuff.h>
18
#include <linux/slab.h>
19
#include <linux/export.h>
20
21
#include <net/ieee802154_netdev.h>
22
#include <net/6lowpan.h>
23
#include <net/ipv6_frag.h>
24
#include <net/inet_frag.h>
25
#include <net/ip.h>
26
27
#include "6lowpan_i.h"
28
29
static const char lowpan_frags_cache_name[] = "lowpan-frags";
30
31
static struct inet_frags lowpan_frags;
32
33
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
34
struct sk_buff *prev, struct net_device *ldev,
35
int *refs);
36
37
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
38
{
39
const struct frag_lowpan_compare_key *key = a;
40
41
BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
42
memcpy(&q->key, key, sizeof(*key));
43
}
44
45
static void lowpan_frag_expire(struct timer_list *t)
46
{
47
struct inet_frag_queue *frag = timer_container_of(frag, t, timer);
48
struct frag_queue *fq;
49
int refs = 1;
50
51
fq = container_of(frag, struct frag_queue, q);
52
53
spin_lock(&fq->q.lock);
54
55
if (fq->q.flags & INET_FRAG_COMPLETE)
56
goto out;
57
58
inet_frag_kill(&fq->q, &refs);
59
out:
60
spin_unlock(&fq->q.lock);
61
inet_frag_putn(&fq->q, refs);
62
}
63
64
static inline struct lowpan_frag_queue *
65
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
66
const struct ieee802154_addr *src,
67
const struct ieee802154_addr *dst)
68
{
69
struct netns_ieee802154_lowpan *ieee802154_lowpan =
70
net_ieee802154_lowpan(net);
71
struct frag_lowpan_compare_key key = {};
72
struct inet_frag_queue *q;
73
74
key.tag = cb->d_tag;
75
key.d_size = cb->d_size;
76
key.src = *src;
77
key.dst = *dst;
78
79
q = inet_frag_find(ieee802154_lowpan->fqdir, &key);
80
if (!q)
81
return NULL;
82
83
return container_of(q, struct lowpan_frag_queue, q);
84
}
85
86
/* Insert one fragment into its queue and, once the first and last
 * fragments have arrived and all bytes are accounted for, trigger
 * reassembly.
 *
 * Called with fq->q.lock held.  Consumes @skb on every path; returns 1
 * when a full datagram was reassembled, -1 otherwise.
 */
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type,
			     int *refs)
{
	struct net_device *ldev;
	struct sk_buff *prev_tail;
	int offset, end, err;

	/* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb
	 * in inet_fragment.c
	 */
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	/* d_offset is expressed in 8-byte units on the wire. */
	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	if (offset + skb->len == end) {
		/* Final fragment: reject it if we already hold bytes beyond
		 * @end, or if a previous final fragment reported a different
		 * total length.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else if (end > fq->q.len) {
		/* Growing past an already-seen final fragment -> corrupt. */
		if (fq->q.flags & INET_FRAG_LAST_IN)
			goto err;
		fq->q.len = end;
	}

	/* Stash the device; skb->cb aliasing requires a clean skb->dev. */
	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto err;

	fq->q.stamp = skb->tstamp;
	fq->q.tstamp_type = skb->tstamp_type;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		unsigned long orefdst = skb->_skb_refdst;
		int res;

		/* Clear the dst reference around reassembly, then restore. */
		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, skb, prev_tail, ldev, refs);
		skb->_skb_refdst = orefdst;
		return res;
	}
	skb_dst_drop(skb);

	return -1;
err:
	kfree_skb(skb);
	return -1;
}
160
161
/* Check if this packet is complete.
162
*
163
* It is called with locked fq, and caller must check that
164
* queue is eligible for reassembly i.e. it is not COMPLETE,
165
* the last and the first frames arrived and all the bits are here.
166
*/
167
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
168
struct sk_buff *prev_tail, struct net_device *ldev,
169
int *refs)
170
{
171
void *reasm_data;
172
173
inet_frag_kill(&fq->q, refs);
174
175
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
176
if (!reasm_data)
177
goto out_oom;
178
inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
179
180
skb->dev = ldev;
181
skb->tstamp = fq->q.stamp;
182
fq->q.rb_fragments = RB_ROOT;
183
fq->q.fragments_tail = NULL;
184
fq->q.last_run_head = NULL;
185
186
return 1;
187
out_oom:
188
net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
189
return -1;
190
}
191
192
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
193
lowpan_rx_result res)
194
{
195
switch (res) {
196
case RX_QUEUED:
197
return NET_RX_SUCCESS;
198
case RX_CONTINUE:
199
/* nobody cared about this packet */
200
net_warn_ratelimited("%s: received unknown dispatch\n",
201
__func__);
202
203
fallthrough;
204
default:
205
/* all others failure */
206
return NET_RX_DROP;
207
}
208
}
209
210
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
211
{
212
int ret;
213
214
if (!lowpan_is_iphc(*skb_network_header(skb)))
215
return RX_CONTINUE;
216
217
ret = lowpan_iphc_decompress(skb);
218
if (ret < 0)
219
return RX_DROP;
220
221
return RX_QUEUED;
222
}
223
224
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
225
{
226
lowpan_rx_result res;
227
228
#define CALL_RXH(rxh) \
229
do { \
230
res = rxh(skb); \
231
if (res != RX_CONTINUE) \
232
goto rxh_next; \
233
} while (0)
234
235
/* likely at first */
236
CALL_RXH(lowpan_frag_rx_h_iphc);
237
CALL_RXH(lowpan_rx_h_ipv6);
238
239
rxh_next:
240
return lowpan_frag_rx_handlers_result(skb, res);
241
#undef CALL_RXH
242
}
243
244
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK 0x07
245
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT 8
246
247
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
248
struct lowpan_802154_cb *cb)
249
{
250
bool fail;
251
u8 high = 0, low = 0;
252
__be16 d_tag = 0;
253
254
fail = lowpan_fetch_skb(skb, &high, 1);
255
fail |= lowpan_fetch_skb(skb, &low, 1);
256
/* remove the dispatch value and use first three bits as high value
257
* for the datagram size
258
*/
259
cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
260
LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
261
fail |= lowpan_fetch_skb(skb, &d_tag, 2);
262
cb->d_tag = ntohs(d_tag);
263
264
if (frag_type == LOWPAN_DISPATCH_FRAGN) {
265
fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
266
} else {
267
skb_reset_network_header(skb);
268
cb->d_offset = 0;
269
/* check if datagram_size has ipv6hdr on FRAG1 */
270
fail |= cb->d_size < sizeof(struct ipv6hdr);
271
/* check if we can dereference the dispatch value */
272
fail |= !skb->len;
273
}
274
275
if (unlikely(fail))
276
return -EIO;
277
278
return 0;
279
}
280
281
/* Entry point for FRAG1/FRAGN dispatch types.  Parses the fragment
 * header, locates (or creates) the matching reassembly queue and
 * enqueues the fragment.  Consumes @skb on every error path; returns
 * the lowpan_frag_queue() result, or -1 on failure.
 */
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct net *net = dev_net(skb->dev);
	struct ieee802154_hdr hdr = {};
	struct lowpan_frag_queue *fq;
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	/* FRAG1 carries the (possibly IPHC-compressed) IPv6 header;
	 * decompress it up front so the reassembled datagram is plain IPv6.
	 */
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	rcu_read_lock();
	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq) {
		int ret, refs = 0;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type, &refs);
		spin_unlock(&fq->q.lock);

		rcu_read_unlock();
		inet_frag_putn(&fq->q, refs);
		return ret;
	}
	rcu_read_unlock();

err:
	kfree_skb(skb);
	return -1;
}
326
327
#ifdef CONFIG_SYSCTL
328
329
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
330
{
331
.procname = "6lowpanfrag_high_thresh",
332
.maxlen = sizeof(unsigned long),
333
.mode = 0644,
334
.proc_handler = proc_doulongvec_minmax,
335
},
336
{
337
.procname = "6lowpanfrag_low_thresh",
338
.maxlen = sizeof(unsigned long),
339
.mode = 0644,
340
.proc_handler = proc_doulongvec_minmax,
341
},
342
{
343
.procname = "6lowpanfrag_time",
344
.maxlen = sizeof(int),
345
.mode = 0644,
346
.proc_handler = proc_dointvec_jiffies,
347
},
348
};
349
350
/* secret interval has been deprecated */
351
static int lowpan_frags_secret_interval_unused;
352
static struct ctl_table lowpan_frags_ctl_table[] = {
353
{
354
.procname = "6lowpanfrag_secret_interval",
355
.data = &lowpan_frags_secret_interval_unused,
356
.maxlen = sizeof(int),
357
.mode = 0644,
358
.proc_handler = proc_dointvec_jiffies,
359
},
360
};
361
362
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
363
{
364
struct ctl_table *table;
365
struct ctl_table_header *hdr;
366
struct netns_ieee802154_lowpan *ieee802154_lowpan =
367
net_ieee802154_lowpan(net);
368
size_t table_size = ARRAY_SIZE(lowpan_frags_ns_ctl_table);
369
370
table = lowpan_frags_ns_ctl_table;
371
if (!net_eq(net, &init_net)) {
372
table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
373
GFP_KERNEL);
374
if (table == NULL)
375
goto err_alloc;
376
377
/* Don't export sysctls to unprivileged users */
378
if (net->user_ns != &init_user_ns)
379
table_size = 0;
380
}
381
382
table[0].data = &ieee802154_lowpan->fqdir->high_thresh;
383
table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh;
384
table[1].data = &ieee802154_lowpan->fqdir->low_thresh;
385
table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh;
386
table[2].data = &ieee802154_lowpan->fqdir->timeout;
387
388
hdr = register_net_sysctl_sz(net, "net/ieee802154/6lowpan", table,
389
table_size);
390
if (hdr == NULL)
391
goto err_reg;
392
393
ieee802154_lowpan->sysctl.frags_hdr = hdr;
394
return 0;
395
396
err_reg:
397
if (!net_eq(net, &init_net))
398
kfree(table);
399
err_alloc:
400
return -ENOMEM;
401
}
402
403
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
404
{
405
const struct ctl_table *table;
406
struct netns_ieee802154_lowpan *ieee802154_lowpan =
407
net_ieee802154_lowpan(net);
408
409
table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
410
unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
411
if (!net_eq(net, &init_net))
412
kfree(table);
413
}
414
415
static struct ctl_table_header *lowpan_ctl_header;
416
417
static int __init lowpan_frags_sysctl_register(void)
418
{
419
lowpan_ctl_header = register_net_sysctl(&init_net,
420
"net/ieee802154/6lowpan",
421
lowpan_frags_ctl_table);
422
return lowpan_ctl_header == NULL ? -ENOMEM : 0;
423
}
424
425
static void lowpan_frags_sysctl_unregister(void)
426
{
427
unregister_net_sysctl_table(lowpan_ctl_header);
428
}
429
#else
430
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
431
{
432
return 0;
433
}
434
435
static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
436
{
437
}
438
439
static inline int __init lowpan_frags_sysctl_register(void)
440
{
441
return 0;
442
}
443
444
static inline void lowpan_frags_sysctl_unregister(void)
445
{
446
}
447
#endif
448
449
/* Per-netns init: allocate the fqdir, seed it with the IPv6 fragment
 * defaults and hook up the namespace's sysctls.
 */
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	res = fqdir_init(&ieee802154_lowpan->fqdir, &lowpan_frags, net);
	if (res < 0)
		return res;

	ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(ieee802154_lowpan->fqdir);

	return res;
}
469
470
/* Per-netns pre-exit: stop new queue creation before final teardown. */
static void __net_exit lowpan_frags_pre_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	fqdir_pre_exit(ieee802154_lowpan->fqdir);
}
477
478
/* Per-netns exit: drop the sysctls, then release the fqdir. */
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	fqdir_exit(ieee802154_lowpan->fqdir);
}
486
487
static struct pernet_operations lowpan_frags_ops = {
488
.init = lowpan_frags_init_net,
489
.pre_exit = lowpan_frags_pre_exit_net,
490
.exit = lowpan_frags_exit_net,
491
};
492
493
/* rhashtable key hash: jhash2 over the whole compare key. */
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
	u32 words = sizeof(struct frag_lowpan_compare_key) / sizeof(u32);

	return jhash2(data, words, seed);
}
498
499
/* rhashtable object hash: hash the key embedded in the queue so it
 * matches lowpan_key_hashfn() for the same tuple.
 */
static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;
	u32 words = sizeof(struct frag_lowpan_compare_key) / sizeof(u32);

	return jhash2((const u32 *)&fq->key, words, seed);
}
506
507
static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
508
{
509
const struct frag_lowpan_compare_key *key = arg->key;
510
const struct inet_frag_queue *fq = ptr;
511
512
return !!memcmp(&fq->key, key, sizeof(*key));
513
}
514
515
static const struct rhashtable_params lowpan_rhash_params = {
516
.head_offset = offsetof(struct inet_frag_queue, node),
517
.hashfn = lowpan_key_hashfn,
518
.obj_hashfn = lowpan_obj_hashfn,
519
.obj_cmpfn = lowpan_obj_cmpfn,
520
.automatic_shrinking = true,
521
};
522
523
/* Module init: configure and register the shared inet_frags descriptor,
 * the global sysctl and the pernet operations.  Unwinds in reverse
 * order on failure.
 */
int __init lowpan_net_frag_init(void)
{
	int ret;

	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	lowpan_frags.rhash_params = lowpan_rhash_params;

	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		return ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	return 0;

err_pernet:
	lowpan_frags_sysctl_unregister();
err_sysctl:
	inet_frags_fini(&lowpan_frags);
	return ret;
}
552
553
void lowpan_net_frag_exit(void)
554
{
555
lowpan_frags_sysctl_unregister();
556
unregister_pernet_subsys(&lowpan_frags_ops);
557
inet_frags_fini(&lowpan_frags);
558
}
559
560