GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/dccp/ipv6.c
/*
 *  DCCP over IPv6
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			inet_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

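/*
 * Checksum handling: as with TCP and UDP over IPv6, the DCCP checksum covers
 * an IPv6 pseudo-header (source/destination address, upper-layer length and
 * next-header value) in addition to the DCCP header and payload (RFC 4340,
 * sec. 9).  dccp_csum_outgoing() leaves the partial sum over the covered DCCP
 * bytes in skb->csum; csum_ipv6_magic() then folds in the pseudo-header.
 */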
/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}

static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
						  __be16 sport, __be16 dport)
{
	return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

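/*
 * ICMPv6 error handler (wired up as dccp_v6_protocol.err_handler below).
 * PKT_TOOBIG feeds path-MTU discovery via dccp_sync_mss(); other errors are
 * matched to an established socket or a pending request and reported through
 * sk_err/sk_err_soft, mirroring what dccp_v4_err() does for IPv4.
 */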
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = inet6_lookup(net, &dccp_hashinfo,
			  &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

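/*
 * Build and transmit a DCCP-Response for a pending connection request.
 * This also serves as the .rtx_syn_ack retransmission hook registered in
 * dccp6_request_sock_ops below.
 */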
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct request_values *rv_unused)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq6->iif;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;

	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq6->loc_addr,
							 &ireq6->rmt_addr);
		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	if (inet6_rsk(req)->pktopts != NULL)
		kfree_skb(inet6_rsk(req)->pktopts);
}

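/*
 * Send a Reset in reply to a packet that has no usable socket, using the
 * per-net control socket (net->dccp.v6_ctl_sk) mentioned at the top of this
 * file.
 */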
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
	ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};

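/*
 * For a packet arriving on a listening socket: look for a matching pending
 * connection request first, then for an already established (or TIME_WAIT)
 * child socket, and otherwise fall back to the listening socket itself.
 */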
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

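/*
 * Handle a DCCP-Request arriving on a listening socket: validate the service
 * code, allocate and initialise a request sock, send the Response via
 * dccp_v6_send_response() and queue the request until the handshake
 * completes.
 */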
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq6 = inet6_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

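/*
 * Create the child socket once the handshake completes.  The v6-mapped case
 * delegates to dccp_v4_request_recv_sock() and then switches the child over
 * to the dccp_ipv6_mapped ops; the native IPv6 case routes the flow, clones
 * the listener's IPv6 state and hashes the new socket.
 */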
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
		final_p = fl6_update_dst(&fl6, opt, &final);
		ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = inet_rsk(req)->rmt_port;
		fl6.fl6_sport = inet_rsk(req)->loc_port;
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

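/*
 * Main receive entry point (dccp_v6_protocol.handler): Step 1 header and
 * checksum validation, Step 2 socket lookup, the RFC 4340 minimum checksum
 * coverage check, and finally sk_receive_skb(), which ends up in
 * dccp_v6_do_rcv() either directly or via the socket backlog.
 */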
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
			        dh->dccph_sport, dh->dccph_dport);
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov  &&  (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

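/*
 * Active open.  Handles flow labels, scope IDs and v4-mapped destinations
 * (the latter are handed to dccp_v4_connect() after switching to the
 * dccp_ipv6_mapped ops), routes the flow, binds a local port through
 * inet6_hash_connect() and finally starts the handshake with dccp_connect().
 */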
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      np->daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = inet_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = inet_csk_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
	.h.hashinfo	   = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};

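/*
 * Per-network-namespace setup: dccp_hashinfo.bhash is only non-NULL when the
 * DCCP core managed to allocate its hash tables, so bail out with
 * -ESOCKTNOSUPPORT otherwise; then create the per-net control socket used by
 * dccp_v6_ctl_send_reset().
 */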
static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init   = dccp_v6_init_net,
	.exit   = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
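/*
 * For reference, userspace reaches this handler by creating a socket with
 *
 *	int fd = socket(AF_INET6, SOCK_DCCP, IPPROTO_DCCP);
 *
 * and the request-module lookup for that (family, protocol, type) triple
 * resolves through the aliases below.
 */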
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <[email protected]>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");