GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/sctp/input.c
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle all input from the IP layer into SCTP.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <[email protected]>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <[email protected]>
 *    Karl Knutson <[email protected]>
 *    Xingang Guo <[email protected]>
 *    Jon Grimm <[email protected]>
 *    Hui Huang <[email protected]>
 *    Daisy Chang <[email protected]>
 *    Sridhar Samudrala <[email protected]>
 *    Ardelle Fan <[email protected]>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>

/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      const union sctp_addr *paddr,
				      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);

/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
{
	struct sctphdr *sh = sctp_hdr(skb);
	__le32 cmp = sh->checksum;
	struct sk_buff *list;
	__le32 val;
	__u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));

	skb_walk_frags(skb, list)
		tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
					tmp);

	val = sctp_end_cksum(tmp);

	if (val != cmp) {
		/* CRC failure, dump it. */
		SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS);
		return -1;
	}
	return 0;
}

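/* Per-packet SCTP input state stashed in skb->cb; SCTP_INPUT_CB() maps an
 * skb's control buffer onto this layout.
 */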
struct sctp_input_cb {
	union {
		struct inet_skb_parm h4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct inet6_skb_parm h6;
#endif
	} header;
	struct sctp_chunk *chunk;
};
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))

/*
 * This is the routine which IP calls when receiving an SCTP packet.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS);

	if (skb_linearize(skb))
		goto discard_it;

	sh = sctp_hdr(skb);

	/* Pull up the IP and SCTP headers. */
	__skb_pull(skb, skb_transport_offset(skb));
	if (skb->len < sizeof(struct sctphdr))
		goto discard_it;
	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
	    sctp_rcv_checksum(skb) < 0)
		goto discard_it;

	skb_pull(skb, sizeof(struct sctphdr));

	/* Make sure we at least have chunk headers worth of data left. */
	if (skb->len < sizeof(struct sctp_chunkhdr))
		goto discard_it;

	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);

	/* If the packet is to or from a non-unicast address,
	 * silently discard the packet.
	 *
	 * This is not clearly defined in the RFC except in section
	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
	 * Transmission Protocol" 2.1, "It is important to note that the
	 * IP address of an SCTP transport address must be a routable
	 * unicast address.  In other words, IP multicast addresses and
	 * IP broadcast addresses cannot be used in an SCTP transport
	 * address."
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);

	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(&dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/*
	 * If a frame arrives on an interface and the receiving socket is
	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB.
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
		if (asoc) {
			sctp_association_put(asoc);
			asoc = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		sk = sctp_get_ctl_sock();
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}

	/*
	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * An SCTP packet is called an "out of the blue" (OOTB)
	 * packet if it is correctly formed, i.e., passed the
	 * receiver's checksum check, but the receiver is not
	 * able to identify the association to which this
	 * packet belongs.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sh;

	/* Set the source and destination addresses of the incoming chunk. */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from. */
	chunk->transport = transport;

	/* Acquire access to the sock lock.  Note: We are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
		 * because migrate()/accept() may have moved the association
		 * to a new socket and released all the sockets.  So now we
		 * are holding a lock on the old socket while the user may
		 * be doing something with the new socket.  Switch our view
		 * of the current sk.
		 */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
		}
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_DISCARDS);
	kfree_skb(skb);
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}

/* Process the backlog queue of the socket.  Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
	 * and refs that we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}

	if (unlikely(rcvr->sk != sk)) {
		/* In this case, the association moved from one socket to
		 * another.  We are currently sitting on the backlog of the
		 * old socket, so we need to move.
		 * However, since we are here in the process context we
		 * need to make sure that the user doesn't own
		 * the new socket when we process the packet.
		 * If the new socket is user-owned, queue the chunk to the
		 * backlog of the new socket without dropping any refs.
		 * Otherwise, we can safely push the chunk on the inqueue.
		 */

		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			if (sk_add_backlog(sk, skb))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);

		/* If the chunk was backlogged again, don't drop refs. */
		if (backloged)
			return 0;
	} else {
		sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the refs we took in sctp_add_backlog. */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}

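/* Queue the chunk's skb on the socket backlog and take a reference on the
 * owning association or endpoint so it cannot disappear while the packet
 * waits for sctp_backlog_rcv() to run.
 */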
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_ep_common *rcvr = chunk->rcvr;
	int ret;

	ret = sk_add_backlog(sk, skb);
	if (!ret) {
		/* Hold the assoc/ep while hanging on the backlog queue.
		 * This way, we know structures we need will not disappear
		 * from us.
		 */
		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
			sctp_association_hold(sctp_assoc(rcvr));
		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
			sctp_endpoint_hold(sctp_ep(rcvr));
		else
			BUG();
	}
	return ret;
}

/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (t->param_flags & SPP_PMTUD_ENABLE) {
		/* Update the transport's view of the MTU. */
		sctp_transport_update_pmtu(t, pmtu);

		/* Update association pmtu. */
		sctp_assoc_sync_pmtu(asoc);
	}

	/* Retransmit with the new pmtu setting.
	 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
	 * Needed will never be sent, but if a message was sent before
	 * PMTU discovery was disabled that was larger than the PMTU, it
	 * would not be fragmented, so it must be re-transmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}

/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable", treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n", __func__);

	if (sock_owned_by_user(sk)) {
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			if (!mod_timer(&t->proto_unreach_timer,
					jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}

	} else {
		if (timer_pending(&t->proto_unreach_timer) &&
		    del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}

/* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	struct sctp_init_chunk *chunkhdr;
	__u32 vtag = ntohl(sctphdr->vtag);
	int len = skb->len - ((void *)sctphdr - (void *)skb->data);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 * ICMP6) An implementation MUST validate that the Verification Tag
	 * contained in the ICMP message matches the Verification Tag of
	 * the peer.  If the Verification Tag is not 0 and does NOT
	 * match, discard the ICMP message.  If it is 0 and the ICMP
	 * message contains enough bytes to verify that the chunk type is
	 * an INIT chunk and that the Initiate Tag matches the tag of the
	 * peer, continue with ICMP7.  If the ICMP message is too short
	 * or the chunk type or the Initiate Tag does not match, silently
	 * discard the packet.
	 */
	if (vtag == 0) {
		chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr
				+ sizeof(struct sctphdr));
		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
			  + sizeof(__be32) ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
			goto out;
		}
	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(&init_net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	if (asoc)
		sctp_association_put(asoc);
	return NULL;
}

/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
	sctp_bh_unlock_sock(sk);
	if (asoc)
		sctp_association_put(asoc);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the sctp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	sk_buff_data_t saveip, savesctp;
	int err;

	if (skb->len < ihlen + 8) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}

	/* Fix up skb to look at the embedded net header. */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		ICMP_INC_STATS_BH(&init_net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning: The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport, info);
			goto out_unlock;
		} else {
			if (ICMP_PROT_UNREACH == code) {
				sctp_icmp_proto_unreachable(sk, asoc,
							    transport);
				goto out_unlock;
			}
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment reassembly
		 * timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, asoc);
}

/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away.  If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	sctp_chunkhdr_t *ch;
	__u8 *ch_end;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* Scan through all the chunks in the packet. */
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard the OOTB packet and take no
		 * further action.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * This will discard packets with INIT chunk bundled as
		 * subsequent chunks in the packet.  When INIT is first,
		 * the normal INIT processing will discard the chunk.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		ch = (sctp_chunkhdr_t *) ch_end;
	} while (ch_end < skb_tail_pointer(skb));

	return 0;

discard:
	return 1;
}

/* Insert endpoint into the hash table. */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &ep->base;

	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
}

/* Remove endpoint from the hash table. */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &ep->base;

	if (hlist_unhashed(&epb->node))
		return;

	epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);

	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove endpoint from the hash. Local BH-safe. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_unhash_endpoint(ep);
	sctp_local_bh_enable();
}

/* Look up an endpoint. */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct hlist_node *node;
	int hash;

	hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
	head = &sctp_ep_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		if (sctp_endpoint_is_match(ep, laddr))
			goto hit;
	}

	ep = sctp_sk((sctp_get_ctl_sock()))->ep;

hit:
	sctp_endpoint_hold(ep);
	read_unlock(&head->lock);
	return ep;
}

/* Insert association into the hash table. */
static void __sctp_hash_established(struct sctp_association *asoc)
{
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &asoc->base;

	/* Calculate which chain this entry will belong to. */
	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an association to the hash. Local BH-safe. */
void sctp_hash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
}

/* Remove association from the hash table. */
static void __sctp_unhash_established(struct sctp_association *asoc)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &asoc->base;

	epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port,
					 asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	__hlist_del(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove association from the hash table.  Local BH-safe. */
void sctp_unhash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
}

/* Look up an association. */
static struct sctp_association *__sctp_lookup_association(
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct hlist_node *node;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
	head = &sctp_assoc_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		asoc = sctp_assoc(epb);
		transport = sctp_assoc_is_match(asoc, local, peer);
		if (transport)
			goto hit;
	}

	read_unlock(&head->lock);

	return NULL;

hit:
	*pt = transport;
	sctp_association_hold(asoc);
	read_unlock(&head->lock);
	return asoc;
}

/* Look up an association. BH-safe. */
SCTP_STATIC
struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
						 struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(laddr, paddr, transportp);
	sctp_local_bh_enable();

	return asoc;
}

/* Is there an association matching the given local and peer addresses? */
int sctp_has_association(const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
{
	struct sctp_association *asoc;
	struct sctp_transport *transport;

	if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
		sctp_association_put(asoc);
		return 1;
	}

	return 0;
}

/*
 * SCTP Implementor's Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK.  Any implementation that
 * does not do this may not be able to establish associations
 * in certain circumstances.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	sctp_init_chunk_t *init;
	struct sctp_transport *transport;
	struct sctp_af *af;

	/*
	 * This code will NOT touch anything inside the chunk--it is
	 * strictly READ-ONLY.
	 *
	 * RFC 2960 3  SCTP packet Format
	 *
	 * Multiple chunks can be bundled into one SCTP packet up to
	 * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
	 * COMPLETE chunks.  These chunks MUST NOT be bundled with any
	 * other chunk in a packet.  See Section 6.10 for more details
	 * on chunk bundling.
	 */

	/* Find the start of the TLVs and the end of the chunk.  This is
	 * the region we search for address parameters.
	 */
	init = (sctp_init_chunk_t *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(laddr, paddr, &transport);
		if (asoc)
			return asoc;
	}

	return NULL;
}

/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with.  To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header.  If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks.  If found, proceed to rule D4.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					sctp_chunkhdr_t *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter. */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(laddr, &paddr, transportp);
}

/* SCTP-AUTH, Section 6.3:
 *    If the receiver does not find a STCB for a packet containing an AUTH
 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 *    association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	sctp_chunkhdr_t *ch;
	int have_auth = 0;
	unsigned int chunk_num = 1;
	__u8 *ch_end;

	/* Walk through the chunks looking for AUTH or ASCONF chunks
	 * to help us find the association.
	 */
	ch = (sctp_chunkhdr_t *) skb->data;
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch (ch->type) {
		case SCTP_CID_AUTH:
			have_auth = chunk_num;
			break;

		case SCTP_CID_COOKIE_ECHO:
			/* If a packet arrives containing an AUTH chunk as
			 * a first chunk, a COOKIE-ECHO chunk as the second
			 * chunk, and possibly more chunks after them, and
			 * the receiver does not have an STCB for that
			 * packet, then authentication is based on
			 * the contents of the COOKIE-ECHO chunk.
			 */
			if (have_auth == 1 && chunk_num == 2)
				return NULL;
			break;

		case SCTP_CID_ASCONF:
			if (have_auth || sctp_addip_noauth)
				asoc = __sctp_rcv_asconf_lookup(ch, laddr,
							sctp_hdr(skb)->source,
							transportp);
		default:
			break;
		}

		if (asoc)
			break;

		ch = (sctp_chunkhdr_t *) ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}

/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association.  Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunks.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	sctp_chunkhdr_t *ch;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* The code below will attempt to walk the chunk and extract
	 * parameter information.  Before we do that, we need to verify
	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
	 * walk off the end.
	 */
	if (WORD_ROUND(ntohs(ch->length)) > skb->len)
		return NULL;

	/* If this is INIT/INIT-ACK look inside the chunk too. */
	switch (ch->type) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
		return __sctp_rcv_init_lookup(skb, laddr, transportp);

	default:
		return __sctp_rcv_walk_lookup(skb, laddr, transportp);
	}
}

/* Lookup an association for an inbound skb. */
static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(laddr, paddr, transportp);

	/* Further lookup for INIT/INIT-ACK packets.
	 * SCTP Implementor's Guide, 2.18 Handling of address
	 * parameters within the INIT or INIT-ACK.
	 */
	if (!asoc)
		asoc = __sctp_rcv_lookup_harder(skb, laddr, transportp);

	return asoc;
}