/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <[email protected]>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <[email protected]>
 *    Karl Knutson          <[email protected]>
 *    Perry Melange         <[email protected]>
 *    Xingang Guo           <[email protected]>
 *    Hui Huang             <[email protected]>
 *    Sridhar Samudrala     <[email protected]>
 *    Jon Grimm             <[email protected]>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
					struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}
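
/* Note: the queue helpers above and below adjust q->out_qlen on every
 * insert and dequeue, so it always reflects the total skb length of the
 * DATA chunks currently sitting on out_chunk_list.
 */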

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
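
/* Illustrative example (numbers made up): suppose a changeover is in
 * progress (CHANGEOVER_ACTIVE set, CYCLING_CHANGEOVER clear) and the
 * current SACK saw new acks on two destinations, so count_of_newacks
 * is 2.  For a TSN that was last sent to a transport other than the
 * current primary, step 3.1-D applies and sctp_cacc_skip() returns 1,
 * i.e. the missing report for that TSN is not incremented this round.
 */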

/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->fast_rtx = 0;
	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork  = 0;

	q->malloced = 0;
	q->out_qlen = 0;
}

/* Free any chunks pending on the outqueue; the structure itself is
 * not freed here.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	q->error = 0;

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}
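
/* In practice the outqueue is embedded in struct sctp_association rather
 * than allocated on its own, so q->malloced stays 0 and sctp_outq_free()
 * above only discards pending chunks via sctp_outq_teardown().
 */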

/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}

/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
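
/* Illustrative example for sctp_insert_list() above: if the retransmit
 * list already holds TSNs 103, 104 and 107, inserting a chunk with TSN
 * 105 stops at 107 (the first TSN greater than 105) and links the new
 * entry in front of it, keeping the list in ascending TSN order.
 */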

/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted.
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
							sizeof(struct sk_buff));
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX  &&
			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
						sizeof(struct sk_buff));
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);

}

/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of the window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
					&transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast-retransmit
		 * chunks.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of the Nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
					&transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}

/* Uncork the outqueue: clear the cork flag and flush any chunks that were
 * queued up while the queue was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;
	if (q->cork)
		q->cork = 0;
	error = sctp_outq_flush(q, 0);
	return error;
}
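
/* Typical usage: a caller that wants several chunks bundled together sets
 * q->cork, queues them with sctp_outq_tail() (which then skips its implicit
 * flush) and finally calls sctp_outq_uncork() so that everything goes out
 * in as few packets as possible.
 */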

/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts, so appropriate
 * locking must be in place.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */

			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);

		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED)))
				new_transport = asoc->peer.active_path;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					"%p skb->users %d.\n",
					ntohl(chunk->subh.data_hdr->tsn),
					chunk->skb ? chunk->skb->head : NULL,
					chunk->skb ?
					atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;
				break;

			case SCTP_XMIT_OK:
				/* If the sender is in the SHUTDOWN-PENDING
				 * state, it MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing.  */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}

/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
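
/* Worked example for sctp_sack_update_unack_data() above (illustrative
 * numbers only): with next_tsn = 110 and ctsn_ack_point = 100 there are
 * 110 - 100 - 1 = 9 chunks not covered by the cumulative ack.  One Gap
 * Ack Block with start = 2 and end = 4 accounts for 4 - 2 + 1 = 3 of
 * them, leaving unack_data at 6.
 */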

/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __func__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}
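
/* q->empty is maintained by sctp_outq_tail(), the flush routines and
 * sctp_outq_sack(); the sctp_outq_sack() return value above hands it
 * back to the caller, which can use it, for example, to notice that no
 * TSNs remain outstanding and that a pending SHUTDOWN may now proceed.
 */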

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	/* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	/* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __func__,
						  tsn);
				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
	}
#endif /* SCTP_DEBUG */
	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE) ||
			    (transport->state == SCTP_UNCONFIRMED)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __func__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}

/* Mark chunks as missing; as a consequence they may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2  M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__func__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}
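
/* Worked example for the Gap Ack Block check in sctp_acked() below
 * (illustrative numbers only): with a Cumulative TSN Ack of 100 and a
 * block carrying start = 2, end = 5, the block covers TSNs 102-105.
 * For tsn = 103 the offset is gap = 103 - 100 = 3, and since
 * 2 <= 3 <= 5 the TSN is treated as acked.
 */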

/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}

static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
					transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}
}