GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/dccp/ackvec.c
/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol
 *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <[email protected]>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; version 2 of the License;
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>

static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
        struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

        if (av != NULL) {
                av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
                INIT_LIST_HEAD(&av->av_records);
        }
        return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
        struct dccp_ackvec_record *cur, *next;

        list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
                kmem_cache_free(dccp_ackvec_record_slab, cur);
        INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
        if (likely(av != NULL)) {
                dccp_ackvec_purge_records(av);
                kmem_cache_free(dccp_ackvec_slab, av);
        }
}

/**
 * dccp_ackvec_update_records - Record information about sent Ack Vectors
 * @av: Ack Vector records to update
 * @seqno: Sequence number of the packet carrying the Ack Vector just sent
 * @nonce_sum: The sum of all buffer nonces contained in the Ack Vector
 */
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
        struct dccp_ackvec_record *avr;

        avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
        if (avr == NULL)
                return -ENOBUFS;

        avr->avr_ack_seqno = seqno;
        avr->avr_ack_ptr = av->av_buf_head;
        avr->avr_ack_ackno = av->av_buf_ackno;
        avr->avr_ack_nonce = nonce_sum;
        avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
        /*
         * When the buffer overflows, we keep no more than one record. This is
         * the simplest way of disambiguating sender-Acks dating from before the
         * overflow from sender-Acks which refer to after the overflow; a simple
         * solution is preferable here since we are handling an exception.
         */
        if (av->av_overflow)
                dccp_ackvec_purge_records(av);
        /*
         * Since GSS is incremented for each packet, the list is automatically
         * arranged in descending order of @ack_seqno.
         */
        list_add(&avr->avr_node, &av->av_records);

        dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
                      (unsigned long long)avr->avr_ack_seqno,
                      (unsigned long long)avr->avr_ack_ackno,
                      avr->avr_ack_runlen);
        return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
                                                     const u64 ackno)
{
        struct dccp_ackvec_record *avr;
        /*
         * Exploit the fact that records are inserted in descending order of
         * sequence number; start with the oldest record first. If @ackno is
         * `before' the earliest ack_ackno, the packet is too old to be
         * considered.
         */
        list_for_each_entry_reverse(avr, av_list, avr_node) {
                if (avr->avr_ack_seqno == ackno)
                        return avr;
                if (before48(ackno, avr->avr_ack_seqno))
                        break;
        }
        return NULL;
}

/*
 * Buffer index and length computation using modulo-buffersize arithmetic.
 * Note that, as pointers move from right to left, head is `before' tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
        return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
        return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
        if (unlikely(av->av_overflow))
                return DCCPAV_MAX_ACKVEC_LEN;
        return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}
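
/*
 * Worked example of the modulo arithmetic above, with illustrative values
 * only (the real buffer size is DCCPAV_MAX_ACKVEC_LEN): assume a buffer of
 * 8 cells with av_buf_tail == 7 and av_buf_head == 5, i.e. two cells in use.
 * Then
 *
 *     __ackvec_idx_sub(7, 5) = __ackvec_idx_add(7, 8 - 5) = (7 + 3) % 8 = 2,
 *
 * so dccp_ackvec_buflen() reports 2. New entries are added by moving
 * av_buf_head further to the "left" (lower indices, wrapping modulo the
 * buffer size), which is why head is `before' tail.
 */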

/**
 * dccp_ackvec_update_old - Update previous state as per RFC 4340, 11.4.1
 * @av: non-empty buffer to update
 * @distance: negative or zero distance of @seqno from buf_ackno downward
 * @seqno: the (old) sequence number whose record is to be updated
 * @state: state in which packet carrying @seqno was received
 */
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
                                   u64 seqno, enum dccp_ackvec_states state)
{
        u16 ptr = av->av_buf_head;

        BUG_ON(distance > 0);
        if (unlikely(dccp_ackvec_is_empty(av)))
                return;

        do {
                u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

                if (distance + runlen >= 0) {
                        /*
                         * Only update the state if packet has not been
                         * received yet. This is OK as per the second table in
                         * RFC 4340, 11.4.1; i.e. here we are using the
                         * following table:
                         *
                         *                    RECEIVED
                         *                   0   1   3
                         *          S      +---+---+---+
                         *          T   0  | 0 | 0 | 0 |
                         *          O      +---+---+---+
                         *          R   1  | 1 | 1 | 1 |
                         *          E      +---+---+---+
                         *          D   3  | 0 | 1 | 3 |
                         *                 +---+---+---+
                         *
                         * The "Not Received" state was set by reserve_seats().
                         */
                        if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
                                av->av_buf[ptr] = state;
                        else
                                dccp_pr_debug("Not changing %llu state to %u\n",
                                              (unsigned long long)seqno, state);
                        break;
                }

                distance += runlen + 1;
                ptr = __ackvec_idx_add(ptr, 1);

        } while (ptr != av->av_buf_tail);
}

/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
        u16 start = __ackvec_idx_add(av->av_buf_head, 1),
            len = DCCPAV_MAX_ACKVEC_LEN - start;

        /* check for buffer wrap-around */
        if (num > len) {
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
                start = 0;
                num -= len;
        }
        if (num)
                memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}
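
/*
 * Illustration of the wrap-around handling above, again using a hypothetical
 * 8-cell buffer (the real size is DCCPAV_MAX_ACKVEC_LEN): with
 * av_buf_head == 2, reserving num == 7 seats gives start == 3 and
 * len == 8 - 3 == 5, so cells 3..7 are first marked "Not yet received";
 * start then wraps to 0 and the remaining num - len == 2 cells (0 and 1)
 * are marked as well.
 */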

/**
 * dccp_ackvec_add_new - Record one or more new entries in Ack Vector buffer
 * @av: container of buffer to update (can be empty or non-empty)
 * @num_packets: number of packets to register (must be >= 1)
 * @seqno: sequence number of the first packet in @num_packets
 * @state: state in which packet carrying @seqno was received
 */
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
                                u64 seqno, enum dccp_ackvec_states state)
{
        u32 num_cells = num_packets;

        if (num_packets > DCCPAV_BURST_THRESH) {
                u32 lost_packets = num_packets - 1;

                DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
                /*
                 * We received 1 packet and have a loss of size "num_packets-1"
                 * which we squeeze into num_cells-1 rather than reserving an
                 * entire byte for each lost packet.
                 * The reason is that the vector grows in O(burst_length); when
                 * it grows too large there will be no room left for the
                 * payload. This is a trade-off: if a few packets out of the
                 * burst show up later, their state will not be changed; it is
                 * simply too costly to reshuffle/reallocate/copy the buffer
                 * each time. Should such problems persist, we will need to
                 * switch to a different underlying data structure.
                 */
                for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
                        u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);

                        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
                        av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

                        lost_packets -= len;
                }
        }

        if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
                DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
                av->av_overflow = true;
        }

        av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
        if (av->av_overflow)
                av->av_buf_tail = av->av_buf_head;

        av->av_buf[av->av_buf_head] = state;
        av->av_buf_ackno = seqno;

        if (num_packets > 1)
                dccp_ackvec_reserve_seats(av, num_packets - 1);
}
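
/*
 * Example of the burst compression above, assuming DCCPAV_MAX_RUNLEN is the
 * 6-bit maximum run length (63) and a burst threshold smaller than the burst:
 * for num_packets == 150 (one packet received after 149 losses), the loop
 * writes three "Not Received" cells with run lengths 63, 63 and 23 for the
 * 149 lost packets, and the received packet then gets a fresh head cell of
 * its own, i.e. four cells instead of one cell per packet.
 */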

/**
 * dccp_ackvec_input - Register incoming packet in the buffer
 * @av: Ack Vector to update
 * @skb: received packet, whose sequence number is added to the buffer
 */
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
        u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
        enum dccp_ackvec_states state = DCCPAV_RECEIVED;

        if (dccp_ackvec_is_empty(av)) {
                dccp_ackvec_add_new(av, 1, seqno, state);
                av->av_tail_ackno = seqno;

        } else {
                s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
                u8 *current_head = av->av_buf + av->av_buf_head;

                if (num_packets == 1 &&
                    dccp_ackvec_state(current_head) == state &&
                    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {

                        *current_head += 1;
                        av->av_buf_ackno = seqno;

                } else if (num_packets > 0) {
                        dccp_ackvec_add_new(av, num_packets, seqno, state);
                } else {
                        dccp_ackvec_update_old(av, num_packets, seqno, state);
                }
        }
}
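
/*
 * The three cases above, in terms of the distance between the new packet's
 * seqno and the current buf_ackno: a distance of exactly 1 with a matching
 * state and spare run length just increments the head cell (in-order
 * arrival); a positive distance prepends new cells (possibly with a gap of
 * "Not Received" seats); a zero or negative distance means the packet is
 * old, and only an existing "Not Received" cell may be upgraded via
 * dccp_ackvec_update_old().
 */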

/**
 * dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
 * @av: Ack Vector whose records and buffer cells are to be cleaned up
 * @ackno: sequence number of the sent packet (carrying an Ack Vector) that
 *         the peer has just acknowledged
 *
 * This routine is called when the peer acknowledges the receipt of Ack Vectors
 * up to and including @ackno. While based on section A.3 of RFC 4340, it takes
 * additional precautions to prevent corrupted buffer state. In particular, we
 * use tail_ackno to identify outdated records; it always marks the earliest
 * packet of group (2) in 11.4.2.
 */
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
        struct dccp_ackvec_record *avr, *next;
        u8 runlen_now, eff_runlen;
        s64 delta;

        avr = dccp_ackvec_lookup(&av->av_records, ackno);
        if (avr == NULL)
                return;
        /*
         * Deal with outdated acknowledgments: this arises when e.g. there are
         * several old records and the acks from the peer come in slowly. In
         * that case we may still have records that pre-date tail_ackno.
         */
        delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
        if (delta < 0)
                goto free_records;
        /*
         * Deal with overlapping Ack Vectors: don't subtract more than the
         * number of packets between tail_ackno and ack_ackno.
         */
        eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

        runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
        /*
         * The run length of Ack Vector cells does not decrease over time. If
         * the run length is the same as at the time the Ack Vector was sent,
         * we free the ack_ptr cell. That cell cannot, however, be freed if the
         * run length has increased: in this case we need to move the tail
         * pointer backwards (towards higher indices), to its next-oldest
         * neighbour.
         */
        if (runlen_now > eff_runlen) {

                av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
                av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

                /* This move may not have cleared the overflow flag. */
                if (av->av_overflow)
                        av->av_overflow = (av->av_buf_head == av->av_buf_tail);
        } else {
                av->av_buf_tail = avr->avr_ack_ptr;
                /*
                 * We have made sure that avr points to a valid cell within the
                 * buffer. This cell is either older than head, or equals head
                 * (empty buffer): in both cases we no longer have any overflow.
                 */
                av->av_overflow = 0;
        }

        /*
         * The peer has acknowledged up to and including ack_ackno. Hence the
         * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
         */
        av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
        list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
                list_del(&avr->avr_node);
                kmem_cache_free(dccp_ackvec_record_slab, avr);
        }
}
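
/*
 * Numeric sketch of the eff_runlen clamp above, with made-up values: if the
 * record was taken when its head cell had avr_ack_runlen == 8, but only
 * delta == 5 packets lie between tail_ackno and ack_ackno, then at most
 * eff_runlen == 5 entries may be subtracted. If the cell's current run length
 * has meanwhile grown beyond that, its run length is reduced by
 * eff_runlen + 1 and the tail moves to the cell's next-oldest neighbour;
 * otherwise the whole cell is released by moving av_buf_tail onto it.
 */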

/*
 * Routines to keep track of Ack Vectors received in an skb
 */
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
        struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

        if (new == NULL)
                return -ENOBUFS;
        new->vec = vec;
        new->len = len;
        new->nonce = nonce;

        list_add_tail(&new->node, head);
        return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
        struct dccp_ackvec_parsed *cur, *next;

        list_for_each_entry_safe(cur, next, parsed_chunks, node)
                kfree(cur);
        INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);

int __init dccp_ackvec_init(void)
{
        dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
                                             sizeof(struct dccp_ackvec), 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_slab == NULL)
                goto out_err;

        dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
                                                    sizeof(struct dccp_ackvec_record),
                                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (dccp_ackvec_record_slab == NULL)
                goto out_destroy_slab;

        return 0;

out_destroy_slab:
        kmem_cache_destroy(dccp_ackvec_slab);
        dccp_ackvec_slab = NULL;
out_err:
        DCCP_CRIT("Unable to create Ack Vector slab cache");
        return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
        if (dccp_ackvec_slab != NULL) {
                kmem_cache_destroy(dccp_ackvec_slab);
                dccp_ackvec_slab = NULL;
        }
        if (dccp_ackvec_record_slab != NULL) {
                kmem_cache_destroy(dccp_ackvec_record_slab);
                dccp_ackvec_record_slab = NULL;
        }
}