GitHub Repository: torvalds/linux
Path: blob/master/net/ax25/ax25_out.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS ([email protected])
 * Copyright (C) Jonathan Naylor G4KLX ([email protected])
 * Copyright (C) Joerg Reuter DL1BKE ([email protected])
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

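/*
 * ax25_frag_lock serializes the alloc_skb()/skb_set_owner_w() pair
 * used when ax25_output() carves an oversized frame into fragments.
 */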
static DEFINE_SPINLOCK(ax25_frag_lock);

ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		rcu_read_lock();
		ax25_dev = ax25_dev_ax25dev(dev);
		if (!ax25_dev) {
			rcu_read_unlock();
			return NULL;
		}
		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
		rcu_read_unlock();
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	rcu_read_lock();
	ax25_dev = ax25_dev_ax25dev(dev);
	if (!ax25_dev) {
		rcu_read_unlock();
		return NULL;
	}

	if ((ax25 = ax25_create_cb()) == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	ax25_fillin_cb(ax25, ax25_dev);
	rcu_read_unlock();

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);
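
/*
 * Sketch of a hypothetical caller, following the reference rule in
 * the comment above: the ax25_cb returned by ax25_send_frame()
 * carries a reference that the caller must drop with ax25_cb_put().
 * On a NULL return the skb has been neither queued nor freed, so
 * the caller still owns it.
 *
 *	ax25_cb *ax25 = ax25_send_frame(skb, 0, &src, &dest, digi, dev);
 *	if (ax25 == NULL) {
 *		kfree_skb(skb);
 *		return -ENETUNREACH;	(error code is illustrative)
 *	}
 *	...
 *	ax25_cb_put(ax25);
 */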

/*
 *	All outgoing AX.25 I frames pass via this routine. Therefore this is
 *	where the fragmentation of frames takes place. Frames whose payload
 *	exceeds paclen are fragmented; a paclen below 16 is rejected with a
 *	warning and the frame is dropped.
 */
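/*
 * Worked example of the KA9Q segmentation below (numbers invented
 * for illustration): for a 603 byte non-AX25_P_TEXT skb with
 * paclen 256, two bytes per fragment are reserved for the
 * AX25_P_SEGMENT PID and the fragment counter, leaving paclen 254.
 * fragno = 603 / 254 = 2 (the remainder is non-zero, so no
 * decrement), and three fragments go out carrying counters 2, 1
 * and 0, AX25_SEG_FIRST being set on the first.
 */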
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1);	/* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;		/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		/*
		 * A DAMA slave is _required_ to work as normal AX.25L2V2
		 * if no DAMA master is available.
		 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
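/*
 * For reference, the I-frame control field built below follows
 * standard AX.25: with modulus 8 it is a single octet,
 *
 *	bit 0		0	(I frame)
 *	bits 1-3	N(S)	(ax25->vs)
 *	bit 4		P/F	(AX25_PF)
 *	bits 5-7	N(R)	(ax25->vr)
 *
 * and with modulus 128 it is two octets: N(S) in bits 1-7 of the
 * first, N(R) in bits 1-7 of the second, and the P/F bit
 * (AX25_EPF) in bit 0 of the second octet.
 */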
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;
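
	/*
	 * Illustrative window arithmetic (values invented): with
	 * modulus 8, window 4, va = 6 and an empty ack_queue,
	 * start = 6 and end = (6 + 4) % 8 = 2, so I frames 6, 7, 0
	 * and 1 can be sent before the window closes.
	 */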

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}
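/*
 * Prepend the AX.25 address field (source, destination and any
 * digipeater path) to an outgoing frame and hand it on to
 * ax25_queue_xmit().  If the driver left too little headroom for
 * the address field, the skb is grown first.
 */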
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (unlikely(skb_headroom(skb) < headroom)) {
		skb = skb_expand_head(skb, headroom);
		if (!skb) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			return;
		}
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim around dev_queue_xmit() that adds the KISS control
 * byte and applies any packet forwarding that is in effect.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	rcu_read_lock();
	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));
	rcu_read_unlock();

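	/*
	 * The byte prepended below is the KISS command field: 0x00
	 * selects "data frame" on TNC port 0.
	 */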
	ptr = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}
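/*
 * nr is the N(R) just received from the peer.  If it acknowledges
 * every outstanding I frame (nr == vs), stop T1 and restart T3; if
 * it acknowledges at least one new frame (nr != va), restart T1.
 * Returns 1 if any frame was acked, 0 otherwise.
 */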
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}