GitHub Repository: torvalds/linux
Path: blob/master/net/caif/cfrfml.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>

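/*
 * Every RFM frame starts with a one-byte control field in which
 * RFM_SEGMENTATION_BIT marks "more segments follow". Segmented frames
 * also carry a six-byte segmentation head whose bytes 4-5 hold the
 * total PDU size (little-endian). RFM_HEAD_SIZE covers the control
 * byte plus this head.
 */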
#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
#define RFM_SEGMENTATION_BIT 0x01
#define RFM_HEAD_SIZE 7

static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);

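/*
 * Per-channel RFM service layer state. incomplete_frm holds the frame
 * being reassembled while more segments are expected; seghead and
 * pdu_size cache the segmentation head of the initial segment so that
 * later segments can be matched and the final length verified.
 */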
struct cfrfml {
	struct cfsrvl serv;
	struct cfpkt *incomplete_frm;
	int fragment_size;
	u8 seghead[6];
	u16 pdu_size;
	/* Protects serialized processing of packets */
	spinlock_t sync;
};

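/* Release the service layer, dropping any partially reassembled frame. */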
static void cfrfml_release(struct cflayer *layer)
{
	struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
	struct cfrfml *rfml = container_obj(&srvl->layer);

	if (rfml->incomplete_frm)
		cfpkt_destroy(rfml->incomplete_frm);

	kfree(srvl);
}

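/*
 * Allocate and initialize an RFM service layer for the given channel.
 * The fragment size used for segmentation is derived from the link MTU
 * minus the RFM header overhead, rounded down to a multiple of 16.
 */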
struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
			      int mtu_size)
{
	int tmp;
	struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);

	if (!this)
		return NULL;

	cfsrvl_init(&this->serv, channel_id, dev_info, false);
	this->serv.release = cfrfml_release;
	this->serv.layer.receive = cfrfml_receive;
	this->serv.layer.transmit = cfrfml_transmit;

	/* Round down to closest multiple of 16 */
	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
	tmp *= 16;

	this->fragment_size = tmp;
	spin_lock_init(&this->sync);
	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
		 "rfm%d", channel_id);

	return &this->serv.layer;
}

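/*
 * Append a follow-on segment to the frame under reassembly: strip its
 * six-byte segmentation head, verify it matches the head recorded from
 * the initial segment, and append the remainder. Returns the (possibly
 * reallocated) reassembled packet, or NULL with *err set on failure.
 */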
static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
				struct cfpkt *pkt, int *err)
{
	struct cfpkt *tmppkt;
	*err = -EPROTO;
	/* n-th but not last segment */

	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
		return NULL;

	/* Verify correct header */
	if (memcmp(seghead, rfml->seghead, 6) != 0)
		return NULL;

	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
			      rfml->pdu_size + RFM_HEAD_SIZE);

	/* If cfpkt_append fails, the input packets are not freed */
	*err = -ENOMEM;
	if (tmppkt == NULL)
		return NULL;

	*err = 0;
	return tmppkt;
}

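/*
 * Receive one RFM frame. The leading control byte is stripped first:
 * if its segmentation bit is set, the frame is either the initial
 * segment (start reassembly and record its segmentation head) or a
 * follow-on segment that is appended to the incomplete frame. A frame
 * without the segmentation bit either completes an ongoing reassembly,
 * after which the total length is checked against the advertised PDU
 * size, or is delivered upwards as-is. Any error drops the reassembly
 * state and signals a remote shutdown indication.
 */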
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	u8 tmp;
	bool segmented;
	int err;
	u8 seghead[6];
	struct cfrfml *rfml;
	struct cfpkt *tmppkt = NULL;

	caif_assert(layr->up != NULL);
	caif_assert(layr->receive != NULL);
	rfml = container_obj(layr);
	spin_lock(&rfml->sync);

	err = -EPROTO;
	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
		goto out;
	segmented = tmp & RFM_SEGMENTATION_BIT;

	if (segmented) {
		if (rfml->incomplete_frm == NULL) {
			/* Initial Segment */
			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
				goto out;

			rfml->pdu_size = get_unaligned_le16(rfml->seghead + 4);

			if (cfpkt_erroneous(pkt))
				goto out;
			rfml->incomplete_frm = pkt;
			pkt = NULL;
		} else {

			tmppkt = rfm_append(rfml, seghead, pkt, &err);
			if (tmppkt == NULL)
				goto out;

			if (cfpkt_erroneous(tmppkt))
				goto out;

			rfml->incomplete_frm = tmppkt;

			if (cfpkt_erroneous(tmppkt))
				goto out;
		}
		err = 0;
		goto out;
	}

	if (rfml->incomplete_frm) {

		/* Last Segment */
		tmppkt = rfm_append(rfml, seghead, pkt, &err);
		if (tmppkt == NULL)
			goto out;

		if (cfpkt_erroneous(tmppkt))
			goto out;

		rfml->incomplete_frm = NULL;
		pkt = tmppkt;
		tmppkt = NULL;

		/* Verify that length is correct */
		err = -EPROTO;
		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
			goto out;
	}

	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);

out:

	if (err != 0) {
		if (tmppkt)
			cfpkt_destroy(tmppkt);
		if (pkt)
			cfpkt_destroy(pkt);
		if (rfml->incomplete_frm)
			cfpkt_destroy(rfml->incomplete_frm);
		rfml->incomplete_frm = NULL;

		pr_info("Connection error %d triggered on RFM link\n", err);

		/* Trigger connection error upon failure. */
		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
				  rfml->serv.dev_info.id);
	}
	spin_unlock(&rfml->sync);

	if (unlikely(err == -EAGAIN))
		/* It is not possible to recover after drop of a fragment */
		err = -EIO;

	return err;
}

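/*
 * Push a single segment down to the MUX layer, stamping it with the
 * channel id, RFM header length and device info used for routing.
 */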
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);

	/* Add info for MUX-layer to route the packet out. */
	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;

	/*
	 * To optimize alignment, we add up the size of CAIF header before
	 * payload.
	 */
	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;

	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}

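/*
 * Transmit one PDU, splitting it into fragments when it exceeds the
 * configured fragment size. Each fragment carries a control byte
 * (segmentation bit set on all but the last fragment) followed by the
 * PDU's first six bytes, which the receiver uses to match the
 * fragments. On failure, pending fragments are destroyed and a remote
 * shutdown indication is signalled upwards.
 */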
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int err;
	u8 seg;
	u8 head[6];
	struct cfpkt *rearpkt = NULL;
	struct cfpkt *frontpkt = pkt;
	struct cfrfml *rfml = container_obj(layr);

	caif_assert(layr->dn != NULL);
	caif_assert(layr->dn->transmit != NULL);

	if (!cfsrvl_ready(&rfml->serv, &err))
		goto out;

	err = -EPROTO;
	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE - 1)
		goto out;

	err = 0;
	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
		err = cfpkt_peek_head(pkt, head, 6);

	if (err != 0)
		goto out;

	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {

		seg = 1;
		err = -EPROTO;

		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
			goto out;
		/*
		 * On OOM error cfpkt_split returns NULL.
		 *
		 * NOTE: Segmented pdu is not correctly aligned.
		 * This has negative performance impact.
		 */

		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
		if (rearpkt == NULL)
			goto out;

		err = cfrfml_transmit_segment(rfml, frontpkt);

		if (err != 0) {
			frontpkt = NULL;
			goto out;
		}

		frontpkt = rearpkt;
		rearpkt = NULL;

		err = -EPROTO;
		if (cfpkt_add_head(frontpkt, head, 6) < 0)
			goto out;

	}

	seg = 0;
	err = -EPROTO;

	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
		goto out;

	err = cfrfml_transmit_segment(rfml, frontpkt);

	frontpkt = NULL;
out:

	if (err != 0) {
		pr_info("Connection error %d triggered on RFM link\n", err);
		/* Trigger connection error upon failure. */

		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
				  rfml->serv.dev_info.id);

		if (rearpkt)
			cfpkt_destroy(rearpkt);

		if (frontpkt)
			cfpkt_destroy(frontpkt);
	}

	return err;
}