GitHub Repository: torvalds/linux
Path: blob/master/net/can/gw.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2019 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <[email protected]>");
MODULE_ALIAS(CAN_GW_NAME);

#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");

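/* Usage sketch (illustrative): max_hops is a read-only (0444) module
 * parameter, so it can only be chosen at load time, e.g.
 * "modprobe can-gw max_hops=2". cgw_module_init() below clamps the given
 * value to the CGW_MIN_HOPS .. CGW_MAX_HOPS range.
 */
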
static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;

/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct canfd_frame and;
		struct canfd_frame or;
		struct canfd_frame xor;
		struct canfd_frame set;
	} modframe;
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		void (*xor)(struct canfd_frame *cf,
			    struct cgw_csum_xor *xor);
		void (*crc8)(struct canfd_frame *cf,
			     struct cgw_csum_crc8 *crc8);
	} csumfunc;
	u32 uid;
};

/* So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;
	int src_idx;
	int dst_idx;
};

/* list entry for CAN gateway jobs */
struct cgw_job {
	struct hlist_node list;
	struct rcu_head rcu;
	u32 handled_frames;
	u32 dropped_frames;
	u32 deleted_frames;
	struct cf_mod __rcu *cf_mod;
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
		/* tbc */
	};
	u8 gwtype;
	u8 limit_hops;
	u16 flags;
};

/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
					   struct cf_mod *mod) { op ; }

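/* For illustration, the first invocation below,
 * MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id), expands to:
 *
 *   static void mod_and_id(struct canfd_frame *cf, struct cf_mod *mod)
 *   { cf->can_id &= mod->modframe.and.can_id ; }
 */
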
MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)

static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
}

static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
}

static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
}

static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}

/* retrieve valid CC DLC value and store it into 'len' */
static void mod_retrieve_ccdlc(struct canfd_frame *cf)
{
	struct can_frame *ccf = (struct can_frame *)cf;

	/* len8_dlc is only valid if len == CAN_MAX_DLEN */
	if (ccf->len != CAN_MAX_DLEN)
		return;

	/* do we have a valid len8_dlc value from 9 .. 15 ? */
	if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC)
		ccf->len = ccf->len8_dlc;
}

/* convert valid CC DLC value in 'len' into struct can_frame elements */
static void mod_store_ccdlc(struct canfd_frame *cf)
{
	struct can_frame *ccf = (struct can_frame *)cf;

	/* clear potential leftovers */
	ccf->len8_dlc = 0;

	/* plain data length 0 .. 8 - that was easy */
	if (ccf->len <= CAN_MAX_DLEN)
		return;

	/* potentially broken values are caught in can_can_gw_rcv() */
	if (ccf->len > CAN_MAX_RAW_DLC)
		return;

	/* we have a valid dlc value from 9 .. 15 in ccf->len */
	ccf->len8_dlc = ccf->len;
	ccf->len = CAN_MAX_DLEN;
}

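/* Worked example (illustrative): for a Classical CAN frame received with
 * len = 8 and len8_dlc = 12, mod_retrieve_ccdlc() temporarily puts 12 into
 * 'len', the len modification below operates on that raw DLC value, and
 * mod_store_ccdlc() maps a resulting value of 9 .. 15 back to len = 8 with
 * the raw DLC kept in len8_dlc.
 */
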
static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_and_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_or_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_xor_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_set_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied into the 3 byte hole of the struct. This is needed
	 * to allow simple compares of the data in struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->len = src->len;
	*(u64 *)dst->data = *(u64 *)src->data;
}

static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied into the 2 byte hole of the struct. This is needed
	 * to allow simple compares of the data in struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->flags = src->flags;
	dst->len = src->len;
	memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}

static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
	s8 dlen = CAN_MAX_DLEN;

	if (r->flags & CGW_FLAGS_CAN_FD)
		dlen = CANFD_MAX_DLEN;

	/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data[0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */

	if (fr >= -dlen && fr < dlen &&
	    to >= -dlen && to < dlen &&
	    re >= -dlen && re < dlen)
		return 0;
	else
		return -EINVAL;
}

static inline int calc_idx(int idx, int rx_len)
{
	if (idx < 0)
		return rx_len + idx;
	else
		return idx;
}

static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->len);
	int to = calc_idx(xor->to_idx, cf->len);
	int res = calc_idx(xor->result_idx, cf->len);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}

static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i <= xor->to_idx; i++)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

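/* Worked example (illustrative): with from_idx = 0, to_idx = 3,
 * result_idx = 4 and init_xor_val = 0, cgw_csum_xor_pos() stores
 * data[0] ^ data[1] ^ data[2] ^ data[3] into data[4].
 */
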
static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i >= xor->to_idx; i--)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

static void cgw_csum_crc8_rel(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->len);
	int to = calc_idx(crc8->to_idx, cf->len);
	int res = calc_idx(crc8->result_idx, cf->len);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = crc8->from_idx; i <= crc8->to_idx; i++)
			crc = crc8->crctab[crc ^ cf->data[i]];
	} else {
		for (i = crc8->from_idx; i >= crc8->to_idx; i--)
			crc = crc8->crctab[crc ^ cf->data[i]];
	}

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_pos(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_neg(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct canfd_frame *cf;
	struct sk_buff *nskb;
	struct cf_mod *mod;
	int modidx = 0;

	/* process strictly Classic CAN or CAN FD frames */
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never hit this limit, which is intended as a
	 * protection against misconfiguration (e.g. circular CAN routes).
	 *
	 * The Controller Area Network controllers only accept CAN frames with
	 * correct CRCs - which are not visible in the controller registers.
	 * According to skbuff.h documentation the csum_start element for IP
	 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
	 * Only CAN skbs can be processed here which already have this property.
	 */

#define cgw_hops(skb) ((skb)->csum_start)

	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);

	if (cgw_hops(skb) >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
		return;

	/* clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	mod = rcu_dereference(gwj->cf_mod);
	if (mod->modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	cgw_hops(nskb) = cgw_hops(skb) + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && cgw_hops(nskb) == 1)
		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct canfd_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
		(*mod->modfunc[modidx++])(cf, mod);

	/* Has the CAN frame been modified? */
	if (modidx) {
		/* get available space for the processed CAN frame type */
		int max_len = nskb->len - offsetof(struct canfd_frame, data);

		/* dlc may have changed, make sure it fits to the CAN frame */
		if (cf->len > max_len) {
			/* delete frame due to misconfiguration */
			gwj->deleted_frames++;
			kfree_skb(nskb);
			return;
		}

		/* check for checksum updates */
		if (mod->csumfunc.crc8)
			(*mod->csumfunc.crc8)(cf, &mod->csum.crc8);

		if (mod->csumfunc.xor)
			(*mod->csumfunc.xor)(cf, &mod->csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}

static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
	return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw", NULL);
}

static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
	can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}

static void cgw_job_free_rcu(struct rcu_head *rcu_head)
{
	struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);

	/* cgw_job::cf_mod is always accessed from the same cgw_job object within
	 * the same RCU read section. Once cgw_job is scheduled for removal,
	 * cf_mod can also be removed without mandating an additional grace period.
	 */
	kfree(rcu_access_pointer(gwj->cf_mod));
	kmem_cache_free(cgw_cache, gwj);
}

/* Return cgw_job::cf_mod with RTNL protected section */
static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
{
	return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
}

static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {
		struct cgw_job *gwj = NULL;
		struct hlist_node *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(net, gwj);
				call_rcu(&gwj->rcu, cgw_job_free_rcu);
			}
		}
	}

	return NOTIFY_DONE;
}

static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;
	struct cf_mod *mod;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	mod = cgw_job_cf_mod(gwj);
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (mod->modtype.and) {
			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
			mb.modtype = mod->modtype.and;
			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.or) {
			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
			mb.modtype = mod->modtype.or;
			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.xor) {
			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
			mb.modtype = mod->modtype.xor;
			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.set) {
			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
			mb.modtype = mod->modtype.set;
			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	} else {
		struct cgw_frame_mod mb;

		if (mod->modtype.and) {
			memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
			mb.modtype = mod->modtype.and;
			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.or) {
			memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
			mb.modtype = mod->modtype.or;
			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.xor) {
			memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
			mb.modtype = mod->modtype.xor;
			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (mod->modtype.set) {
			memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
			mb.modtype = mod->modtype.set;
			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	}

	if (mod->uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
			goto cancel;
	}

	if (mod->csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &mod->csum.crc8) < 0)
			goto cancel;
	}

	if (mod->csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &mod->csum.xor) < 0)
			goto cancel;
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	int idx = 0;
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}

static const struct nla_policy cgw_policy[CGW_MAX + 1] = {
	[CGW_MOD_AND]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR]	= { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8]	= { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF]	= { .type = NLA_U32 },
	[CGW_DST_IF]	= { .type = NLA_U32 },
	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
	[CGW_LIM_HOPS]	= { .type = NLA_U8 },
	[CGW_MOD_UID]	= { .type = NLA_U32 },
	[CGW_FDMOD_AND]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_OR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_XOR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_SET]	= { .len = sizeof(struct cgw_fdframe_mod) },
};

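/* Netlink usage sketch (illustrative, derived from the attribute handling
 * below): a CAN -> CAN gateway job is created with an RTM_NEWROUTE message
 * sent on a NETLINK_ROUTE socket. The payload starts with struct rtcanmsg
 * (can_family = AF_CAN, gwtype = CGW_TYPE_CAN_CAN) and must carry at least
 * the CGW_SRC_IF and CGW_DST_IF u32 interface indices. Optional attributes
 * such as CGW_FILTER, CGW_MOD_AND/OR/XOR/SET, CGW_CS_XOR, CGW_CS_CRC8,
 * CGW_LIM_HOPS and CGW_MOD_UID refine the job as parsed in cgw_parse_attr().
 * The cangw tool from the can-utils package is the usual command line
 * front end for these operations.
 */
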
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX + 1];
	struct rtcanmsg *r = nlmsg_data(nlh);
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
				     CGW_MAX, cgw_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */
	if (r->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (tb[CGW_FDMOD_AND]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_and_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_fddata;
		}

		if (tb[CGW_FDMOD_OR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_or_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_fddata;
		}

		if (tb[CGW_FDMOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_xor_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_fddata;
		}

		if (tb[CGW_FDMOD_SET]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_set_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_fddata;
		}
	} else {
		struct cgw_frame_mod mb;

		if (tb[CGW_MOD_AND]) {
			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_and_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_data;
		}

		if (tb[CGW_MOD_OR]) {
			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_or_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_data;
		}

		if (tb[CGW_MOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_xor_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_data;
		}

		if (tb[CGW_MOD_SET]) {
			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_set_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_data;
		}
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID])
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {
		/* check CGW_TYPE_CAN_CAN specific attributes */
		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;

		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}

static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod *mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	mod = kmalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return -ENOMEM;

	err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		goto out_free_cf;

	if (mod->uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			struct cf_mod *old_cf;

			old_cf = cgw_job_cf_mod(gwj);
			if (old_cf->uid != mod->uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
				err = -EINVAL;
				goto out_free_cf;
			}

			rcu_assign_pointer(gwj->cf_mod, mod);
			kfree_rcu_mightsleep(old_cf);
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx) {
		err = -ENODEV;
		goto out_free_cf;
	}

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj) {
		err = -ENOMEM;
		goto out_free_cf;
	}

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	RCU_INIT_POINTER(gwj->cf_mod, mod);
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);

	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);

	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	/* is sending the skb back to the incoming interface intended? */
	if (gwj->src.dev == gwj->dst.dev &&
	    !(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK)) {
		err = -EINVAL;
		goto out;
	}

	ASSERT_RTNL();

	err = cgw_register_filter(net, gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
	if (err) {
		kmem_cache_free(cgw_cache, gwj);
out_free_cf:
		kfree(mod);
	}
	return err;
}

static void cgw_remove_all_jobs(struct net *net)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		call_rcu(&gwj->rcu, cgw_job_free_rcu);
	}
}

static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs(net);
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		struct cf_mod *cf_mod;

		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		cf_mod = cgw_job_cf_mod(gwj);
		/* we have a match when uid is enabled and identical */
		if (cf_mod->uid || mod.uid) {
			if (cf_mod->uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(cf_mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		call_rcu(&gwj->rcu, cgw_job_free_rcu);
		err = 0;
		break;
	}

	return err;
}

static int __net_init cangw_pernet_init(struct net *net)
{
	INIT_HLIST_HEAD(&net->can.cgw_list);
	return 0;
}

static void __net_exit cangw_pernet_exit_batch(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		cgw_remove_all_jobs(net);
	rtnl_unlock();
}

static struct pernet_operations cangw_pernet_ops = {
	.init = cangw_pernet_init,
	.exit_batch = cangw_pernet_exit_batch,
};

static const struct rtnl_msg_handler cgw_rtnl_msg_handlers[] __initconst_or_module = {
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_NEWROUTE,
	 .doit = cgw_create_job},
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_DELROUTE,
	 .doit = cgw_remove_job},
	{.owner = THIS_MODULE, .protocol = PF_CAN, .msgtype = RTM_GETROUTE,
	 .dumpit = cgw_dump_jobs},
};

static __init int cgw_module_init(void)
{
	int ret;

	/* sanitize given module parameter */
	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);

	pr_info("can: netlink gateway - max_hops=%d\n", max_hops);

	ret = register_pernet_subsys(&cangw_pernet_ops);
	if (ret)
		return ret;

	ret = -ENOMEM;
	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);
	if (!cgw_cache)
		goto out_cache_create;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	ret = register_netdevice_notifier(&notifier);
	if (ret)
		goto out_register_notifier;

	ret = rtnl_register_many(cgw_rtnl_msg_handlers);
	if (ret)
		goto out_rtnl_register;

	return 0;

out_rtnl_register:
	unregister_netdevice_notifier(&notifier);
out_register_notifier:
	kmem_cache_destroy(cgw_cache);
out_cache_create:
	unregister_pernet_subsys(&cangw_pernet_ops);

	return ret;
}

static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	unregister_pernet_subsys(&cangw_pernet_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}

module_init(cgw_module_init);
module_exit(cgw_module_exit);