GitHub Repository: torvalds/linux
Path: blob/master/net/ipv6/ila/ila_xlat.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

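/* A single translation entry. Entries that share a locator_match key
 * occupy one rhashtable slot and are chained through the RCU-protected
 * 'next' pointer.
 */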
struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

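/* Modifications to the mapping lists are serialized by a pool of bucket
 * spinlocks, selected by hashing the locator being changed.
 */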
static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

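/* Ordering score for entries sharing a locator_match; entries with an
 * ifindex rank above wildcard entries so lists stay sorted from most to
 * least specific.
 */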
static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* identifier */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};

static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	xp->ip.csum_mode = nla_get_u8_default(info->attrs[ILA_ATTR_CSUM_MODE],
					      ILA_CSUM_NO_ACTION);

	xp->ip.ident_type = nla_get_u8_default(info->attrs[ILA_ATTR_IDENT_TYPE],
					       ILA_ATYPE_USE_FORMAT);

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assume rcu_readlock held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};

static DEFINE_MUTEX(ila_mutex);

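/* Add a mapping. Netfilter hooks for the namespace are registered lazily
 * when the first mapping is added. If an entry with the same locator_match
 * already exists, the new entry is spliced into that entry's list in order
 * of decreasing specificity; a duplicate with the same parameters is
 * rejected with -EEXIST.
 */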
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!READ_ONCE(ilan->xlat.hooks_registered)) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		mutex_lock(&ila_mutex);
		if (!ilan->xlat.hooks_registered) {
			err = nf_register_net_hooks(net, ila_nf_hook_ops,
						    ARRAY_SIZE(ila_nf_hook_ops));
			if (!err)
				WRITE_ONCE(ilan->xlat.hooks_registered, true);
		}
		mutex_unlock(&ila_mutex);
		if (err)
			return err;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
							 lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}

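/* Delete the mapping matching xp. If the removed entry was the list head,
 * the next entry in its sublist (if any) is promoted into the rhashtable
 * in its place.
 */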
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
					&ilan->xlat.rhash_table,
					&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

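/* Remove every mapping in the table, taking the per-bucket lock for each
 * entry before unlinking and freeing its list.
 */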
int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret = 0;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}

static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ret = -ESRCH;
	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};

int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return 0;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}

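/* Netlink dump callback. iter->skip counts how many entries of the current
 * list have already been emitted, so that a resumed dump continues where
 * the previous pass stopped.
 */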
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* Table has changed and iter has reset. Return
				 * -EAGAIN to the application even if we have
				 * written data to the skb. The application
				 * needs to deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}

int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params);
	if (err) {
		free_bucket_spinlocks(ilan->xlat.locks);
		return err;
	}

	return 0;
}

void ila_xlat_pre_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes skb contains a valid IPv6 header that is pulled */

	/* No check here that ILA type in the mapping matches what is in the
	 * address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode however is relevant.
	 */

	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}