// SPDX-License-Identifier: GPL-2.0
#include <linux/jhash.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/vmalloc.h>
#include <net/genetlink.h>
#include <net/ila.h>
#include <net/netns/generic.h>
#include <uapi/linux/genetlink.h>
#include "ila.h"

struct ila_xlat_params {
	struct ila_params ip;
	int ifindex;
};

struct ila_map {
	struct ila_xlat_params xp;
	struct rhash_head node;
	struct ila_map __rcu *next;
	struct rcu_head rcu;
};

#define MAX_LOCKS 1024
#define LOCKS_PER_CPU 10

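/* Mappings are protected by an array of bucket spinlocks rather than a
 * single lock. The bucket for a given mapping is chosen by hashing its
 * match locator (see ila_get_lock() below), so additions and deletions
 * on unrelated locators do not contend.
 */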
static int alloc_ila_locks(struct ila_net *ilan)
{
	return alloc_bucket_spinlocks(&ilan->xlat.locks, &ilan->xlat.locks_mask,
				      MAX_LOCKS, LOCKS_PER_CPU,
				      GFP_KERNEL);
}

static u32 hashrnd __read_mostly;
static __always_inline void __ila_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static inline u32 ila_locator_hash(struct ila_locator loc)
{
	u32 *v = (u32 *)loc.v32;

	__ila_hash_secret_init();
	return jhash_2words(v[0], v[1], hashrnd);
}

static inline spinlock_t *ila_get_lock(struct ila_net *ilan,
				       struct ila_locator loc)
{
	return &ilan->xlat.locks[ila_locator_hash(loc) & ilan->xlat.locks_mask];
}

static inline int ila_cmp_wildcards(struct ila_map *ila,
				    struct ila_addr *iaddr, int ifindex)
{
	return (ila->xp.ifindex && ila->xp.ifindex != ifindex);
}

static inline int ila_cmp_params(struct ila_map *ila,
				 struct ila_xlat_params *xp)
{
	return (ila->xp.ifindex != xp->ifindex);
}

static int ila_cmpfn(struct rhashtable_compare_arg *arg,
		     const void *obj)
{
	const struct ila_map *ila = obj;

	return (ila->xp.ip.locator_match.v64 != *(__be64 *)arg->key);
}

static inline int ila_order(struct ila_map *ila)
{
	int score = 0;

	if (ila->xp.ifindex)
		score += 1 << 1;

	return score;
}

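/* The rhashtable is keyed only by the 64-bit match locator. Mappings that
 * share a locator but differ in other parameters (e.g. ifindex) hang off
 * the head entry via the ila_map ->next chain and are disambiguated by
 * ila_cmp_wildcards()/ila_cmp_params().
 */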
static const struct rhashtable_params rht_params = {
	.nelem_hint = 1024,
	.head_offset = offsetof(struct ila_map, node),
	.key_offset = offsetof(struct ila_map, xp.ip.locator_match),
	.key_len = sizeof(u64), /* identifier */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
	.obj_cmpfn = ila_cmpfn,
};

static int parse_nl_config(struct genl_info *info,
			   struct ila_xlat_params *xp)
{
	memset(xp, 0, sizeof(*xp));

	if (info->attrs[ILA_ATTR_LOCATOR])
		xp->ip.locator.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR]);

	if (info->attrs[ILA_ATTR_LOCATOR_MATCH])
		xp->ip.locator_match.v64 = (__force __be64)nla_get_u64(
			info->attrs[ILA_ATTR_LOCATOR_MATCH]);

	if (info->attrs[ILA_ATTR_CSUM_MODE])
		xp->ip.csum_mode = nla_get_u8(info->attrs[ILA_ATTR_CSUM_MODE]);
	else
		xp->ip.csum_mode = ILA_CSUM_NO_ACTION;

	if (info->attrs[ILA_ATTR_IDENT_TYPE])
		xp->ip.ident_type = nla_get_u8(
			info->attrs[ILA_ATTR_IDENT_TYPE]);
	else
		xp->ip.ident_type = ILA_ATYPE_USE_FORMAT;

	if (info->attrs[ILA_ATTR_IFINDEX])
		xp->ifindex = nla_get_s32(info->attrs[ILA_ATTR_IFINDEX]);

	return 0;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_wildcards(struct ila_addr *iaddr,
						   int ifindex,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table, &iaddr->loc,
				     rht_params);
	while (ila) {
		if (!ila_cmp_wildcards(ila, iaddr, ifindex))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

/* Must be called with rcu readlock */
static inline struct ila_map *ila_lookup_by_params(struct ila_xlat_params *xp,
						   struct ila_net *ilan)
{
	struct ila_map *ila;

	ila = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				     &xp->ip.locator_match,
				     rht_params);
	while (ila) {
		if (!ila_cmp_params(ila, xp))
			return ila;
		ila = rcu_access_pointer(ila->next);
	}

	return NULL;
}

static inline void ila_release(struct ila_map *ila)
{
	kfree_rcu(ila, rcu);
}

static void ila_free_node(struct ila_map *ila)
{
	struct ila_map *next;

	/* Assume rcu_readlock held */
	while (ila) {
		next = rcu_access_pointer(ila->next);
		ila_release(ila);
		ila = next;
	}
}

static void ila_free_cb(void *ptr, void *arg)
{
	ila_free_node((struct ila_map *)ptr);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila);

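/* Netfilter hook at IPv6 PRE_ROUTING: rewrite the destination locator of
 * incoming packets according to the configured mappings (sir2ila is false
 * on this path; see ila_xlat_addr() below).
 */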
static unsigned int
ila_nf_input(void *priv,
	     struct sk_buff *skb,
	     const struct nf_hook_state *state)
{
	ila_xlat_addr(skb, false);
	return NF_ACCEPT;
}

static const struct nf_hook_ops ila_nf_hook_ops[] = {
	{
		.hook = ila_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = -1,
	},
};

static DEFINE_MUTEX(ila_mutex);

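/* Add one mapping. Entries with the same match locator are kept on a
 * sublist headed by the rhashtable entry, ordered so that higher
 * ila_order() scores (more specific matches, i.e. with an ifindex) come
 * first, so lookups hit the most specific entry first. The netfilter
 * hooks for the namespace are registered lazily when the first mapping
 * is installed.
 */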
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = 0, order;

	if (!READ_ONCE(ilan->xlat.hooks_registered)) {
		/* We defer registering net hooks in the namespace until the
		 * first mapping is added.
		 */
		mutex_lock(&ila_mutex);
		if (!ilan->xlat.hooks_registered) {
			err = nf_register_net_hooks(net, ila_nf_hook_ops,
						    ARRAY_SIZE(ila_nf_hook_ops));
			if (!err)
				WRITE_ONCE(ilan->xlat.hooks_registered, true);
		}
		mutex_unlock(&ila_mutex);
		if (err)
			return err;
	}

	ila = kzalloc(sizeof(*ila), GFP_KERNEL);
	if (!ila)
		return -ENOMEM;

	ila_init_saved_csum(&xp->ip);

	ila->xp = *xp;

	order = ila_order(ila);

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match,
				      rht_params);
	if (!head) {
		/* New entry for the rhash_table */
		err = rhashtable_lookup_insert_fast(&ilan->xlat.rhash_table,
						    &ila->node, rht_params);
	} else {
		struct ila_map *tila = head, *prev = NULL;

		do {
			if (!ila_cmp_params(tila, xp)) {
				err = -EEXIST;
				goto out;
			}

			if (order > ila_order(tila))
				break;

			prev = tila;
			tila = rcu_dereference_protected(tila->next,
							 lockdep_is_held(lock));
		} while (tila);

		if (prev) {
			/* Insert in sub list of head */
			RCU_INIT_POINTER(ila->next, tila);
			rcu_assign_pointer(prev->next, ila);
		} else {
			/* Make this ila new head */
			RCU_INIT_POINTER(ila->next, head);
			err = rhashtable_replace_fast(&ilan->xlat.rhash_table,
						      &head->node,
						      &ila->node, rht_params);
			if (err)
				goto out;
		}
	}

out:
	spin_unlock(lock);

	if (err)
		kfree(ila);

	return err;
}

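/* Delete the mapping that exactly matches xp. If the entry being removed
 * is the sublist head, the next entry (if any) is promoted into the
 * rhashtable in its place; otherwise it is simply unlinked from the
 * sublist. Returns -ENOENT if no matching entry exists.
 */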
static int ila_del_mapping(struct net *net, struct ila_xlat_params *xp)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_map *ila, *head, *prev;
	spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
	int err = -ENOENT;

	spin_lock(lock);

	head = rhashtable_lookup_fast(&ilan->xlat.rhash_table,
				      &xp->ip.locator_match, rht_params);
	ila = head;

	prev = NULL;

	while (ila) {
		if (ila_cmp_params(ila, xp)) {
			prev = ila;
			ila = rcu_dereference_protected(ila->next,
							lockdep_is_held(lock));
			continue;
		}

		err = 0;

		if (prev) {
			/* Not head, just delete from list */
			rcu_assign_pointer(prev->next, ila->next);
		} else {
			/* It is the head. If there is something in the
			 * sublist we need to make a new head.
			 */
			head = rcu_dereference_protected(ila->next,
							 lockdep_is_held(lock));
			if (head) {
				/* Put first entry in the sublist into the
				 * table
				 */
				err = rhashtable_replace_fast(
					&ilan->xlat.rhash_table, &ila->node,
					&head->node, rht_params);
				if (err)
					goto out;
			} else {
				/* Entry no longer used */
				err = rhashtable_remove_fast(
					&ilan->xlat.rhash_table,
					&ila->node, rht_params);
			}
		}

		ila_release(ila);

		break;
	}

out:
	spin_unlock(lock);

	return err;
}

int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params p;
	int err;

	err = parse_nl_config(info, &p);
	if (err)
		return err;

	return ila_add_mapping(net, &p);
}

int ila_xlat_nl_cmd_del_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_xlat_params xp;
	int err;

	err = parse_nl_config(info, &xp);
	if (err)
		return err;

	ila_del_mapping(net, &xp);

	return 0;
}

static inline spinlock_t *lock_from_ila_map(struct ila_net *ilan,
					    struct ila_map *ila)
{
	return ila_get_lock(ilan, ila->xp.ip.locator_match);
}

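/* Netlink flush handler: walk the whole table and remove every entry.
 * Each head entry is removed from the rhashtable under its bucket lock
 * and then freed together with its sublist via ila_free_node().
 */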
int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct rhashtable_iter iter;
	struct ila_map *ila;
	spinlock_t *lock;
	int ret = 0;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
	rhashtable_walk_start(&iter);

	for (;;) {
		ila = rhashtable_walk_next(&iter);

		if (IS_ERR(ila)) {
			if (PTR_ERR(ila) == -EAGAIN)
				continue;
			ret = PTR_ERR(ila);
			goto done;
		} else if (!ila) {
			break;
		}

		lock = lock_from_ila_map(ilan, ila);

		spin_lock(lock);

		ret = rhashtable_remove_fast(&ilan->xlat.rhash_table,
					     &ila->node, rht_params);
		if (!ret)
			ila_free_node(ila);

		spin_unlock(lock);

		if (ret)
			break;
	}

done:
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
	return ret;
}

static int ila_fill_info(struct ila_map *ila, struct sk_buff *msg)
{
	if (nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR,
			      (__force u64)ila->xp.ip.locator.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_u64_64bit(msg, ILA_ATTR_LOCATOR_MATCH,
			      (__force u64)ila->xp.ip.locator_match.v64,
			      ILA_ATTR_PAD) ||
	    nla_put_s32(msg, ILA_ATTR_IFINDEX, ila->xp.ifindex) ||
	    nla_put_u8(msg, ILA_ATTR_CSUM_MODE, ila->xp.ip.csum_mode) ||
	    nla_put_u8(msg, ILA_ATTR_IDENT_TYPE, ila->xp.ip.ident_type))
		return -1;

	return 0;
}

static int ila_dump_info(struct ila_map *ila,
			 u32 portid, u32 seq, u32 flags,
			 struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ila_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (ila_fill_info(ila, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct sk_buff *msg;
	struct ila_xlat_params xp;
	struct ila_map *ila;
	int ret;

	ret = parse_nl_config(info, &xp);
	if (ret)
		return ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rcu_read_lock();

	ret = -ESRCH;
	ila = ila_lookup_by_params(&xp, ilan);
	if (ila) {
		ret = ila_dump_info(ila,
				    info->snd_portid,
				    info->snd_seq, 0, msg,
				    info->genlhdr->cmd);
	}

	rcu_read_unlock();

	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

struct ila_dump_iter {
	struct rhashtable_iter rhiter;
	int skip;
};

int ila_xlat_nl_dump_start(struct netlink_callback *cb)
{
	struct net *net = sock_net(cb->skb->sk);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_dump_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);

	iter->skip = 0;
	cb->args[0] = (long)iter;

	return 0;
}

int ila_xlat_nl_dump_done(struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];

	rhashtable_walk_exit(&iter->rhiter);

	kfree(iter);

	return 0;
}

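/* Netlink dump handler. The walk may be resumed across multiple callbacks:
 * iter->skip records how many entries of the current sublist were already
 * emitted, so a resumed dump first skips that many entries before
 * continuing. A concurrent table resize makes the walker return -EAGAIN,
 * which is propagated to userspace even if data was already written to
 * the skb.
 */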
int ila_xlat_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
	struct rhashtable_iter *rhiter = &iter->rhiter;
	int skip = iter->skip;
	struct ila_map *ila;
	int ret;

	rhashtable_walk_start(rhiter);

	/* Get first entry */
	ila = rhashtable_walk_peek(rhiter);

	if (ila && !IS_ERR(ila) && skip) {
		/* Skip over visited entries */

		while (ila && skip) {
			/* Skip over any ila entries in this list that we
			 * have already dumped.
			 */
			ila = rcu_access_pointer(ila->next);
			skip--;
		}
	}

	skip = 0;

	for (;;) {
		if (IS_ERR(ila)) {
			ret = PTR_ERR(ila);
			if (ret == -EAGAIN) {
				/* Table has changed and iter has reset. Return
				 * -EAGAIN to the application even if we have
				 * written data to the skb. The application
				 * needs to deal with this.
				 */

				goto out_ret;
			} else {
				break;
			}
		} else if (!ila) {
			ret = 0;
			break;
		}

		while (ila) {
			ret = ila_dump_info(ila, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq, NLM_F_MULTI,
					    skb, ILA_CMD_GET);
			if (ret)
				goto out;

			skip++;
			ila = rcu_access_pointer(ila->next);
		}

		skip = 0;
		ila = rhashtable_walk_next(rhiter);
	}

out:
	iter->skip = skip;
	ret = (skb->len ? : ret);

out_ret:
	rhashtable_walk_stop(rhiter);
	return ret;
}

#define ILA_HASH_TABLE_SIZE 1024

int ila_xlat_init_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);
	int err;

	err = alloc_ila_locks(ilan);
	if (err)
		return err;

	rhashtable_init(&ilan->xlat.rhash_table, &rht_params);

	return 0;
}

void ila_xlat_pre_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	if (ilan->xlat.hooks_registered)
		nf_unregister_net_hooks(net, ila_nf_hook_ops,
					ARRAY_SIZE(ila_nf_hook_ops));
}

void ila_xlat_exit_net(struct net *net)
{
	struct ila_net *ilan = net_generic(net, ila_net_id);

	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);

	free_bucket_spinlocks(ilan->xlat.locks);
}

static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
{
	struct ila_map *ila;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ila_net *ilan = net_generic(net, ila_net_id);
	struct ila_addr *iaddr = ila_a2i(&ip6h->daddr);

	/* Assumes skb contains a valid IPv6 header that is pulled */

	/* No check here that the ILA type in the mapping matches what is in
	 * the address. We assume that whatever the sender gave us can be
	 * translated. The checksum mode, however, is relevant.
	 */

	rcu_read_lock();

	ila = ila_lookup_wildcards(iaddr, skb->dev->ifindex, ilan);
	if (ila)
		ila_update_ipv6_locator(skb, &ila->xp.ip, sir2ila);

	rcu_read_unlock();

	return 0;
}