/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);


static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir);

static inline int
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
        const struct flowi4 *fl4 = &fl->u.ip4;

        return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
               addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
               !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
               !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
               (fl4->flowi4_proto == sel->proto || !sel->proto) &&
               (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
        const struct flowi6 *fl6 = &fl->u.ip6;

        return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
               addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
               !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
               !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
               (fl6->flowi6_proto == sel->proto || !sel->proto) &&
               (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
                        unsigned short family)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_selector_match(sel, fl);
        case AF_INET6:
                return __xfrm6_selector_match(sel, fl);
        }
        return 0;
}
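
/* Reading the helpers above: a flow matches a selector when both addresses
 * fall inside the selector prefixes, the ports survive the port masks, and
 * protocol/ifindex either match or are wildcarded (zero).  For example
 * (hypothetical values), sel->dport = htons(80) with dport_mask =
 * htons(0xffff) matches only flows to port 80, while a zero dport_mask
 * matches any port.
 */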

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
                                                  int family)
{
        struct xfrm_policy_afinfo *afinfo;
        struct dst_entry *dst;

        afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EAFNOSUPPORT);

        dst = afinfo->dst_lookup(net, tos, saddr, daddr);

        xfrm_policy_put_afinfo(afinfo);

        return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
                                                xfrm_address_t *prev_saddr,
                                                xfrm_address_t *prev_daddr,
                                                int family)
{
        struct net *net = xs_net(x);
        xfrm_address_t *saddr = &x->props.saddr;
        xfrm_address_t *daddr = &x->id.daddr;
        struct dst_entry *dst;

        if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
                saddr = x->coaddr;
                daddr = prev_daddr;
        }
        if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
                saddr = prev_saddr;
                daddr = x->coaddr;
        }

        dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

        if (!IS_ERR(dst)) {
                if (prev_saddr != saddr)
                        memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
                if (prev_daddr != daddr)
                        memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
        }

        return dst;
}

static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT - 1) / HZ)
                return MAX_SCHEDULE_TIMEOUT - 1;
        else
                return secs * HZ;
}
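
/* Note: make_jiffies() clamps the timeout so it stays schedulable; e.g.
 * with HZ == 100 (an illustrative value), 5 seconds become 500 jiffies,
 * while anything at or above (MAX_SCHEDULE_TIMEOUT - 1) / HZ seconds is
 * capped at MAX_SCHEDULE_TIMEOUT - 1 jiffies.
 */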

static void xfrm_policy_timer(unsigned long data)
{
        struct xfrm_policy *xp = (struct xfrm_policy *)data;
        unsigned long now = get_seconds();
        long next = LONG_MAX;
        int warn = 0;
        int dir;

        read_lock(&xp->lock);

        if (unlikely(xp->walk.dead))
                goto out;

        dir = xfrm_policy_id2dir(xp->index);

        if (xp->lft.hard_add_expires_seconds) {
                long tmo = xp->lft.hard_add_expires_seconds +
                                xp->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.hard_use_expires_seconds) {
                long tmo = xp->lft.hard_use_expires_seconds +
                                (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_add_expires_seconds) {
                long tmo = xp->lft.soft_add_expires_seconds +
                                xp->curlft.add_time - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }
        if (xp->lft.soft_use_expires_seconds) {
                long tmo = xp->lft.soft_use_expires_seconds +
                                (xp->curlft.use_time ? : xp->curlft.add_time) - now;
                if (tmo <= 0) {
                        warn = 1;
                        tmo = XFRM_KM_TIMEOUT;
                }
                if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_policy_expired(xp, dir, 0, 0);
        if (next != LONG_MAX &&
            !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
                xfrm_pol_hold(xp);

out:
        read_unlock(&xp->lock);
        xfrm_pol_put(xp);
        return;

expired:
        read_unlock(&xp->lock);
        if (!xfrm_policy_delete(xp, dir))
                km_policy_expired(xp, dir, 1, 0);
        xfrm_pol_put(xp);
}
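
/* Rough contract of the timer above: soft expiry only notifies the key
 * manager (km_policy_expired() with hard == 0) and re-arms the timer,
 * while hard expiry deletes the policy and reports it with hard == 1.
 */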

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
        struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

        if (unlikely(pol->walk.dead))
                flo = NULL;
        else
                xfrm_pol_hold(pol);

        return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
        struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

        return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
        xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
        .get = xfrm_policy_flo_get,
        .check = xfrm_policy_flo_check,
        .delete = xfrm_policy_flo_delete,
};

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
        struct xfrm_policy *policy;

        policy = kzalloc(sizeof(struct xfrm_policy), gfp);

        if (policy) {
                write_pnet(&policy->xp_net, net);
                INIT_LIST_HEAD(&policy->walk.all);
                INIT_HLIST_NODE(&policy->bydst);
                INIT_HLIST_NODE(&policy->byidx);
                rwlock_init(&policy->lock);
                atomic_set(&policy->refcnt, 1);
                setup_timer(&policy->timer, xfrm_policy_timer,
                            (unsigned long)policy);
                policy->flo.ops = &xfrm_policy_fc_ops;
        }
        return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
        BUG_ON(!policy->walk.dead);

        if (del_timer(&policy->timer))
                BUG();

        security_xfrm_policy_free(policy->security);
        kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from all lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
        policy->walk.dead = 1;

        atomic_inc(&policy->genid);

        if (del_timer(&policy->timer))
                xfrm_pol_put(policy);

        xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
        return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net,
                                            const struct xfrm_selector *sel,
                                            unsigned short family, int dir)
{
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __sel_hash(sel, family, hmask);

        return (hash == hmask + 1 ?
                &net->xfrm.policy_inexact[dir] :
                net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
                                             const xfrm_address_t *daddr,
                                             const xfrm_address_t *saddr,
                                             unsigned short family, int dir)
{
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

        return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *ndsttable,
                                   unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp, *entry0 = NULL;
        struct xfrm_policy *pol;
        unsigned int h0 = 0;

redo:
        hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
                unsigned int h;

                h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
                                pol->family, nhashmask);
                if (!entry0) {
                        hlist_del(entry);
                        hlist_add_head(&pol->bydst, ndsttable + h);
                        h0 = h;
                } else {
                        if (h != h0)
                                continue;
                        hlist_del(entry);
                        hlist_add_after(entry0, &pol->bydst);
                }
                entry0 = entry;
        }
        if (!hlist_empty(list)) {
                entry0 = NULL;
                goto redo;
        }
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
                                   struct hlist_head *nidxtable,
                                   unsigned int nhashmask)
{
        struct hlist_node *entry, *tmp;
        struct xfrm_policy *pol;

        hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
                unsigned int h;

                h = __idx_hash(pol->index, nhashmask);
                hlist_add_head(&pol->byidx, nidxtable + h);
        }
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
        return ((old_hmask + 1) << 1) - 1;
}
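
/* Each resize doubles the bucket count, e.g. an hmask of 15 (16 buckets)
 * becomes 31 (32 buckets).
 */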

static void xfrm_bydst_resize(struct net *net, int dir)
{
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
        unsigned int nhashmask = xfrm_new_hash_mask(hmask);
        unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
        struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
        struct hlist_head *ndst = xfrm_hash_alloc(nsize);
        int i;

        if (!ndst)
                return;

        write_lock_bh(&xfrm_policy_lock);

        for (i = hmask; i >= 0; i--)
                xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

        net->xfrm.policy_bydst[dir].table = ndst;
        net->xfrm.policy_bydst[dir].hmask = nhashmask;

        write_unlock_bh(&xfrm_policy_lock);

        xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
        unsigned int hmask = net->xfrm.policy_idx_hmask;
        unsigned int nhashmask = xfrm_new_hash_mask(hmask);
        unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
        struct hlist_head *oidx = net->xfrm.policy_byidx;
        struct hlist_head *nidx = xfrm_hash_alloc(nsize);
        int i;

        if (!nidx)
                return;

        write_lock_bh(&xfrm_policy_lock);

        for (i = hmask; i >= 0; i--)
                xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

        net->xfrm.policy_byidx = nidx;
        net->xfrm.policy_idx_hmask = nhashmask;

        write_unlock_bh(&xfrm_policy_lock);

        xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
        unsigned int cnt = net->xfrm.policy_count[dir];
        unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

        if (total)
                *total += cnt;

        if ((hmask + 1) < xfrm_policy_hashmax &&
            cnt > hmask)
                return 1;

        return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
        unsigned int hmask = net->xfrm.policy_idx_hmask;

        if ((hmask + 1) < xfrm_policy_hashmax &&
            total > hmask)
                return 1;

        return 0;
}
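
/* Both resize predicates use the same heuristic: grow a table once the
 * entry count reaches the bucket count, but never beyond
 * xfrm_policy_hashmax buckets.
 */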

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
        read_lock_bh(&xfrm_policy_lock);
        si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
        si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
        si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
        si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN + XFRM_POLICY_MAX];
        si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT + XFRM_POLICY_MAX];
        si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD + XFRM_POLICY_MAX];
        si->spdhcnt = net->xfrm.policy_idx_hmask;
        si->spdhmcnt = xfrm_policy_hashmax;
        read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
        struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
        int dir, total;

        mutex_lock(&hash_resize_mutex);

        total = 0;
        for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
                if (xfrm_bydst_should_resize(net, dir, &total))
                        xfrm_bydst_resize(net, dir);
        }
        if (xfrm_byidx_should_resize(net, total))
                xfrm_byidx_resize(net, total);

        mutex_unlock(&hash_resize_mutex);
}

/* Generate a new index... KAME seems to generate them ordered by cost,
 * at the price of absolute unpredictability of rule ordering. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
        static u32 idx_generator;

        for (;;) {
                struct hlist_node *entry;
                struct hlist_head *list;
                struct xfrm_policy *p;
                u32 idx;
                int found;

                idx = (idx_generator | dir);
                idx_generator += 8;
                if (idx == 0)
                        idx = 8;
                list = net->xfrm.policy_byidx + idx_hash(net, idx);
                found = 0;
                hlist_for_each_entry(p, entry, list, byidx) {
                        if (p->index == idx) {
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        return idx;
        }
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
        u32 *p1 = (u32 *) s1;
        u32 *p2 = (u32 *) s2;
        int len = sizeof(struct xfrm_selector) / sizeof(u32);
        int i;

        for (i = 0; i < len; i++) {
                if (p1[i] != p2[i])
                        return 1;
        }

        return 0;
}
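
/* selector_cmp() is effectively a word-wise memcmp() restricted to an
 * equality test: 0 means the selectors are identical, 1 means they differ.
 */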

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
        struct net *net = xp_net(policy);
        struct xfrm_policy *pol;
        struct xfrm_policy *delpol;
        struct hlist_head *chain;
        struct hlist_node *entry, *newpos;
        u32 mark = policy->mark.v & policy->mark.m;

        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
        delpol = NULL;
        newpos = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (pol->type == policy->type &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
                    (mark & pol->mark.m) == pol->mark.v &&
                    xfrm_sec_ctx_match(pol->security, policy->security) &&
                    !WARN_ON(delpol)) {
                        if (excl) {
                                write_unlock_bh(&xfrm_policy_lock);
                                return -EEXIST;
                        }
                        delpol = pol;
                        if (policy->priority > pol->priority)
                                continue;
                } else if (policy->priority >= pol->priority) {
                        newpos = &pol->bydst;
                        continue;
                }
                if (delpol)
                        break;
        }
        if (newpos)
                hlist_add_after(newpos, &policy->bydst);
        else
                hlist_add_head(&policy->bydst, chain);
        xfrm_pol_hold(policy);
        net->xfrm.policy_count[dir]++;
        atomic_inc(&flow_cache_genid);
        if (delpol)
                __xfrm_policy_unlink(delpol, dir);
        policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
        hlist_add_head(&policy->byidx, net->xfrm.policy_byidx + idx_hash(net, policy->index));
        policy->curlft.add_time = get_seconds();
        policy->curlft.use_time = 0;
        if (!mod_timer(&policy->timer, jiffies + HZ))
                xfrm_pol_hold(policy);
        list_add(&policy->walk.all, &net->xfrm.policy_all);
        write_unlock_bh(&xfrm_policy_lock);

        if (delpol)
                xfrm_policy_kill(delpol);
        else if (xfrm_bydst_should_resize(net, dir, NULL))
                schedule_work(&net->xfrm.policy_hash_work);

        return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
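
/* Insertion sketch: xfrm_policy_insert() walks the destination hash chain,
 * replaces an existing policy with an identical selector/mark/context
 * (unless excl forces -EEXIST) and otherwise links the new policy behind
 * the last entry whose priority value is not larger, so each chain stays
 * sorted by priority and lookups can stop at the first match.
 */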

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
                                          int dir, struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err)
{
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
        struct hlist_node *entry;

        *err = 0;
        write_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_bysel(net, sel, sel->family, dir);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                if (pol->type == type &&
                    (mark & pol->mark.m) == pol->mark.v &&
                    !selector_cmp(sel, &pol->selector) &&
                    xfrm_sec_ctx_match(ctx, pol->security)) {
                        xfrm_pol_hold(pol);
                        if (delete) {
                                *err = security_xfrm_policy_delete(
                                                pol->security);
                                if (*err) {
                                        write_unlock_bh(&xfrm_policy_lock);
                                        return pol;
                                }
                                __xfrm_policy_unlink(pol, dir);
                        }
                        ret = pol;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (ret && delete)
                xfrm_policy_kill(ret);
        return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
                                     int dir, u32 id, int delete, int *err)
{
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
        struct hlist_node *entry;

        *err = -ENOENT;
        if (xfrm_policy_id2dir(id) != dir)
                return NULL;

        *err = 0;
        write_lock_bh(&xfrm_policy_lock);
        chain = net->xfrm.policy_byidx + idx_hash(net, id);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, byidx) {
                if (pol->type == type && pol->index == id &&
                    (mark & pol->mark.m) == pol->mark.v) {
                        xfrm_pol_hold(pol);
                        if (delete) {
                                *err = security_xfrm_policy_delete(
                                                pol->security);
                                if (*err) {
                                        write_unlock_bh(&xfrm_policy_lock);
                                        return pol;
                                }
                                __xfrm_policy_unlink(pol, dir);
                        }
                        ret = pol;
                        break;
                }
        }
        write_unlock_bh(&xfrm_policy_lock);

        if (ret && delete)
                xfrm_policy_kill(ret);
        return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
        int dir, err = 0;

        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
                struct hlist_node *entry;
                int i;

                hlist_for_each_entry(pol, entry,
                                     &net->xfrm.policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
                        err = security_xfrm_policy_delete(pol->security);
                        if (err) {
                                xfrm_audit_policy_delete(pol, 0,
                                                         audit_info->loginuid,
                                                         audit_info->sessionid,
                                                         audit_info->secid);
                                return err;
                        }
                }
                for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
                        hlist_for_each_entry(pol, entry,
                                             net->xfrm.policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
                                        continue;
                                err = security_xfrm_policy_delete(
                                                pol->security);
                                if (err) {
                                        xfrm_audit_policy_delete(pol, 0,
                                                                 audit_info->loginuid,
                                                                 audit_info->sessionid,
                                                                 audit_info->secid);
                                        return err;
                                }
                        }
                }
        }
        return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
        return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
        int dir, err = 0, cnt = 0;

        write_lock_bh(&xfrm_policy_lock);

        err = xfrm_policy_flush_secctx_check(net, type, audit_info);
        if (err)
                goto out;

        for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
                struct xfrm_policy *pol;
                struct hlist_node *entry;
                int i;

        again1:
                hlist_for_each_entry(pol, entry,
                                     &net->xfrm.policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
                        __xfrm_policy_unlink(pol, dir);
                        write_unlock_bh(&xfrm_policy_lock);
                        cnt++;

                        xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
                                                 audit_info->sessionid,
                                                 audit_info->secid);

                        xfrm_policy_kill(pol);

                        write_lock_bh(&xfrm_policy_lock);
                        goto again1;
                }

                for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
                again2:
                        hlist_for_each_entry(pol, entry,
                                             net->xfrm.policy_bydst[dir].table + i,
                                             bydst) {
                                if (pol->type != type)
                                        continue;
                                __xfrm_policy_unlink(pol, dir);
                                write_unlock_bh(&xfrm_policy_lock);
                                cnt++;

                                xfrm_audit_policy_delete(pol, 1,
                                                         audit_info->loginuid,
                                                         audit_info->sessionid,
                                                         audit_info->secid);
                                xfrm_policy_kill(pol);

                                write_lock_bh(&xfrm_policy_lock);
                                goto again2;
                        }
                }

        }
        if (!cnt)
                err = -ESRCH;
out:
        write_unlock_bh(&xfrm_policy_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
                     int (*func)(struct xfrm_policy *, int, int, void *),
                     void *data)
{
        struct xfrm_policy *pol;
        struct xfrm_policy_walk_entry *x;
        int error = 0;

        if (walk->type >= XFRM_POLICY_TYPE_MAX &&
            walk->type != XFRM_POLICY_TYPE_ANY)
                return -EINVAL;

        if (list_empty(&walk->walk.all) && walk->seq != 0)
                return 0;

        write_lock_bh(&xfrm_policy_lock);
        if (list_empty(&walk->walk.all))
                x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
        else
                x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
        list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
                if (x->dead)
                        continue;
                pol = container_of(x, struct xfrm_policy, walk);
                if (walk->type != XFRM_POLICY_TYPE_ANY &&
                    walk->type != pol->type)
                        continue;
                error = func(pol, xfrm_policy_id2dir(pol->index),
                             walk->seq, data);
                if (error) {
                        list_move_tail(&walk->walk.all, &x->all);
                        goto out;
                }
                walk->seq++;
        }
        if (walk->seq == 0) {
                error = -ENOENT;
                goto out;
        }
        list_del_init(&walk->walk.all);
out:
        write_unlock_bh(&xfrm_policy_lock);
        return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
        INIT_LIST_HEAD(&walk->walk.all);
        walk->walk.dead = 1;
        walk->type = type;
        walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
        if (list_empty(&walk->walk.all))
                return;

        write_lock_bh(&xfrm_policy_lock);
        list_del(&walk->walk.all);
        write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
                             const struct flowi *fl,
                             u8 type, u16 family, int dir)
{
        const struct xfrm_selector *sel = &pol->selector;
        int match, ret = -ESRCH;

        if (pol->family != family ||
            (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
            pol->type != type)
                return ret;

        match = xfrm_selector_match(sel, fl, family);
        if (match)
                ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
                                                  dir);

        return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
                                                     const struct flowi *fl,
                                                     u16 family, u8 dir)
{
        int err;
        struct xfrm_policy *pol, *ret;
        const xfrm_address_t *daddr, *saddr;
        struct hlist_node *entry;
        struct hlist_head *chain;
        u32 priority = ~0U;

        daddr = xfrm_flowi_daddr(fl, family);
        saddr = xfrm_flowi_saddr(fl, family);
        if (unlikely(!daddr || !saddr))
                return NULL;

        read_lock_bh(&xfrm_policy_lock);
        chain = policy_hash_direct(net, daddr, saddr, family, dir);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
                                continue;
                        else {
                                ret = ERR_PTR(err);
                                goto fail;
                        }
                } else {
                        ret = pol;
                        priority = ret->priority;
                        break;
                }
        }
        chain = &net->xfrm.policy_inexact[dir];
        hlist_for_each_entry(pol, entry, chain, bydst) {
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
                                continue;
                        else {
                                ret = ERR_PTR(err);
                                goto fail;
                        }
                } else if (pol->priority < priority) {
                        ret = pol;
                        break;
                }
        }
        if (ret)
                xfrm_pol_hold(ret);
fail:
        read_unlock_bh(&xfrm_policy_lock);

        return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_policy *pol;

        pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
        if (pol != NULL)
                return pol;
#endif
        return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
                   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
        struct xfrm_policy *pol;

        if (old_obj)
                xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

        pol = __xfrm_policy_lookup(net, fl, family, dir);
        if (IS_ERR_OR_NULL(pol))
                return ERR_CAST(pol);

        /* Resolver returns two references:
         * one for cache and one for caller of flow_cache_lookup() */
        xfrm_pol_hold(pol);

        return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
        if (XFRM_POLICY_IN == FLOW_DIR_IN &&
            XFRM_POLICY_OUT == FLOW_DIR_OUT &&
            XFRM_POLICY_FWD == FLOW_DIR_FWD)
                return dir;
        switch (dir) {
        default:
        case XFRM_POLICY_IN:
                return FLOW_DIR_IN;
        case XFRM_POLICY_OUT:
                return FLOW_DIR_OUT;
        case XFRM_POLICY_FWD:
                return FLOW_DIR_FWD;
        }
}
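
/* When the XFRM_POLICY_* values coincide with the corresponding FLOW_DIR_*
 * values, the first condition above is compile-time true and the switch is
 * optimized away entirely.
 */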

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
                                                 const struct flowi *fl)
{
        struct xfrm_policy *pol;

        read_lock_bh(&xfrm_policy_lock);
        if ((pol = sk->sk_policy[dir]) != NULL) {
                int match = xfrm_selector_match(&pol->selector, fl,
                                                sk->sk_family);
                int err = 0;

                if (match) {
                        if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
                                pol = NULL;
                                goto out;
                        }
                        err = security_xfrm_policy_lookup(pol->security,
                                                          fl->flowi_secid,
                                                          policy_to_flow_dir(dir));
                        if (!err)
                                xfrm_pol_hold(pol);
                        else if (err == -ESRCH)
                                pol = NULL;
                        else
                                pol = ERR_PTR(err);
                } else
                        pol = NULL;
        }
out:
        read_unlock_bh(&xfrm_policy_lock);
        return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
        struct net *net = xp_net(pol);
        struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
                                                     pol->family, dir);

        list_add(&pol->walk.all, &net->xfrm.policy_all);
        hlist_add_head(&pol->bydst, chain);
        hlist_add_head(&pol->byidx, net->xfrm.policy_byidx + idx_hash(net, pol->index));
        net->xfrm.policy_count[dir]++;
        xfrm_pol_hold(pol);

        if (xfrm_bydst_should_resize(net, dir, NULL))
                schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir)
{
        struct net *net = xp_net(pol);

        if (hlist_unhashed(&pol->bydst))
                return NULL;

        hlist_del(&pol->bydst);
        hlist_del(&pol->byidx);
        list_del(&pol->walk.all);
        net->xfrm.policy_count[dir]--;

        return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
        write_lock_bh(&xfrm_policy_lock);
        pol = __xfrm_policy_unlink(pol, dir);
        write_unlock_bh(&xfrm_policy_lock);
        if (pol) {
                xfrm_policy_kill(pol);
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
        struct net *net = xp_net(pol);
        struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
        if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
                return -EINVAL;
#endif

        write_lock_bh(&xfrm_policy_lock);
        old_pol = sk->sk_policy[dir];
        sk->sk_policy[dir] = pol;
        if (pol) {
                pol->curlft.add_time = get_seconds();
                pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX + dir);
                __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
        }
        if (old_pol)
                /* Unlinking succeeds always. This is the only function
                 * allowed to delete or replace socket policy.
                 */
                __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX + dir);
        write_unlock_bh(&xfrm_policy_lock);

        if (old_pol) {
                xfrm_policy_kill(old_pol);
        }
        return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
        struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

        if (newp) {
                newp->selector = old->selector;
                if (security_xfrm_policy_clone(old->security,
                                               &newp->security)) {
                        kfree(newp);
                        return NULL;  /* ENOMEM */
                }
                newp->lft = old->lft;
                newp->curlft = old->curlft;
                newp->mark = old->mark;
                newp->action = old->action;
                newp->flags = old->flags;
                newp->xfrm_nr = old->xfrm_nr;
                newp->index = old->index;
                newp->type = old->type;
                memcpy(newp->xfrm_vec, old->xfrm_vec,
                       newp->xfrm_nr * sizeof(struct xfrm_tmpl));
                write_lock_bh(&xfrm_policy_lock);
                __xfrm_policy_link(newp, XFRM_POLICY_MAX + dir);
                write_unlock_bh(&xfrm_policy_lock);
                xfrm_pol_put(newp);
        }
        return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
        struct xfrm_policy *p0 = sk->sk_policy[0],
                           *p1 = sk->sk_policy[1];

        sk->sk_policy[0] = sk->sk_policy[1] = NULL;
        if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
                return -ENOMEM;
        if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
                return -ENOMEM;
        return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
               unsigned short family)
{
        int err;
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

        if (unlikely(afinfo == NULL))
                return -EINVAL;
        err = afinfo->get_saddr(net, local, remote);
        xfrm_policy_put_afinfo(afinfo);
        return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
                      struct xfrm_state **xfrm, unsigned short family)
{
        struct net *net = xp_net(policy);
        int nx;
        int i, error;
        xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
        xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
        xfrm_address_t tmp;

        for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
                struct xfrm_state *x;
                xfrm_address_t *remote = daddr;
                xfrm_address_t *local = saddr;
                struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

                if (tmpl->mode == XFRM_MODE_TUNNEL ||
                    tmpl->mode == XFRM_MODE_BEET) {
                        remote = &tmpl->id.daddr;
                        local = &tmpl->saddr;
                        if (xfrm_addr_any(local, tmpl->encap_family)) {
                                error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
                                if (error)
                                        goto fail;
                                local = &tmp;
                        }
                }

                x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

                if (x && x->km.state == XFRM_STATE_VALID) {
                        xfrm[nx++] = x;
                        daddr = remote;
                        saddr = local;
                        continue;
                }
                if (x) {
                        error = (x->km.state == XFRM_STATE_ERROR ?
                                 -EINVAL : -EAGAIN);
                        xfrm_state_put(x);
                }
                else if (error == -ESRCH)
                        error = -EAGAIN;

                if (!tmpl->optional)
                        goto fail;
        }
        return nx;

fail:
        for (nx--; nx >= 0; nx--)
                xfrm_state_put(xfrm[nx]);
        return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
                  struct xfrm_state **xfrm, unsigned short family)
{
        struct xfrm_state *tp[XFRM_MAX_DEPTH];
        struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
        int cnx = 0;
        int error;
        int ret;
        int i;

        for (i = 0; i < npols; i++) {
                if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
                        error = -ENOBUFS;
                        goto fail;
                }

                ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
                if (ret < 0) {
                        error = ret;
                        goto fail;
                } else
                        cnx += ret;
        }

        /* found states are sorted for outbound processing */
        if (npols > 1)
                xfrm_state_sort(xfrm, tpp, cnx, family);

        return cnx;

fail:
        for (cnx--; cnx >= 0; cnx--)
                xfrm_state_put(tpp[cnx]);
        return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int tos;

        if (!afinfo)
                return -EINVAL;

        tos = afinfo->get_tos(fl);

        xfrm_policy_put_afinfo(afinfo);

        return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
        struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
        struct dst_entry *dst = &xdst->u.dst;

        if (xdst->route == NULL) {
                /* Dummy bundle - if it has xfrms, we were not
                 * able to build a bundle because template resolution
                 * failed; we need to retry resolving. */
                if (xdst->num_xfrms > 0)
                        return NULL;
        } else {
                /* Real bundle */
                if (stale_bundle(dst))
                        return NULL;
        }

        dst_hold(dst);
        return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
        struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
        struct dst_entry *dst = &xdst->u.dst;

        if (!xdst->route)
                return 0;
        if (stale_bundle(dst))
                return 0;

        return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
        struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
        struct dst_entry *dst = &xdst->u.dst;

        dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
        .get = xfrm_bundle_flo_get,
        .check = xfrm_bundle_flo_check,
        .delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct dst_ops *dst_ops;
        struct xfrm_dst *xdst;

        if (!afinfo)
                return ERR_PTR(-EINVAL);

        switch (family) {
        case AF_INET:
                dst_ops = &net->xfrm.xfrm4_dst_ops;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                dst_ops = &net->xfrm.xfrm6_dst_ops;
                break;
#endif
        default:
                BUG();
        }
        xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);

        if (likely(xdst)) {
                memset(&xdst->u.rt6.rt6i_table, 0,
                       sizeof(*xdst) - sizeof(struct dst_entry));
                xdst->flo.ops = &xfrm_bundle_fc_ops;
        } else
                xdst = ERR_PTR(-ENOBUFS);

        xfrm_policy_put_afinfo(afinfo);

        return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
                                 int nfheader_len)
{
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(dst->ops->family);
        int err;

        if (!afinfo)
                return -EINVAL;

        err = afinfo->init_path(path, dst, nfheader_len);

        xfrm_policy_put_afinfo(afinfo);

        return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
                                const struct flowi *fl)
{
        struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
        int err;

        if (!afinfo)
                return -EINVAL;

        err = afinfo->fill_dst(xdst, dev, fl);

        xfrm_policy_put_afinfo(afinfo);

        return err;
}


/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                                            struct xfrm_state **xfrm, int nx,
                                            const struct flowi *fl,
                                            struct dst_entry *dst)
{
        struct net *net = xp_net(policy);
        unsigned long now = jiffies;
        struct net_device *dev;
        struct xfrm_mode *inner_mode;
        struct dst_entry *dst_prev = NULL;
        struct dst_entry *dst0 = NULL;
        int i = 0;
        int err;
        int header_len = 0;
        int nfheader_len = 0;
        int trailer_len = 0;
        int tos;
        int family = policy->selector.family;
        xfrm_address_t saddr, daddr;

        xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

        tos = xfrm_get_tos(fl, family);
        err = tos;
        if (tos < 0)
                goto put_states;

        dst_hold(dst);

        for (; i < nx; i++) {
                struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
                struct dst_entry *dst1 = &xdst->u.dst;

                err = PTR_ERR(xdst);
                if (IS_ERR(xdst)) {
                        dst_release(dst);
                        goto put_states;
                }

                if (xfrm[i]->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(xfrm[i],
                                                        xfrm_af2proto(family));
                        if (!inner_mode) {
                                err = -EAFNOSUPPORT;
                                dst_release(dst);
                                goto put_states;
                        }
                } else
                        inner_mode = xfrm[i]->inner_mode;

                if (!dst_prev)
                        dst0 = dst1;
                else {
                        dst_prev->child = dst_clone(dst1);
                        dst1->flags |= DST_NOHASH;
                }

                xdst->route = dst;
                dst_copy_metrics(dst1, dst);

                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        family = xfrm[i]->props.family;
                        dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
                                              family);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
                                goto put_states;
                } else
                        dst_hold(dst);

                dst1->xfrm = xfrm[i];
                xdst->xfrm_genid = xfrm[i]->genid;

                dst1->obsolete = -1;
                dst1->flags |= DST_HOST;
                dst1->lastuse = now;

                dst1->input = dst_discard;
                dst1->output = inner_mode->afinfo->output;

                dst1->next = dst_prev;
                dst_prev = dst1;

                header_len += xfrm[i]->props.header_len;
                if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
                        nfheader_len += xfrm[i]->props.header_len;
                trailer_len += xfrm[i]->props.trailer_len;
        }

        dst_prev->child = dst;
        dst0->path = dst;

        err = -ENODEV;
        dev = dst->dev;
        if (!dev)
                goto free_dst;

        /* Copy neighbour for reachability confirmation */
        dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));

        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);

        for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
                struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

                err = xfrm_fill_dst(xdst, dev, fl);
                if (err)
                        goto free_dst;

                dst_prev->header_len = header_len;
                dst_prev->trailer_len = trailer_len;
                header_len -= xdst->u.dst.xfrm->props.header_len;
                trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
        }

out:
        return dst0;

put_states:
        for (; i < nx; i++)
                xfrm_state_put(xfrm[i]);
free_dst:
        if (dst0)
                dst_free(dst0);
        dst0 = ERR_PTR(err);
        goto out;
}

static inline int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
        if (!*target) {
                *target = kmalloc(size, GFP_ATOMIC);
                if (!*target)
                        return -ENOMEM;
        }
        memcpy(*target, src, size);
        return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        return xfrm_dst_alloc_copy((void **)&(xdst->partner),
                                   sel, sizeof(*sel));
#else
        return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
        return 0;
#endif
}
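
/* Under CONFIG_XFRM_SUB_POLICY a bundle additionally records the sub-policy
 * selector (xdst->partner) and the original flow (xdst->origin); without
 * that option both helpers above compile down to no-ops.
 */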

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
                                struct xfrm_policy **pols,
                                int *num_pols, int *num_xfrms)
{
        int i;

        if (*num_pols == 0 || !pols[0]) {
                *num_pols = 0;
                *num_xfrms = 0;
                return 0;
        }
        if (IS_ERR(pols[0])) {
                *num_pols = 0; /* CVE-2022-36879 */
                return PTR_ERR(pols[0]);
        }
        *num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
        if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
            pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
                pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
                                                    XFRM_POLICY_TYPE_MAIN,
                                                    fl, family,
                                                    XFRM_POLICY_OUT);
                if (pols[1]) {
                        if (IS_ERR(pols[1])) {
                                xfrm_pols_put(pols, *num_pols);
                                *num_pols = 0; /* CVE-2022-36879 */
                                return PTR_ERR(pols[1]);
                        }
                        (*num_pols)++;
                        (*num_xfrms) += pols[1]->xfrm_nr;
                }
        }
#endif
        for (i = 0; i < *num_pols; i++) {
                if (pols[i]->action != XFRM_POLICY_ALLOW) {
                        *num_xfrms = -1;
                        break;
                }
        }

        return 0;

}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
                               const struct flowi *fl, u16 family,
                               struct dst_entry *dst_orig)
{
        struct net *net = xp_net(pols[0]);
        struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
        struct dst_entry *dst;
        struct xfrm_dst *xdst;
        int err;

        /* Try to instantiate a bundle */
        err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
        if (err <= 0) {
                if (err != 0 && err != -EAGAIN)
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
                return ERR_PTR(err);
        }

        dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
        if (IS_ERR(dst)) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
                return ERR_CAST(dst);
        }

        xdst = (struct xfrm_dst *)dst;
        xdst->num_xfrms = err;
        if (num_pols > 1)
                err = xfrm_dst_update_parent(dst, &pols[1]->selector);
        else
                err = xfrm_dst_update_origin(dst, fl);
        if (unlikely(err)) {
                dst_free(dst);
                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
                return ERR_PTR(err);
        }

        xdst->num_pols = num_pols;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
        xdst->policy_genid = atomic_read(&pols[0]->genid);

        return xdst;
}

static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
                   struct flow_cache_object *oldflo, void *ctx)
{
        struct dst_entry *dst_orig = (struct dst_entry *)ctx;
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct xfrm_dst *xdst, *new_xdst;
        int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

        /* Check if the policies from old bundle are usable */
        xdst = NULL;
        if (oldflo) {
                xdst = container_of(oldflo, struct xfrm_dst, flo);
                num_pols = xdst->num_pols;
                num_xfrms = xdst->num_xfrms;
                pol_dead = 0;
                for (i = 0; i < num_pols; i++) {
                        pols[i] = xdst->pols[i];
                        pol_dead |= pols[i]->walk.dead;
                }
                if (pol_dead) {
                        dst_free(&xdst->u.dst);
                        xdst = NULL;
                        num_pols = 0;
                        num_xfrms = 0;
                        oldflo = NULL;
                }
        }

        /* Resolve policies to use if we couldn't get them from
         * previous cache entry */
        if (xdst == NULL) {
                num_pols = 1;
                pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
                err = xfrm_expand_policies(fl, family, pols,
                                           &num_pols, &num_xfrms);
                if (err < 0)
                        goto inc_error;
                if (num_pols == 0)
                        return NULL;
                if (num_xfrms <= 0)
                        goto make_dummy_bundle;
        }

        new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
        if (IS_ERR(new_xdst)) {
                err = PTR_ERR(new_xdst);
                if (err != -EAGAIN)
                        goto error;
                if (oldflo == NULL)
                        goto make_dummy_bundle;
                dst_hold(&xdst->u.dst);
                return oldflo;
        } else if (new_xdst == NULL) {
                num_xfrms = 0;
                if (oldflo == NULL)
                        goto make_dummy_bundle;
                xdst->num_xfrms = 0;
                dst_hold(&xdst->u.dst);
                return oldflo;
        }

        /* Kill the previous bundle */
        if (xdst) {
                /* The policies were stolen for newly generated bundle */
                xdst->num_pols = 0;
                dst_free(&xdst->u.dst);
        }

        /* Flow cache does not have reference, it dst_free()'s,
         * but we do need to return one reference for original caller */
        dst_hold(&new_xdst->u.dst);
        return &new_xdst->flo;

make_dummy_bundle:
        /* We found policies, but there are no bundles to instantiate:
         * either because the policy blocks, has no transformations, or
         * we could not build a template (no xfrm_states). */
        xdst = xfrm_alloc_dst(net, family);
        if (IS_ERR(xdst)) {
                xfrm_pols_put(pols, num_pols);
                return ERR_CAST(xdst);
        }
        xdst->num_pols = num_pols;
        xdst->num_xfrms = num_xfrms;
        memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

        dst_hold(&xdst->u.dst);
        return &xdst->flo;

inc_error:
        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
        if (xdst != NULL)
                dst_free(&xdst->u.dst);
        else
                xfrm_pols_put(pols, num_pols);
        return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
                                        struct dst_entry *dst_orig)
{
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct dst_entry *ret;

        if (!afinfo) {
                dst_release(dst_orig);
                return ERR_PTR(-EINVAL);
        } else {
                ret = afinfo->blackhole_route(net, dst_orig);
        }
        xfrm_policy_put_afinfo(afinfo);

        return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                              const struct flowi *fl,
                              struct sock *sk, int flags)
{
        struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
        struct flow_cache_object *flo;
        struct xfrm_dst *xdst;
        struct dst_entry *dst, *route;
        u16 family = dst_orig->ops->family;
        u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
        int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
        dst = NULL;
        xdst = NULL;
        route = NULL;

        if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
                num_pols = 1;
                pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
                err = xfrm_expand_policies(fl, family, pols,
                                           &num_pols, &num_xfrms);
                if (err < 0)
                        goto dropdst;

                if (num_pols) {
                        if (num_xfrms <= 0) {
                                drop_pols = num_pols;
                                goto no_transform;
                        }

                        xdst = xfrm_resolve_and_create_bundle(
                                        pols, num_pols, fl,
                                        family, dst_orig);
                        if (IS_ERR(xdst)) {
                                xfrm_pols_put(pols, num_pols);
                                err = PTR_ERR(xdst);
                                goto dropdst;
                        } else if (xdst == NULL) {
                                num_xfrms = 0;
                                drop_pols = num_pols;
                                goto no_transform;
                        }

                        dst_hold(&xdst->u.dst);

                        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
                        xdst->u.dst.next = xfrm_policy_sk_bundles;
                        xfrm_policy_sk_bundles = &xdst->u.dst;
                        spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

                        route = xdst->route;
                }
        }

        if (xdst == NULL) {
                /* To accelerate a bit... */
                if ((dst_orig->flags & DST_NOXFRM) ||
                    !net->xfrm.policy_count[XFRM_POLICY_OUT])
                        goto nopol;

                flo = flow_cache_lookup(net, fl, family, dir,
                                        xfrm_bundle_lookup, dst_orig);
                if (flo == NULL)
                        goto nopol;
                if (IS_ERR(flo)) {
                        err = PTR_ERR(flo);
                        goto dropdst;
                }
                xdst = container_of(flo, struct xfrm_dst, flo);

                num_pols = xdst->num_pols;
                num_xfrms = xdst->num_xfrms;
                memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
                route = xdst->route;
        }

        dst = &xdst->u.dst;
        if (route == NULL && num_xfrms > 0) {
                /* The only case when xfrm_bundle_lookup() returns a
                 * bundle with null route, is when the template could
                 * not be resolved. It means policies are there, but
                 * bundle could not be created, since we don't yet
                 * have the xfrm_state's. We need to wait for KM to
                 * negotiate new SA's or bail out with error. */
                if (net->xfrm.sysctl_larval_drop) {
                        /* EREMOTE tells the caller to generate
                         * a one-shot blackhole route. */
                        dst_release(dst);
                        xfrm_pols_put(pols, drop_pols);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

                        return make_blackhole(net, family, dst_orig);
                }
                if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
                        DECLARE_WAITQUEUE(wait, current);

                        add_wait_queue(&net->xfrm.km_waitq, &wait);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&net->xfrm.km_waitq, &wait);

                        if (!signal_pending(current)) {
                                dst_release(dst);
                                goto restart;
                        }

                        err = -ERESTART;
                } else
                        err = -EAGAIN;

                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
                goto error;
        }

no_transform:
        if (num_pols == 0)
                goto nopol;

        if ((flags & XFRM_LOOKUP_ICMP) &&
            !(pols[0]->flags & XFRM_POLICY_ICMP)) {
                err = -ENOENT;
                goto error;
        }

        for (i = 0; i < num_pols; i++)
                pols[i]->curlft.use_time = get_seconds();

        if (num_xfrms < 0) {
                /* Prohibit the flow */
                XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
                err = -EPERM;
                goto error;
        } else if (num_xfrms > 0) {
                /* Flow transformed */
                dst_release(dst_orig);
        } else {
                /* Flow passes untransformed */
                dst_release(dst);
                dst = dst_orig;
        }
ok:
        xfrm_pols_put(pols, drop_pols);
        if (dst && dst->xfrm &&
            dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
                dst->flags |= DST_XFRM_TUNNEL;
        return dst;

nopol:
        if (!(flags & XFRM_LOOKUP_ICMP)) {
                dst = dst_orig;
                goto ok;
        }
        err = -ENOENT;
error:
        dst_release(dst);
dropdst:
        dst_release(dst_orig);
        xfrm_pols_put(pols, drop_pols);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
        struct xfrm_state *x;

        if (!skb->sp || idx < 0 || idx >= skb->sp->len)
                return 0;
        x = skb->sp->xvec[idx];
        if (!x->type->reject)
                return 0;
        return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
              unsigned short family)
{
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
        return x->id.proto == tmpl->id.proto &&
               (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
               (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
               x->props.mode == tmpl->mode &&
               (tmpl->allalgs || (tmpl->aalgos & (1 << x->props.aalgo)) ||
                !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
               !(x->props.mode != XFRM_MODE_TRANSPORT &&
                 xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * Zero or a positive value is returned when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next index
 * past the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
               unsigned short family)
{
        int idx = start;

        if (tmpl->optional) {
                if (tmpl->mode == XFRM_MODE_TRANSPORT)
                        return start;
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
                if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
                        return ++idx;
                if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
                        if (start == -1)
                                start = -2 - idx;
                        break;
                }
        }
        return start;
}
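
/* Worked example (hypothetical): with templates [optional ESP transport,
 * ESP tunnel] and a secpath containing only the matching ESP tunnel state,
 * the optional transport template returns start unchanged and the tunnel
 * template returns the index past the matched state; a tunnel-mode state
 * that matches no required template yields the "-2 - errored_index"
 * encoding instead.
 */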
2008
2009int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2010 unsigned int family, int reverse)
2011{
2012 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2013 int err;
2014
2015 if (unlikely(afinfo == NULL))
2016 return -EAFNOSUPPORT;
2017
2018 afinfo->decode_session(skb, fl, reverse);
2019 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2020 xfrm_policy_put_afinfo(afinfo);
2021 return err;
2022}
2023EXPORT_SYMBOL(__xfrm_decode_session);
2024
2025static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2026{
2027 for (; k < sp->len; k++) {
2028 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2029 *idxp = k;
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035}
2036
2037int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2038 unsigned short family)
2039{
2040 struct net *net = dev_net(skb->dev);
2041 struct xfrm_policy *pol;
2042 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2043 int npols = 0;
2044 int xfrm_nr;
2045 int pi;
2046 int reverse;
2047 struct flowi fl;
2048 u8 fl_dir;
2049 int xerr_idx = -1;
2050
2051 reverse = dir & ~XFRM_POLICY_MASK;
2052 dir &= XFRM_POLICY_MASK;
2053 fl_dir = policy_to_flow_dir(dir);
2054
2055 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2056 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2057 return 0;
2058 }
2059
2060 nf_nat_decode_session(skb, &fl, family);
2061
2062 /* First, check used SA against their selectors. */
2063 if (skb->sp) {
2064 int i;
2065
2066 for (i=skb->sp->len-1; i>=0; i--) {
2067 struct xfrm_state *x = skb->sp->xvec[i];
2068 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2069 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2070 return 0;
2071 }
2072 }
2073 }
2074
2075 pol = NULL;
2076 if (sk && sk->sk_policy[dir]) {
2077 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2078 if (IS_ERR(pol)) {
2079 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2080 return 0;
2081 }
2082 }
2083
2084 if (!pol) {
2085 struct flow_cache_object *flo;
2086
2087 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2088 xfrm_policy_lookup, NULL);
2089 if (IS_ERR_OR_NULL(flo))
2090 pol = ERR_CAST(flo);
2091 else
2092 pol = container_of(flo, struct xfrm_policy, flo);
2093 }
2094
2095 if (IS_ERR(pol)) {
2096 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2097 return 0;
2098 }
2099
2100 if (!pol) {
2101 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2102 xfrm_secpath_reject(xerr_idx, skb, &fl);
2103 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2104 return 0;
2105 }
2106 return 1;
2107 }
2108
2109 pol->curlft.use_time = get_seconds();
2110
2111 pols[0] = pol;
2112 npols ++;
2113#ifdef CONFIG_XFRM_SUB_POLICY
2114 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2115 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2116 &fl, family,
2117 XFRM_POLICY_IN);
2118 if (pols[1]) {
2119 if (IS_ERR(pols[1])) {
2120 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2121 return 0;
2122 }
2123 pols[1]->curlft.use_time = get_seconds();
2124			npols++;
2125 }
2126 }
2127#endif
2128
2129 if (pol->action == XFRM_POLICY_ALLOW) {
2130 struct sec_path *sp;
2131 static struct sec_path dummy;
2132 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2133 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2134 struct xfrm_tmpl **tpp = tp;
2135 int ti = 0;
2136 int i, k;
2137
2138 if ((sp = skb->sp) == NULL)
2139 sp = &dummy;
2140
2141 for (pi = 0; pi < npols; pi++) {
2142 if (pols[pi] != pol &&
2143 pols[pi]->action != XFRM_POLICY_ALLOW) {
2144 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2145 goto reject;
2146 }
2147 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2148 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2149 goto reject_error;
2150 }
2151 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2152 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2153 }
2154 xfrm_nr = ti;
2155 if (npols > 1) {
2156 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2157 tpp = stp;
2158 }
2159
2160 /* For each tunnel xfrm, find the first matching tmpl.
2161 * For each tmpl before that, find corresponding xfrm.
2162 * Order is _important_. Later we will implement
2163 * some barriers, but at the moment barriers
2164	 * are implied between every two adjacent transformations.
2165 */
2166 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2167 k = xfrm_policy_ok(tpp[i], sp, k, family);
2168 if (k < 0) {
2169 if (k < -1)
2170 /* "-2 - errored_index" returned */
2171 xerr_idx = -(2+k);
2172 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2173 goto reject;
2174 }
2175 }
2176
2177 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2179 goto reject;
2180 }
2181
2182 xfrm_pols_put(pols, npols);
2183 return 1;
2184 }
2185 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2186
2187reject:
2188 xfrm_secpath_reject(xerr_idx, skb, &fl);
2189reject_error:
2190 xfrm_pols_put(pols, npols);
2191 return 0;
2192}
2193EXPORT_SYMBOL(__xfrm_policy_check);
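/*
 * Editorial note: the "dir" argument above packs an optional reverse
 * flag on top of the direction, which the function unpacks with
 * XFRM_POLICY_MASK. A caller-side sketch (assumed encoding, matching
 * the unpacking at the top of __xfrm_policy_check()):
 */
#if 0 /* example only */
static inline int example_policy_check(struct sock *sk, struct sk_buff *skb,
				       unsigned short family, int reverse)
{
	int ndir = XFRM_POLICY_IN | (reverse ? XFRM_POLICY_MASK + 1 : 0);

	return __xfrm_policy_check(sk, ndir, skb, family);
}
#endif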
2194
2195int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2196{
2197 struct net *net = dev_net(skb->dev);
2198 struct flowi fl;
2199 struct dst_entry *dst;
2200 int res = 1;
2201
2202 if (xfrm_decode_session(skb, &fl, family) < 0) {
2203 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2204 return 0;
2205 }
2206
2207 skb_dst_force(skb);
2208
2209 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2210 if (IS_ERR(dst)) {
2211 res = 0;
2212 dst = NULL;
2213 }
2214 skb_dst_set(skb, dst);
2215 return res;
2216}
2217EXPORT_SYMBOL(__xfrm_route_forward);
2218
2219/* Optimize later using cookies and generation ids. */
2220
2221static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2222{
2223 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2224 * to "-1" to force all XFRM destinations to get validated by
2225 * dst_ops->check on every use. We do this because when a
2226 * normal route referenced by an XFRM dst is obsoleted we do
2227 * not go looking around for all parent referencing XFRM dsts
2228 * so that we can invalidate them. It is just too much work.
2229 * Instead we make the checks here on every use. For example:
2230 *
2231 * XFRM dst A --> IPv4 dst X
2232 *
2233 * X is the "xdst->route" of A (X is also the "dst->path" of A
2234 * in this example). If X is marked obsolete, "A" will not
2235 * notice. That's what we are validating here via the
2236 * stale_bundle() check.
2237 *
2238 * When a policy's bundle is pruned, we dst_free() the XFRM
2239	 * dst which causes its ->obsolete field to be set to a
2240 * positive non-zero integer. If an XFRM dst has been pruned
2241 * like this, we want to force a new route lookup.
2242 */
2243 if (dst->obsolete < 0 && !stale_bundle(dst))
2244 return dst;
2245
2246 return NULL;
2247}
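/*
 * Editorial sketch: the ->check hook above is driven by dst_check()
 * (include/net/dst.h), which in this era reads approximately as
 * below; any nonzero ->obsolete forces the hook to run on every use.
 */
#if 0 /* example only */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);	/* xfrm_dst_check() here */
	return dst;
}
#endif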
2248
2249static int stale_bundle(struct dst_entry *dst)
2250{
2251 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2252}
2253
2254void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2255{
2256 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2257 dst->dev = dev_net(dev)->loopback_dev;
2258 dev_hold(dst->dev);
2259 dev_put(dev);
2260 }
2261}
2262EXPORT_SYMBOL(xfrm_dst_ifdown);
2263
2264static void xfrm_link_failure(struct sk_buff *skb)
2265{
2266	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2267}
2268
2269static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2270{
2271 if (dst) {
2272 if (dst->obsolete) {
2273 dst_release(dst);
2274 dst = NULL;
2275 }
2276 }
2277 return dst;
2278}
2279
2280static void __xfrm_garbage_collect(struct net *net)
2281{
2282 struct dst_entry *head, *next;
2283
2284 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2285 head = xfrm_policy_sk_bundles;
2286 xfrm_policy_sk_bundles = NULL;
2287 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2288
2289 while (head) {
2290 next = head->next;
2291 dst_free(head);
2292 head = next;
2293 }
2294}
2295
2296static void xfrm_garbage_collect(struct net *net)
2297{
2298 flow_cache_flush();
2299 __xfrm_garbage_collect(net);
2300}
2301
2302static void xfrm_garbage_collect_deferred(struct net *net)
2303{
2304 flow_cache_flush_deferred();
2305 __xfrm_garbage_collect(net);
2306}
2307
2308static void xfrm_init_pmtu(struct dst_entry *dst)
2309{
2310 do {
2311 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2312 u32 pmtu, route_mtu_cached;
2313
2314 pmtu = dst_mtu(dst->child);
2315 xdst->child_mtu_cached = pmtu;
2316
2317 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2318
2319 route_mtu_cached = dst_mtu(xdst->route);
2320 xdst->route_mtu_cached = route_mtu_cached;
2321
2322 if (pmtu > route_mtu_cached)
2323 pmtu = route_mtu_cached;
2324
2325 dst_metric_set(dst, RTAX_MTU, pmtu);
2326 } while ((dst = dst->next));
2327}
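/*
 * Editorial worked example (assumed numbers): if dst_mtu(dst->child)
 * is 1500 and xfrm_state_mtu() subtracts, say, 62 bytes of ESP/tunnel
 * overhead, pmtu becomes 1438; with a cached route MTU of 1500 the
 * RTAX_MTU stored for that level is min(1438, 1500) == 1438.
 */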
2328
2329/* Check that the bundle accepts the flow and its components are
2330 * still valid.
2331 */
2332
2333static int xfrm_bundle_ok(struct xfrm_dst *first)
2334{
2335 struct dst_entry *dst = &first->u.dst;
2336 struct xfrm_dst *last;
2337 u32 mtu;
2338
2339 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2340 (dst->dev && !netif_running(dst->dev)))
2341 return 0;
2342
2343 last = NULL;
2344
2345 do {
2346 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2347
2348 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2349 return 0;
2350 if (xdst->xfrm_genid != dst->xfrm->genid)
2351 return 0;
2352 if (xdst->num_pols > 0 &&
2353 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2354 return 0;
2355
2356 mtu = dst_mtu(dst->child);
2357 if (xdst->child_mtu_cached != mtu) {
2358 last = xdst;
2359 xdst->child_mtu_cached = mtu;
2360 }
2361
2362 if (!dst_check(xdst->route, xdst->route_cookie))
2363 return 0;
2364 mtu = dst_mtu(xdst->route);
2365 if (xdst->route_mtu_cached != mtu) {
2366 last = xdst;
2367 xdst->route_mtu_cached = mtu;
2368 }
2369
2370 dst = dst->child;
2371 } while (dst->xfrm);
2372
2373 if (likely(!last))
2374 return 1;
2375
2376 mtu = last->child_mtu_cached;
2377 for (;;) {
2378 dst = &last->u.dst;
2379
2380 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2381 if (mtu > last->route_mtu_cached)
2382 mtu = last->route_mtu_cached;
2383 dst_metric_set(dst, RTAX_MTU, mtu);
2384
2385 if (last == first)
2386 break;
2387
2388 last = (struct xfrm_dst *)last->u.dst.next;
2389 last->child_mtu_cached = mtu;
2390 }
2391
2392 return 1;
2393}
2394
2395static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2396{
2397 return dst_metric_advmss(dst->path);
2398}
2399
2400static unsigned int xfrm_mtu(const struct dst_entry *dst)
2401{
2402 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2403
2404 return mtu ? : dst_mtu(dst->path);
2405}
2406
2407static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
2408{
2409 return dst_neigh_lookup(dst->path, daddr);
2410}
2411
2412int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2413{
2414 struct net *net;
2415 int err = 0;
2416 if (unlikely(afinfo == NULL))
2417 return -EINVAL;
2418 if (unlikely(afinfo->family >= NPROTO))
2419 return -EAFNOSUPPORT;
2420 write_lock_bh(&xfrm_policy_afinfo_lock);
2421 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2422 err = -ENOBUFS;
2423 else {
2424 struct dst_ops *dst_ops = afinfo->dst_ops;
2425 if (likely(dst_ops->kmem_cachep == NULL))
2426 dst_ops->kmem_cachep = xfrm_dst_cache;
2427 if (likely(dst_ops->check == NULL))
2428 dst_ops->check = xfrm_dst_check;
2429 if (likely(dst_ops->default_advmss == NULL))
2430 dst_ops->default_advmss = xfrm_default_advmss;
2431 if (likely(dst_ops->mtu == NULL))
2432 dst_ops->mtu = xfrm_mtu;
2433 if (likely(dst_ops->negative_advice == NULL))
2434 dst_ops->negative_advice = xfrm_negative_advice;
2435 if (likely(dst_ops->link_failure == NULL))
2436 dst_ops->link_failure = xfrm_link_failure;
2437 if (likely(dst_ops->neigh_lookup == NULL))
2438 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2439 if (likely(afinfo->garbage_collect == NULL))
2440 afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2441 xfrm_policy_afinfo[afinfo->family] = afinfo;
2442 }
2443 write_unlock_bh(&xfrm_policy_afinfo_lock);
2444
2445 rtnl_lock();
2446 for_each_net(net) {
2447 struct dst_ops *xfrm_dst_ops;
2448
2449 switch (afinfo->family) {
2450 case AF_INET:
2451 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2452 break;
2453#if IS_ENABLED(CONFIG_IPV6)
2454 case AF_INET6:
2455 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2456 break;
2457#endif
2458 default:
2459 BUG();
2460 }
2461 *xfrm_dst_ops = *afinfo->dst_ops;
2462 }
2463 rtnl_unlock();
2464
2465 return err;
2466}
2467EXPORT_SYMBOL(xfrm_policy_register_afinfo);
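/*
 * Editorial sketch: a per-family caller (e.g. net/ipv4/xfrm4_policy.c)
 * registers itself roughly like this. The member list of struct
 * xfrm_policy_afinfo shown here is an assumption from this era's
 * headers; verify it against your tree.
 */
#if 0 /* example only */
static struct xfrm_policy_afinfo example_xfrm4_policy_afinfo = {
	.family		= AF_INET,
	.dst_ops	= &xfrm4_dst_ops,
	.dst_lookup	= xfrm4_dst_lookup,
	.decode_session	= _decode_session4,
	/* hooks left NULL are filled in with the xfrm_* defaults above */
};

static int __init example_xfrm4_policy_init(void)
{
	return xfrm_policy_register_afinfo(&example_xfrm4_policy_afinfo);
}
#endif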
2468
2469int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2470{
2471 int err = 0;
2472 if (unlikely(afinfo == NULL))
2473 return -EINVAL;
2474 if (unlikely(afinfo->family >= NPROTO))
2475 return -EAFNOSUPPORT;
2476 write_lock_bh(&xfrm_policy_afinfo_lock);
2477 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2478 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2479 err = -EINVAL;
2480 else {
2481 struct dst_ops *dst_ops = afinfo->dst_ops;
2482 xfrm_policy_afinfo[afinfo->family] = NULL;
2483 dst_ops->kmem_cachep = NULL;
2484 dst_ops->check = NULL;
2485 dst_ops->negative_advice = NULL;
2486 dst_ops->link_failure = NULL;
2487 afinfo->garbage_collect = NULL;
2488 }
2489 }
2490 write_unlock_bh(&xfrm_policy_afinfo_lock);
2491 return err;
2492}
2493EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2494
2495static void __net_init xfrm_dst_ops_init(struct net *net)
2496{
2497 struct xfrm_policy_afinfo *afinfo;
2498
2499 read_lock_bh(&xfrm_policy_afinfo_lock);
2500 afinfo = xfrm_policy_afinfo[AF_INET];
2501 if (afinfo)
2502 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2503#if IS_ENABLED(CONFIG_IPV6)
2504 afinfo = xfrm_policy_afinfo[AF_INET6];
2505 if (afinfo)
2506 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2507#endif
2508 read_unlock_bh(&xfrm_policy_afinfo_lock);
2509}
2510
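/*
 * Editorial note: xfrm_policy_get_afinfo() returns with
 * xfrm_policy_afinfo_lock read-held on success, so every successful
 * lookup must be paired with xfrm_policy_put_afinfo(), which drops
 * that lock; on failure the lock is released before returning NULL.
 */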
2511static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2512{
2513 struct xfrm_policy_afinfo *afinfo;
2514 if (unlikely(family >= NPROTO))
2515 return NULL;
2516 read_lock(&xfrm_policy_afinfo_lock);
2517 afinfo = xfrm_policy_afinfo[family];
2518 if (unlikely(!afinfo))
2519 read_unlock(&xfrm_policy_afinfo_lock);
2520 return afinfo;
2521}
2522
2523static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2524{
2525 read_unlock(&xfrm_policy_afinfo_lock);
2526}
2527
2528static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2529{
2530 struct net_device *dev = ptr;
2531
2532 switch (event) {
2533 case NETDEV_DOWN:
2534 xfrm_garbage_collect(dev_net(dev));
2535 }
2536 return NOTIFY_DONE;
2537}
2538
2539static struct notifier_block xfrm_dev_notifier = {
2540 .notifier_call = xfrm_dev_event,
2541};
2542
2543#ifdef CONFIG_XFRM_STATISTICS
2544static int __net_init xfrm_statistics_init(struct net *net)
2545{
2546 int rv;
2547
2548 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2549 sizeof(struct linux_xfrm_mib),
2550 __alignof__(struct linux_xfrm_mib)) < 0)
2551 return -ENOMEM;
2552 rv = xfrm_proc_init(net);
2553 if (rv < 0)
2554 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2555 return rv;
2556}
2557
2558static void xfrm_statistics_fini(struct net *net)
2559{
2560 xfrm_proc_fini(net);
2561 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2562}
2563#else
2564static int __net_init xfrm_statistics_init(struct net *net)
2565{
2566 return 0;
2567}
2568
2569static void xfrm_statistics_fini(struct net *net)
2570{
2571}
2572#endif
2573
2574static int __net_init xfrm_policy_init(struct net *net)
2575{
2576 unsigned int hmask, sz;
2577 int dir;
2578
2579 if (net_eq(net, &init_net))
2580 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2581 sizeof(struct xfrm_dst),
2582 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2583 NULL);
2584
2585 hmask = 8 - 1;
2586 sz = (hmask+1) * sizeof(struct hlist_head);
2587
2588 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2589 if (!net->xfrm.policy_byidx)
2590 goto out_byidx;
2591 net->xfrm.policy_idx_hmask = hmask;
2592
2593 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2594 struct xfrm_policy_hash *htab;
2595
2596 net->xfrm.policy_count[dir] = 0;
2597 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2598
2599 htab = &net->xfrm.policy_bydst[dir];
2600 htab->table = xfrm_hash_alloc(sz);
2601 if (!htab->table)
2602 goto out_bydst;
2603 htab->hmask = hmask;
2604 }
2605
2606 INIT_LIST_HEAD(&net->xfrm.policy_all);
2607 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2608 if (net_eq(net, &init_net))
2609 register_netdevice_notifier(&xfrm_dev_notifier);
2610 return 0;
2611
2612out_bydst:
2613 for (dir--; dir >= 0; dir--) {
2614 struct xfrm_policy_hash *htab;
2615
2616 htab = &net->xfrm.policy_bydst[dir];
2617 xfrm_hash_free(htab->table, sz);
2618 }
2619 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2620out_byidx:
2621 return -ENOMEM;
2622}
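/*
 * Editorial note on the sizing above: hmask = 8 - 1 yields an initial
 * table of (hmask + 1) == 8 hlist heads, i.e. sz == 8 *
 * sizeof(struct hlist_head) bytes per table; xfrm_hash_resize()
 * (scheduled via policy_hash_work) grows the tables as policies are
 * added.
 */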
2623
2624static void xfrm_policy_fini(struct net *net)
2625{
2626 struct xfrm_audit audit_info;
2627 unsigned int sz;
2628 int dir;
2629
2630 flush_work(&net->xfrm.policy_hash_work);
2631#ifdef CONFIG_XFRM_SUB_POLICY
2632 audit_info.loginuid = -1;
2633 audit_info.sessionid = -1;
2634 audit_info.secid = 0;
2635 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2636#endif
2637 audit_info.loginuid = -1;
2638 audit_info.sessionid = -1;
2639 audit_info.secid = 0;
2640 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2641
2642 WARN_ON(!list_empty(&net->xfrm.policy_all));
2643
2644 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2645 struct xfrm_policy_hash *htab;
2646
2647 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2648
2649 htab = &net->xfrm.policy_bydst[dir];
2650		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2651 WARN_ON(!hlist_empty(htab->table));
2652 xfrm_hash_free(htab->table, sz);
2653 }
2654
2655 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2656 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2657 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2658}
2659
2660static int __net_init xfrm_net_init(struct net *net)
2661{
2662 int rv;
2663
2664 rv = xfrm_statistics_init(net);
2665 if (rv < 0)
2666 goto out_statistics;
2667 rv = xfrm_state_init(net);
2668 if (rv < 0)
2669 goto out_state;
2670 rv = xfrm_policy_init(net);
2671 if (rv < 0)
2672 goto out_policy;
2673 xfrm_dst_ops_init(net);
2674 rv = xfrm_sysctl_init(net);
2675 if (rv < 0)
2676 goto out_sysctl;
2677 return 0;
2678
2679out_sysctl:
2680 xfrm_policy_fini(net);
2681out_policy:
2682 xfrm_state_fini(net);
2683out_state:
2684 xfrm_statistics_fini(net);
2685out_statistics:
2686 return rv;
2687}
2688
2689static void __net_exit xfrm_net_exit(struct net *net)
2690{
2691 xfrm_sysctl_fini(net);
2692 xfrm_policy_fini(net);
2693 xfrm_state_fini(net);
2694 xfrm_statistics_fini(net);
2695}
2696
2697static struct pernet_operations __net_initdata xfrm_net_ops = {
2698 .init = xfrm_net_init,
2699 .exit = xfrm_net_exit,
2700};
2701
2702void __init xfrm_init(void)
2703{
2704 register_pernet_subsys(&xfrm_net_ops);
2705 xfrm_input_init();
2706}
2707
2708#ifdef CONFIG_AUDITSYSCALL
2709static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2710 struct audit_buffer *audit_buf)
2711{
2712 struct xfrm_sec_ctx *ctx = xp->security;
2713 struct xfrm_selector *sel = &xp->selector;
2714
2715 if (ctx)
2716 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2717 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2718
2719	switch (sel->family) {
2720 case AF_INET:
2721 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2722 if (sel->prefixlen_s != 32)
2723 audit_log_format(audit_buf, " src_prefixlen=%d",
2724 sel->prefixlen_s);
2725 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2726 if (sel->prefixlen_d != 32)
2727 audit_log_format(audit_buf, " dst_prefixlen=%d",
2728 sel->prefixlen_d);
2729 break;
2730 case AF_INET6:
2731 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2732 if (sel->prefixlen_s != 128)
2733 audit_log_format(audit_buf, " src_prefixlen=%d",
2734 sel->prefixlen_s);
2735 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2736 if (sel->prefixlen_d != 128)
2737 audit_log_format(audit_buf, " dst_prefixlen=%d",
2738 sel->prefixlen_d);
2739 break;
2740 }
2741}
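/*
 * Editorial example, assembled from the format strings above: an IPv4
 * selector 10.0.0.0/8 -> 192.168.0.1/32 contributes
 *   " src=10.0.0.0 src_prefixlen=8 dst=192.168.0.1"
 * to the audit record (dst_prefixlen is suppressed because it is 32);
 * the surrounding op/auid/ses/res fields come from xfrm_audit_start()
 * and xfrm_audit_helper_usrinfo().
 */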
2742
2743void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2744 uid_t auid, u32 sessionid, u32 secid)
2745{
2746 struct audit_buffer *audit_buf;
2747
2748 audit_buf = xfrm_audit_start("SPD-add");
2749 if (audit_buf == NULL)
2750 return;
2751 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2752 audit_log_format(audit_buf, " res=%u", result);
2753 xfrm_audit_common_policyinfo(xp, audit_buf);
2754 audit_log_end(audit_buf);
2755}
2756EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2757
2758void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2759 uid_t auid, u32 sessionid, u32 secid)
2760{
2761 struct audit_buffer *audit_buf;
2762
2763 audit_buf = xfrm_audit_start("SPD-delete");
2764 if (audit_buf == NULL)
2765 return;
2766 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2767 audit_log_format(audit_buf, " res=%u", result);
2768 xfrm_audit_common_policyinfo(xp, audit_buf);
2769 audit_log_end(audit_buf);
2770}
2771EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2772#endif
2773
2774#ifdef CONFIG_XFRM_MIGRATE
2775static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
2776 const struct xfrm_selector *sel_tgt)
2777{
2778 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2779 if (sel_tgt->family == sel_cmp->family &&
2780 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2781 sel_cmp->family) == 0 &&
2782 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2783 sel_cmp->family) == 0 &&
2784 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2785 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2786 return 1;
2787 }
2788 } else {
2789 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2790 return 1;
2791 }
2792 }
2793 return 0;
2794}
2795
2796static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
2797 u8 dir, u8 type)
2798{
2799 struct xfrm_policy *pol, *ret = NULL;
2800 struct hlist_node *entry;
2801 struct hlist_head *chain;
2802 u32 priority = ~0U;
2803
2804 read_lock_bh(&xfrm_policy_lock);
2805 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
2806 hlist_for_each_entry(pol, entry, chain, bydst) {
2807 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2808 pol->type == type) {
2809 ret = pol;
2810 priority = ret->priority;
2811 break;
2812 }
2813 }
2814 chain = &init_net.xfrm.policy_inexact[dir];
2815 hlist_for_each_entry(pol, entry, chain, bydst) {
2816 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2817 pol->type == type &&
2818 pol->priority < priority) {
2819 ret = pol;
2820 break;
2821 }
2822 }
2823
2824 if (ret)
2825 xfrm_pol_hold(ret);
2826
2827 read_unlock_bh(&xfrm_policy_lock);
2828
2829 return ret;
2830}
2831
2832static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
2833{
2834 int match = 0;
2835
2836 if (t->mode == m->mode && t->id.proto == m->proto &&
2837 (m->reqid == 0 || t->reqid == m->reqid)) {
2838 switch (t->mode) {
2839 case XFRM_MODE_TUNNEL:
2840 case XFRM_MODE_BEET:
2841 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2842 m->old_family) == 0 &&
2843 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2844 m->old_family) == 0) {
2845 match = 1;
2846 }
2847 break;
2848 case XFRM_MODE_TRANSPORT:
2849			/* In transport mode the template stores no IP
2850			 * addresses, so matching mode and protocol is
2851			 * sufficient. */
2852 match = 1;
2853 break;
2854 default:
2855 break;
2856 }
2857 }
2858 return match;
2859}
2860
2861/* update endpoint address(es) of template(s) */
2862static int xfrm_policy_migrate(struct xfrm_policy *pol,
2863 struct xfrm_migrate *m, int num_migrate)
2864{
2865 struct xfrm_migrate *mp;
2866 int i, j, n = 0;
2867
2868 write_lock_bh(&pol->lock);
2869 if (unlikely(pol->walk.dead)) {
2870 /* target policy has been deleted */
2871 write_unlock_bh(&pol->lock);
2872 return -ENOENT;
2873 }
2874
2875 for (i = 0; i < pol->xfrm_nr; i++) {
2876 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2877 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2878 continue;
2879 n++;
2880 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2881 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2882 continue;
2883 /* update endpoints */
2884 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2885 sizeof(pol->xfrm_vec[i].id.daddr));
2886 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2887 sizeof(pol->xfrm_vec[i].saddr));
2888 pol->xfrm_vec[i].encap_family = mp->new_family;
2889 /* flush bundles */
2890 atomic_inc(&pol->genid);
2891 }
2892 }
2893
2894 write_unlock_bh(&pol->lock);
2895
2896 if (!n)
2897 return -ENODATA;
2898
2899 return 0;
2900}
2901
2902static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
2903{
2904 int i, j;
2905
2906 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2907 return -EINVAL;
2908
2909 for (i = 0; i < num_migrate; i++) {
2910 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2911 m[i].old_family) == 0) &&
2912 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2913 m[i].old_family) == 0))
2914 return -EINVAL;
2915 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2916 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2917 return -EINVAL;
2918
2919 /* check if there is any duplicated entry */
2920 for (j = i + 1; j < num_migrate; j++) {
2921 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2922 sizeof(m[i].old_daddr)) &&
2923 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2924 sizeof(m[i].old_saddr)) &&
2925 m[i].proto == m[j].proto &&
2926 m[i].mode == m[j].mode &&
2927 m[i].reqid == m[j].reqid &&
2928 m[i].old_family == m[j].old_family)
2929 return -EINVAL;
2930 }
2931 }
2932
2933 return 0;
2934}
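/*
 * Editorial sketch: one xfrm_migrate entry that passes the checks
 * above (old and new addresses differ, new addresses are not "any").
 * Field names follow this era's struct xfrm_migrate and the addresses
 * are made-up examples; verify the struct layout against your tree.
 */
#if 0 /* example only */
static void example_fill_migrate(struct xfrm_migrate *mp)
{
	memset(mp, 0, sizeof(*mp));
	mp->proto	 = IPPROTO_ESP;
	mp->mode	 = XFRM_MODE_TUNNEL;
	mp->reqid	 = 0;			/* 0 matches any reqid */
	mp->old_family	 = AF_INET;
	mp->new_family	 = AF_INET;
	mp->old_daddr.a4 = htonl(0xc0a80101);	/* 192.168.1.1 */
	mp->old_saddr.a4 = htonl(0xc0a80102);	/* 192.168.1.2 */
	mp->new_daddr.a4 = htonl(0xc0a80201);	/* 192.168.2.1 */
	mp->new_saddr.a4 = htonl(0xc0a80202);	/* 192.168.2.2 */
}
#endif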
2935
2936int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2937 struct xfrm_migrate *m, int num_migrate,
2938 struct xfrm_kmaddress *k)
2939{
2940 int i, err, nx_cur = 0, nx_new = 0;
2941 struct xfrm_policy *pol = NULL;
2942 struct xfrm_state *x, *xc;
2943 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2944 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2945 struct xfrm_migrate *mp;
2946
2947 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2948 goto out;
2949
2950	/* CVE-2017-11600: reject an out-of-range, user-supplied direction */
2951	if (dir >= XFRM_POLICY_MAX * 2) {
2952		err = -EINVAL;
2953		goto out;
2954	}
2955
2956 /* Stage 1 - find policy */
2957 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2958 err = -ENOENT;
2959 goto out;
2960 }
2961
2962 /* Stage 2 - find and update state(s) */
2963 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2964 if ((x = xfrm_migrate_state_find(mp))) {
2965 x_cur[nx_cur] = x;
2966 nx_cur++;
2967 if ((xc = xfrm_state_migrate(x, mp))) {
2968 x_new[nx_new] = xc;
2969 nx_new++;
2970 } else {
2971 err = -ENODATA;
2972 goto restore_state;
2973 }
2974 }
2975 }
2976
2977 /* Stage 3 - update policy */
2978 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2979 goto restore_state;
2980
2981 /* Stage 4 - delete old state(s) */
2982 if (nx_cur) {
2983 xfrm_states_put(x_cur, nx_cur);
2984 xfrm_states_delete(x_cur, nx_cur);
2985 }
2986
2987 /* Stage 5 - announce */
2988 km_migrate(sel, dir, type, m, num_migrate, k);
2989
2990 xfrm_pol_put(pol);
2991
2992 return 0;
2993out:
2994 return err;
2995
2996restore_state:
2997 if (pol)
2998 xfrm_pol_put(pol);
2999 if (nx_cur)
3000 xfrm_states_put(x_cur, nx_cur);
3001 if (nx_new)
3002 xfrm_states_delete(x_new, nx_new);
3003
3004 return err;
3005}
3006EXPORT_SYMBOL(xfrm_migrate);
3007#endif