/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

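/* Illustrative example (not from any real caller): a selector with
 * prefixlen_d = 24 matches every daddr inside that /24, and a
 * dport_mask of 0 wildcards the destination port, since
 * (dport ^ sel->dport) & 0 is always zero.
 */

/* Returns the matching afinfo entry with rcu_read_lock() held on
 * success; the caller must drop it with rcu_read_unlock() when done.
 * On failure the read lock is already released before NULL is
 * returned.
 */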
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here; it is meant to be used by
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
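
/* Rough usage sketch for the allocator above (illustrative only; the
 * selector values are hypothetical and the error handling mirrors the
 * pfkeyv2 caller, which marks the policy dead before destroying it):
 *
 *	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (xp) {
 *		xp->selector.family = AF_INET;
 *		xp->action = XFRM_POLICY_ALLOW;
 *		if (xfrm_policy_insert(XFRM_POLICY_OUT, xp, 0)) {
 *			xp->walk.dead = 1;
 *			xfrm_policy_destroy(xp);
 *		}
 *	}
 *
 * Real callers (pfkeyv2, xfrm_user) fill in far more state before
 * inserting.
 */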

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must already have been
 * released by this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from all lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return &net->xfrm.policy_inexact[dir];

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
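/* Each resize doubles the bucket count: an old hmask of 7 (8 buckets)
 * becomes 15 (16 buckets), and so on, up to xfrm_policy_hashmax.
 */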

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--)
			INIT_HLIST_HEAD(odst + i);
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead ||
		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family,
					  xfrm_policy_id2dir(policy->index));
		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate a new index. KAME seems to generate them at the cost of an
 * absolutely unpredictable ordering of rules, which will not do here.
 * The low three bits of an index encode the direction (see
 * xfrm_policy_id2dir()), so the generator advances in steps of 8.
 */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

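/* Word-wise comparison of two selectors; returns 0 when they are
 * identical and 1 otherwise (not a memcmp-style three-way ordering).
 */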
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

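/* Two policies are considered a mark match either when their (v, m)
 * pairs are exactly equal, or when the masked mark of @policy matches
 * @pol and both share the same priority.
 */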
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, newpos);
	else
		hlist_add_head_rcu(&policy->bydst, chain);
	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
					  u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0, task_valid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
								 task_valid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, task_valid);

			xfrm_policy_kill(pol);

			spin_lock_bh(&net->xfrm.xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1, task_valid);
				xfrm_policy_kill(pol);

				spin_lock_bh(&net->xfrm.xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);
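
/* Typical walker usage, as a rough sketch (print_one() is a
 * hypothetical callback, not part of this file):
 *
 *	struct xfrm_policy_walk walk;
 *	int err;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, print_one, NULL);
 *	xfrm_policy_walk_done(&walk, net);
 *
 * A walk that stopped early (callback returned nonzero) may be
 * resumed by calling xfrm_policy_walk() again before walk_done().
 */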

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, or an -errno otherwise.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	unsigned int sequence;
	u32 priority;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
 retry:
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	priority = ~0U;
	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}

	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}

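/* With CONFIG_XFRM_SUB_POLICY enabled, sub policies are searched
 * before falling back to the main policy type.
 */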
static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
					      const struct flowi *fl,
					      u16 family, u8 dir, u32 if_id)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
					dir, if_id);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
					 dir, if_id);
}

static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
						 const struct flowi *fl,
						 u16 family, u32 if_id)
{
	struct xfrm_policy *pol;

	rcu_read_lock();
 again:
	pol = rcu_dereference(sk->sk_policy[dir]);
	if (pol != NULL) {
		bool match;
		int err = 0;

		if (pol->family != family) {
			pol = NULL;
			goto out;
		}

		match = xfrm_selector_match(&pol->selector, fl, family);
		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
			    pol->if_id != if_id) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      dir);
			if (!err) {
				if (!xfrm_pol_hold_rcu(pol))
					goto again;
			} else if (err == -ESRCH) {
				pol = NULL;
			} else {
				pol = ERR_PTR(err);
			}
		} else
			pol = NULL;
	}
out:
	rcu_read_unlock();
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del_rcu(&pol->bydst);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = sock_net(sk);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
	if (pol) {
		pol->curlft.add_time = ktime_get_real_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	rcu_assign_pointer(sk->sk_policy[dir], pol);
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace a socket policy.
		 */
		xfrm_sk_policy_unlink(old_pol, dir);
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->if_id = old->if_id;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		newp->family = old->family;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	const struct xfrm_policy *p;
	struct xfrm_policy *np;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < 2; i++) {
		p = rcu_dereference(osk->sk_policy[i]);
		if (p) {
			np = clone_policy(p, i);
			if (unlikely(!np)) {
				ret = -ENOMEM;
				break;
			}
			rcu_assign_pointer(sk->sk_policy[i], np);
		}
	}
	rcu_read_unlock();
	return ret;
}

static int
xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
	       xfrm_address_t *remote, unsigned short family, u32 mark)
{
	int err;
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, oif, local, remote, mark);
	rcu_read_unlock();
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, fl->flowi_oif,
						       &tmp, remote,
						       tmpl->encap_family, 0);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
				    family, policy->if_id);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

 fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

static int xfrm_get_tos(const struct flowi *fl, int family)
{
	const struct xfrm_policy_afinfo *afinfo;
	int tos;

	afinfo = xfrm_policy_get_afinfo(family);
	if (!afinfo)
		return 0;

	tos = afinfo->get_tos(fl);

	rcu_read_unlock();

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
	} else
		xdst = ERR_PTR(-ENOBUFS);

	rcu_read_unlock();

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	const struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	rcu_read_unlock();

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	const struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	rcu_read_unlock();

	return err;
}


/* Allocate a chain of dst_entry's, attach known xfrm's and calculate
 * all the metrics. In short: bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm,
					    struct xfrm_dst **bundle,
					    int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct xfrm_dst *xdst_prev = NULL;
	struct xfrm_dst *xdst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		bundle[i] = xdst;
		if (!xdst_prev)
			xdst0 = xdst;
		else
			/* Ref count is taken during xfrm_alloc_dst()
			 * No need to do dst_clone() on dst1
			 */
			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			__u32 mark = 0;

			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);

			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
					      &saddr, &daddr, family, mark);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		xdst_prev = xdst;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	xfrm_dst_set_child(xdst_prev, dst);
	xdst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path(xdst0, dst, nfheader_len);
	xfrm_init_pmtu(bundle, nx);

	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
		err = xfrm_fill_dst(xdst_prev, dev, fl);
		if (err)
			goto free_dst;

		xdst_prev->u.dst.header_len = header_len;
		xdst_prev->u.dst.trailer_len = trailer_len;
		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
	}

	return &xdst0->u.dst;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (xdst0)
		dst_release_immediate(&xdst0->u.dst);

	return ERR_PTR(err);
}

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT,
						    pols[0]->if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct xfrm_dst *xdst;
	struct dst_entry *dst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err == 0)
			return NULL;

		if (err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}

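/* Packets parked on a policy's hold queue (because the needed
 * xfrm_states do not exist yet) are retried here with exponential
 * backoff between XFRM_QUEUE_TMO_MIN and XFRM_QUEUE_TMO_MAX; once a
 * usable, non-queue route resolves, the whole queue is flushed out
 * through dst_output().
 */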
static void xfrm_policy_queue_process(struct timer_list *t)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
	struct net *net = xp_net(pol);
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(xfrm_dst_path(dst));
	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(xfrm_dst_path(skb_dst(skb)));
		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(net, skb->sk, skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}

static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}

static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	xfrm_dst_set_child(xdst, dst);
	xdst->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
					   const struct flowi *fl,
					   u16 family, u8 dir,
					   struct xfrm_flo *xflo, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols = 0, num_xfrms = 0, err;
	struct xfrm_dst *xdst;

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	num_pols = 1;
	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
	err = xfrm_expand_policies(fl, family, pols,
				   &num_pols, &num_xfrms);
	if (err < 0)
		goto inc_error;
	if (num_pols == 0)
		return NULL;
	if (num_xfrms <= 0)
		goto make_dummy_bundle;

	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
					      xflo->dst_orig);
	if (IS_ERR(xdst)) {
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations, or
	 * we could not build a template (no xfrm_states).
	 */
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	rcu_read_unlock();

	return ret;
}

/* Finds/creates a bundle for a given flow and if_id.
 *
 * At the moment we eat a raw IP route, mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility.
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
		if (xdst == NULL)
			goto nopol;
		if (IS_ERR(xdst)) {
			err = PTR_ERR(xdst);
			goto dropdst;
		}

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with a null route is when the template could
		 * not be resolved. It means policies are there, but the
		 * bundle could not be created, since we don't yet
		 * have the xfrm_states. We need to wait for KM to
		 * negotiate new SAs or bail out with an error.
		 */
2133 if (net->xfrm.sysctl_larval_drop) {
2134 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2135 err = -EREMOTE;
2136 goto error;
2137 }
2138
2139 err = -EAGAIN;
2140
2141 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2142 goto error;
2143 }
2144
2145no_transform:
2146 if (num_pols == 0)
2147 goto nopol;
2148
2149 if ((flags & XFRM_LOOKUP_ICMP) &&
2150 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2151 err = -ENOENT;
2152 goto error;
2153 }
2154
2155 for (i = 0; i < num_pols; i++)
2156 pols[i]->curlft.use_time = ktime_get_real_seconds();
2157
2158 if (num_xfrms < 0) {
2159 /* Prohibit the flow */
2160 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2161 err = -EPERM;
2162 goto error;
2163 } else if (num_xfrms > 0) {
2164 /* Flow transformed */
2165 dst_release(dst_orig);
2166 } else {
2167 /* Flow passes untransformed */
2168 dst_release(dst);
2169 dst = dst_orig;
2170 }
2171ok:
2172 xfrm_pols_put(pols, drop_pols);
2173 if (dst && dst->xfrm &&
2174 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2175 dst->flags |= DST_XFRM_TUNNEL;
2176 return dst;
2177
2178nopol:
2179 if (!(flags & XFRM_LOOKUP_ICMP)) {
2180 dst = dst_orig;
2181 goto ok;
2182 }
2183 err = -ENOENT;
2184error:
2185 dst_release(dst);
2186dropdst:
2187 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2188 dst_release(dst_orig);
2189 xfrm_pols_put(pols, drop_pols);
2190 return ERR_PTR(err);
2191}
2192EXPORT_SYMBOL(xfrm_lookup_with_ifid);
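
/* Usage sketch (editorial, hedged; roughly the shape of the transmit
 * path in net/xfrm/xfrm_interface.c, with error handling abbreviated):
 *
 *	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0,
 *				    xi->p.if_id);
 *	if (IS_ERR(dst))
 *		goto tx_err;
 *	skb_dst_set(skb, dst);
 *
 * Passing an if_id of 0, as xfrm_lookup() below does, matches only
 * policies that are not bound to an xfrm interface.
 */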
2193
2194/* Main function: finds/creates a bundle for given flow.
2195 *
2196 * At the moment we eat a raw IP route. Mostly to speed up lookups
2197 * on interfaces with disabled IPsec.
2198 */
2199struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2200 const struct flowi *fl, const struct sock *sk,
2201 int flags)
2202{
2203 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
2204}
2205EXPORT_SYMBOL(xfrm_lookup);
2206
2207/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2208 * Otherwise we may send out blackholed packets.
2209 */
2210struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2211 const struct flowi *fl,
2212 const struct sock *sk, int flags)
2213{
2214 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2215 flags | XFRM_LOOKUP_QUEUE |
2216 XFRM_LOOKUP_KEEP_DST_REF);
2217
2218 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2219 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2220
2221 if (IS_ERR(dst))
2222 dst_release(dst_orig);
2223
2224 return dst;
2225}
2226EXPORT_SYMBOL(xfrm_lookup_route);
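
/* Contract illustration (editorial, hedged): a typical caller routes
 * the flow and then unconditionally hands the skb to dst_output(), so
 * that a blackhole route returned above drops the packet instead of
 * letting it leak out unencrypted:
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 */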
2227
2228static inline int
2229xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2230{
2231 struct xfrm_state *x;
2232
2233 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2234 return 0;
2235 x = skb->sp->xvec[idx];
2236 if (!x->type->reject)
2237 return 0;
2238 return x->type->reject(x, skb, fl);
2239}
2240
2241/* When skb is transformed back to its "native" form, we have to
2242 * check policy restrictions. At the moment we do this in a maximally
2243 * stupid way. Shame on me. :-) Of course, connected sockets must
2244 * have the policy cached at them.
2245 */
2246
2247static inline int
2248xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2249 unsigned short family)
2250{
2251 if (xfrm_state_kern(x))
2252 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2253 return x->id.proto == tmpl->id.proto &&
2254 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2255 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2256 x->props.mode == tmpl->mode &&
2257 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2258 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2259 !(x->props.mode != XFRM_MODE_TRANSPORT &&
2260 xfrm_state_addr_cmp(tmpl, x, family));
2261}
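
/* In words (editorial summary of the checks above): a template with
 * id.spi == 0 and reqid == 0 acts as a wildcard for any state of the
 * same proto and mode, subject to the algorithm mask; outside transport
 * mode the state's addresses must also match the template's.
 * Kernel-internal states (xfrm_state_kern) only satisfy optional
 * templates whose addresses match.
 */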
2262
2263/*
2264 * 0 or more is returned when validation succeeds (either a bypass
2265 * because of an optional transport-mode template, or the next index
2266 * of the secpath state matched against the template).
2267 * -1 is returned when no matching template is found.
2268 * Otherwise "-2 - errored_index" is returned.
2269 */
2270static inline int
2271xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2272 unsigned short family)
2273{
2274 int idx = start;
2275
2276 if (tmpl->optional) {
2277 if (tmpl->mode == XFRM_MODE_TRANSPORT)
2278 return start;
2279 } else
2280 start = -1;
2281 for (; idx < sp->len; idx++) {
2282 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2283 return ++idx;
2284 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2285 if (start == -1)
2286 start = -2-idx;
2287 break;
2288 }
2289 }
2290 return start;
2291}
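
/* Worked example (editorial): with a secpath { xvec[0] = ESP transport,
 * xvec[1] = AH tunnel } and a template matching the AH tunnel state,
 * xfrm_policy_ok() called with start == 0 returns 2, the index after
 * the match.  If instead a non-matching tunnel-mode state is hit at
 * index 1, it returns -2 - 1 == -3, and the caller recovers the
 * errored index as -(2 + ret) == 1 (see __xfrm_policy_check()).
 */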
2292
2293int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2294 unsigned int family, int reverse)
2295{
2296 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2297 int err;
2298
2299 if (unlikely(afinfo == NULL))
2300 return -EAFNOSUPPORT;
2301
2302 afinfo->decode_session(skb, fl, reverse);
2303
2304 err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2305 rcu_read_unlock();
2306 return err;
2307}
2308EXPORT_SYMBOL(__xfrm_decode_session);
2309
2310static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2311{
2312 for (; k < sp->len; k++) {
2313 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2314 *idxp = k;
2315 return 1;
2316 }
2317 }
2318
2319 return 0;
2320}
2321
2322int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2323 unsigned short family)
2324{
2325 struct net *net = dev_net(skb->dev);
2326 struct xfrm_policy *pol;
2327 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2328 int npols = 0;
2329 int xfrm_nr;
2330 int pi;
2331 int reverse;
2332 struct flowi fl;
2333 int xerr_idx = -1;
2334 const struct xfrm_if_cb *ifcb;
2335 struct xfrm_if *xi;
2336 u32 if_id = 0;
2337
2338 rcu_read_lock();
2339 ifcb = xfrm_if_get_cb();
2340
2341 if (ifcb) {
2342 xi = ifcb->decode_session(skb, family);
2343 if (xi) {
2344 if_id = xi->p.if_id;
2345 net = xi->net;
2346 }
2347 }
2348 rcu_read_unlock();
2349
2350 reverse = dir & ~XFRM_POLICY_MASK;
2351 dir &= XFRM_POLICY_MASK;
2352
2353 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2354 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2355 return 0;
2356 }
2357
2358 nf_nat_decode_session(skb, &fl, family);
2359
2360	/* First, check the used SAs against their selectors. */
2361 if (skb->sp) {
2362 int i;
2363
2364 for (i = skb->sp->len-1; i >= 0; i--) {
2365 struct xfrm_state *x = skb->sp->xvec[i];
2366 if (!xfrm_selector_match(&x->sel, &fl, family)) {
2367 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2368 return 0;
2369 }
2370 }
2371 }
2372
2373 pol = NULL;
2374 sk = sk_to_full_sk(sk);
2375 if (sk && sk->sk_policy[dir]) {
2376 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
2377 if (IS_ERR(pol)) {
2378 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2379 return 0;
2380 }
2381 }
2382
2383 if (!pol)
2384 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
2385
2386 if (IS_ERR(pol)) {
2387 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2388 return 0;
2389 }
2390
2391 if (!pol) {
2392 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2393 xfrm_secpath_reject(xerr_idx, skb, &fl);
2394 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2395 return 0;
2396 }
2397 return 1;
2398 }
2399
2400 pol->curlft.use_time = ktime_get_real_seconds();
2401
2402 pols[0] = pol;
2403 npols++;
2404#ifdef CONFIG_XFRM_SUB_POLICY
2405 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2406 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2407 &fl, family,
2408 XFRM_POLICY_IN, if_id);
2409 if (pols[1]) {
2410 if (IS_ERR(pols[1])) {
2411 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2412 return 0;
2413 }
2414 pols[1]->curlft.use_time = ktime_get_real_seconds();
2415 npols++;
2416 }
2417 }
2418#endif
2419
2420 if (pol->action == XFRM_POLICY_ALLOW) {
2421 struct sec_path *sp;
2422 static struct sec_path dummy;
2423 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2424 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2425 struct xfrm_tmpl **tpp = tp;
2426 int ti = 0;
2427 int i, k;
2428
2429 if ((sp = skb->sp) == NULL)
2430 sp = &dummy;
2431
2432 for (pi = 0; pi < npols; pi++) {
2433 if (pols[pi] != pol &&
2434 pols[pi]->action != XFRM_POLICY_ALLOW) {
2435 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2436 goto reject;
2437 }
2438 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2439 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2440 goto reject_error;
2441 }
2442 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2443 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2444 }
2445 xfrm_nr = ti;
2446 if (npols > 1) {
2447 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2448 tpp = stp;
2449 }
2450
2451		/* For each tunnel xfrm, find the first matching tmpl.
2452		 * For each tmpl before that, find the corresponding xfrm.
2453		 * Order is _important_. Later we will implement
2454		 * some barriers, but at the moment barriers
2455		 * are implied between every two transformations.
2456		 */
2457 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2458 k = xfrm_policy_ok(tpp[i], sp, k, family);
2459 if (k < 0) {
2460 if (k < -1)
2461 /* "-2 - errored_index" returned */
2462 xerr_idx = -(2+k);
2463 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2464 goto reject;
2465 }
2466 }
2467
2468 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2469 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2470 goto reject;
2471 }
2472
2473 xfrm_pols_put(pols, npols);
2474 return 1;
2475 }
2476 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2477
2478reject:
2479 xfrm_secpath_reject(xerr_idx, skb, &fl);
2480reject_error:
2481 xfrm_pols_put(pols, npols);
2482 return 0;
2483}
2484EXPORT_SYMBOL(__xfrm_policy_check);
2485
2486int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2487{
2488 struct net *net = dev_net(skb->dev);
2489 struct flowi fl;
2490 struct dst_entry *dst;
2491 int res = 1;
2492
2493 if (xfrm_decode_session(skb, &fl, family) < 0) {
2494 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2495 return 0;
2496 }
2497
2498 skb_dst_force(skb);
2499 if (!skb_dst(skb)) {
2500 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2501 return 0;
2502 }
2503
2504 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2505 if (IS_ERR(dst)) {
2506 res = 0;
2507 dst = NULL;
2508 }
2509 skb_dst_set(skb, dst);
2510 return res;
2511}
2512EXPORT_SYMBOL(__xfrm_route_forward);
2513
2514/* Optimize later using cookies and generation ids. */
2515
2516static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2517{
2518 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2519 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2520 * get validated by dst_ops->check on every use. We do this
2521 * because when a normal route referenced by an XFRM dst is
2522 * obsoleted we do not go looking around for all parent
2523 * referencing XFRM dsts so that we can invalidate them. It
2524 * is just too much work. Instead we make the checks here on
2525 * every use. For example:
2526 *
2527 * XFRM dst A --> IPv4 dst X
2528 *
2529 * X is the "xdst->route" of A (X is also the "dst->path" of A
2530 * in this example). If X is marked obsolete, "A" will not
2531 * notice. That's what we are validating here via the
2532 * stale_bundle() check.
2533 *
2534	 * When a dst is removed from the fib tree, it will be marked
2535	 * DST_OBSOLETE_DEAD.
2536	 * This forces stale_bundle() to fail on any xdst bundle that has
2537	 * this dst linked into it.
2538	 */
2539 if (dst->obsolete < 0 && !stale_bundle(dst))
2540 return dst;
2541
2542 return NULL;
2543}
2544
2545static int stale_bundle(struct dst_entry *dst)
2546{
2547 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2548}
2549
2550void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2551{
2552 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
2553 dst->dev = dev_net(dev)->loopback_dev;
2554 dev_hold(dst->dev);
2555 dev_put(dev);
2556 }
2557}
2558EXPORT_SYMBOL(xfrm_dst_ifdown);
2559
2560static void xfrm_link_failure(struct sk_buff *skb)
2561{
2562	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2563}
2564
2565static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2566{
2567 if (dst) {
2568 if (dst->obsolete) {
2569 dst_release(dst);
2570 dst = NULL;
2571 }
2572 }
2573 return dst;
2574}
2575
2576static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
2577{
2578 while (nr--) {
2579 struct xfrm_dst *xdst = bundle[nr];
2580 u32 pmtu, route_mtu_cached;
2581 struct dst_entry *dst;
2582
2583 dst = &xdst->u.dst;
2584 pmtu = dst_mtu(xfrm_dst_child(dst));
2585 xdst->child_mtu_cached = pmtu;
2586
2587 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2588
2589 route_mtu_cached = dst_mtu(xdst->route);
2590 xdst->route_mtu_cached = route_mtu_cached;
2591
2592 if (pmtu > route_mtu_cached)
2593 pmtu = route_mtu_cached;
2594
2595 dst_metric_set(dst, RTAX_MTU, pmtu);
2596 }
2597}
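
/* Numeric example (editorial, illustrative figures): if the child route
 * of an ESP tunnel xdst reports an MTU of 1500, xfrm_state_mtu() trims
 * that by the state's header/trailer overhead to, say, roughly 1440
 * bytes of payload; if the route toward the tunnel endpoint is cached
 * at 1400, the entry's MTU is clamped to the smaller value, 1400.
 */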
2598
2599/* Check that the bundle accepts the flow and its components are
2600 * still valid.
2601 */
2602
2603static int xfrm_bundle_ok(struct xfrm_dst *first)
2604{
2605 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2606 struct dst_entry *dst = &first->u.dst;
2607 struct xfrm_dst *xdst;
2608 int start_from, nr;
2609 u32 mtu;
2610
2611 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
2612 (dst->dev && !netif_running(dst->dev)))
2613 return 0;
2614
2615 if (dst->flags & DST_XFRM_QUEUE)
2616 return 1;
2617
2618 start_from = nr = 0;
2619 do {
2620 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2621
2622 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2623 return 0;
2624 if (xdst->xfrm_genid != dst->xfrm->genid)
2625 return 0;
2626 if (xdst->num_pols > 0 &&
2627 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2628 return 0;
2629
2630 bundle[nr++] = xdst;
2631
2632 mtu = dst_mtu(xfrm_dst_child(dst));
2633 if (xdst->child_mtu_cached != mtu) {
2634 start_from = nr;
2635 xdst->child_mtu_cached = mtu;
2636 }
2637
2638 if (!dst_check(xdst->route, xdst->route_cookie))
2639 return 0;
2640 mtu = dst_mtu(xdst->route);
2641 if (xdst->route_mtu_cached != mtu) {
2642 start_from = nr;
2643 xdst->route_mtu_cached = mtu;
2644 }
2645
2646 dst = xfrm_dst_child(dst);
2647 } while (dst->xfrm);
2648
2649 if (likely(!start_from))
2650 return 1;
2651
2652 xdst = bundle[start_from - 1];
2653 mtu = xdst->child_mtu_cached;
2654 while (start_from--) {
2655 dst = &xdst->u.dst;
2656
2657 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2658 if (mtu > xdst->route_mtu_cached)
2659 mtu = xdst->route_mtu_cached;
2660 dst_metric_set(dst, RTAX_MTU, mtu);
2661 if (!start_from)
2662 break;
2663
2664 xdst = bundle[start_from - 1];
2665 xdst->child_mtu_cached = mtu;
2666 }
2667
2668 return 1;
2669}
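
/* Propagation note (editorial): when the first loop above finds a stale
 * cached MTU, start_from points just past the deepest affected entry.
 * The second loop then walks back toward the outermost xdst: at each
 * level the child MTU is reduced by that state's overhead via
 * xfrm_state_mtu(), clamped to the cached route MTU, stored as the
 * level's RTAX_MTU, and handed outward as the next level's child MTU.
 */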
2670
2671static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2672{
2673 return dst_metric_advmss(xfrm_dst_path(dst));
2674}
2675
2676static unsigned int xfrm_mtu(const struct dst_entry *dst)
2677{
2678 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2679
2680 return mtu ? : dst_mtu(xfrm_dst_path(dst));
2681}
2682
2683static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2684 const void *daddr)
2685{
2686 while (dst->xfrm) {
2687 const struct xfrm_state *xfrm = dst->xfrm;
2688
2689 dst = xfrm_dst_child(dst);
2690
2691 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2692 continue;
2693 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2694 daddr = xfrm->coaddr;
2695 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2696 daddr = &xfrm->id.daddr;
2697 }
2698 return daddr;
2699}
2700
2701static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2702 struct sk_buff *skb,
2703 const void *daddr)
2704{
2705 const struct dst_entry *path = xfrm_dst_path(dst);
2706
2707 if (!skb)
2708 daddr = xfrm_get_dst_nexthop(dst, daddr);
2709 return path->ops->neigh_lookup(path, skb, daddr);
2710}
2711
2712static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2713{
2714 const struct dst_entry *path = xfrm_dst_path(dst);
2715
2716 daddr = xfrm_get_dst_nexthop(dst, daddr);
2717 path->ops->confirm_neigh(path, daddr);
2718}
2719
2720int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2721{
2722 int err = 0;
2723
2724 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2725 return -EAFNOSUPPORT;
2726
2727 spin_lock(&xfrm_policy_afinfo_lock);
2728 if (unlikely(xfrm_policy_afinfo[family] != NULL))
2729 err = -EEXIST;
2730 else {
2731 struct dst_ops *dst_ops = afinfo->dst_ops;
2732 if (likely(dst_ops->kmem_cachep == NULL))
2733 dst_ops->kmem_cachep = xfrm_dst_cache;
2734 if (likely(dst_ops->check == NULL))
2735 dst_ops->check = xfrm_dst_check;
2736 if (likely(dst_ops->default_advmss == NULL))
2737 dst_ops->default_advmss = xfrm_default_advmss;
2738 if (likely(dst_ops->mtu == NULL))
2739 dst_ops->mtu = xfrm_mtu;
2740 if (likely(dst_ops->negative_advice == NULL))
2741 dst_ops->negative_advice = xfrm_negative_advice;
2742 if (likely(dst_ops->link_failure == NULL))
2743 dst_ops->link_failure = xfrm_link_failure;
2744 if (likely(dst_ops->neigh_lookup == NULL))
2745 dst_ops->neigh_lookup = xfrm_neigh_lookup;
2746 if (likely(!dst_ops->confirm_neigh))
2747 dst_ops->confirm_neigh = xfrm_confirm_neigh;
2748 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2749 }
2750 spin_unlock(&xfrm_policy_afinfo_lock);
2751
2752 return err;
2753}
2754EXPORT_SYMBOL(xfrm_policy_register_afinfo);
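
/* Registration sketch (editorial, hedged; loosely modeled on what the
 * IPv4 side in net/ipv4/xfrm4_policy.c does, field list abbreviated):
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	= &xfrm4_dst_ops_template,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		...
 *	};
 *
 *	err = xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Any dst_ops hooks the caller leaves NULL are filled in above with the
 * generic xfrm_* defaults.
 */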
2755
2756void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2757{
2758 struct dst_ops *dst_ops = afinfo->dst_ops;
2759 int i;
2760
2761 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2762 if (xfrm_policy_afinfo[i] != afinfo)
2763 continue;
2764 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2765 break;
2766 }
2767
2768 synchronize_rcu();
2769
2770 dst_ops->kmem_cachep = NULL;
2771 dst_ops->check = NULL;
2772 dst_ops->negative_advice = NULL;
2773 dst_ops->link_failure = NULL;
2774}
2775EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2776
2777void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
2778{
2779 spin_lock(&xfrm_if_cb_lock);
2780 rcu_assign_pointer(xfrm_if_cb, ifcb);
2781 spin_unlock(&xfrm_if_cb_lock);
2782}
2783EXPORT_SYMBOL(xfrm_if_register_cb);
2784
2785void xfrm_if_unregister_cb(void)
2786{
2787 RCU_INIT_POINTER(xfrm_if_cb, NULL);
2788 synchronize_rcu();
2789}
2790EXPORT_SYMBOL(xfrm_if_unregister_cb);
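
/* Registration sketch (editorial, hedged; the xfrm interface driver in
 * net/xfrm/xfrm_interface.c registers its decode hook roughly like):
 *
 *	static const struct xfrm_if_cb xfrm_if_cb = {
 *		.decode_session	= xfrmi_decode_session,
 *	};
 *
 *	xfrm_if_register_cb(&xfrm_if_cb);
 *
 * __xfrm_policy_check() consults this callback to map an skb to an
 * xfrm interface, and hence to an if_id and netns.
 */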
2791
2792#ifdef CONFIG_XFRM_STATISTICS
2793static int __net_init xfrm_statistics_init(struct net *net)
2794{
2795 int rv;
2796 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2797 if (!net->mib.xfrm_statistics)
2798 return -ENOMEM;
2799 rv = xfrm_proc_init(net);
2800 if (rv < 0)
2801 free_percpu(net->mib.xfrm_statistics);
2802 return rv;
2803}
2804
2805static void xfrm_statistics_fini(struct net *net)
2806{
2807 xfrm_proc_fini(net);
2808 free_percpu(net->mib.xfrm_statistics);
2809}
2810#else
2811static int __net_init xfrm_statistics_init(struct net *net)
2812{
2813 return 0;
2814}
2815
2816static void xfrm_statistics_fini(struct net *net)
2817{
2818}
2819#endif
2820
2821static int __net_init xfrm_policy_init(struct net *net)
2822{
2823 unsigned int hmask, sz;
2824 int dir;
2825
2826 if (net_eq(net, &init_net))
2827 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2828 sizeof(struct xfrm_dst),
2829 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2830 NULL);
2831
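	/* Editorial note: start with 8 hash buckets (hmask == buckets - 1);
	 * xfrm_hash_resize, scheduled via policy_hash_work below, grows
	 * the tables as policies accumulate.
	 */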
2832 hmask = 8 - 1;
2833 sz = (hmask+1) * sizeof(struct hlist_head);
2834
2835 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2836 if (!net->xfrm.policy_byidx)
2837 goto out_byidx;
2838 net->xfrm.policy_idx_hmask = hmask;
2839
2840 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2841 struct xfrm_policy_hash *htab;
2842
2843 net->xfrm.policy_count[dir] = 0;
2844 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2845 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2846
2847 htab = &net->xfrm.policy_bydst[dir];
2848 htab->table = xfrm_hash_alloc(sz);
2849 if (!htab->table)
2850 goto out_bydst;
2851 htab->hmask = hmask;
2852 htab->dbits4 = 32;
2853 htab->sbits4 = 32;
2854 htab->dbits6 = 128;
2855 htab->sbits6 = 128;
2856 }
2857 net->xfrm.policy_hthresh.lbits4 = 32;
2858 net->xfrm.policy_hthresh.rbits4 = 32;
2859 net->xfrm.policy_hthresh.lbits6 = 128;
2860 net->xfrm.policy_hthresh.rbits6 = 128;
2861
2862 seqlock_init(&net->xfrm.policy_hthresh.lock);
2863
2864 INIT_LIST_HEAD(&net->xfrm.policy_all);
2865 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2866 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2867 return 0;
2868
2869out_bydst:
2870 for (dir--; dir >= 0; dir--) {
2871 struct xfrm_policy_hash *htab;
2872
2873 htab = &net->xfrm.policy_bydst[dir];
2874 xfrm_hash_free(htab->table, sz);
2875 }
2876 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2877out_byidx:
2878 return -ENOMEM;
2879}
2880
2881static void xfrm_policy_fini(struct net *net)
2882{
2883 unsigned int sz;
2884 int dir;
2885
2886 flush_work(&net->xfrm.policy_hash_work);
2887#ifdef CONFIG_XFRM_SUB_POLICY
2888 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2889#endif
2890 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2891
2892 WARN_ON(!list_empty(&net->xfrm.policy_all));
2893
2894 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2895 struct xfrm_policy_hash *htab;
2896
2897 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2898
2899 htab = &net->xfrm.policy_bydst[dir];
2900 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2901 WARN_ON(!hlist_empty(htab->table));
2902 xfrm_hash_free(htab->table, sz);
2903 }
2904
2905 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2906 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2907 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2908}
2909
2910static int __net_init xfrm_net_init(struct net *net)
2911{
2912 int rv;
2913
2914 /* Initialize the per-net locks here */
2915 spin_lock_init(&net->xfrm.xfrm_state_lock);
2916 spin_lock_init(&net->xfrm.xfrm_policy_lock);
2917 mutex_init(&net->xfrm.xfrm_cfg_mutex);
2918
2919 rv = xfrm_statistics_init(net);
2920 if (rv < 0)
2921 goto out_statistics;
2922 rv = xfrm_state_init(net);
2923 if (rv < 0)
2924 goto out_state;
2925 rv = xfrm_policy_init(net);
2926 if (rv < 0)
2927 goto out_policy;
2928 rv = xfrm_sysctl_init(net);
2929 if (rv < 0)
2930 goto out_sysctl;
2931
2932 return 0;
2933
2934out_sysctl:
2935 xfrm_policy_fini(net);
2936out_policy:
2937 xfrm_state_fini(net);
2938out_state:
2939 xfrm_statistics_fini(net);
2940out_statistics:
2941 return rv;
2942}
2943
2944static void __net_exit xfrm_net_exit(struct net *net)
2945{
2946 xfrm_sysctl_fini(net);
2947 xfrm_policy_fini(net);
2948 xfrm_state_fini(net);
2949 xfrm_statistics_fini(net);
2950}
2951
2952static struct pernet_operations __net_initdata xfrm_net_ops = {
2953 .init = xfrm_net_init,
2954 .exit = xfrm_net_exit,
2955};
2956
2957void __init xfrm_init(void)
2958{
2959 register_pernet_subsys(&xfrm_net_ops);
2960 xfrm_dev_init();
2961 seqcount_init(&xfrm_policy_hash_generation);
2962 xfrm_input_init();
2963
2964 RCU_INIT_POINTER(xfrm_if_cb, NULL);
2965 synchronize_rcu();
2966}
2967
2968#ifdef CONFIG_AUDITSYSCALL
2969static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2970 struct audit_buffer *audit_buf)
2971{
2972 struct xfrm_sec_ctx *ctx = xp->security;
2973 struct xfrm_selector *sel = &xp->selector;
2974
2975 if (ctx)
2976 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2977 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2978
2979 switch (sel->family) {
2980 case AF_INET:
2981 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2982 if (sel->prefixlen_s != 32)
2983 audit_log_format(audit_buf, " src_prefixlen=%d",
2984 sel->prefixlen_s);
2985 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2986 if (sel->prefixlen_d != 32)
2987 audit_log_format(audit_buf, " dst_prefixlen=%d",
2988 sel->prefixlen_d);
2989 break;
2990 case AF_INET6:
2991 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2992 if (sel->prefixlen_s != 128)
2993 audit_log_format(audit_buf, " src_prefixlen=%d",
2994 sel->prefixlen_s);
2995 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2996 if (sel->prefixlen_d != 128)
2997 audit_log_format(audit_buf, " dst_prefixlen=%d",
2998 sel->prefixlen_d);
2999 break;
3000 }
3001}
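
/* Example of the emitted fields (editorial, illustrative values):
 *
 *	" sec_alg=1 sec_doi=0 sec_obj=system_u:object_r:ipsec_spd_t:s0"
 *	" src=192.0.2.1 src_prefixlen=24 dst=198.51.100.7"
 *
 * Prefix lengths are logged only when the selector is narrower than a
 * full host address (/32 for IPv4, /128 for IPv6).
 */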
3002
3003void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3004{
3005 struct audit_buffer *audit_buf;
3006
3007 audit_buf = xfrm_audit_start("SPD-add");
3008 if (audit_buf == NULL)
3009 return;
3010 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3011 audit_log_format(audit_buf, " res=%u", result);
3012 xfrm_audit_common_policyinfo(xp, audit_buf);
3013 audit_log_end(audit_buf);
3014}
3015EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3016
3017void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3018 bool task_valid)
3019{
3020 struct audit_buffer *audit_buf;
3021
3022 audit_buf = xfrm_audit_start("SPD-delete");
3023 if (audit_buf == NULL)
3024 return;
3025 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3026 audit_log_format(audit_buf, " res=%u", result);
3027 xfrm_audit_common_policyinfo(xp, audit_buf);
3028 audit_log_end(audit_buf);
3029}
3030EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3031#endif
3032
3033#ifdef CONFIG_XFRM_MIGRATE
3034static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3035 const struct xfrm_selector *sel_tgt)
3036{
3037 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3038 if (sel_tgt->family == sel_cmp->family &&
3039 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3040 sel_cmp->family) &&
3041 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3042 sel_cmp->family) &&
3043 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3044 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3045 return true;
3046 }
3047 } else {
3048 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3049 return true;
3050 }
3051 }
3052 return false;
3053}
3054
3055static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3056 u8 dir, u8 type, struct net *net)
3057{
3058 struct xfrm_policy *pol, *ret = NULL;
3059 struct hlist_head *chain;
3060 u32 priority = ~0U;
3061
3062 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3063 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3064 hlist_for_each_entry(pol, chain, bydst) {
3065 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3066 pol->type == type) {
3067 ret = pol;
3068 priority = ret->priority;
3069 break;
3070 }
3071 }
3072 chain = &net->xfrm.policy_inexact[dir];
3073 hlist_for_each_entry(pol, chain, bydst) {
3074 if ((pol->priority >= priority) && ret)
3075 break;
3076
3077 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3078 pol->type == type) {
3079 ret = pol;
3080 break;
3081 }
3082 }
3083
3084 xfrm_pol_hold(ret);
3085
3086 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3087
3088 return ret;
3089}
3090
3091static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3092{
3093 int match = 0;
3094
3095 if (t->mode == m->mode && t->id.proto == m->proto &&
3096 (m->reqid == 0 || t->reqid == m->reqid)) {
3097 switch (t->mode) {
3098 case XFRM_MODE_TUNNEL:
3099 case XFRM_MODE_BEET:
3100 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3101 m->old_family) &&
3102 xfrm_addr_equal(&t->saddr, &m->old_saddr,
3103 m->old_family)) {
3104 match = 1;
3105 }
3106 break;
3107 case XFRM_MODE_TRANSPORT:
3108			/* In transport mode the template does not store
3109			   any IP addresses, hence we just compare mode
3110			   and protocol. */
3111 match = 1;
3112 break;
3113 default:
3114 break;
3115 }
3116 }
3117 return match;
3118}
3119
3120/* update endpoint address(es) of template(s) */
3121static int xfrm_policy_migrate(struct xfrm_policy *pol,
3122 struct xfrm_migrate *m, int num_migrate)
3123{
3124 struct xfrm_migrate *mp;
3125 int i, j, n = 0;
3126
3127 write_lock_bh(&pol->lock);
3128 if (unlikely(pol->walk.dead)) {
3129 /* target policy has been deleted */
3130 write_unlock_bh(&pol->lock);
3131 return -ENOENT;
3132 }
3133
3134 for (i = 0; i < pol->xfrm_nr; i++) {
3135 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3136 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3137 continue;
3138 n++;
3139 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3140 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3141 continue;
3142 /* update endpoints */
3143 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3144 sizeof(pol->xfrm_vec[i].id.daddr));
3145 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3146 sizeof(pol->xfrm_vec[i].saddr));
3147 pol->xfrm_vec[i].encap_family = mp->new_family;
3148 /* flush bundles */
3149 atomic_inc(&pol->genid);
3150 }
3151 }
3152
3153 write_unlock_bh(&pol->lock);
3154
3155 if (!n)
3156 return -ENODATA;
3157
3158 return 0;
3159}
3160
3161static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3162{
3163 int i, j;
3164
3165 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3166 return -EINVAL;
3167
3168 for (i = 0; i < num_migrate; i++) {
3169 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3170 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3171 return -EINVAL;
3172
3173		/* check for duplicated entries */
3174 for (j = i + 1; j < num_migrate; j++) {
3175 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3176 sizeof(m[i].old_daddr)) &&
3177 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3178 sizeof(m[i].old_saddr)) &&
3179 m[i].proto == m[j].proto &&
3180 m[i].mode == m[j].mode &&
3181 m[i].reqid == m[j].reqid &&
3182 m[i].old_family == m[j].old_family)
3183 return -EINVAL;
3184 }
3185 }
3186
3187 return 0;
3188}
3189
3190int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3191 struct xfrm_migrate *m, int num_migrate,
3192 struct xfrm_kmaddress *k, struct net *net,
3193 struct xfrm_encap_tmpl *encap)
3194{
3195 int i, err, nx_cur = 0, nx_new = 0;
3196 struct xfrm_policy *pol = NULL;
3197 struct xfrm_state *x, *xc;
3198 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3199 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3200 struct xfrm_migrate *mp;
3201
3202 /* Stage 0 - sanity checks */
3203 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3204 goto out;
3205
3206 if (dir >= XFRM_POLICY_MAX) {
3207 err = -EINVAL;
3208 goto out;
3209 }
3210
3211 /* Stage 1 - find policy */
3212 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3213 err = -ENOENT;
3214 goto out;
3215 }
3216
3217 /* Stage 2 - find and update state(s) */
3218 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3219 if ((x = xfrm_migrate_state_find(mp, net))) {
3220 x_cur[nx_cur] = x;
3221 nx_cur++;
3222 xc = xfrm_state_migrate(x, mp, encap);
3223 if (xc) {
3224 x_new[nx_new] = xc;
3225 nx_new++;
3226 } else {
3227 err = -ENODATA;
3228 goto restore_state;
3229 }
3230 }
3231 }
3232
3233 /* Stage 3 - update policy */
3234 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3235 goto restore_state;
3236
3237 /* Stage 4 - delete old state(s) */
3238 if (nx_cur) {
3239 xfrm_states_put(x_cur, nx_cur);
3240 xfrm_states_delete(x_cur, nx_cur);
3241 }
3242
3243 /* Stage 5 - announce */
3244 km_migrate(sel, dir, type, m, num_migrate, k, encap);
3245
3246 xfrm_pol_put(pol);
3247
3248 return 0;
3249out:
3250 return err;
3251
3252restore_state:
3253 if (pol)
3254 xfrm_pol_put(pol);
3255 if (nx_cur)
3256 xfrm_states_put(x_cur, nx_cur);
3257 if (nx_new)
3258 xfrm_states_delete(x_new, nx_new);
3259
3260 return err;
3261}
3262EXPORT_SYMBOL(xfrm_migrate);
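
/* Worked example (editorial): to move a tunnel from old gateway
 * 192.0.2.1 to 203.0.113.5, the key manager sends one struct
 * xfrm_migrate per direction, with old_daddr/old_saddr naming the
 * current endpoints and new_daddr/new_saddr the replacements.  After
 * the sanity checks, matching states are cloned onto the new addresses
 * (stage 2), the policy templates are rewritten and their bundles
 * flushed (stage 3), the old states are deleted (stage 4), and key
 * managers are notified via km_migrate() (stage 5).
 */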
3263#endif