1/* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13#include <linux/crypto.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/slab.h>
18#include <linux/socket.h>
19#include <linux/string.h>
20#include <linux/net.h>
21#include <linux/skbuff.h>
22#include <linux/pfkeyv2.h>
23#include <linux/ipsec.h>
24#include <linux/init.h>
25#include <linux/security.h>
26#include <net/sock.h>
27#include <net/xfrm.h>
28#include <net/netlink.h>
29#include <net/ah.h>
30#include <asm/uaccess.h>
31#if IS_ENABLED(CONFIG_IPV6)
32#include <linux/in6.h>
33#endif
34
35static inline int aead_len(struct xfrm_algo_aead *alg)
36{
37 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
38}
39
40static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
41{
42 struct nlattr *rt = attrs[type];
43 struct xfrm_algo *algp;
44
45 if (!rt)
46 return 0;
47
48 algp = nla_data(rt);
49 if (nla_len(rt) < xfrm_alg_len(algp))
50 return -EINVAL;
51
52 switch (type) {
53 case XFRMA_ALG_AUTH:
54 case XFRMA_ALG_CRYPT:
55 case XFRMA_ALG_COMP:
56 break;
57
58 default:
59 return -EINVAL;
60 }
61
62 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
63 return 0;
64}
65
66static int verify_auth_trunc(struct nlattr **attrs)
67{
68 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
69 struct xfrm_algo_auth *algp;
70
71 if (!rt)
72 return 0;
73
74 algp = nla_data(rt);
75 if (nla_len(rt) < xfrm_alg_auth_len(algp))
76 return -EINVAL;
77
78 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
79 return 0;
80}
81
82static int verify_aead(struct nlattr **attrs)
83{
84 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
85 struct xfrm_algo_aead *algp;
86
87 if (!rt)
88 return 0;
89
90 algp = nla_data(rt);
91 if (nla_len(rt) < aead_len(algp))
92 return -EINVAL;
93
94 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
95 return 0;
96}
97
98static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
99 xfrm_address_t **addrp)
100{
101 struct nlattr *rt = attrs[type];
102
103 if (rt && addrp)
104 *addrp = nla_data(rt);
105}
106
107static inline int verify_sec_ctx_len(struct nlattr **attrs)
108{
109 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
110 struct xfrm_user_sec_ctx *uctx;
111
112 if (!rt)
113 return 0;
114
115 uctx = nla_data(rt);
116 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
117 return -EINVAL;
118
119 return 0;
120}
121
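/* Validate XFRMA_REPLAY_ESN_VAL: an ESN state must carry the attribute
 * (with a sane bitmap length and size), and whenever the attribute is
 * present the SA must be ESP with the legacy replay_window left at zero.
 */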
122static inline int verify_replay(struct xfrm_usersa_info *p,
123 struct nlattr **attrs)
124{
125 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
126 struct xfrm_replay_state_esn *rs;
127
128 if (p->flags & XFRM_STATE_ESN) {
129 if (!rt)
130 return -EINVAL;
131
132 rs = nla_data(rt);
133
134 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
135 return -EINVAL;
136
137 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
138 nla_len(rt) != sizeof(*rs))
139 return -EINVAL;
140 }
141
142 if (!rt)
143 return 0;
144
145 if (p->id.proto != IPPROTO_ESP)
146 return -EINVAL;
147
148 if (p->replay_window != 0)
149 return -EINVAL;
150
151 return 0;
152}
153
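/* Sanity-check an XFRM_MSG_NEWSA/UPDSA request: address family, the
 * attribute combination allowed for the protocol (AH, ESP, IPcomp,
 * MIPv6 DSTOPTS/ROUTING), the individual algorithm, security-context
 * and replay attributes, and finally the transform mode.
 */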
154static int verify_newsa_info(struct xfrm_usersa_info *p,
155 struct nlattr **attrs)
156{
157 int err;
158
159 err = -EINVAL;
160 switch (p->family) {
161 case AF_INET:
162 break;
163
164 case AF_INET6:
165#if IS_ENABLED(CONFIG_IPV6)
166 break;
167#else
168 err = -EAFNOSUPPORT;
169 goto out;
170#endif
171
172 default:
173 goto out;
174 }
175
176 err = -EINVAL;
177 switch (p->id.proto) {
178 case IPPROTO_AH:
179 if ((!attrs[XFRMA_ALG_AUTH] &&
180 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
181 attrs[XFRMA_ALG_AEAD] ||
182 attrs[XFRMA_ALG_CRYPT] ||
183 attrs[XFRMA_ALG_COMP] ||
184 attrs[XFRMA_TFCPAD])
185 goto out;
186 break;
187
188 case IPPROTO_ESP:
189 if (attrs[XFRMA_ALG_COMP])
190 goto out;
191 if (!attrs[XFRMA_ALG_AUTH] &&
192 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
193 !attrs[XFRMA_ALG_CRYPT] &&
194 !attrs[XFRMA_ALG_AEAD])
195 goto out;
196 if ((attrs[XFRMA_ALG_AUTH] ||
197 attrs[XFRMA_ALG_AUTH_TRUNC] ||
198 attrs[XFRMA_ALG_CRYPT]) &&
199 attrs[XFRMA_ALG_AEAD])
200 goto out;
201 if (attrs[XFRMA_TFCPAD] &&
202 p->mode != XFRM_MODE_TUNNEL)
203 goto out;
204 break;
205
206 case IPPROTO_COMP:
207 if (!attrs[XFRMA_ALG_COMP] ||
208 attrs[XFRMA_ALG_AEAD] ||
209 attrs[XFRMA_ALG_AUTH] ||
210 attrs[XFRMA_ALG_AUTH_TRUNC] ||
211 attrs[XFRMA_ALG_CRYPT] ||
212 attrs[XFRMA_TFCPAD])
213 goto out;
214 break;
215
216#if IS_ENABLED(CONFIG_IPV6)
217 case IPPROTO_DSTOPTS:
218 case IPPROTO_ROUTING:
219 if (attrs[XFRMA_ALG_COMP] ||
220 attrs[XFRMA_ALG_AUTH] ||
221 attrs[XFRMA_ALG_AUTH_TRUNC] ||
222 attrs[XFRMA_ALG_AEAD] ||
223 attrs[XFRMA_ALG_CRYPT] ||
224 attrs[XFRMA_ENCAP] ||
225 attrs[XFRMA_SEC_CTX] ||
226 attrs[XFRMA_TFCPAD] ||
227 !attrs[XFRMA_COADDR])
228 goto out;
229 break;
230#endif
231
232 default:
233 goto out;
234 }
235
236 if ((err = verify_aead(attrs)))
237 goto out;
238 if ((err = verify_auth_trunc(attrs)))
239 goto out;
240 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
241 goto out;
242 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
243 goto out;
244 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
245 goto out;
246 if ((err = verify_sec_ctx_len(attrs)))
247 goto out;
248 if ((err = verify_replay(p, attrs)))
249 goto out;
250
251 err = -EINVAL;
252 switch (p->mode) {
253 case XFRM_MODE_TRANSPORT:
254 case XFRM_MODE_TUNNEL:
255 case XFRM_MODE_ROUTEOPTIMIZATION:
256 case XFRM_MODE_BEET:
257 break;
258
259 default:
260 goto out;
261 }
262
263 err = 0;
264
265out:
266 return err;
267}
268
269static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
270 struct xfrm_algo_desc *(*get_byname)(const char *, int),
271 struct nlattr *rta)
272{
273 struct xfrm_algo *p, *ualg;
274 struct xfrm_algo_desc *algo;
275
276 if (!rta)
277 return 0;
278
279 ualg = nla_data(rta);
280
281 algo = get_byname(ualg->alg_name, 1);
282 if (!algo)
283 return -ENOSYS;
284 *props = algo->desc.sadb_alg_id;
285
286 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
287 if (!p)
288 return -ENOMEM;
289
290 strcpy(p->alg_name, algo->name);
291 *algpp = p;
292 return 0;
293}
294
295static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
296 struct nlattr *rta)
297{
298 struct xfrm_algo *ualg;
299 struct xfrm_algo_auth *p;
300 struct xfrm_algo_desc *algo;
301
302 if (!rta)
303 return 0;
304
305 ualg = nla_data(rta);
306
307 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
308 if (!algo)
309 return -ENOSYS;
310 *props = algo->desc.sadb_alg_id;
311
312 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
313 if (!p)
314 return -ENOMEM;
315
316 strcpy(p->alg_name, algo->name);
317 p->alg_key_len = ualg->alg_key_len;
318 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
319 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
320
321 *algpp = p;
322 return 0;
323}
324
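/* Attach an authentication algorithm described by XFRMA_ALG_AUTH_TRUNC,
 * bounding the requested ICV length and falling back to the algorithm's
 * default truncation length when none was supplied.
 */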
325static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
326 struct nlattr *rta)
327{
328 struct xfrm_algo_auth *p, *ualg;
329 struct xfrm_algo_desc *algo;
330
331 if (!rta)
332 return 0;
333
334 ualg = nla_data(rta);
335
336 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
337 if (!algo)
338 return -ENOSYS;
339 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
340 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
341 return -EINVAL;
342 *props = algo->desc.sadb_alg_id;
343
344 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
345 if (!p)
346 return -ENOMEM;
347
348 strcpy(p->alg_name, algo->name);
349 if (!p->alg_trunc_len)
350 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
351
352 *algpp = p;
353 return 0;
354}
355
356static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
357 struct nlattr *rta)
358{
359 struct xfrm_algo_aead *p, *ualg;
360 struct xfrm_algo_desc *algo;
361
362 if (!rta)
363 return 0;
364
365 ualg = nla_data(rta);
366
367 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
368 if (!algo)
369 return -ENOSYS;
370 *props = algo->desc.sadb_alg_id;
371
372 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
373 if (!p)
374 return -ENOMEM;
375
376 strcpy(p->alg_name, algo->name);
377 *algpp = p;
378 return 0;
379}
380
381static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
382 struct nlattr *rp)
383{
384 struct xfrm_replay_state_esn *up;
385 int ulen;
386
387 if (!replay_esn || !rp)
388 return 0;
389
390 up = nla_data(rp);
391 ulen = xfrm_replay_state_esn_len(up);
392
393 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
394 return -EINVAL;
395
396 return 0;
397}
398
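/* Allocate the kernel replay/preplay ESN state sized for the full bitmap
 * requested by userspace (klen), but copy only as much data as the
 * attribute actually carries (ulen).
 */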
399static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
400 struct xfrm_replay_state_esn **preplay_esn,
401 struct nlattr *rta)
402{
403 struct xfrm_replay_state_esn *p, *pp, *up;
404 int klen, ulen;
405
406 if (!rta)
407 return 0;
408
409 up = nla_data(rta);
410 klen = xfrm_replay_state_esn_len(up);
411 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
412
413 p = kzalloc(klen, GFP_KERNEL);
414 if (!p)
415 return -ENOMEM;
416
417 pp = kzalloc(klen, GFP_KERNEL);
418 if (!pp) {
419 kfree(p);
420 return -ENOMEM;
421 }
422
423 memcpy(p, up, ulen);
424 memcpy(pp, up, ulen);
425
426 *replay_esn = p;
427 *preplay_esn = pp;
428
429 return 0;
430}
431
432static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
433{
434 int len = 0;
435
436 if (xfrm_ctx) {
437 len += sizeof(struct xfrm_user_sec_ctx);
438 len += xfrm_ctx->ctx_len;
439 }
440 return len;
441}
442
443static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
444{
445 memcpy(&x->id, &p->id, sizeof(x->id));
446 memcpy(&x->sel, &p->sel, sizeof(x->sel));
447 memcpy(&x->lft, &p->lft, sizeof(x->lft));
448 x->props.mode = p->mode;
449 x->props.replay_window = p->replay_window;
450 x->props.reqid = p->reqid;
451 x->props.family = p->family;
452 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
453 x->props.flags = p->flags;
454
455 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
456 x->sel.family = p->family;
457}
458
459/*
460 * someday when pfkey also has support, we could have the code
461 * somehow made shareable and move it to xfrm_state.c - JHS
462 *
463*/
464static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
465 int update_esn)
466{
467 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
468 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
469 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
470 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
471 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
472
473 if (re) {
474 struct xfrm_replay_state_esn *replay_esn;
475 replay_esn = nla_data(re);
476 memcpy(x->replay_esn, replay_esn,
477 xfrm_replay_state_esn_len(replay_esn));
478 memcpy(x->preplay_esn, replay_esn,
479 xfrm_replay_state_esn_len(replay_esn));
480 }
481
482 if (rp) {
483 struct xfrm_replay_state *replay;
484 replay = nla_data(rp);
485 memcpy(&x->replay, replay, sizeof(*replay));
486 memcpy(&x->preplay, replay, sizeof(*replay));
487 }
488
489 if (lt) {
490 struct xfrm_lifetime_cur *ltime;
491 ltime = nla_data(lt);
492 x->curlft.bytes = ltime->bytes;
493 x->curlft.packets = ltime->packets;
494 x->curlft.add_time = ltime->add_time;
495 x->curlft.use_time = ltime->use_time;
496 }
497
498 if (et)
499 x->replay_maxage = nla_get_u32(et);
500
501 if (rt)
502 x->replay_maxdiff = nla_get_u32(rt);
503}
504
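/* Build an xfrm_state from an XFRM_MSG_NEWSA/UPDSA request: copy the SA
 * parameters, attach the algorithms and optional attributes, then set up
 * the replay state before applying any AE overrides.
 */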
505static struct xfrm_state *xfrm_state_construct(struct net *net,
506 struct xfrm_usersa_info *p,
507 struct nlattr **attrs,
508 int *errp)
509{
510 struct xfrm_state *x = xfrm_state_alloc(net);
511 int err = -ENOMEM;
512
513 if (!x)
514 goto error_no_put;
515
516 copy_from_user_state(x, p);
517
518 if ((err = attach_aead(&x->aead, &x->props.ealgo,
519 attrs[XFRMA_ALG_AEAD])))
520 goto error;
521 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
522 attrs[XFRMA_ALG_AUTH_TRUNC])))
523 goto error;
524 if (!x->props.aalgo) {
525 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
526 attrs[XFRMA_ALG_AUTH])))
527 goto error;
528 }
529 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
530 xfrm_ealg_get_byname,
531 attrs[XFRMA_ALG_CRYPT])))
532 goto error;
533 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
534 xfrm_calg_get_byname,
535 attrs[XFRMA_ALG_COMP])))
536 goto error;
537
538 if (attrs[XFRMA_ENCAP]) {
539 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
540 sizeof(*x->encap), GFP_KERNEL);
541 if (x->encap == NULL)
542 goto error;
543 }
544
545 if (attrs[XFRMA_TFCPAD])
546 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
547
548 if (attrs[XFRMA_COADDR]) {
549 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
550 sizeof(*x->coaddr), GFP_KERNEL);
551 if (x->coaddr == NULL)
552 goto error;
553 }
554
555 xfrm_mark_get(attrs, &x->mark);
556
557 err = __xfrm_init_state(x, false);
558 if (err)
559 goto error;
560
561 if (attrs[XFRMA_SEC_CTX] &&
562 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
563 goto error;
564
565 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
566 attrs[XFRMA_REPLAY_ESN_VAL])))
567 goto error;
568
569 x->km.seq = p->seq;
570 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
571 /* sysctl_xfrm_aevent_etime is in 100ms units */
572 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
573
574 if ((err = xfrm_init_replay(x)))
575 goto error;
576
577 /* override default values from above */
578 xfrm_update_ae_params(x, attrs, 0);
579
580 return x;
581
582error:
583 x->km.state = XFRM_STATE_DEAD;
584 xfrm_state_put(x);
585error_no_put:
586 *errp = err;
587 return NULL;
588}
589
590static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
591 struct nlattr **attrs)
592{
593 struct net *net = sock_net(skb->sk);
594 struct xfrm_usersa_info *p = nlmsg_data(nlh);
595 struct xfrm_state *x;
596 int err;
597 struct km_event c;
598 uid_t loginuid = audit_get_loginuid(current);
599 u32 sessionid = audit_get_sessionid(current);
600 u32 sid;
601
602 err = verify_newsa_info(p, attrs);
603 if (err)
604 return err;
605
606 x = xfrm_state_construct(net, p, attrs, &err);
607 if (!x)
608 return err;
609
610 xfrm_state_hold(x);
611 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
612 err = xfrm_state_add(x);
613 else
614 err = xfrm_state_update(x);
615
616 security_task_getsecid(current, &sid);
617 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
618
619 if (err < 0) {
620 x->km.state = XFRM_STATE_DEAD;
621 __xfrm_state_put(x);
622 goto out;
623 }
624
625 c.seq = nlh->nlmsg_seq;
626 c.pid = nlh->nlmsg_pid;
627 c.event = nlh->nlmsg_type;
628
629 km_state_notify(x, &c);
630out:
631 xfrm_state_put(x);
632 return err;
633}
634
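/* Find the SA named by an xfrm_usersa_id: AH/ESP/IPcomp states are looked
 * up by (daddr, spi, proto); anything else requires XFRMA_SRCADDR and is
 * looked up by address pair.
 */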
635static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
636 struct xfrm_usersa_id *p,
637 struct nlattr **attrs,
638 int *errp)
639{
640 struct xfrm_state *x = NULL;
641 struct xfrm_mark m;
642 int err;
643 u32 mark = xfrm_mark_get(attrs, &m);
644
645 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
646 err = -ESRCH;
647 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
648 } else {
649 xfrm_address_t *saddr = NULL;
650
651 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
652 if (!saddr) {
653 err = -EINVAL;
654 goto out;
655 }
656
657 err = -ESRCH;
658 x = xfrm_state_lookup_byaddr(net, mark,
659 &p->daddr, saddr,
660 p->proto, p->family);
661 }
662
663 out:
664 if (!x && errp)
665 *errp = err;
666 return x;
667}
668
669static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
670 struct nlattr **attrs)
671{
672 struct net *net = sock_net(skb->sk);
673 struct xfrm_state *x;
674 int err = -ESRCH;
675 struct km_event c;
676 struct xfrm_usersa_id *p = nlmsg_data(nlh);
677 uid_t loginuid = audit_get_loginuid(current);
678 u32 sessionid = audit_get_sessionid(current);
679 u32 sid;
680
681 x = xfrm_user_state_lookup(net, p, attrs, &err);
682 if (x == NULL)
683 return err;
684
685 if ((err = security_xfrm_state_delete(x)) != 0)
686 goto out;
687
688 if (xfrm_state_kern(x)) {
689 err = -EPERM;
690 goto out;
691 }
692
693 err = xfrm_state_delete(x);
694
695 if (err < 0)
696 goto out;
697
698 c.seq = nlh->nlmsg_seq;
699 c.pid = nlh->nlmsg_pid;
700 c.event = nlh->nlmsg_type;
701 km_state_notify(x, &c);
702
703out:
704 security_task_getsecid(current, &sid);
705 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
706 xfrm_state_put(x);
707 return err;
708}
709
710static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
711{
712 memset(p, 0, sizeof(*p));
713 memcpy(&p->id, &x->id, sizeof(p->id));
714 memcpy(&p->sel, &x->sel, sizeof(p->sel));
715 memcpy(&p->lft, &x->lft, sizeof(p->lft));
716 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
717 memcpy(&p->stats, &x->stats, sizeof(p->stats));
718 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
719 p->mode = x->props.mode;
720 p->replay_window = x->props.replay_window;
721 p->reqid = x->props.reqid;
722 p->family = x->props.family;
723 p->flags = x->props.flags;
724 p->seq = x->km.seq;
725}
726
727struct xfrm_dump_info {
728 struct sk_buff *in_skb;
729 struct sk_buff *out_skb;
730 u32 nlmsg_seq;
731 u16 nlmsg_flags;
732};
733
734static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
735{
736 struct xfrm_user_sec_ctx *uctx;
737 struct nlattr *attr;
738 int ctx_size = sizeof(*uctx) + s->ctx_len;
739
740 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
741 if (attr == NULL)
742 return -EMSGSIZE;
743
744 uctx = nla_data(attr);
745 uctx->exttype = XFRMA_SEC_CTX;
746 uctx->len = ctx_size;
747 uctx->ctx_doi = s->ctx_doi;
748 uctx->ctx_alg = s->ctx_alg;
749 uctx->ctx_len = s->ctx_len;
750 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
751
752 return 0;
753}
754
755static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
756{
757 struct xfrm_algo *algo;
758 struct nlattr *nla;
759
760 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
761 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
762 if (!nla)
763 return -EMSGSIZE;
764
765 algo = nla_data(nla);
766 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
767 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
768 algo->alg_key_len = auth->alg_key_len;
769
770 return 0;
771}
772
773/* Don't change this without updating xfrm_sa_len! */
774static int copy_to_user_state_extra(struct xfrm_state *x,
775 struct xfrm_usersa_info *p,
776 struct sk_buff *skb)
777{
778 copy_to_user_state(x, p);
779
780 if (x->coaddr)
781 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
782
783 if (x->lastused)
784 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
785
786 if (x->aead)
787 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
788 if (x->aalg) {
789 if (copy_to_user_auth(x->aalg, skb))
790 goto nla_put_failure;
791
792 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
793 xfrm_alg_auth_len(x->aalg), x->aalg);
794 }
795 if (x->ealg)
796 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
797 if (x->calg)
798 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
799
800 if (x->encap)
801 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
802
803 if (x->tfcpad)
804 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
805
806 if (xfrm_mark_put(skb, &x->mark))
807 goto nla_put_failure;
808
809 if (x->replay_esn)
810 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
811 xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
812
813 if (x->security && copy_sec_ctx(x->security, skb) < 0)
814 goto nla_put_failure;
815
816 return 0;
817
818nla_put_failure:
819 return -EMSGSIZE;
820}
821
822static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
823{
824 struct xfrm_dump_info *sp = ptr;
825 struct sk_buff *in_skb = sp->in_skb;
826 struct sk_buff *skb = sp->out_skb;
827 struct xfrm_usersa_info *p;
828 struct nlmsghdr *nlh;
829 int err;
830
831 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
832 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
833 if (nlh == NULL)
834 return -EMSGSIZE;
835
836 p = nlmsg_data(nlh);
837
838 err = copy_to_user_state_extra(x, p, skb);
839 if (err)
840 goto nla_put_failure;
841
842 nlmsg_end(skb, nlh);
843 return 0;
844
845nla_put_failure:
846 nlmsg_cancel(skb, nlh);
847 return err;
848}
849
850static int xfrm_dump_sa_done(struct netlink_callback *cb)
851{
852 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
853 xfrm_state_walk_done(walk);
854 return 0;
855}
856
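/* Dump all SAs; the walk state is kept in cb->args[1..] across calls and
 * cb->args[0] records whether the walk has been initialised.
 */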
857static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
858{
859 struct net *net = sock_net(skb->sk);
860 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
861 struct xfrm_dump_info info;
862
863 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
864 sizeof(cb->args) - sizeof(cb->args[0]));
865
866 info.in_skb = cb->skb;
867 info.out_skb = skb;
868 info.nlmsg_seq = cb->nlh->nlmsg_seq;
869 info.nlmsg_flags = NLM_F_MULTI;
870
871 if (!cb->args[0]) {
872 cb->args[0] = 1;
873 xfrm_state_walk_init(walk, 0);
874 }
875
876 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
877
878 return skb->len;
879}
880
881static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
882 struct xfrm_state *x, u32 seq)
883{
884 struct xfrm_dump_info info;
885 struct sk_buff *skb;
886 int err;
887
888 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
889 if (!skb)
890 return ERR_PTR(-ENOMEM);
891
892 info.in_skb = in_skb;
893 info.out_skb = skb;
894 info.nlmsg_seq = seq;
895 info.nlmsg_flags = 0;
896
897 err = dump_one_state(x, 0, &info);
898 if (err) {
899 kfree_skb(skb);
900 return ERR_PTR(err);
901 }
902
903 return skb;
904}
905
906static inline size_t xfrm_spdinfo_msgsize(void)
907{
908 return NLMSG_ALIGN(4)
909 + nla_total_size(sizeof(struct xfrmu_spdinfo))
910 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
911}
912
913static int build_spdinfo(struct sk_buff *skb, struct net *net,
914 u32 pid, u32 seq, u32 flags)
915{
916 struct xfrmk_spdinfo si;
917 struct xfrmu_spdinfo spc;
918 struct xfrmu_spdhinfo sph;
919 struct nlmsghdr *nlh;
920 u32 *f;
921
922 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
923 if (nlh == NULL) /* shouldn't really happen ... */
924 return -EMSGSIZE;
925
926 f = nlmsg_data(nlh);
927 *f = flags;
928 xfrm_spd_getinfo(net, &si);
929 spc.incnt = si.incnt;
930 spc.outcnt = si.outcnt;
931 spc.fwdcnt = si.fwdcnt;
932 spc.inscnt = si.inscnt;
933 spc.outscnt = si.outscnt;
934 spc.fwdscnt = si.fwdscnt;
935 sph.spdhcnt = si.spdhcnt;
936 sph.spdhmcnt = si.spdhmcnt;
937
938 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
939 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
940
941 return nlmsg_end(skb, nlh);
942
943nla_put_failure:
944 nlmsg_cancel(skb, nlh);
945 return -EMSGSIZE;
946}
947
948static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
949 struct nlattr **attrs)
950{
951 struct net *net = sock_net(skb->sk);
952 struct sk_buff *r_skb;
953 u32 *flags = nlmsg_data(nlh);
954 u32 spid = NETLINK_CB(skb).pid;
955 u32 seq = nlh->nlmsg_seq;
956
957 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
958 if (r_skb == NULL)
959 return -ENOMEM;
960
961 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
962 BUG();
963
964 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
965}
966
967static inline size_t xfrm_sadinfo_msgsize(void)
968{
969 return NLMSG_ALIGN(4)
970 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
971 + nla_total_size(4); /* XFRMA_SAD_CNT */
972}
973
974static int build_sadinfo(struct sk_buff *skb, struct net *net,
975 u32 pid, u32 seq, u32 flags)
976{
977 struct xfrmk_sadinfo si;
978 struct xfrmu_sadhinfo sh;
979 struct nlmsghdr *nlh;
980 u32 *f;
981
982 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
983 if (nlh == NULL) /* shouldn't really happen ... */
984 return -EMSGSIZE;
985
986 f = nlmsg_data(nlh);
987 *f = flags;
988 xfrm_sad_getinfo(net, &si);
989
990 sh.sadhmcnt = si.sadhmcnt;
991 sh.sadhcnt = si.sadhcnt;
992
993 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
994 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
995
996 return nlmsg_end(skb, nlh);
997
998nla_put_failure:
999 nlmsg_cancel(skb, nlh);
1000 return -EMSGSIZE;
1001}
1002
1003static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1004 struct nlattr **attrs)
1005{
1006 struct net *net = sock_net(skb->sk);
1007 struct sk_buff *r_skb;
1008 u32 *flags = nlmsg_data(nlh);
1009 u32 spid = NETLINK_CB(skb).pid;
1010 u32 seq = nlh->nlmsg_seq;
1011
1012 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1013 if (r_skb == NULL)
1014 return -ENOMEM;
1015
1016 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
1017 BUG();
1018
1019 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
1020}
1021
1022static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1023 struct nlattr **attrs)
1024{
1025 struct net *net = sock_net(skb->sk);
1026 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1027 struct xfrm_state *x;
1028 struct sk_buff *resp_skb;
1029 int err = -ESRCH;
1030
1031 x = xfrm_user_state_lookup(net, p, attrs, &err);
1032 if (x == NULL)
1033 goto out_noput;
1034
1035 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1036 if (IS_ERR(resp_skb)) {
1037 err = PTR_ERR(resp_skb);
1038 } else {
1039 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1040 }
1041 xfrm_state_put(x);
1042out_noput:
1043 return err;
1044}
1045
1046static int verify_userspi_info(struct xfrm_userspi_info *p)
1047{
1048 switch (p->info.id.proto) {
1049 case IPPROTO_AH:
1050 case IPPROTO_ESP:
1051 break;
1052
1053 case IPPROTO_COMP:
1054 /* IPCOMP spi is 16-bits. */
1055 if (p->max >= 0x10000)
1056 return -EINVAL;
1057 break;
1058
1059 default:
1060 return -EINVAL;
1061 }
1062
1063 if (p->min > p->max)
1064 return -EINVAL;
1065
1066 return 0;
1067}
1068
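/* Handle XFRM_MSG_ALLOCSPI: locate (or create) the larval state matching
 * the request, allocate an SPI in [min, max] and unicast the resulting SA
 * back to the requester.
 */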
1069static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1070 struct nlattr **attrs)
1071{
1072 struct net *net = sock_net(skb->sk);
1073 struct xfrm_state *x;
1074 struct xfrm_userspi_info *p;
1075 struct sk_buff *resp_skb;
1076 xfrm_address_t *daddr;
1077 int family;
1078 int err;
1079 u32 mark;
1080 struct xfrm_mark m;
1081
1082 p = nlmsg_data(nlh);
1083 err = verify_userspi_info(p);
1084 if (err)
1085 goto out_noput;
1086
1087 family = p->info.family;
1088 daddr = &p->info.id.daddr;
1089
1090 x = NULL;
1091
1092 mark = xfrm_mark_get(attrs, &m);
1093 if (p->info.seq) {
1094 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1095 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
1096 xfrm_state_put(x);
1097 x = NULL;
1098 }
1099 }
1100
1101 if (!x)
1102 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1103 p->info.id.proto, daddr,
1104 &p->info.saddr, 1,
1105 family);
1106 err = -ENOENT;
1107 if (x == NULL)
1108 goto out_noput;
1109
1110 err = xfrm_alloc_spi(x, p->min, p->max);
1111 if (err)
1112 goto out;
1113
1114 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1115 if (IS_ERR(resp_skb)) {
1116 err = PTR_ERR(resp_skb);
1117 goto out;
1118 }
1119
1120 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1121
1122out:
1123 xfrm_state_put(x);
1124out_noput:
1125 return err;
1126}
1127
1128static int verify_policy_dir(u8 dir)
1129{
1130 switch (dir) {
1131 case XFRM_POLICY_IN:
1132 case XFRM_POLICY_OUT:
1133 case XFRM_POLICY_FWD:
1134 break;
1135
1136 default:
1137 return -EINVAL;
1138 }
1139
1140 return 0;
1141}
1142
1143static int verify_policy_type(u8 type)
1144{
1145 switch (type) {
1146 case XFRM_POLICY_TYPE_MAIN:
1147#ifdef CONFIG_XFRM_SUB_POLICY
1148 case XFRM_POLICY_TYPE_SUB:
1149#endif
1150 break;
1151
1152 default:
1153 return -EINVAL;
1154 }
1155
1156 return 0;
1157}
1158
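/* Validate a new policy: share mode, action, selector family and
 * direction, and make sure any user-supplied index encodes that same
 * direction.
 */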
1159static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1160{
1161 int ret;
1162
1163 switch (p->share) {
1164 case XFRM_SHARE_ANY:
1165 case XFRM_SHARE_SESSION:
1166 case XFRM_SHARE_USER:
1167 case XFRM_SHARE_UNIQUE:
1168 break;
1169
1170 default:
1171 return -EINVAL;
1172 }
1173
1174 switch (p->action) {
1175 case XFRM_POLICY_ALLOW:
1176 case XFRM_POLICY_BLOCK:
1177 break;
1178
1179 default:
1180 return -EINVAL;
1181 }
1182
1183 switch (p->sel.family) {
1184 case AF_INET:
1185 break;
1186
1187 case AF_INET6:
1188#if IS_ENABLED(CONFIG_IPV6)
1189 break;
1190#else
1191 return -EAFNOSUPPORT;
1192#endif
1193
1194 default:
1195 return -EINVAL;
1196 }
1197// hub remediation: CVE-2019-15666
1198 ret = verify_policy_dir(p->dir);
1199 if (ret)
1200 return ret;
1201 if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
1202 return -EINVAL;
1203
1204 return 0;
1205}
1206
1207static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1208{
1209 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1210 struct xfrm_user_sec_ctx *uctx;
1211
1212 if (!rt)
1213 return 0;
1214
1215 uctx = nla_data(rt);
1216 return security_xfrm_policy_alloc(&pol->security, uctx);
1217}
1218
1219static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1220 int nr)
1221{
1222 int i;
1223
1224 xp->xfrm_nr = nr;
1225 for (i = 0; i < nr; i++, ut++) {
1226 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1227
1228 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1229 memcpy(&t->saddr, &ut->saddr,
1230 sizeof(xfrm_address_t));
1231 t->reqid = ut->reqid;
1232 t->mode = ut->mode;
1233 t->share = ut->share;
1234 t->optional = ut->optional;
1235 t->aalgos = ut->aalgos;
1236 t->ealgos = ut->ealgos;
1237 t->calgos = ut->calgos;
1238 /* If all masks are ~0, then we allow all algorithms. */
1239 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1240 t->encap_family = ut->family;
1241 }
1242}
1243
1244static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1245{
1246 int i;
1247
1248 if (nr > XFRM_MAX_DEPTH)
1249 return -EINVAL;
1250
1251 for (i = 0; i < nr; i++) {
1252 /* We never validated the ut->family value, so many
1253 * applications simply leave it at zero. The check was
1254 * never made and ut->family was ignored because all
1255 * templates could be assumed to have the same family as
1256 * the policy itself. Now that we will have ipv4-in-ipv6
1257 * and ipv6-in-ipv4 tunnels, this is no longer true.
1258 */
1259 if (!ut[i].family)
1260 ut[i].family = family;
1261
1262 switch (ut[i].family) {
1263 case AF_INET:
1264 break;
1265#if IS_ENABLED(CONFIG_IPV6)
1266 case AF_INET6:
1267 break;
1268#endif
1269 default:
1270 return -EINVAL;
1271 }
1272 }
1273
1274 return 0;
1275}
1276
1277static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1278{
1279 struct nlattr *rt = attrs[XFRMA_TMPL];
1280
1281 if (!rt) {
1282 pol->xfrm_nr = 0;
1283 } else {
1284 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1285 int nr = nla_len(rt) / sizeof(*utmpl);
1286 int err;
1287
1288 err = validate_tmpl(nr, utmpl, pol->family);
1289 if (err)
1290 return err;
1291
1292 copy_templates(pol, utmpl, nr);
1293 }
1294 return 0;
1295}
1296
1297static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1298{
1299 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1300 struct xfrm_userpolicy_type *upt;
1301 u8 type = XFRM_POLICY_TYPE_MAIN;
1302 int err;
1303
1304 if (rt) {
1305 upt = nla_data(rt);
1306 type = upt->type;
1307 }
1308
1309 err = verify_policy_type(type);
1310 if (err)
1311 return err;
1312
1313 *tp = type;
1314 return 0;
1315}
1316
1317static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1318{
1319 xp->priority = p->priority;
1320 xp->index = p->index;
1321 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1322 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1323 xp->action = p->action;
1324 xp->flags = p->flags;
1325 xp->family = p->sel.family;
1326 /* XXX xp->share = p->share; */
1327}
1328
1329static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1330{
1331 memset(p, 0, sizeof(*p));
1332 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1333 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1334 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1335 p->priority = xp->priority;
1336 p->index = xp->index;
1337 p->sel.family = xp->family;
1338 p->dir = dir;
1339 p->action = xp->action;
1340 p->flags = xp->flags;
1341 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1342}
1343
1344static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1345{
1346 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1347 int err;
1348
1349 if (!xp) {
1350 *errp = -ENOMEM;
1351 return NULL;
1352 }
1353
1354 copy_from_user_policy(xp, p);
1355
1356 err = copy_from_user_policy_type(&xp->type, attrs);
1357 if (err)
1358 goto error;
1359
1360 if (!(err = copy_from_user_tmpl(xp, attrs)))
1361 err = copy_from_user_sec_ctx(xp, attrs);
1362 if (err)
1363 goto error;
1364
1365 xfrm_mark_get(attrs, &xp->mark);
1366
1367 return xp;
1368 error:
1369 *errp = err;
1370 xp->walk.dead = 1;
1371 xfrm_policy_destroy(xp);
1372 return NULL;
1373}
1374
1375static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1376 struct nlattr **attrs)
1377{
1378 struct net *net = sock_net(skb->sk);
1379 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1380 struct xfrm_policy *xp;
1381 struct km_event c;
1382 int err;
1383 int excl;
1384 uid_t loginuid = audit_get_loginuid(current);
1385 u32 sessionid = audit_get_sessionid(current);
1386 u32 sid;
1387
1388 err = verify_newpolicy_info(p);
1389 if (err)
1390 return err;
1391 err = verify_sec_ctx_len(attrs);
1392 if (err)
1393 return err;
1394
1395 xp = xfrm_policy_construct(net, p, attrs, &err);
1396 if (!xp)
1397 return err;
1398
1399 /* shouldn't excl be based on nlh flags??
 1400	 * Aha! this is anti-netlink really i.e. more pfkey derived;
 1401	 * in netlink excl is a flag and you wouldn't need
1402 * a type XFRM_MSG_UPDPOLICY - JHS */
1403 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1404 err = xfrm_policy_insert(p->dir, xp, excl);
1405 security_task_getsecid(current, &sid);
1406 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1407
1408 if (err) {
1409 security_xfrm_policy_free(xp->security);
1410 kfree(xp);
1411 return err;
1412 }
1413
1414 c.event = nlh->nlmsg_type;
1415 c.seq = nlh->nlmsg_seq;
1416 c.pid = nlh->nlmsg_pid;
1417 km_policy_notify(xp, p->dir, &c);
1418
1419 xfrm_pol_put(xp);
1420
1421 return 0;
1422}
1423
1424static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1425{
1426 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1427 int i;
1428
1429 if (xp->xfrm_nr == 0)
1430 return 0;
1431
1432 for (i = 0; i < xp->xfrm_nr; i++) {
1433 struct xfrm_user_tmpl *up = &vec[i];
1434 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1435
1436 memset(up, 0, sizeof(*up));
1437 memcpy(&up->id, &kp->id, sizeof(up->id));
1438 up->family = kp->encap_family;
1439 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1440 up->reqid = kp->reqid;
1441 up->mode = kp->mode;
1442 up->share = kp->share;
1443 up->optional = kp->optional;
1444 up->aalgos = kp->aalgos;
1445 up->ealgos = kp->ealgos;
1446 up->calgos = kp->calgos;
1447 }
1448
1449 return nla_put(skb, XFRMA_TMPL,
1450 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1451}
1452
1453static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1454{
1455 if (x->security) {
1456 return copy_sec_ctx(x->security, skb);
1457 }
1458 return 0;
1459}
1460
1461static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1462{
1463 if (xp->security) {
1464 return copy_sec_ctx(xp->security, skb);
1465 }
1466 return 0;
1467}
1468static inline size_t userpolicy_type_attrsize(void)
1469{
1470#ifdef CONFIG_XFRM_SUB_POLICY
1471 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1472#else
1473 return 0;
1474#endif
1475}
1476
1477#ifdef CONFIG_XFRM_SUB_POLICY
1478static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1479{
1480 struct xfrm_userpolicy_type upt = {
1481 .type = type,
1482 };
1483
1484 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1485}
1486
1487#else
1488static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1489{
1490 return 0;
1491}
1492#endif
1493
1494static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1495{
1496 struct xfrm_dump_info *sp = ptr;
1497 struct xfrm_userpolicy_info *p;
1498 struct sk_buff *in_skb = sp->in_skb;
1499 struct sk_buff *skb = sp->out_skb;
1500 struct nlmsghdr *nlh;
1501
1502 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1503 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1504 if (nlh == NULL)
1505 return -EMSGSIZE;
1506
1507 p = nlmsg_data(nlh);
1508 copy_to_user_policy(xp, p, dir);
1509 if (copy_to_user_tmpl(xp, skb) < 0)
1510 goto nlmsg_failure;
1511 if (copy_to_user_sec_ctx(xp, skb))
1512 goto nlmsg_failure;
1513 if (copy_to_user_policy_type(xp->type, skb) < 0)
1514 goto nlmsg_failure;
1515 if (xfrm_mark_put(skb, &xp->mark))
1516 goto nla_put_failure;
1517
1518 nlmsg_end(skb, nlh);
1519 return 0;
1520
1521nla_put_failure:
1522nlmsg_failure:
1523 nlmsg_cancel(skb, nlh);
1524 return -EMSGSIZE;
1525}
1526
1527static int xfrm_dump_policy_done(struct netlink_callback *cb)
1528{
1529 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1530
1531 xfrm_policy_walk_done(walk);
1532 return 0;
1533}
1534
1535static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1536{
1537 struct net *net = sock_net(skb->sk);
1538 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1539 struct xfrm_dump_info info;
1540
1541 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1542 sizeof(cb->args) - sizeof(cb->args[0]));
1543
1544 info.in_skb = cb->skb;
1545 info.out_skb = skb;
1546 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1547 info.nlmsg_flags = NLM_F_MULTI;
1548
1549 if (!cb->args[0]) {
1550 cb->args[0] = 1;
1551 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1552 }
1553
1554 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1555
1556 return skb->len;
1557}
1558
1559static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1560 struct xfrm_policy *xp,
1561 int dir, u32 seq)
1562{
1563 struct xfrm_dump_info info;
1564 struct sk_buff *skb;
1565 int err;
1566
1567 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1568 if (!skb)
1569 return ERR_PTR(-ENOMEM);
1570
1571 info.in_skb = in_skb;
1572 info.out_skb = skb;
1573 info.nlmsg_seq = seq;
1574 info.nlmsg_flags = 0;
1575
1576 err = dump_one_policy(xp, dir, 0, &info);
1577 if (err) {
1578 kfree_skb(skb);
1579 return ERR_PTR(err);
1580 }
1581
1582 return skb;
1583}
1584
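/* Shared handler for XFRM_MSG_GETPOLICY and XFRM_MSG_DELPOLICY: look the
 * policy up by index or by selector/security context, then either report
 * it back or delete it (with an audit record and a km notification).
 */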
1585static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1586 struct nlattr **attrs)
1587{
1588 struct net *net = sock_net(skb->sk);
1589 struct xfrm_policy *xp;
1590 struct xfrm_userpolicy_id *p;
1591 u8 type = XFRM_POLICY_TYPE_MAIN;
1592 int err;
1593 struct km_event c;
1594 int delete;
1595 struct xfrm_mark m;
1596 u32 mark = xfrm_mark_get(attrs, &m);
1597
1598 p = nlmsg_data(nlh);
1599 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1600
1601 err = copy_from_user_policy_type(&type, attrs);
1602 if (err)
1603 return err;
1604
1605 err = verify_policy_dir(p->dir);
1606 if (err)
1607 return err;
1608
1609 if (p->index)
1610 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1611 else {
1612 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1613 struct xfrm_sec_ctx *ctx;
1614
1615 err = verify_sec_ctx_len(attrs);
1616 if (err)
1617 return err;
1618
1619 ctx = NULL;
1620 if (rt) {
1621 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1622
1623 err = security_xfrm_policy_alloc(&ctx, uctx);
1624 if (err)
1625 return err;
1626 }
1627 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1628 ctx, delete, &err);
1629 security_xfrm_policy_free(ctx);
1630 }
1631 if (xp == NULL)
1632 return -ENOENT;
1633
1634 if (!delete) {
1635 struct sk_buff *resp_skb;
1636
1637 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1638 if (IS_ERR(resp_skb)) {
1639 err = PTR_ERR(resp_skb);
1640 } else {
1641 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1642 NETLINK_CB(skb).pid);
1643 }
1644 } else {
1645 uid_t loginuid = audit_get_loginuid(current);
1646 u32 sessionid = audit_get_sessionid(current);
1647 u32 sid;
1648
1649 security_task_getsecid(current, &sid);
1650 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1651 sid);
1652
1653 if (err != 0)
1654 goto out;
1655
1656 c.data.byid = p->index;
1657 c.event = nlh->nlmsg_type;
1658 c.seq = nlh->nlmsg_seq;
1659 c.pid = nlh->nlmsg_pid;
1660 km_policy_notify(xp, p->dir, &c);
1661 }
1662
1663out:
1664 xfrm_pol_put(xp);
1665 return err;
1666}
1667
1668static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1669 struct nlattr **attrs)
1670{
1671 struct net *net = sock_net(skb->sk);
1672 struct km_event c;
1673 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1674 struct xfrm_audit audit_info;
1675 int err;
1676
1677 audit_info.loginuid = audit_get_loginuid(current);
1678 audit_info.sessionid = audit_get_sessionid(current);
1679 security_task_getsecid(current, &audit_info.secid);
1680 err = xfrm_state_flush(net, p->proto, &audit_info);
1681 if (err) {
1682 if (err == -ESRCH) /* empty table */
1683 return 0;
1684 return err;
1685 }
1686 c.data.proto = p->proto;
1687 c.event = nlh->nlmsg_type;
1688 c.seq = nlh->nlmsg_seq;
1689 c.pid = nlh->nlmsg_pid;
1690 c.net = net;
1691 km_state_notify(NULL, &c);
1692
1693 return 0;
1694}
1695
1696static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1697{
1698 size_t replay_size = x->replay_esn ?
1699 xfrm_replay_state_esn_len(x->replay_esn) :
1700 sizeof(struct xfrm_replay_state);
1701
1702 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1703 + nla_total_size(replay_size)
1704 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1705 + nla_total_size(sizeof(struct xfrm_mark))
1706 + nla_total_size(4) /* XFRM_AE_RTHR */
1707 + nla_total_size(4); /* XFRM_AE_ETHR */
1708}
1709
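/* Fill an XFRM_MSG_NEWAE message with the SA's replay state and current
 * lifetime and, when requested via the flags, the replay and expiry
 * thresholds.
 */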
1710static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1711{
1712 struct xfrm_aevent_id *id;
1713 struct nlmsghdr *nlh;
1714
1715 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1716 if (nlh == NULL)
1717 return -EMSGSIZE;
1718
1719 id = nlmsg_data(nlh);
1720 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1721 id->sa_id.spi = x->id.spi;
1722 id->sa_id.family = x->props.family;
1723 id->sa_id.proto = x->id.proto;
1724 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1725 id->reqid = x->props.reqid;
1726 id->flags = c->data.aevent;
1727
1728 if (x->replay_esn)
1729 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
1730 xfrm_replay_state_esn_len(x->replay_esn),
1731 x->replay_esn);
1732 else
1733 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1734
1735 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1736
1737 if (id->flags & XFRM_AE_RTHR)
1738 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1739
1740 if (id->flags & XFRM_AE_ETHR)
1741 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1742 x->replay_maxage * 10 / HZ);
1743
1744 if (xfrm_mark_put(skb, &x->mark))
1745 goto nla_put_failure;
1746
1747 return nlmsg_end(skb, nlh);
1748
1749nla_put_failure:
1750 nlmsg_cancel(skb, nlh);
1751 return -EMSGSIZE;
1752}
1753
1754static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1755 struct nlattr **attrs)
1756{
1757 struct net *net = sock_net(skb->sk);
1758 struct xfrm_state *x;
1759 struct sk_buff *r_skb;
1760 int err;
1761 struct km_event c;
1762 u32 mark;
1763 struct xfrm_mark m;
1764 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1765 struct xfrm_usersa_id *id = &p->sa_id;
1766
1767 mark = xfrm_mark_get(attrs, &m);
1768
1769 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1770 if (x == NULL)
1771 return -ESRCH;
1772
1773 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1774 if (r_skb == NULL) {
1775 xfrm_state_put(x);
1776 return -ENOMEM;
1777 }
1778
1779 /*
1780 * XXX: is this lock really needed - none of the other
1781 * gets lock (the concern is things getting updated
1782 * while we are still reading) - jhs
1783 */
1784 spin_lock_bh(&x->lock);
1785 c.data.aevent = p->flags;
1786 c.seq = nlh->nlmsg_seq;
1787 c.pid = nlh->nlmsg_pid;
1788
1789 if (build_aevent(r_skb, x, &c) < 0)
1790 BUG();
1791 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
1792 spin_unlock_bh(&x->lock);
1793 xfrm_state_put(x);
1794 return err;
1795}
1796
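/* XFRM_MSG_NEWAE: replace replay counters, lifetime and thresholds on a
 * valid SA.  The caller must set NLM_F_REPLACE and supply at least one of
 * the replay/lifetime attributes.
 */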
1797static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1798 struct nlattr **attrs)
1799{
1800 struct net *net = sock_net(skb->sk);
1801 struct xfrm_state *x;
1802 struct km_event c;
1803 int err = - EINVAL;
1804 u32 mark = 0;
1805 struct xfrm_mark m;
1806 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1807 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1808 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1809 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1810
1811 if (!lt && !rp && !re)
1812 return err;
1813
1814 /* pedantic mode - thou shalt sayeth replaceth */
1815 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1816 return err;
1817
1818 mark = xfrm_mark_get(attrs, &m);
1819
1820 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1821 if (x == NULL)
1822 return -ESRCH;
1823
1824 if (x->km.state != XFRM_STATE_VALID)
1825 goto out;
1826
1827 err = xfrm_replay_verify_len(x->replay_esn, rp);
1828 if (err)
1829 goto out;
1830
1831 spin_lock_bh(&x->lock);
1832 xfrm_update_ae_params(x, attrs, 1);
1833 spin_unlock_bh(&x->lock);
1834
1835 c.event = nlh->nlmsg_type;
1836 c.seq = nlh->nlmsg_seq;
1837 c.pid = nlh->nlmsg_pid;
1838 c.data.aevent = XFRM_AE_CU;
1839 km_state_notify(x, &c);
1840 err = 0;
1841out:
1842 xfrm_state_put(x);
1843 return err;
1844}
1845
1846static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1847 struct nlattr **attrs)
1848{
1849 struct net *net = sock_net(skb->sk);
1850 struct km_event c;
1851 u8 type = XFRM_POLICY_TYPE_MAIN;
1852 int err;
1853 struct xfrm_audit audit_info;
1854
1855 err = copy_from_user_policy_type(&type, attrs);
1856 if (err)
1857 return err;
1858
1859 audit_info.loginuid = audit_get_loginuid(current);
1860 audit_info.sessionid = audit_get_sessionid(current);
1861 security_task_getsecid(current, &audit_info.secid);
1862 err = xfrm_policy_flush(net, type, &audit_info);
1863 if (err) {
1864 if (err == -ESRCH) /* empty table */
1865 return 0;
1866 return err;
1867 }
1868
1869 c.data.type = type;
1870 c.event = nlh->nlmsg_type;
1871 c.seq = nlh->nlmsg_seq;
1872 c.pid = nlh->nlmsg_pid;
1873 c.net = net;
1874 km_policy_notify(NULL, 0, &c);
1875 return 0;
1876}
1877
1878static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1879 struct nlattr **attrs)
1880{
1881 struct net *net = sock_net(skb->sk);
1882 struct xfrm_policy *xp;
1883 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1884 struct xfrm_userpolicy_info *p = &up->pol;
1885 u8 type = XFRM_POLICY_TYPE_MAIN;
1886 int err = -ENOENT;
1887 struct xfrm_mark m;
1888 u32 mark = xfrm_mark_get(attrs, &m);
1889
1890 err = copy_from_user_policy_type(&type, attrs);
1891 if (err)
1892 return err;
1893
1894 err = verify_policy_dir(p->dir);
1895 if (err)
1896 return err;
1897
1898 if (p->index)
1899 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1900 else {
1901 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1902 struct xfrm_sec_ctx *ctx;
1903
1904 err = verify_sec_ctx_len(attrs);
1905 if (err)
1906 return err;
1907
1908 ctx = NULL;
1909 if (rt) {
1910 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1911
1912 err = security_xfrm_policy_alloc(&ctx, uctx);
1913 if (err)
1914 return err;
1915 }
1916 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1917 &p->sel, ctx, 0, &err);
1918 security_xfrm_policy_free(ctx);
1919 }
1920 if (xp == NULL)
1921 return -ENOENT;
1922
1923 if (unlikely(xp->walk.dead))
1924 goto out;
1925
1926 err = 0;
1927 if (up->hard) {
1928 uid_t loginuid = audit_get_loginuid(current);
1929 u32 sessionid = audit_get_sessionid(current);
1930 u32 sid;
1931
1932 security_task_getsecid(current, &sid);
1933 xfrm_policy_delete(xp, p->dir);
1934 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1935
1936 } else {
1937 // reset the timers here?
1938 WARN(1, "Dont know what to do with soft policy expire\n");
1939 }
1940 km_policy_expired(xp, p->dir, up->hard, current->pid);
1941
1942out:
1943 xfrm_pol_put(xp);
1944 return err;
1945}
1946
1947static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1948 struct nlattr **attrs)
1949{
1950 struct net *net = sock_net(skb->sk);
1951 struct xfrm_state *x;
1952 int err;
1953 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1954 struct xfrm_usersa_info *p = &ue->state;
1955 struct xfrm_mark m;
1956 u32 mark = xfrm_mark_get(attrs, &m);
1957
1958 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1959
1960 err = -ENOENT;
1961 if (x == NULL)
1962 return err;
1963
1964 spin_lock_bh(&x->lock);
1965 err = -EINVAL;
1966 if (x->km.state != XFRM_STATE_VALID)
1967 goto out;
1968 km_state_expired(x, ue->hard, current->pid);
1969
1970 if (ue->hard) {
1971 uid_t loginuid = audit_get_loginuid(current);
1972 u32 sessionid = audit_get_sessionid(current);
1973 u32 sid;
1974
1975 security_task_getsecid(current, &sid);
1976 __xfrm_state_delete(x);
1977 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1978 }
1979 err = 0;
1980out:
1981 spin_unlock_bh(&x->lock);
1982 xfrm_state_put(x);
1983 return err;
1984}
1985
1986static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1987 struct nlattr **attrs)
1988{
1989 struct net *net = sock_net(skb->sk);
1990 struct xfrm_policy *xp;
1991 struct xfrm_user_tmpl *ut;
1992 int i;
1993 struct nlattr *rt = attrs[XFRMA_TMPL];
1994 struct xfrm_mark mark;
1995
1996 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1997 struct xfrm_state *x = xfrm_state_alloc(net);
1998 int err = -ENOMEM;
1999
2000 if (!x)
2001 goto nomem;
2002
2003 xfrm_mark_get(attrs, &mark);
2004
2005 err = verify_newpolicy_info(&ua->policy);
2006 if (err)
2007 goto bad_policy;
2008
2009 /* build an XP */
2010 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2011 if (!xp)
2012 goto free_state;
2013
2014 memcpy(&x->id, &ua->id, sizeof(ua->id));
2015 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2016 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2017 xp->mark.m = x->mark.m = mark.m;
2018 xp->mark.v = x->mark.v = mark.v;
2019 ut = nla_data(rt);
2020 /* extract the templates and for each call km_key */
2021 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2022 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2023 memcpy(&x->id, &t->id, sizeof(x->id));
2024 x->props.mode = t->mode;
2025 x->props.reqid = t->reqid;
2026 x->props.family = ut->family;
2027 t->aalgos = ua->aalgos;
2028 t->ealgos = ua->ealgos;
2029 t->calgos = ua->calgos;
2030 err = km_query(x, t, xp);
2031
2032 }
2033
2034 kfree(x);
2035 kfree(xp);
2036
2037 return 0;
2038
2039bad_policy:
2040 WARN(1, "BAD policy passed\n");
2041free_state:
2042 kfree(x);
2043nomem:
2044 return err;
2045}
2046
2047#ifdef CONFIG_XFRM_MIGRATE
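/* Copy the optional XFRMA_KMADDRESS and the XFRMA_MIGRATE array from the
 * netlink attributes into kernel structures; *num is set to the number of
 * migrate entries found.
 */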
2048static int copy_from_user_migrate(struct xfrm_migrate *ma,
2049 struct xfrm_kmaddress *k,
2050 struct nlattr **attrs, int *num)
2051{
2052 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2053 struct xfrm_user_migrate *um;
2054 int i, num_migrate;
2055
2056 if (k != NULL) {
2057 struct xfrm_user_kmaddress *uk;
2058
2059 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2060 memcpy(&k->local, &uk->local, sizeof(k->local));
2061 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2062 k->family = uk->family;
2063 k->reserved = uk->reserved;
2064 }
2065
2066 um = nla_data(rt);
2067 num_migrate = nla_len(rt) / sizeof(*um);
2068
2069 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2070 return -EINVAL;
2071
2072 for (i = 0; i < num_migrate; i++, um++, ma++) {
2073 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2074 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2075 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2076 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2077
2078 ma->proto = um->proto;
2079 ma->mode = um->mode;
2080 ma->reqid = um->reqid;
2081
2082 ma->old_family = um->old_family;
2083 ma->new_family = um->new_family;
2084 }
2085
2086 *num = i;
2087 return 0;
2088}
2089
2090static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2091 struct nlattr **attrs)
2092{
2093 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2094 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2095 struct xfrm_kmaddress km, *kmp;
2096 u8 type;
2097 int err;
2098 int n = 0;
2099
2100 if (attrs[XFRMA_MIGRATE] == NULL)
2101 return -EINVAL;
2102
2103 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2104
2105 err = copy_from_user_policy_type(&type, attrs);
2106 if (err)
2107 return err;
2108
2109 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2110 if (err)
2111 return err;
2112
2113 if (!n)
2114 return 0;
2115
2116 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
2117
2118 return 0;
2119}
2120#else
2121static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2122 struct nlattr **attrs)
2123{
2124 return -ENOPROTOOPT;
2125}
2126#endif
2127
2128#ifdef CONFIG_XFRM_MIGRATE
2129static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2130{
2131 struct xfrm_user_migrate um;
2132
2133 memset(&um, 0, sizeof(um));
2134 um.proto = m->proto;
2135 um.mode = m->mode;
2136 um.reqid = m->reqid;
2137 um.old_family = m->old_family;
2138 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2139 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2140 um.new_family = m->new_family;
2141 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2142 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2143
2144 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2145}
2146
2147static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2148{
2149 struct xfrm_user_kmaddress uk;
2150
2151 memset(&uk, 0, sizeof(uk));
2152 uk.family = k->family;
2153 uk.reserved = k->reserved;
2154 memcpy(&uk.local, &k->local, sizeof(uk.local));
2155 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2156
2157 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2158}
2159
2160static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2161{
2162 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2163 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2164 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2165 + userpolicy_type_attrsize();
2166}
2167
2168static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2169 int num_migrate, const struct xfrm_kmaddress *k,
2170 const struct xfrm_selector *sel, u8 dir, u8 type)
2171{
2172 const struct xfrm_migrate *mp;
2173 struct xfrm_userpolicy_id *pol_id;
2174 struct nlmsghdr *nlh;
2175 int i;
2176
2177 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2178 if (nlh == NULL)
2179 return -EMSGSIZE;
2180
2181 pol_id = nlmsg_data(nlh);
2182 /* copy data from selector, dir, and type to the pol_id */
2183 memset(pol_id, 0, sizeof(*pol_id));
2184 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2185 pol_id->dir = dir;
2186
2187 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
2188 goto nlmsg_failure;
2189
2190 if (copy_to_user_policy_type(type, skb) < 0)
2191 goto nlmsg_failure;
2192
2193 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2194 if (copy_to_user_migrate(mp, skb) < 0)
2195 goto nlmsg_failure;
2196 }
2197
2198 return nlmsg_end(skb, nlh);
2199nlmsg_failure:
2200 nlmsg_cancel(skb, nlh);
2201 return -EMSGSIZE;
2202}
2203
2204static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2205 const struct xfrm_migrate *m, int num_migrate,
2206 const struct xfrm_kmaddress *k)
2207{
2208 struct net *net = &init_net;
2209 struct sk_buff *skb;
2210
2211 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2212 if (skb == NULL)
2213 return -ENOMEM;
2214
2215 /* build migrate */
2216 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2217 BUG();
2218
2219 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2220}
2221#else
2222static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2223 const struct xfrm_migrate *m, int num_migrate,
2224 const struct xfrm_kmaddress *k)
2225{
2226 return -ENOPROTOOPT;
2227}
2228#endif
2229
2230#define XMSGSIZE(type) sizeof(struct type)
2231
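/* Minimum payload length expected for each XFRM message type. */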
static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

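/* Validation policy applied by nlmsg_parse() to the attributes that may
 * follow the fixed message header.
 */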
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED] = { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
	[XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD] = { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
};

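/* Per-message-type dispatch table: synchronous handler (doit) plus the
 * optional dump start/done callbacks for GET requests with NLM_F_DUMP.
 */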
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
					     .dump = xfrm_dump_sa,
					     .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						 .dump = xfrm_dump_policy,
						 .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

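/* Entry point for a single XFRM netlink request: check privileges, hand
 * NLM_F_DUMP GET requests to the netlink dump machinery, otherwise parse
 * the attributes against xfrma_policy and call the per-type doit handler.
 */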
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
			};
			return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&xfrm_cfg_mutex);
}

static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	return -EMSGSIZE;
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

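/* Attribute space needed to report the state x; used by xfrm_notify_sa()
 * to size the notification skb before copy_to_user_state_extra() fills it.
 */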
static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size(sizeof(u64));

	return l;
}

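/* Broadcast an SA add/update/delete event on XFRMNLGRP_SA.  For deletes
 * the fixed header is the short xfrm_usersa_id and the full state is
 * carried in a nested XFRMA_SA attribute instead.
 */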
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nla_put_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		if (attr == NULL)
			goto nla_put_failure;

		p = nla_data(attr);
	}

	if (copy_to_user_state_extra(x, p, skb))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

nla_put_failure:
	/* Somebody screwed up with xfrm_sa_len! */
	WARN_ON(1);
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
		       c->event);
		break;
	}

	return 0;

}

static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
					  struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

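/* Compose an XFRM_MSG_ACQUIRE message for userspace key managers: the
 * acquire header plus the policy's templates, the state's security
 * context, the policy type and the policy mark.
 */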
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
{
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	__u32 seq = xfrm_get_acqseq();

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_state_sec_ctx(x, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp, int dir)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_acquire(skb, x, xt, xp, dir) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}

static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	int hard = c->data.hard;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;
	upe->hard = !!hard;

	return nlmsg_end(skb, nlh);

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

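/* Broadcast a policy add/update/delete event on XFRMNLGRP_POLICY.  As with
 * SA notifications, deletes use the short xfrm_userpolicy_id header and
 * carry the full policy in a nested XFRMA_POLICY attribute.
 */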
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nlmsg_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		if (attr == NULL)
			goto nlmsg_failure;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	if (nlh == NULL)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(c->data.type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
		       c->event);
	}

	return 0;

}

static inline size_t xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr)
		NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

static inline size_t xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	return nlmsg_end(skb, nlh);
}

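/* Announce a NAT-T peer address/port change (XFRM_MSG_MAPPING) for an ESP
 * state that uses UDP encapsulation.
 */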
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_mapping(skb, x, ipaddr, sport) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}

static struct xfrm_mgr netlink_mgr = {
	.id = "netlink",
	.notify = xfrm_send_state_notify,
	.acquire = xfrm_send_acquire,
	.compile_policy = xfrm_compile_policy,
	.notify_policy = xfrm_send_policy_notify,
	.report = xfrm_send_report,
	.migrate = xfrm_send_migrate,
	.new_mapping = xfrm_send_mapping,
};

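/* Per-network-namespace setup: create the NETLINK_XFRM kernel socket and
 * publish it via RCU in net->xfrm.nlsk; nlsk_stash keeps a reference so
 * the socket can be released at namespace teardown.
 */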
static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, NULL, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;
	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init = xfrm_user_net_init,
	.exit_batch = xfrm_user_net_exit,
};

static int __init xfrm_user_init(void)
{
	int rv;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	rv = register_pernet_subsys(&xfrm_user_net_ops);
	if (rv < 0)
		return rv;
	rv = xfrm_register_km(&netlink_mgr);
	if (rv < 0)
		unregister_pernet_subsys(&xfrm_user_net_ops);
	return rv;
}

static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);