1/* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13#include <linux/crypto.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/slab.h>
18#include <linux/socket.h>
19#include <linux/string.h>
20#include <linux/net.h>
21#include <linux/skbuff.h>
22#include <linux/pfkeyv2.h>
23#include <linux/ipsec.h>
24#include <linux/init.h>
25#include <linux/security.h>
26#include <net/sock.h>
27#include <net/xfrm.h>
28#include <net/netlink.h>
29#include <net/ah.h>
30#include <asm/uaccess.h>
31#if IS_ENABLED(CONFIG_IPV6)
32#include <linux/in6.h>
33#endif
34
35static inline int aead_len(struct xfrm_algo_aead *alg)
36{
37 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
38}
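/*
 * Worked example (illustrative only): alg_key_len is expressed in bits,
 * and (bits + 7) / 8 rounds it up to whole bytes. For rfc4106(gcm(aes))
 * with a 160-bit key blob (128-bit AES key + 32-bit salt):
 *
 *   aead_len = sizeof(struct xfrm_algo_aead) + (160 + 7) / 8
 *            = sizeof(struct xfrm_algo_aead) + 20
 *
 * so the netlink attribute must carry at least that many payload bytes.
 */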
39
40static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
41{
42 struct nlattr *rt = attrs[type];
43 struct xfrm_algo *algp;
44
45 if (!rt)
46 return 0;
47
48 algp = nla_data(rt);
49 if (nla_len(rt) < xfrm_alg_len(algp))
50 return -EINVAL;
51
52 switch (type) {
53 case XFRMA_ALG_AUTH:
54 case XFRMA_ALG_CRYPT:
55 case XFRMA_ALG_COMP:
56 break;
57
58 default:
59 return -EINVAL;
60 }
61
62 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
63 return 0;
64}
65
66static int verify_auth_trunc(struct nlattr **attrs)
67{
68 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
69 struct xfrm_algo_auth *algp;
70
71 if (!rt)
72 return 0;
73
74 algp = nla_data(rt);
75 if (nla_len(rt) < xfrm_alg_auth_len(algp))
76 return -EINVAL;
77
78 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
79 return 0;
80}
81
82static int verify_aead(struct nlattr **attrs)
83{
84 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
85 struct xfrm_algo_aead *algp;
86
87 if (!rt)
88 return 0;
89
90 algp = nla_data(rt);
91 if (nla_len(rt) < aead_len(algp))
92 return -EINVAL;
93
94 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
95 return 0;
96}
97
98static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
99 xfrm_address_t **addrp)
100{
101 struct nlattr *rt = attrs[type];
102
103 if (rt && addrp)
104 *addrp = nla_data(rt);
105}
106
107static inline int verify_sec_ctx_len(struct nlattr **attrs)
108{
109 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
110 struct xfrm_user_sec_ctx *uctx;
111
112 if (!rt)
113 return 0;
114
115 uctx = nla_data(rt);
116 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
117 return -EINVAL;
118
119 return 0;
120}
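/*
 * Example of the layout this check enforces (illustrative; assumes the
 * usual 8-byte struct xfrm_user_sec_ctx header): a security context
 * string of ctx_len == 24 bytes must arrive with uctx->len == 8 + 24 == 32,
 * i.e. the declared length covers exactly the header plus the context
 * string that follows it in the attribute payload.
 */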
121
122static inline int verify_replay(struct xfrm_usersa_info *p,
123 struct nlattr **attrs)
124{
125 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
126 struct xfrm_replay_state_esn *rs;
127
128 if (p->flags & XFRM_STATE_ESN) {
129 if (!rt)
130 return -EINVAL;
131
132 rs = nla_data(rt);
133
134 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
135 return -EINVAL;
136
137 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
138 nla_len(rt) != sizeof(*rs))
139 return -EINVAL;
140 }
141
142 if (!rt)
143 return 0;
144
145 if (p->id.proto != IPPROTO_ESP)
146 return -EINVAL;
147
148 if (p->replay_window != 0)
149 return -EINVAL;
150
151 return 0;
152}
153
154static int verify_newsa_info(struct xfrm_usersa_info *p,
155 struct nlattr **attrs)
156{
157 int err;
158
159 err = -EINVAL;
160 switch (p->family) {
161 case AF_INET:
162 break;
163
164 case AF_INET6:
165#if IS_ENABLED(CONFIG_IPV6)
166 break;
167#else
168 err = -EAFNOSUPPORT;
169 goto out;
170#endif
171
172 default:
173 goto out;
174 }
175
176 err = -EINVAL;
177 switch (p->id.proto) {
178 case IPPROTO_AH:
179 if ((!attrs[XFRMA_ALG_AUTH] &&
180 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
181 attrs[XFRMA_ALG_AEAD] ||
182 attrs[XFRMA_ALG_CRYPT] ||
183 attrs[XFRMA_ALG_COMP] ||
184 attrs[XFRMA_TFCPAD])
185 goto out;
186 break;
187
188 case IPPROTO_ESP:
189 if (attrs[XFRMA_ALG_COMP])
190 goto out;
191 if (!attrs[XFRMA_ALG_AUTH] &&
192 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
193 !attrs[XFRMA_ALG_CRYPT] &&
194 !attrs[XFRMA_ALG_AEAD])
195 goto out;
196 if ((attrs[XFRMA_ALG_AUTH] ||
197 attrs[XFRMA_ALG_AUTH_TRUNC] ||
198 attrs[XFRMA_ALG_CRYPT]) &&
199 attrs[XFRMA_ALG_AEAD])
200 goto out;
201 if (attrs[XFRMA_TFCPAD] &&
202 p->mode != XFRM_MODE_TUNNEL)
203 goto out;
204 break;
205
206 case IPPROTO_COMP:
207 if (!attrs[XFRMA_ALG_COMP] ||
208 attrs[XFRMA_ALG_AEAD] ||
209 attrs[XFRMA_ALG_AUTH] ||
210 attrs[XFRMA_ALG_AUTH_TRUNC] ||
211 attrs[XFRMA_ALG_CRYPT] ||
212 attrs[XFRMA_TFCPAD])
213 goto out;
214 break;
215
216#if IS_ENABLED(CONFIG_IPV6)
217 case IPPROTO_DSTOPTS:
218 case IPPROTO_ROUTING:
219 if (attrs[XFRMA_ALG_COMP] ||
220 attrs[XFRMA_ALG_AUTH] ||
221 attrs[XFRMA_ALG_AUTH_TRUNC] ||
222 attrs[XFRMA_ALG_AEAD] ||
223 attrs[XFRMA_ALG_CRYPT] ||
224 attrs[XFRMA_ENCAP] ||
225 attrs[XFRMA_SEC_CTX] ||
226 attrs[XFRMA_TFCPAD] ||
227 !attrs[XFRMA_COADDR])
228 goto out;
229 break;
230#endif
231
232 default:
233 goto out;
234 }
235
236 if ((err = verify_aead(attrs)))
237 goto out;
238 if ((err = verify_auth_trunc(attrs)))
239 goto out;
240 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
241 goto out;
242 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
243 goto out;
244 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
245 goto out;
246 if ((err = verify_sec_ctx_len(attrs)))
247 goto out;
248 if ((err = verify_replay(p, attrs)))
249 goto out;
250
251 err = -EINVAL;
252 switch (p->mode) {
253 case XFRM_MODE_TRANSPORT:
254 case XFRM_MODE_TUNNEL:
255 case XFRM_MODE_ROUTEOPTIMIZATION:
256 case XFRM_MODE_BEET:
257 break;
258
259 default:
260 goto out;
261 }
262
263 err = 0;
264
265out:
266 return err;
267}
268
269static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
270 struct xfrm_algo_desc *(*get_byname)(const char *, int),
271 struct nlattr *rta)
272{
273 struct xfrm_algo *p, *ualg;
274 struct xfrm_algo_desc *algo;
275
276 if (!rta)
277 return 0;
278
279 ualg = nla_data(rta);
280
281 algo = get_byname(ualg->alg_name, 1);
282 if (!algo)
283 return -ENOSYS;
284 *props = algo->desc.sadb_alg_id;
285
286 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
287 if (!p)
288 return -ENOMEM;
289
290 strcpy(p->alg_name, algo->name);
291 *algpp = p;
292 return 0;
293}
294
295static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
296 struct nlattr *rta)
297{
298 struct xfrm_algo *ualg;
299 struct xfrm_algo_auth *p;
300 struct xfrm_algo_desc *algo;
301
302 if (!rta)
303 return 0;
304
305 ualg = nla_data(rta);
306
307 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
308 if (!algo)
309 return -ENOSYS;
310 *props = algo->desc.sadb_alg_id;
311
312 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
313 if (!p)
314 return -ENOMEM;
315
316 strcpy(p->alg_name, algo->name);
317 p->alg_key_len = ualg->alg_key_len;
318 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
319 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
320
321 *algpp = p;
322 return 0;
323}
324
325static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
326 struct nlattr *rta)
327{
328 struct xfrm_algo_auth *p, *ualg;
329 struct xfrm_algo_desc *algo;
330
331 if (!rta)
332 return 0;
333
334 ualg = nla_data(rta);
335
336 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
337 if (!algo)
338 return -ENOSYS;
339 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
340 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
341 return -EINVAL;
342 *props = algo->desc.sadb_alg_id;
343
344 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
345 if (!p)
346 return -ENOMEM;
347
348 strcpy(p->alg_name, algo->name);
349 if (!p->alg_trunc_len)
350 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
351
352 *algpp = p;
353 return 0;
354}
355
356static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
357 struct nlattr *rta)
358{
359 struct xfrm_algo_aead *p, *ualg;
360 struct xfrm_algo_desc *algo;
361
362 if (!rta)
363 return 0;
364
365 ualg = nla_data(rta);
366
367 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
368 if (!algo)
369 return -ENOSYS;
370 *props = algo->desc.sadb_alg_id;
371
372 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
373 if (!p)
374 return -ENOMEM;
375
376 strcpy(p->alg_name, algo->name);
377 *algpp = p;
378 return 0;
379}
380
381static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
382 struct nlattr *rp)
383{
384 struct xfrm_replay_state_esn *up;
385 int ulen;
386
387 if (!replay_esn || !rp)
388 return 0;
389
390 up = nla_data(rp);
391 ulen = xfrm_replay_state_esn_len(up);
392
393 /* CVE-2017-7184 */
394 /* Check the overall length and the internal bitmap length to avoid
395 * potential overflow. */
396 if (nla_len(rp) < ulen ||
397 xfrm_replay_state_esn_len(replay_esn) != ulen ||
398 replay_esn->bmp_len != up->bmp_len)
399 return -EINVAL;
400
401 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
402 return -EINVAL;
403
404 return 0;
405}
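/*
 * Sketch of the arithmetic behind the CVE-2017-7184 check above
 * (illustrative; assumes xfrm_replay_state_esn_len() is
 * sizeof(struct xfrm_replay_state_esn) + bmp_len * sizeof(__u32)):
 *
 *   kernel state:  bmp_len = 4   -> len = header + 16 bytes of bitmap
 *   userspace:     bmp_len = 128 -> len = header + 512 bytes of bitmap
 *
 * Without the length and bmp_len comparisons, the ESN copy in
 * xfrm_update_ae_params() could write the larger user-supplied bitmap
 * over the smaller kernel allocation. The replay_window check bounds
 * the window to the bits actually present (bmp_len words * 32 bits).
 */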
406
407static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
408 struct xfrm_replay_state_esn **preplay_esn,
409 struct nlattr *rta)
410{
411 struct xfrm_replay_state_esn *p, *pp, *up;
412 int klen, ulen;
413
414 if (!rta)
415 return 0;
416
417 up = nla_data(rta);
418 klen = xfrm_replay_state_esn_len(up);
419 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
420
421 p = kzalloc(klen, GFP_KERNEL);
422 if (!p)
423 return -ENOMEM;
424
425 pp = kzalloc(klen, GFP_KERNEL);
426 if (!pp) {
427 kfree(p);
428 return -ENOMEM;
429 }
430
431 memcpy(p, up, ulen);
432 memcpy(pp, up, ulen);
433
434 *replay_esn = p;
435 *preplay_esn = pp;
436
437 return 0;
438}
439
440static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
441{
442 int len = 0;
443
444 if (xfrm_ctx) {
445 len += sizeof(struct xfrm_user_sec_ctx);
446 len += xfrm_ctx->ctx_len;
447 }
448 return len;
449}
450
451static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
452{
453 memcpy(&x->id, &p->id, sizeof(x->id));
454 memcpy(&x->sel, &p->sel, sizeof(x->sel));
455 memcpy(&x->lft, &p->lft, sizeof(x->lft));
456 x->props.mode = p->mode;
457 x->props.replay_window = p->replay_window;
458 x->props.reqid = p->reqid;
459 x->props.family = p->family;
460 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
461 x->props.flags = p->flags;
462
463 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
464 x->sel.family = p->family;
465}
466
467/*
468 * Someday, when pfkey also has support, this code could be made
469 * shareable and moved to xfrm_state.c - JHS
470 *
471 */
472static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
473 int update_esn)
474{
475 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
476 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
477 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
478 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
479 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
480
481 //BDSA-2023-1941
482 if (re && x->replay_esn && x->preplay_esn) {
483 struct xfrm_replay_state_esn *replay_esn;
484 replay_esn = nla_data(re);
485 memcpy(x->replay_esn, replay_esn,
486 xfrm_replay_state_esn_len(replay_esn));
487 memcpy(x->preplay_esn, replay_esn,
488 xfrm_replay_state_esn_len(replay_esn));
489 }
490
491 if (rp) {
492 struct xfrm_replay_state *replay;
493 replay = nla_data(rp);
494 memcpy(&x->replay, replay, sizeof(*replay));
495 memcpy(&x->preplay, replay, sizeof(*replay));
496 }
497
498 if (lt) {
499 struct xfrm_lifetime_cur *ltime;
500 ltime = nla_data(lt);
501 x->curlft.bytes = ltime->bytes;
502 x->curlft.packets = ltime->packets;
503 x->curlft.add_time = ltime->add_time;
504 x->curlft.use_time = ltime->use_time;
505 }
506
507 if (et)
508 x->replay_maxage = nla_get_u32(et);
509
510 if (rt)
511 x->replay_maxdiff = nla_get_u32(rt);
512}
513
514static struct xfrm_state *xfrm_state_construct(struct net *net,
515 struct xfrm_usersa_info *p,
516 struct nlattr **attrs,
517 int *errp)
518{
519 struct xfrm_state *x = xfrm_state_alloc(net);
520 int err = -ENOMEM;
521
522 if (!x)
523 goto error_no_put;
524
525 copy_from_user_state(x, p);
526
527 if ((err = attach_aead(&x->aead, &x->props.ealgo,
528 attrs[XFRMA_ALG_AEAD])))
529 goto error;
530 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
531 attrs[XFRMA_ALG_AUTH_TRUNC])))
532 goto error;
533 if (!x->props.aalgo) {
534 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
535 attrs[XFRMA_ALG_AUTH])))
536 goto error;
537 }
538 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
539 xfrm_ealg_get_byname,
540 attrs[XFRMA_ALG_CRYPT])))
541 goto error;
542 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
543 xfrm_calg_get_byname,
544 attrs[XFRMA_ALG_COMP])))
545 goto error;
546
547 if (attrs[XFRMA_ENCAP]) {
548 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
549 sizeof(*x->encap), GFP_KERNEL);
550 if (x->encap == NULL)
551 goto error;
552 }
553
554 if (attrs[XFRMA_TFCPAD])
555 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
556
557 if (attrs[XFRMA_COADDR]) {
558 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
559 sizeof(*x->coaddr), GFP_KERNEL);
560 if (x->coaddr == NULL)
561 goto error;
562 }
563
564 xfrm_mark_get(attrs, &x->mark);
565
566 err = __xfrm_init_state(x, false);
567 if (err)
568 goto error;
569
570 if (attrs[XFRMA_SEC_CTX] &&
571 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
572 goto error;
573
574 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
575 attrs[XFRMA_REPLAY_ESN_VAL])))
576 goto error;
577
578 x->km.seq = p->seq;
579 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
580 /* sysctl_xfrm_aevent_etime is in 100ms units */
581 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
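	/* Example (illustrative; assumes XFRM_AE_ETH_M == 10): with HZ == 1000
	 * and sysctl_aevent_etime == 10 (10 * 100ms = 1 second), replay_maxage
	 * works out to (10 * 1000) / 10 == 1000 jiffies, i.e. one second. */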
582
583 if ((err = xfrm_init_replay(x)))
584 goto error;
585
586 /* override default values from above */
587 xfrm_update_ae_params(x, attrs, 0);
588
589 return x;
590
591error:
592 x->km.state = XFRM_STATE_DEAD;
593 xfrm_state_put(x);
594error_no_put:
595 *errp = err;
596 return NULL;
597}
598
599static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
600 struct nlattr **attrs)
601{
602 struct net *net = sock_net(skb->sk);
603 struct xfrm_usersa_info *p = nlmsg_data(nlh);
604 struct xfrm_state *x;
605 int err;
606 struct km_event c;
607 uid_t loginuid = audit_get_loginuid(current);
608 u32 sessionid = audit_get_sessionid(current);
609 u32 sid;
610
611 err = verify_newsa_info(p, attrs);
612 if (err)
613 return err;
614
615 x = xfrm_state_construct(net, p, attrs, &err);
616 if (!x)
617 return err;
618
619 xfrm_state_hold(x);
620 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
621 err = xfrm_state_add(x);
622 else
623 err = xfrm_state_update(x);
624
625 security_task_getsecid(current, &sid);
626 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
627
628 if (err < 0) {
629 x->km.state = XFRM_STATE_DEAD;
630 __xfrm_state_put(x);
631 goto out;
632 }
633
634 c.seq = nlh->nlmsg_seq;
635 c.pid = nlh->nlmsg_pid;
636 c.event = nlh->nlmsg_type;
637
638 km_state_notify(x, &c);
639out:
640 xfrm_state_put(x);
641 return err;
642}
643
644static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
645 struct xfrm_usersa_id *p,
646 struct nlattr **attrs,
647 int *errp)
648{
649 struct xfrm_state *x = NULL;
650 struct xfrm_mark m;
651 int err;
652 u32 mark = xfrm_mark_get(attrs, &m);
653
654 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
655 err = -ESRCH;
656 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
657 } else {
658 xfrm_address_t *saddr = NULL;
659
660 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
661 if (!saddr) {
662 err = -EINVAL;
663 goto out;
664 }
665
666 err = -ESRCH;
667 x = xfrm_state_lookup_byaddr(net, mark,
668 &p->daddr, saddr,
669 p->proto, p->family);
670 }
671
672 out:
673 if (!x && errp)
674 *errp = err;
675 return x;
676}
677
678static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
679 struct nlattr **attrs)
680{
681 struct net *net = sock_net(skb->sk);
682 struct xfrm_state *x;
683 int err = -ESRCH;
684 struct km_event c;
685 struct xfrm_usersa_id *p = nlmsg_data(nlh);
686 uid_t loginuid = audit_get_loginuid(current);
687 u32 sessionid = audit_get_sessionid(current);
688 u32 sid;
689
690 x = xfrm_user_state_lookup(net, p, attrs, &err);
691 if (x == NULL)
692 return err;
693
694 if ((err = security_xfrm_state_delete(x)) != 0)
695 goto out;
696
697 if (xfrm_state_kern(x)) {
698 err = -EPERM;
699 goto out;
700 }
701
702 err = xfrm_state_delete(x);
703
704 if (err < 0)
705 goto out;
706
707 c.seq = nlh->nlmsg_seq;
708 c.pid = nlh->nlmsg_pid;
709 c.event = nlh->nlmsg_type;
710 km_state_notify(x, &c);
711
712out:
713 security_task_getsecid(current, &sid);
714 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
715 xfrm_state_put(x);
716 return err;
717}
718
719static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
720{
721 memset(p, 0, sizeof(*p));
722 memcpy(&p->id, &x->id, sizeof(p->id));
723 memcpy(&p->sel, &x->sel, sizeof(p->sel));
724 memcpy(&p->lft, &x->lft, sizeof(p->lft));
725 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
726 memcpy(&p->stats, &x->stats, sizeof(p->stats));
727 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
728 p->mode = x->props.mode;
729 p->replay_window = x->props.replay_window;
730 p->reqid = x->props.reqid;
731 p->family = x->props.family;
732 p->flags = x->props.flags;
733 p->seq = x->km.seq;
734}
735
736struct xfrm_dump_info {
737 struct sk_buff *in_skb;
738 struct sk_buff *out_skb;
739 u32 nlmsg_seq;
740 u16 nlmsg_flags;
741};
742
743static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
744{
745 struct xfrm_user_sec_ctx *uctx;
746 struct nlattr *attr;
747 int ctx_size = sizeof(*uctx) + s->ctx_len;
748
749 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
750 if (attr == NULL)
751 return -EMSGSIZE;
752
753 uctx = nla_data(attr);
754 uctx->exttype = XFRMA_SEC_CTX;
755 uctx->len = ctx_size;
756 uctx->ctx_doi = s->ctx_doi;
757 uctx->ctx_alg = s->ctx_alg;
758 uctx->ctx_len = s->ctx_len;
759 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
760
761 return 0;
762}
763
764static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
765{
766 struct xfrm_algo *algo;
767 struct nlattr *nla;
768
769 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
770 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
771 if (!nla)
772 return -EMSGSIZE;
773
774 algo = nla_data(nla);
775 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
776 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
777 algo->alg_key_len = auth->alg_key_len;
778
779 return 0;
780}
781
782/* Don't change this without updating xfrm_sa_len! */
783static int copy_to_user_state_extra(struct xfrm_state *x,
784 struct xfrm_usersa_info *p,
785 struct sk_buff *skb)
786{
787 copy_to_user_state(x, p);
788
789 if (x->coaddr)
790 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
791
792 if (x->lastused)
793 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
794
795 if (x->aead)
796 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
797 if (x->aalg) {
798 if (copy_to_user_auth(x->aalg, skb))
799 goto nla_put_failure;
800
801 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
802 xfrm_alg_auth_len(x->aalg), x->aalg);
803 }
804 if (x->ealg)
805 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
806 if (x->calg)
807 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
808
809 if (x->encap)
810 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
811
812 if (x->tfcpad)
813 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
814
815 if (xfrm_mark_put(skb, &x->mark))
816 goto nla_put_failure;
817
818 if (x->replay_esn)
819 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
820 xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);
821
822 if (x->security && copy_sec_ctx(x->security, skb) < 0)
823 goto nla_put_failure;
824
825 return 0;
826
827nla_put_failure:
828 return -EMSGSIZE;
829}
830
831static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
832{
833 struct xfrm_dump_info *sp = ptr;
834 struct sk_buff *in_skb = sp->in_skb;
835 struct sk_buff *skb = sp->out_skb;
836 struct xfrm_usersa_info *p;
837 struct nlmsghdr *nlh;
838 int err;
839
840 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
841 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
842 if (nlh == NULL)
843 return -EMSGSIZE;
844
845 p = nlmsg_data(nlh);
846
847 err = copy_to_user_state_extra(x, p, skb);
848 if (err)
849 goto nla_put_failure;
850
851 nlmsg_end(skb, nlh);
852 return 0;
853
854nla_put_failure:
855 nlmsg_cancel(skb, nlh);
856 return err;
857}
858
859static int xfrm_dump_sa_done(struct netlink_callback *cb)
860{
861 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
862 xfrm_state_walk_done(walk);
863 return 0;
864}
865
866static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
867{
868 struct net *net = sock_net(skb->sk);
869 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
870 struct xfrm_dump_info info;
871
872 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
873 sizeof(cb->args) - sizeof(cb->args[0]));
874
875 info.in_skb = cb->skb;
876 info.out_skb = skb;
877 info.nlmsg_seq = cb->nlh->nlmsg_seq;
878 info.nlmsg_flags = NLM_F_MULTI;
879
880 if (!cb->args[0]) {
881 cb->args[0] = 1;
882 xfrm_state_walk_init(walk, 0);
883 }
884
885 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
886
887 return skb->len;
888}
889
890static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
891 struct xfrm_state *x, u32 seq)
892{
893 struct xfrm_dump_info info;
894 struct sk_buff *skb;
895 int err;
896
897 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
898 if (!skb)
899 return ERR_PTR(-ENOMEM);
900
901 info.in_skb = in_skb;
902 info.out_skb = skb;
903 info.nlmsg_seq = seq;
904 info.nlmsg_flags = 0;
905
906 err = dump_one_state(x, 0, &info);
907 if (err) {
908 kfree_skb(skb);
909 return ERR_PTR(err);
910 }
911
912 return skb;
913}
914
915static inline size_t xfrm_spdinfo_msgsize(void)
916{
917 return NLMSG_ALIGN(4)
918 + nla_total_size(sizeof(struct xfrmu_spdinfo))
919 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
920}
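/*
 * Rough sizing sketch (illustrative): the reply is a 4-byte flags word in
 * the netlink payload plus two attributes; nla_total_size() accounts for
 * the 4-byte attribute header and alignment padding of each payload. This
 * is why xfrm_get_spdinfo() can treat a failure of build_spdinfo() as a
 * BUG(): the buffer allocated from this estimate is always large enough.
 */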
921
922static int build_spdinfo(struct sk_buff *skb, struct net *net,
923 u32 pid, u32 seq, u32 flags)
924{
925 struct xfrmk_spdinfo si;
926 struct xfrmu_spdinfo spc;
927 struct xfrmu_spdhinfo sph;
928 struct nlmsghdr *nlh;
929 u32 *f;
930
931 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
932 if (nlh == NULL) /* shouldn't really happen ... */
933 return -EMSGSIZE;
934
935 f = nlmsg_data(nlh);
936 *f = flags;
937 xfrm_spd_getinfo(net, &si);
938 spc.incnt = si.incnt;
939 spc.outcnt = si.outcnt;
940 spc.fwdcnt = si.fwdcnt;
941 spc.inscnt = si.inscnt;
942 spc.outscnt = si.outscnt;
943 spc.fwdscnt = si.fwdscnt;
944 sph.spdhcnt = si.spdhcnt;
945 sph.spdhmcnt = si.spdhmcnt;
946
947 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
948 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
949
950 return nlmsg_end(skb, nlh);
951
952nla_put_failure:
953 nlmsg_cancel(skb, nlh);
954 return -EMSGSIZE;
955}
956
957static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
958 struct nlattr **attrs)
959{
960 struct net *net = sock_net(skb->sk);
961 struct sk_buff *r_skb;
962 u32 *flags = nlmsg_data(nlh);
963 u32 spid = NETLINK_CB(skb).pid;
964 u32 seq = nlh->nlmsg_seq;
965
966 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
967 if (r_skb == NULL)
968 return -ENOMEM;
969
970 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
971 BUG();
972
973 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
974}
975
976static inline size_t xfrm_sadinfo_msgsize(void)
977{
978 return NLMSG_ALIGN(4)
979 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
980 + nla_total_size(4); /* XFRMA_SAD_CNT */
981}
982
983static int build_sadinfo(struct sk_buff *skb, struct net *net,
984 u32 pid, u32 seq, u32 flags)
985{
986 struct xfrmk_sadinfo si;
987 struct xfrmu_sadhinfo sh;
988 struct nlmsghdr *nlh;
989 u32 *f;
990
991 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
992 if (nlh == NULL) /* shouldn't really happen ... */
993 return -EMSGSIZE;
994
995 f = nlmsg_data(nlh);
996 *f = flags;
997 xfrm_sad_getinfo(net, &si);
998
999 sh.sadhmcnt = si.sadhmcnt;
1000 sh.sadhcnt = si.sadhcnt;
1001
1002 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
1003 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1004
1005 return nlmsg_end(skb, nlh);
1006
1007nla_put_failure:
1008 nlmsg_cancel(skb, nlh);
1009 return -EMSGSIZE;
1010}
1011
1012static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1013 struct nlattr **attrs)
1014{
1015 struct net *net = sock_net(skb->sk);
1016 struct sk_buff *r_skb;
1017 u32 *flags = nlmsg_data(nlh);
1018 u32 spid = NETLINK_CB(skb).pid;
1019 u32 seq = nlh->nlmsg_seq;
1020
1021 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1022 if (r_skb == NULL)
1023 return -ENOMEM;
1024
1025 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
1026 BUG();
1027
1028 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
1029}
1030
1031static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1032 struct nlattr **attrs)
1033{
1034 struct net *net = sock_net(skb->sk);
1035 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1036 struct xfrm_state *x;
1037 struct sk_buff *resp_skb;
1038 int err = -ESRCH;
1039
1040 x = xfrm_user_state_lookup(net, p, attrs, &err);
1041 if (x == NULL)
1042 goto out_noput;
1043
1044 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1045 if (IS_ERR(resp_skb)) {
1046 err = PTR_ERR(resp_skb);
1047 } else {
1048 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1049 }
1050 xfrm_state_put(x);
1051out_noput:
1052 return err;
1053}
1054
1055static int verify_userspi_info(struct xfrm_userspi_info *p)
1056{
1057 switch (p->info.id.proto) {
1058 case IPPROTO_AH:
1059 case IPPROTO_ESP:
1060 break;
1061
1062 case IPPROTO_COMP:
1063 /* IPCOMP spi is 16-bits. */
1064 if (p->max >= 0x10000)
1065 return -EINVAL;
1066 break;
1067
1068 default:
1069 return -EINVAL;
1070 }
1071
1072 if (p->min > p->max)
1073 return -EINVAL;
1074
1075 return 0;
1076}
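/*
 * Example of the IPCOMP bound above (illustrative): IPComp CPIs are
 * carried in a 16-bit field, so a request with p->max >= 0x10000 can
 * never be satisfied and is rejected up front, while AH/ESP SPIs are
 * full 32-bit values and only need min <= max.
 */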
1077
1078static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1079 struct nlattr **attrs)
1080{
1081 struct net *net = sock_net(skb->sk);
1082 struct xfrm_state *x;
1083 struct xfrm_userspi_info *p;
1084 struct sk_buff *resp_skb;
1085 xfrm_address_t *daddr;
1086 int family;
1087 int err;
1088 u32 mark;
1089 struct xfrm_mark m;
1090
1091 p = nlmsg_data(nlh);
1092 err = verify_userspi_info(p);
1093 if (err)
1094 goto out_noput;
1095
1096 family = p->info.family;
1097 daddr = &p->info.id.daddr;
1098
1099 x = NULL;
1100
1101 mark = xfrm_mark_get(attrs, &m);
1102 if (p->info.seq) {
1103 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1104 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
1105 xfrm_state_put(x);
1106 x = NULL;
1107 }
1108 }
1109
1110 if (!x)
1111 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1112 p->info.id.proto, daddr,
1113 &p->info.saddr, 1,
1114 family);
1115 err = -ENOENT;
1116 if (x == NULL)
1117 goto out_noput;
1118
1119 err = xfrm_alloc_spi(x, p->min, p->max);
1120 if (err)
1121 goto out;
1122
1123 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1124 if (IS_ERR(resp_skb)) {
1125 err = PTR_ERR(resp_skb);
1126 goto out;
1127 }
1128
1129 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
1130
1131out:
1132 xfrm_state_put(x);
1133out_noput:
1134 return err;
1135}
1136
1137static int verify_policy_dir(u8 dir)
1138{
1139 switch (dir) {
1140 case XFRM_POLICY_IN:
1141 case XFRM_POLICY_OUT:
1142 case XFRM_POLICY_FWD:
1143 break;
1144
1145 default:
1146 return -EINVAL;
1147 }
1148
1149 return 0;
1150}
1151
1152static int verify_policy_type(u8 type)
1153{
1154 switch (type) {
1155 case XFRM_POLICY_TYPE_MAIN:
1156#ifdef CONFIG_XFRM_SUB_POLICY
1157 case XFRM_POLICY_TYPE_SUB:
1158#endif
1159 break;
1160
1161 default:
1162 return -EINVAL;
1163 }
1164
1165 return 0;
1166}
1167
1168static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1169{
1170 int ret;
1171
1172 switch (p->share) {
1173 case XFRM_SHARE_ANY:
1174 case XFRM_SHARE_SESSION:
1175 case XFRM_SHARE_USER:
1176 case XFRM_SHARE_UNIQUE:
1177 break;
1178
1179 default:
1180 return -EINVAL;
1181 }
1182
1183 switch (p->action) {
1184 case XFRM_POLICY_ALLOW:
1185 case XFRM_POLICY_BLOCK:
1186 break;
1187
1188 default:
1189 return -EINVAL;
1190 }
1191
1192 switch (p->sel.family) {
1193 case AF_INET:
1194 break;
1195
1196 case AF_INET6:
1197#if IS_ENABLED(CONFIG_IPV6)
1198 break;
1199#else
1200 return -EAFNOSUPPORT;
1201#endif
1202
1203 default:
1204 return -EINVAL;
1205 }
// hub remediation: CVE-2019-15666
1207 ret = verify_policy_dir(p->dir);
1208 if (ret)
1209 return ret;
1210 if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
1211 return -EINVAL;
1212
1213 return 0;
1214}
1215
1216static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1217{
1218 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1219 struct xfrm_user_sec_ctx *uctx;
1220
1221 if (!rt)
1222 return 0;
1223
1224 uctx = nla_data(rt);
1225 return security_xfrm_policy_alloc(&pol->security, uctx);
1226}
1227
1228static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1229 int nr)
1230{
1231 int i;
1232
1233 xp->xfrm_nr = nr;
1234 for (i = 0; i < nr; i++, ut++) {
1235 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1236
1237 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1238 memcpy(&t->saddr, &ut->saddr,
1239 sizeof(xfrm_address_t));
1240 t->reqid = ut->reqid;
1241 t->mode = ut->mode;
1242 t->share = ut->share;
1243 t->optional = ut->optional;
1244 t->aalgos = ut->aalgos;
1245 t->ealgos = ut->ealgos;
1246 t->calgos = ut->calgos;
1247 /* If all masks are ~0, then we allow all algorithms. */
1248 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1249 t->encap_family = ut->family;
1250 }
1251}
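/*
 * Sketch of the "allalgs" test above (illustrative): the algorithm masks
 * are 32-bit bitmaps, so only when aalgos, ealgos and calgos are all ~0
 * does the AND stay ~0, making the complement 0 and t->allalgs == 1:
 *
 *   !~(~0u & ~0u & ~0u)  == 1   -> any algorithm is acceptable
 *   !~(~0u & 0x3u & ~0u) == 0   -> the template is restricted
 */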
1252
1253static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1254{
1255 int i;
1256
1257 if (nr > XFRM_MAX_DEPTH)
1258 return -EINVAL;
1259
1260 for (i = 0; i < nr; i++) {
1261 /* We never validated the ut->family value, so many
1262 * applications simply leave it at zero. The check was
1263 * never made and ut->family was ignored because all
1264 * templates could be assumed to have the same family as
1265 * the policy itself. Now that we will have ipv4-in-ipv6
1266 * and ipv6-in-ipv4 tunnels, this is no longer true.
1267 */
1268 if (!ut[i].family)
1269 ut[i].family = family;
1270
1271 switch (ut[i].family) {
1272 case AF_INET:
1273 break;
1274#if IS_ENABLED(CONFIG_IPV6)
1275 case AF_INET6:
1276 break;
1277#endif
1278 default:
1279 return -EINVAL;
1280 }
1281 }
1282
1283 return 0;
1284}
1285
1286static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1287{
1288 struct nlattr *rt = attrs[XFRMA_TMPL];
1289
1290 if (!rt) {
1291 pol->xfrm_nr = 0;
1292 } else {
1293 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1294 int nr = nla_len(rt) / sizeof(*utmpl);
1295 int err;
1296
1297 err = validate_tmpl(nr, utmpl, pol->family);
1298 if (err)
1299 return err;
1300
1301 copy_templates(pol, utmpl, nr);
1302 }
1303 return 0;
1304}
1305
1306static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1307{
1308 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1309 struct xfrm_userpolicy_type *upt;
1310 u8 type = XFRM_POLICY_TYPE_MAIN;
1311 int err;
1312
1313 if (rt) {
1314 upt = nla_data(rt);
1315 type = upt->type;
1316 }
1317
1318 err = verify_policy_type(type);
1319 if (err)
1320 return err;
1321
1322 *tp = type;
1323 return 0;
1324}
1325
1326static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1327{
1328 xp->priority = p->priority;
1329 xp->index = p->index;
1330 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1331 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1332 xp->action = p->action;
1333 xp->flags = p->flags;
1334 xp->family = p->sel.family;
1335 /* XXX xp->share = p->share; */
1336}
1337
1338static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1339{
1340 memset(p, 0, sizeof(*p));
1341 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1342 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1343 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1344 p->priority = xp->priority;
1345 p->index = xp->index;
1346 p->sel.family = xp->family;
1347 p->dir = dir;
1348 p->action = xp->action;
1349 p->flags = xp->flags;
1350 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1351}
1352
1353static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1354{
1355 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1356 int err;
1357
1358 if (!xp) {
1359 *errp = -ENOMEM;
1360 return NULL;
1361 }
1362
1363 copy_from_user_policy(xp, p);
1364
1365 err = copy_from_user_policy_type(&xp->type, attrs);
1366 if (err)
1367 goto error;
1368
1369 if (!(err = copy_from_user_tmpl(xp, attrs)))
1370 err = copy_from_user_sec_ctx(xp, attrs);
1371 if (err)
1372 goto error;
1373
1374 xfrm_mark_get(attrs, &xp->mark);
1375
1376 return xp;
1377 error:
1378 *errp = err;
1379 xp->walk.dead = 1;
1380 xfrm_policy_destroy(xp);
1381 return NULL;
1382}
1383
1384static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1385 struct nlattr **attrs)
1386{
1387 struct net *net = sock_net(skb->sk);
1388 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1389 struct xfrm_policy *xp;
1390 struct km_event c;
1391 int err;
1392 int excl;
1393 uid_t loginuid = audit_get_loginuid(current);
1394 u32 sessionid = audit_get_sessionid(current);
1395 u32 sid;
1396
1397 err = verify_newpolicy_info(p);
1398 if (err)
1399 return err;
1400 err = verify_sec_ctx_len(attrs);
1401 if (err)
1402 return err;
1403
1404 xp = xfrm_policy_construct(net, p, attrs, &err);
1405 if (!xp)
1406 return err;
1407
1408 /* shouldn't excl be based on nlh flags??
1409 * Aha! this is anti-netlink really, i.e. more pfkey-derived;
1410 * in netlink excl is a flag and you wouldn't need
1411 * a type XFRM_MSG_UPDPOLICY - JHS */
1412 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1413 err = xfrm_policy_insert(p->dir, xp, excl);
1414 security_task_getsecid(current, &sid);
1415 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1416
1417 if (err) {
1418 security_xfrm_policy_free(xp->security);
1419 kfree(xp);
1420 return err;
1421 }
1422
1423 c.event = nlh->nlmsg_type;
1424 c.seq = nlh->nlmsg_seq;
1425 c.pid = nlh->nlmsg_pid;
1426 km_policy_notify(xp, p->dir, &c);
1427
1428 xfrm_pol_put(xp);
1429
1430 return 0;
1431}
1432
1433static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1434{
1435 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1436 int i;
1437
1438 if (xp->xfrm_nr == 0)
1439 return 0;
1440
1441 for (i = 0; i < xp->xfrm_nr; i++) {
1442 struct xfrm_user_tmpl *up = &vec[i];
1443 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1444
1445 memset(up, 0, sizeof(*up));
1446 memcpy(&up->id, &kp->id, sizeof(up->id));
1447 up->family = kp->encap_family;
1448 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1449 up->reqid = kp->reqid;
1450 up->mode = kp->mode;
1451 up->share = kp->share;
1452 up->optional = kp->optional;
1453 up->aalgos = kp->aalgos;
1454 up->ealgos = kp->ealgos;
1455 up->calgos = kp->calgos;
1456 }
1457
1458 return nla_put(skb, XFRMA_TMPL,
1459 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1460}
1461
1462static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1463{
1464 if (x->security) {
1465 return copy_sec_ctx(x->security, skb);
1466 }
1467 return 0;
1468}
1469
1470static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1471{
1472 if (xp->security) {
1473 return copy_sec_ctx(xp->security, skb);
1474 }
1475 return 0;
1476}
1477static inline size_t userpolicy_type_attrsize(void)
1478{
1479#ifdef CONFIG_XFRM_SUB_POLICY
1480 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1481#else
1482 return 0;
1483#endif
1484}
1485
1486#ifdef CONFIG_XFRM_SUB_POLICY
1487static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1488{
1489 struct xfrm_userpolicy_type upt = {
1490 .type = type,
1491 };
1492
1493 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1494}
1495
1496#else
1497static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1498{
1499 return 0;
1500}
1501#endif
1502
1503static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1504{
1505 struct xfrm_dump_info *sp = ptr;
1506 struct xfrm_userpolicy_info *p;
1507 struct sk_buff *in_skb = sp->in_skb;
1508 struct sk_buff *skb = sp->out_skb;
1509 struct nlmsghdr *nlh;
1510
1511 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1512 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1513 if (nlh == NULL)
1514 return -EMSGSIZE;
1515
1516 p = nlmsg_data(nlh);
1517 copy_to_user_policy(xp, p, dir);
1518 if (copy_to_user_tmpl(xp, skb) < 0)
1519 goto nlmsg_failure;
1520 if (copy_to_user_sec_ctx(xp, skb))
1521 goto nlmsg_failure;
1522 if (copy_to_user_policy_type(xp->type, skb) < 0)
1523 goto nlmsg_failure;
1524 if (xfrm_mark_put(skb, &xp->mark))
1525 goto nla_put_failure;
1526
1527 nlmsg_end(skb, nlh);
1528 return 0;
1529
1530nla_put_failure:
1531nlmsg_failure:
1532 nlmsg_cancel(skb, nlh);
1533 return -EMSGSIZE;
1534}
1535
1536static int xfrm_dump_policy_done(struct netlink_callback *cb)
1537{
1538 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1539
1540 xfrm_policy_walk_done(walk);
1541 return 0;
1542}
1543
1544static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1545{
1546 struct net *net = sock_net(skb->sk);
1547 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1548 struct xfrm_dump_info info;
1549
1550 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1551 sizeof(cb->args) - sizeof(cb->args[0]));
1552
1553 info.in_skb = cb->skb;
1554 info.out_skb = skb;
1555 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1556 info.nlmsg_flags = NLM_F_MULTI;
1557
1558 if (!cb->args[0]) {
1559 cb->args[0] = 1;
1560 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1561 }
1562
1563 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1564
1565 return skb->len;
1566}
1567
1568static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1569 struct xfrm_policy *xp,
1570 int dir, u32 seq)
1571{
1572 struct xfrm_dump_info info;
1573 struct sk_buff *skb;
1574 int err;
1575
1576 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1577 if (!skb)
1578 return ERR_PTR(-ENOMEM);
1579
1580 info.in_skb = in_skb;
1581 info.out_skb = skb;
1582 info.nlmsg_seq = seq;
1583 info.nlmsg_flags = 0;
1584
1585 err = dump_one_policy(xp, dir, 0, &info);
1586 if (err) {
1587 kfree_skb(skb);
1588 return ERR_PTR(err);
1589 }
1590
1591 return skb;
1592}
1593
1594static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1595 struct nlattr **attrs)
1596{
1597 struct net *net = sock_net(skb->sk);
1598 struct xfrm_policy *xp;
1599 struct xfrm_userpolicy_id *p;
1600 u8 type = XFRM_POLICY_TYPE_MAIN;
1601 int err;
1602 struct km_event c;
1603 int delete;
1604 struct xfrm_mark m;
1605 u32 mark = xfrm_mark_get(attrs, &m);
1606
1607 p = nlmsg_data(nlh);
1608 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1609
1610 err = copy_from_user_policy_type(&type, attrs);
1611 if (err)
1612 return err;
1613
1614 err = verify_policy_dir(p->dir);
1615 if (err)
1616 return err;
1617
1618 if (p->index)
1619 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1620 else {
1621 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1622 struct xfrm_sec_ctx *ctx;
1623
1624 err = verify_sec_ctx_len(attrs);
1625 if (err)
1626 return err;
1627
1628 ctx = NULL;
1629 if (rt) {
1630 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1631
1632 err = security_xfrm_policy_alloc(&ctx, uctx);
1633 if (err)
1634 return err;
1635 }
1636 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1637 ctx, delete, &err);
1638 security_xfrm_policy_free(ctx);
1639 }
1640 if (xp == NULL)
1641 return -ENOENT;
1642
1643 if (!delete) {
1644 struct sk_buff *resp_skb;
1645
1646 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1647 if (IS_ERR(resp_skb)) {
1648 err = PTR_ERR(resp_skb);
1649 } else {
1650 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1651 NETLINK_CB(skb).pid);
1652 }
1653 } else {
1654 uid_t loginuid = audit_get_loginuid(current);
1655 u32 sessionid = audit_get_sessionid(current);
1656 u32 sid;
1657
1658 security_task_getsecid(current, &sid);
1659 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1660 sid);
1661
1662 if (err != 0)
1663 goto out;
1664
1665 c.data.byid = p->index;
1666 c.event = nlh->nlmsg_type;
1667 c.seq = nlh->nlmsg_seq;
1668 c.pid = nlh->nlmsg_pid;
1669 km_policy_notify(xp, p->dir, &c);
1670 }
1671
1672out:
1673 xfrm_pol_put(xp);
1674 return err;
1675}
1676
1677static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1678 struct nlattr **attrs)
1679{
1680 struct net *net = sock_net(skb->sk);
1681 struct km_event c;
1682 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1683 struct xfrm_audit audit_info;
1684 int err;
1685
1686 audit_info.loginuid = audit_get_loginuid(current);
1687 audit_info.sessionid = audit_get_sessionid(current);
1688 security_task_getsecid(current, &audit_info.secid);
1689 err = xfrm_state_flush(net, p->proto, &audit_info);
1690 if (err) {
1691 if (err == -ESRCH) /* empty table */
1692 return 0;
1693 return err;
1694 }
1695 c.data.proto = p->proto;
1696 c.event = nlh->nlmsg_type;
1697 c.seq = nlh->nlmsg_seq;
1698 c.pid = nlh->nlmsg_pid;
1699 c.net = net;
1700 km_state_notify(NULL, &c);
1701
1702 return 0;
1703}
1704
1705static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1706{
1707 size_t replay_size = x->replay_esn ?
1708 xfrm_replay_state_esn_len(x->replay_esn) :
1709 sizeof(struct xfrm_replay_state);
1710
1711 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1712 + nla_total_size(replay_size)
1713 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1714 + nla_total_size(sizeof(struct xfrm_mark))
1715 + nla_total_size(4) /* XFRM_AE_RTHR */
1716 + nla_total_size(4); /* XFRM_AE_ETHR */
1717}
1718
1719static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1720{
1721 struct xfrm_aevent_id *id;
1722 struct nlmsghdr *nlh;
1723
1724 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1725 if (nlh == NULL)
1726 return -EMSGSIZE;
1727
1728 id = nlmsg_data(nlh);
1729 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1730 id->sa_id.spi = x->id.spi;
1731 id->sa_id.family = x->props.family;
1732 id->sa_id.proto = x->id.proto;
1733 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1734 id->reqid = x->props.reqid;
1735 id->flags = c->data.aevent;
1736
1737 if (x->replay_esn)
1738 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
1739 xfrm_replay_state_esn_len(x->replay_esn),
1740 x->replay_esn);
1741 else
1742 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1743
1744 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1745
1746 if (id->flags & XFRM_AE_RTHR)
1747 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1748
1749 if (id->flags & XFRM_AE_ETHR)
1750 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1751 x->replay_maxage * 10 / HZ);
1752
1753 if (xfrm_mark_put(skb, &x->mark))
1754 goto nla_put_failure;
1755
1756 return nlmsg_end(skb, nlh);
1757
1758nla_put_failure:
1759 nlmsg_cancel(skb, nlh);
1760 return -EMSGSIZE;
1761}
1762
1763static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1764 struct nlattr **attrs)
1765{
1766 struct net *net = sock_net(skb->sk);
1767 struct xfrm_state *x;
1768 struct sk_buff *r_skb;
1769 int err;
1770 struct km_event c;
1771 u32 mark;
1772 struct xfrm_mark m;
1773 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1774 struct xfrm_usersa_id *id = &p->sa_id;
1775
1776 mark = xfrm_mark_get(attrs, &m);
1777
1778 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1779 if (x == NULL)
1780 return -ESRCH;
1781
1782 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1783 if (r_skb == NULL) {
1784 xfrm_state_put(x);
1785 return -ENOMEM;
1786 }
1787
1788 /*
1789 * XXX: is this lock really needed - none of the other
1790 * gets lock (the concern is things getting updated
1791 * while we are still reading) - jhs
1792 */
1793 spin_lock_bh(&x->lock);
1794 c.data.aevent = p->flags;
1795 c.seq = nlh->nlmsg_seq;
1796 c.pid = nlh->nlmsg_pid;
1797
1798 if (build_aevent(r_skb, x, &c) < 0)
1799 BUG();
1800 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
1801 spin_unlock_bh(&x->lock);
1802 xfrm_state_put(x);
1803 return err;
1804}
1805
1806static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1807 struct nlattr **attrs)
1808{
1809 struct net *net = sock_net(skb->sk);
1810 struct xfrm_state *x;
1811 struct km_event c;
1812 int err = - EINVAL;
1813 u32 mark = 0;
1814 struct xfrm_mark m;
1815 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1816 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1817 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1818 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1819
1820 if (!lt && !rp && !re)
1821 return err;
1822
1823 /* pedantic mode - thou shalt sayeth replaceth */
1824 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1825 return err;
1826
1827 mark = xfrm_mark_get(attrs, &m);
1828
1829 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1830 if (x == NULL)
1831 return -ESRCH;
1832
1833 if (x->km.state != XFRM_STATE_VALID)
1834 goto out;
1835
1836 err = xfrm_replay_verify_len(x->replay_esn, rp);
1837 if (err)
1838 goto out;
1839
1840 spin_lock_bh(&x->lock);
1841 xfrm_update_ae_params(x, attrs, 1);
1842 spin_unlock_bh(&x->lock);
1843
1844 c.event = nlh->nlmsg_type;
1845 c.seq = nlh->nlmsg_seq;
1846 c.pid = nlh->nlmsg_pid;
1847 c.data.aevent = XFRM_AE_CU;
1848 km_state_notify(x, &c);
1849 err = 0;
1850out:
1851 xfrm_state_put(x);
1852 return err;
1853}
1854
1855static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1856 struct nlattr **attrs)
1857{
1858 struct net *net = sock_net(skb->sk);
1859 struct km_event c;
1860 u8 type = XFRM_POLICY_TYPE_MAIN;
1861 int err;
1862 struct xfrm_audit audit_info;
1863
1864 err = copy_from_user_policy_type(&type, attrs);
1865 if (err)
1866 return err;
1867
1868 audit_info.loginuid = audit_get_loginuid(current);
1869 audit_info.sessionid = audit_get_sessionid(current);
1870 security_task_getsecid(current, &audit_info.secid);
1871 err = xfrm_policy_flush(net, type, &audit_info);
1872 if (err) {
1873 if (err == -ESRCH) /* empty table */
1874 return 0;
1875 return err;
1876 }
1877
1878 c.data.type = type;
1879 c.event = nlh->nlmsg_type;
1880 c.seq = nlh->nlmsg_seq;
1881 c.pid = nlh->nlmsg_pid;
1882 c.net = net;
1883 km_policy_notify(NULL, 0, &c);
1884 return 0;
1885}
1886
1887static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1888 struct nlattr **attrs)
1889{
1890 struct net *net = sock_net(skb->sk);
1891 struct xfrm_policy *xp;
1892 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1893 struct xfrm_userpolicy_info *p = &up->pol;
1894 u8 type = XFRM_POLICY_TYPE_MAIN;
1895 int err = -ENOENT;
1896 struct xfrm_mark m;
1897 u32 mark = xfrm_mark_get(attrs, &m);
1898
1899 err = copy_from_user_policy_type(&type, attrs);
1900 if (err)
1901 return err;
1902
1903 err = verify_policy_dir(p->dir);
1904 if (err)
1905 return err;
1906
1907 if (p->index)
1908 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1909 else {
1910 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1911 struct xfrm_sec_ctx *ctx;
1912
1913 err = verify_sec_ctx_len(attrs);
1914 if (err)
1915 return err;
1916
1917 ctx = NULL;
1918 if (rt) {
1919 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1920
1921 err = security_xfrm_policy_alloc(&ctx, uctx);
1922 if (err)
1923 return err;
1924 }
1925 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1926 &p->sel, ctx, 0, &err);
1927 security_xfrm_policy_free(ctx);
1928 }
1929 if (xp == NULL)
1930 return -ENOENT;
1931
1932 if (unlikely(xp->walk.dead))
1933 goto out;
1934
1935 err = 0;
1936 if (up->hard) {
1937 uid_t loginuid = audit_get_loginuid(current);
1938 u32 sessionid = audit_get_sessionid(current);
1939 u32 sid;
1940
1941 security_task_getsecid(current, &sid);
1942 xfrm_policy_delete(xp, p->dir);
1943 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1944
1945 } else {
1946 // reset the timers here?
1947 WARN(1, "Don't know what to do with soft policy expire\n");
1948 }
1949 km_policy_expired(xp, p->dir, up->hard, current->pid);
1950
1951out:
1952 xfrm_pol_put(xp);
1953 return err;
1954}
1955
1956static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1957 struct nlattr **attrs)
1958{
1959 struct net *net = sock_net(skb->sk);
1960 struct xfrm_state *x;
1961 int err;
1962 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1963 struct xfrm_usersa_info *p = &ue->state;
1964 struct xfrm_mark m;
1965 u32 mark = xfrm_mark_get(attrs, &m);
1966
1967 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1968
1969 err = -ENOENT;
1970 if (x == NULL)
1971 return err;
1972
1973 spin_lock_bh(&x->lock);
1974 err = -EINVAL;
1975 if (x->km.state != XFRM_STATE_VALID)
1976 goto out;
1977 km_state_expired(x, ue->hard, current->pid);
1978
1979 if (ue->hard) {
1980 uid_t loginuid = audit_get_loginuid(current);
1981 u32 sessionid = audit_get_sessionid(current);
1982 u32 sid;
1983
1984 security_task_getsecid(current, &sid);
1985 __xfrm_state_delete(x);
1986 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1987 }
1988 err = 0;
1989out:
1990 spin_unlock_bh(&x->lock);
1991 xfrm_state_put(x);
1992 return err;
1993}
1994
1995static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1996 struct nlattr **attrs)
1997{
1998 struct net *net = sock_net(skb->sk);
1999 struct xfrm_policy *xp;
2000 struct xfrm_user_tmpl *ut;
2001 int i;
2002 struct nlattr *rt = attrs[XFRMA_TMPL];
2003 struct xfrm_mark mark;
2004
2005 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
2006 struct xfrm_state *x = xfrm_state_alloc(net);
2007 int err = -ENOMEM;
2008
2009 if (!x)
2010 goto nomem;
2011
2012 xfrm_mark_get(attrs, &mark);
2013
2014 err = verify_newpolicy_info(&ua->policy);
2015 if (err)
2016 goto bad_policy;
2017
2018 /* build an XP */
2019 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2020 if (!xp)
2021 goto free_state;
2022
2023 memcpy(&x->id, &ua->id, sizeof(ua->id));
2024 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2025 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2026 xp->mark.m = x->mark.m = mark.m;
2027 xp->mark.v = x->mark.v = mark.v;
2028 ut = nla_data(rt);
2029 /* extract the templates and for each call km_key */
2030 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2031 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2032 memcpy(&x->id, &t->id, sizeof(x->id));
2033 x->props.mode = t->mode;
2034 x->props.reqid = t->reqid;
2035 x->props.family = ut->family;
2036 t->aalgos = ua->aalgos;
2037 t->ealgos = ua->ealgos;
2038 t->calgos = ua->calgos;
2039 err = km_query(x, t, xp);
2040
2041 }
2042
2043 kfree(x);
2044 kfree(xp);
2045
2046 return 0;
2047
2048bad_policy:
2049 WARN(1, "BAD policy passed\n");
2050free_state:
2051 kfree(x);
2052nomem:
2053 return err;
2054}
2055
2056#ifdef CONFIG_XFRM_MIGRATE
2057static int copy_from_user_migrate(struct xfrm_migrate *ma,
2058 struct xfrm_kmaddress *k,
2059 struct nlattr **attrs, int *num)
2060{
2061 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2062 struct xfrm_user_migrate *um;
2063 int i, num_migrate;
2064
2065 if (k != NULL) {
2066 struct xfrm_user_kmaddress *uk;
2067
2068 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2069 memcpy(&k->local, &uk->local, sizeof(k->local));
2070 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2071 k->family = uk->family;
2072 k->reserved = uk->reserved;
2073 }
2074
2075 um = nla_data(rt);
2076 num_migrate = nla_len(rt) / sizeof(*um);
2077
2078 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2079 return -EINVAL;
2080
2081 for (i = 0; i < num_migrate; i++, um++, ma++) {
2082 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2083 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2084 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2085 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2086
2087 ma->proto = um->proto;
2088 ma->mode = um->mode;
2089 ma->reqid = um->reqid;
2090
2091 ma->old_family = um->old_family;
2092 ma->new_family = um->new_family;
2093 }
2094
2095 *num = i;
2096 return 0;
2097}
2098
2099static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2100 struct nlattr **attrs)
2101{
2102 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2103 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2104 struct xfrm_kmaddress km, *kmp;
2105 u8 type;
2106 int err;
2107 int n = 0;
2108
2109 if (attrs[XFRMA_MIGRATE] == NULL)
2110 return -EINVAL;
2111
2112 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2113
2114 err = copy_from_user_policy_type(&type, attrs);
2115 if (err)
2116 return err;
2117
2118 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2119 if (err)
2120 return err;
2121
2122 if (!n)
2123 return 0;
2124
2125 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
2126
2127 return 0;
2128}
2129#else
2130static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2131 struct nlattr **attrs)
2132{
2133 return -ENOPROTOOPT;
2134}
2135#endif
2136
2137#ifdef CONFIG_XFRM_MIGRATE
2138static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2139{
2140 struct xfrm_user_migrate um;
2141
2142 memset(&um, 0, sizeof(um));
2143 um.proto = m->proto;
2144 um.mode = m->mode;
2145 um.reqid = m->reqid;
2146 um.old_family = m->old_family;
2147 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2148 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2149 um.new_family = m->new_family;
2150 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2151 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2152
2153 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2154}
2155
2156static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2157{
2158 struct xfrm_user_kmaddress uk;
2159
2160 memset(&uk, 0, sizeof(uk));
2161 uk.family = k->family;
2162 uk.reserved = k->reserved;
2163 memcpy(&uk.local, &k->local, sizeof(uk.local));
2164 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2165
2166 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2167}
2168
2169static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2170{
2171 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2172 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2173 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2174 + userpolicy_type_attrsize();
2175}
2176
2177static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2178 int num_migrate, const struct xfrm_kmaddress *k,
2179 const struct xfrm_selector *sel, u8 dir, u8 type)
2180{
2181 const struct xfrm_migrate *mp;
2182 struct xfrm_userpolicy_id *pol_id;
2183 struct nlmsghdr *nlh;
2184 int i;
2185
2186 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2187 if (nlh == NULL)
2188 return -EMSGSIZE;
2189
2190 pol_id = nlmsg_data(nlh);
2191 /* copy data from selector, dir, and type to the pol_id */
2192 memset(pol_id, 0, sizeof(*pol_id));
2193 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2194 pol_id->dir = dir;
2195
2196 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
2197 goto nlmsg_failure;
2198
2199 if (copy_to_user_policy_type(type, skb) < 0)
2200 goto nlmsg_failure;
2201
2202 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2203 if (copy_to_user_migrate(mp, skb) < 0)
2204 goto nlmsg_failure;
2205 }
2206
2207 return nlmsg_end(skb, nlh);
2208nlmsg_failure:
2209 nlmsg_cancel(skb, nlh);
2210 return -EMSGSIZE;
2211}
2212
2213static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2214 const struct xfrm_migrate *m, int num_migrate,
2215 const struct xfrm_kmaddress *k)
2216{
2217 struct net *net = &init_net;
2218 struct sk_buff *skb;
2219
2220 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2221 if (skb == NULL)
2222 return -ENOMEM;
2223
2224 /* build migrate */
2225 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2226 BUG();
2227
2228 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2229}
2230#else
2231static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2232 const struct xfrm_migrate *m, int num_migrate,
2233 const struct xfrm_kmaddress *k)
2234{
2235 return -ENOPROTOOPT;
2236}
2237#endif
2238
2239#define XMSGSIZE(type) sizeof(struct type)
2240
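/* Minimum length of the type-specific fixed header for each xfrm
 * netlink message; used as the hdrlen when parsing attributes.
 */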
2241static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2242 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2243 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2244 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2245 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2246 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2247 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2248 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2249 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2250 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2251 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2252 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2253 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2254 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2255 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2256 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2257 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2258 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2259 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2260 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2261 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2262};
2263
2264#undef XMSGSIZE
2265
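/* Attribute validation policy for xfrm netlink requests.  Variable
 * length attributes (algorithms, security contexts, ESN replay state)
 * only declare their minimum size here; the verify_* helpers perform
 * the exact length checks.
 */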
2266static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2267 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2268 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2269 [XFRMA_LASTUSED] = { .type = NLA_U64},
2270 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2271 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2272 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2273 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2274 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2275 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2276 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2277 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2278 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2279 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2280 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2281 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2282 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2283 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2284 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2285 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2286 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2287 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2288 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2289 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
2290};
2291
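/* Per-message-type dispatch table: .doit handles a single request,
 * .dump/.done drive NLM_F_DUMP style GETSA/GETPOLICY dumps.
 */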
2292static struct xfrm_link {
2293 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2294 int (*dump)(struct sk_buff *, struct netlink_callback *);
2295 int (*done)(struct netlink_callback *);
2296} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2297 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2298 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2299 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2300 .dump = xfrm_dump_sa,
2301 .done = xfrm_dump_sa_done },
2302 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2303 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2304 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2305 .dump = xfrm_dump_policy,
2306 .done = xfrm_dump_policy_done },
2307 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2308 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2309 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2310 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2311 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2312 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2313 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2314 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2315 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2316 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2317 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2318 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2319 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2320};
2321
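/* Entry point for a single netlink request: bounds-check the message
 * type, require CAP_NET_ADMIN, start a dump for GETSA/GETPOLICY when
 * NLM_F_DUMP is set, otherwise parse the attributes and call .doit.
 */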
2322static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2323{
2324 struct net *net = sock_net(skb->sk);
2325 struct nlattr *attrs[XFRMA_MAX+1];
2326 struct xfrm_link *link;
2327 int type, err;
2328
2329 type = nlh->nlmsg_type;
2330 if (type > XFRM_MSG_MAX)
2331 return -EINVAL;
2332
2333 type -= XFRM_MSG_BASE;
2334 link = &xfrm_dispatch[type];
2335
2336 /* All operations require privileges, even GET */
2337 if (!capable(CAP_NET_ADMIN))
2338 return -EPERM;
2339
2340 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2341 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2342 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2343 if (link->dump == NULL)
2344 return -EINVAL;
2345
2346 {
2347 struct netlink_dump_control c = {
2348 .dump = link->dump,
2349 .done = link->done,
2350 };
2351 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2352 }
2353 }
2354
2355 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2356 xfrma_policy);
2357 if (err < 0)
2358 return err;
2359
2360 if (link->doit == NULL)
2361 return -EINVAL;
2362
2363 return link->doit(skb, nlh, attrs);
2364}
2365
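/* Netlink input callback; xfrm_cfg_mutex serializes all configuration
 * requests arriving over NETLINK_XFRM.
 */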
2366static void xfrm_netlink_rcv(struct sk_buff *skb)
2367{
2368 mutex_lock(&xfrm_cfg_mutex);
2369 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2370 mutex_unlock(&xfrm_cfg_mutex);
2371}
2372
2373static inline size_t xfrm_expire_msgsize(void)
2374{
2375 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2376 + nla_total_size(sizeof(struct xfrm_mark));
2377}
2378
2379static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2380{
2381 struct xfrm_user_expire *ue;
2382 struct nlmsghdr *nlh;
2383
2384 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2385 if (nlh == NULL)
2386 return -EMSGSIZE;
2387
2388 ue = nlmsg_data(nlh);
2389 copy_to_user_state(x, &ue->state);
2390 ue->hard = (c->data.hard != 0) ? 1 : 0;
2391
2392 if (xfrm_mark_put(skb, &x->mark))
2393 goto nla_put_failure;
2394
2395 return nlmsg_end(skb, nlh);
2396
2397nla_put_failure:
2398 return -EMSGSIZE;
2399}
2400
2401static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2402{
2403 struct net *net = xs_net(x);
2404 struct sk_buff *skb;
2405
2406 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2407 if (skb == NULL)
2408 return -ENOMEM;
2409
2410 if (build_expire(skb, x, c) < 0) {
2411 kfree_skb(skb);
2412 return -EMSGSIZE;
2413 }
2414
2415 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2416}
2417
2418static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
2419{
2420 struct net *net = xs_net(x);
2421 struct sk_buff *skb;
2422
2423 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2424 if (skb == NULL)
2425 return -ENOMEM;
2426
2427 if (build_aevent(skb, x, c) < 0)
2428 BUG();
2429
2430 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2431}
2432
2433static int xfrm_notify_sa_flush(const struct km_event *c)
2434{
2435 struct net *net = c->net;
2436 struct xfrm_usersa_flush *p;
2437 struct nlmsghdr *nlh;
2438 struct sk_buff *skb;
2439 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2440
2441 skb = nlmsg_new(len, GFP_ATOMIC);
2442 if (skb == NULL)
2443 return -ENOMEM;
2444
2445 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2446 if (nlh == NULL) {
2447 kfree_skb(skb);
2448 return -EMSGSIZE;
2449 }
2450
2451 p = nlmsg_data(nlh);
2452 p->proto = c->data.proto;
2453
2454 nlmsg_end(skb, nlh);
2455
2456 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2457}
2458
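/* Size of the optional attributes copy_to_user_state_extra() may emit
 * for this SA; used to size SA notification skbs.
 */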
2459static inline size_t xfrm_sa_len(struct xfrm_state *x)
2460{
2461 size_t l = 0;
2462 if (x->aead)
2463 l += nla_total_size(aead_len(x->aead));
2464 if (x->aalg) {
2465 l += nla_total_size(sizeof(struct xfrm_algo) +
2466 (x->aalg->alg_key_len + 7) / 8);
2467 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2468 }
2469 if (x->ealg)
2470 l += nla_total_size(xfrm_alg_len(x->ealg));
2471 if (x->calg)
2472 l += nla_total_size(sizeof(*x->calg));
2473 if (x->encap)
2474 l += nla_total_size(sizeof(*x->encap));
2475 if (x->tfcpad)
2476 l += nla_total_size(sizeof(x->tfcpad));
2477 if (x->replay_esn)
2478 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
2479 if (x->security)
2480 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2481 x->security->ctx_len);
2482 if (x->coaddr)
2483 l += nla_total_size(sizeof(*x->coaddr));
2484
2485 /* Must count x->lastused as it may become non-zero behind our back. */
2486 l += nla_total_size(sizeof(u64));
2487
2488 return l;
2489}
2490
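/* Notify XFRMNLGRP_SA listeners about a new, updated or deleted SA.
 * Deletions carry an xfrm_usersa_id header with the full state nested
 * in an XFRMA_SA attribute.
 */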
2491static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2492{
2493 struct net *net = xs_net(x);
2494 struct xfrm_usersa_info *p;
2495 struct xfrm_usersa_id *id;
2496 struct nlmsghdr *nlh;
2497 struct sk_buff *skb;
2498 int len = xfrm_sa_len(x);
2499 int headlen;
2500
2501 headlen = sizeof(*p);
2502 if (c->event == XFRM_MSG_DELSA) {
2503 len += nla_total_size(headlen);
2504 headlen = sizeof(*id);
2505 len += nla_total_size(sizeof(struct xfrm_mark));
2506 }
2507 len += NLMSG_ALIGN(headlen);
2508
2509 skb = nlmsg_new(len, GFP_ATOMIC);
2510 if (skb == NULL)
2511 return -ENOMEM;
2512
2513 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2514 if (nlh == NULL)
2515 goto nla_put_failure;
2516
2517 p = nlmsg_data(nlh);
2518 if (c->event == XFRM_MSG_DELSA) {
2519 struct nlattr *attr;
2520
2521 id = nlmsg_data(nlh);
2522 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2523 id->spi = x->id.spi;
2524 id->family = x->props.family;
2525 id->proto = x->id.proto;
2526
2527 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2528 if (attr == NULL)
2529 goto nla_put_failure;
2530
2531 p = nla_data(attr);
2532 }
2533
2534 if (copy_to_user_state_extra(x, p, skb))
2535 goto nla_put_failure;
2536
2537 nlmsg_end(skb, nlh);
2538
2539 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2540
2541nla_put_failure:
2542 /* Somebody screwed up with xfrm_sa_len! */
2543 WARN_ON(1);
2544 kfree_skb(skb);
2545 return -1;
2546}
2547
2548static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
2549{
2550
2551 switch (c->event) {
2552 case XFRM_MSG_EXPIRE:
2553 return xfrm_exp_state_notify(x, c);
2554 case XFRM_MSG_NEWAE:
2555 return xfrm_aevent_state_notify(x, c);
2556 case XFRM_MSG_DELSA:
2557 case XFRM_MSG_UPDSA:
2558 case XFRM_MSG_NEWSA:
2559 return xfrm_notify_sa(x, c);
2560 case XFRM_MSG_FLUSHSA:
2561 return xfrm_notify_sa_flush(c);
2562 default:
2563 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2564 c->event);
2565 break;
2566 }
2567
2568 return 0;
2569
2570}
2571
2572static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2573 struct xfrm_policy *xp)
2574{
2575 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2576 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2577 + nla_total_size(sizeof(struct xfrm_mark))
2578 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2579 + userpolicy_type_attrsize();
2580}
2581
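/* Build an XFRM_MSG_ACQUIRE message asking key managers listening on
 * XFRMNLGRP_ACQUIRE to negotiate an SA for the triggering state,
 * template and policy.
 */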
2582static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2583 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2584 int dir)
2585{
2586 struct xfrm_user_acquire *ua;
2587 struct nlmsghdr *nlh;
2588 __u32 seq = xfrm_get_acqseq();
2589
2590 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2591 if (nlh == NULL)
2592 return -EMSGSIZE;
2593
2594 ua = nlmsg_data(nlh);
2595 memcpy(&ua->id, &x->id, sizeof(ua->id));
2596 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2597 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2598 copy_to_user_policy(xp, &ua->policy, dir);
2599 ua->aalgos = xt->aalgos;
2600 ua->ealgos = xt->ealgos;
2601 ua->calgos = xt->calgos;
2602 ua->seq = x->km.seq = seq;
2603
2604 if (copy_to_user_tmpl(xp, skb) < 0)
2605 goto nlmsg_failure;
2606 if (copy_to_user_state_sec_ctx(x, skb))
2607 goto nlmsg_failure;
2608 if (copy_to_user_policy_type(xp->type, skb) < 0)
2609 goto nlmsg_failure;
2610 if (xfrm_mark_put(skb, &xp->mark))
2611 goto nla_put_failure;
2612
2613 return nlmsg_end(skb, nlh);
2614
2615nla_put_failure:
2616nlmsg_failure:
2617 nlmsg_cancel(skb, nlh);
2618 return -EMSGSIZE;
2619}
2620
2621static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2622 struct xfrm_policy *xp, int dir)
2623{
2624 struct net *net = xs_net(x);
2625 struct sk_buff *skb;
2626
2627 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2628 if (skb == NULL)
2629 return -ENOMEM;
2630
2631 if (build_acquire(skb, x, xt, xp, dir) < 0)
2632 BUG();
2633
2634 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2635}
2636
2637/* User gives us a struct xfrm_userpolicy_info immediately followed
2638 * by an array of zero or more struct xfrm_user_tmpl entries.
2639 */
2640static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2641 u8 *data, int len, int *dir)
2642{
2643 struct net *net = sock_net(sk);
2644 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2645 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2646 struct xfrm_policy *xp;
2647 int nr;
2648
2649 switch (sk->sk_family) {
2650 case AF_INET:
2651 if (opt != IP_XFRM_POLICY) {
2652 *dir = -EOPNOTSUPP;
2653 return NULL;
2654 }
2655 break;
2656#if IS_ENABLED(CONFIG_IPV6)
2657 case AF_INET6:
2658 if (opt != IPV6_XFRM_POLICY) {
2659 *dir = -EOPNOTSUPP;
2660 return NULL;
2661 }
2662 break;
2663#endif
2664 default:
2665 *dir = -EINVAL;
2666 return NULL;
2667 }
2668
2669 *dir = -EINVAL;
2670
2671 if (len < sizeof(*p) ||
2672 verify_newpolicy_info(p))
2673 return NULL;
2674
2675 nr = ((len - sizeof(*p)) / sizeof(*ut));
2676 if (validate_tmpl(nr, ut, p->sel.family))
2677 return NULL;
2678
2679 if (p->dir > XFRM_POLICY_OUT)
2680 return NULL;
2681
2682 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2683 if (xp == NULL) {
2684 *dir = -ENOBUFS;
2685 return NULL;
2686 }
2687
2688 copy_from_user_policy(xp, p);
2689 xp->type = XFRM_POLICY_TYPE_MAIN;
2690 copy_templates(xp, ut, nr);
2691
2692 *dir = p->dir;
2693
2694 return xp;
2695}
2696
2697static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2698{
2699 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2700 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2701 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2702 + nla_total_size(sizeof(struct xfrm_mark))
2703 + userpolicy_type_attrsize();
2704}
2705
2706static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2707 int dir, const struct km_event *c)
2708{
2709 struct xfrm_user_polexpire *upe;
2710 struct nlmsghdr *nlh;
2711 int hard = c->data.hard;
2712
2713 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2714 if (nlh == NULL)
2715 return -EMSGSIZE;
2716
2717 upe = nlmsg_data(nlh);
2718 copy_to_user_policy(xp, &upe->pol, dir);
2719 if (copy_to_user_tmpl(xp, skb) < 0)
2720 goto nlmsg_failure;
2721 if (copy_to_user_sec_ctx(xp, skb))
2722 goto nlmsg_failure;
2723 if (copy_to_user_policy_type(xp->type, skb) < 0)
2724 goto nlmsg_failure;
2725 if (xfrm_mark_put(skb, &xp->mark))
2726 goto nla_put_failure;
2727 upe->hard = !!hard;
2728
2729 return nlmsg_end(skb, nlh);
2730
2731nla_put_failure:
2732nlmsg_failure:
2733 nlmsg_cancel(skb, nlh);
2734 return -EMSGSIZE;
2735}
2736
2737static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2738{
2739 struct net *net = xp_net(xp);
2740 struct sk_buff *skb;
2741
2742 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2743 if (skb == NULL)
2744 return -ENOMEM;
2745
2746 if (build_polexpire(skb, xp, dir, c) < 0)
2747 BUG();
2748
2749 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2750}
2751
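/* Notify XFRMNLGRP_POLICY listeners about a policy add, update or
 * delete.  As with SAs, deletions use an xfrm_userpolicy_id header and
 * nest the full policy in an XFRMA_POLICY attribute.
 */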
2752static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2753{
2754 struct net *net = xp_net(xp);
2755 struct xfrm_userpolicy_info *p;
2756 struct xfrm_userpolicy_id *id;
2757 struct nlmsghdr *nlh;
2758 struct sk_buff *skb;
2759 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2760 int headlen;
2761
2762 headlen = sizeof(*p);
2763 if (c->event == XFRM_MSG_DELPOLICY) {
2764 len += nla_total_size(headlen);
2765 headlen = sizeof(*id);
2766 }
2767 len += userpolicy_type_attrsize();
2768 len += nla_total_size(sizeof(struct xfrm_mark));
2769 len += NLMSG_ALIGN(headlen);
2770
2771 skb = nlmsg_new(len, GFP_ATOMIC);
2772 if (skb == NULL)
2773 return -ENOMEM;
2774
2775 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2776 if (nlh == NULL)
2777 goto nlmsg_failure;
2778
2779 p = nlmsg_data(nlh);
2780 if (c->event == XFRM_MSG_DELPOLICY) {
2781 struct nlattr *attr;
2782
2783 id = nlmsg_data(nlh);
2784 memset(id, 0, sizeof(*id));
2785 id->dir = dir;
2786 if (c->data.byid)
2787 id->index = xp->index;
2788 else
2789 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2790
2791 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2792 if (attr == NULL)
2793 goto nlmsg_failure;
2794
2795 p = nla_data(attr);
2796 }
2797
2798 copy_to_user_policy(xp, p, dir);
2799 if (copy_to_user_tmpl(xp, skb) < 0)
2800 goto nlmsg_failure;
2801 if (copy_to_user_policy_type(xp->type, skb) < 0)
2802 goto nlmsg_failure;
2803
2804 if (xfrm_mark_put(skb, &xp->mark))
2805 goto nla_put_failure;
2806
2807 nlmsg_end(skb, nlh);
2808
2809 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2810
2811nla_put_failure:
2812nlmsg_failure:
2813 kfree_skb(skb);
2814 return -1;
2815}
2816
2817static int xfrm_notify_policy_flush(const struct km_event *c)
2818{
2819 struct net *net = c->net;
2820 struct nlmsghdr *nlh;
2821 struct sk_buff *skb;
2822
2823 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2824 if (skb == NULL)
2825 return -ENOMEM;
2826
2827 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2828 if (nlh == NULL)
2829 goto nlmsg_failure;
2830 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2831 goto nlmsg_failure;
2832
2833 nlmsg_end(skb, nlh);
2834
2835 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2836
2837nlmsg_failure:
2838 kfree_skb(skb);
2839 return -1;
2840}
2841
2842static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2843{
2844
2845 switch (c->event) {
2846 case XFRM_MSG_NEWPOLICY:
2847 case XFRM_MSG_UPDPOLICY:
2848 case XFRM_MSG_DELPOLICY:
2849 return xfrm_notify_policy(xp, dir, c);
2850 case XFRM_MSG_FLUSHPOLICY:
2851 return xfrm_notify_policy_flush(c);
2852 case XFRM_MSG_POLEXPIRE:
2853 return xfrm_exp_policy_notify(xp, dir, c);
2854 default:
2855 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2856 c->event);
2857 }
2858
2859 return 0;
2860
2861}
2862
2863static inline size_t xfrm_report_msgsize(void)
2864{
2865 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2866}
2867
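/* XFRM_MSG_REPORT: push a protocol, selector and optional care-of
 * address (XFRMA_COADDR) to XFRMNLGRP_REPORT listeners.
 */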
2868static int build_report(struct sk_buff *skb, u8 proto,
2869 struct xfrm_selector *sel, xfrm_address_t *addr)
2870{
2871 struct xfrm_user_report *ur;
2872 struct nlmsghdr *nlh;
2873
2874 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2875 if (nlh == NULL)
2876 return -EMSGSIZE;
2877
2878 ur = nlmsg_data(nlh);
2879 ur->proto = proto;
2880 memcpy(&ur->sel, sel, sizeof(ur->sel));
2881
2882 if (addr)
2883 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2884
2885 return nlmsg_end(skb, nlh);
2886
2887nla_put_failure:
2888 nlmsg_cancel(skb, nlh);
2889 return -EMSGSIZE;
2890}
2891
2892static int xfrm_send_report(struct net *net, u8 proto,
2893 struct xfrm_selector *sel, xfrm_address_t *addr)
2894{
2895 struct sk_buff *skb;
2896
2897 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2898 if (skb == NULL)
2899 return -ENOMEM;
2900
2901 if (build_report(skb, proto, sel, addr) < 0)
2902 BUG();
2903
2904 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2905}
2906
2907static inline size_t xfrm_mapping_msgsize(void)
2908{
2909 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2910}
2911
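/* XFRM_MSG_MAPPING: tell XFRMNLGRP_MAPPING listeners that the peer
 * behind a UDP-encapsulated ESP SA changed its source address/port
 * (a NAT mapping change).
 */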
2912static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2913 xfrm_address_t *new_saddr, __be16 new_sport)
2914{
2915 struct xfrm_user_mapping *um;
2916 struct nlmsghdr *nlh;
2917
2918 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2919 if (nlh == NULL)
2920 return -EMSGSIZE;
2921
2922 um = nlmsg_data(nlh);
2923
2924 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2925 um->id.spi = x->id.spi;
2926 um->id.family = x->props.family;
2927 um->id.proto = x->id.proto;
2928 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2929 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2930 um->new_sport = new_sport;
2931 um->old_sport = x->encap->encap_sport;
2932 um->reqid = x->props.reqid;
2933
2934 return nlmsg_end(skb, nlh);
2935}
2936
2937static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2938 __be16 sport)
2939{
2940 struct net *net = xs_net(x);
2941 struct sk_buff *skb;
2942
2943 if (x->id.proto != IPPROTO_ESP)
2944 return -EINVAL;
2945
2946 if (!x->encap)
2947 return -EINVAL;
2948
2949 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2950 if (skb == NULL)
2951 return -ENOMEM;
2952
2953 if (build_mapping(skb, x, ipaddr, sport) < 0)
2954 BUG();
2955
2956 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2957}
2958
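/* The netlink key-manager hooks registered with the xfrm core in
 * xfrm_user_init().
 */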
2959static struct xfrm_mgr netlink_mgr = {
2960 .id = "netlink",
2961 .notify = xfrm_send_state_notify,
2962 .acquire = xfrm_send_acquire,
2963 .compile_policy = xfrm_compile_policy,
2964 .notify_policy = xfrm_send_policy_notify,
2965 .report = xfrm_send_report,
2966 .migrate = xfrm_send_migrate,
2967 .new_mapping = xfrm_send_mapping,
2968};
2969
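/* Create the kernel-side NETLINK_XFRM socket for this network
 * namespace.
 */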
2970static int __net_init xfrm_user_net_init(struct net *net)
2971{
2972 struct sock *nlsk;
2973
2974 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
2975 xfrm_netlink_rcv, NULL, THIS_MODULE);
2976 if (nlsk == NULL)
2977 return -ENOMEM;
2978 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
2979 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2980 return 0;
2981}
2982
2983static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
2984{
2985 struct net *net;
2986 list_for_each_entry(net, net_exit_list, exit_list)
2987 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
2988 synchronize_net();
2989 list_for_each_entry(net, net_exit_list, exit_list)
2990 netlink_kernel_release(net->xfrm.nlsk_stash);
2991}
2992
2993static struct pernet_operations xfrm_user_net_ops = {
2994 .init = xfrm_user_net_init,
2995 .exit_batch = xfrm_user_net_exit,
2996};
2997
2998static int __init xfrm_user_init(void)
2999{
3000 int rv;
3001
3002 printk(KERN_INFO "Initializing XFRM netlink socket\n");
3003
3004 rv = register_pernet_subsys(&xfrm_user_net_ops);
3005 if (rv < 0)
3006 return rv;
3007 rv = xfrm_register_km(&netlink_mgr);
3008 if (rv < 0)
3009 unregister_pernet_subsys(&xfrm_user_net_ops);
3010 return rv;
3011}
3012
3013static void __exit xfrm_user_exit(void)
3014{
3015 xfrm_unregister_km(&netlink_mgr);
3016 unregister_pernet_subsys(&xfrm_user_net_ops);
3017}
3018
3019module_init(xfrm_user_init);
3020module_exit(xfrm_user_exit);
3021MODULE_LICENSE("GPL");
3022MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
3023